ipynb/BASNet_test.ipynb
###Markdown ================================ separator line ====================================== BASNet ###Code # %cd drive/MyDrive/TibaMe/'Phantom Captcher' # !git clone https://github.com/NathanUA/BASNet.git # %cd /content/drive/MyDrive/TibaMe/'Phantom Captcher'/BASNet !python /content/drive/MyDrive/TibaMe/'Phantom Captcher'/BASNet/basnet_train.py # %cd /content/ # %cd /content/drive/MyDrive/TibaMe/'Phantom Captcher'/BASNet !python /content/drive/MyDrive/TibaMe/'Phantom Captcher'/BASNet/basnet_test.py # %cd /content/ ###Output _____no_output_____
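###Markdown The commands above assume Google Drive is already mounted at `/content/drive` in the Colab session. A typical mount step (not part of the original notebook, shown here only as a hedged sketch) looks like this: ###Code # Hypothetical setup cell: mount Google Drive so the paths above become accessible from google.colab import drive drive.mount('/content/drive') ###Output _____no_output_____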
Logistic-Regression.ipynb
###Markdown CSGO Match Prediction with Logistic Regression ###Code import numpy as np import matplotlib import matplotlib.pyplot as plt import pandas as pd from sklearn import datasets, linear_model from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split %matplotlib inline ###Output _____no_output_____ ###Markdown Loading Data ###Code df = pd.read_csv('./economy.csv', low_memory=False) winners = df.T.apply(pd.Series.last_valid_index) # Get winner of the match winners_col = [] # Creation of the winners column for the dataset winnerarr = np.array(winners) for i in range(df.shape[0]): winners_col.append(df[winnerarr[i]][i]) #winners_col.append(df["team_" + str(int(df[winnerarr[i]][i]))][i]) df['match_winner'] = winners_col round_win_col_names = [ # Rounds we are not parsing, we only care about rounds until halftime '16_winner', '17_winner', '18_winner', '19_winner', '20_winner', '21_winner', '22_winner', '23_winner', '24_winner', '25_winner', '26_winner', '27_winner', '28_winner', '29_winner', '30_winner'] df = df.drop(columns=round_win_col_names) df = df.drop(columns=["best_of", "date", "t2_start"]) df = df.drop(columns=["match_id", "event_id"]) droprounds = ['16_t1', '17_t1', '18_t1', '19_t1', '20_t1', '21_t1', '22_t1', '23_t1', '24_t1', '25_t1', '26_t1', '27_t1', '28_t1', '29_t1', '30_t1', '16_t2', '17_t2', '18_t2', '19_t2', '20_t2', '21_t2', '22_t2', '23_t2', '24_t2', '25_t2', '26_t2', '27_t2', '28_t2', '29_t2', '30_t2'] df = df.drop(columns=droprounds) colnames = list(df.columns) #print(colnames) xnames = colnames[:len(colnames) - 1] #print(xnames) X = df[xnames] set_of_teams = set(list(X['team_1']) + list(X['team_2'])) team_to_num = dict(zip(set_of_teams, range(len(set_of_teams)))) #Replace team's name with their number X['team_1'] = X['team_1'].replace(team_to_num) X['team_2'] = X['team_2'].replace(team_to_num) y = df["match_winner"] #Convert from t1_start to t1_t and convert from t,ct to binary X['t1_t'] = X['t1_start'].apply(lambda x: 1 if x == 't' else 0) X = X.drop('t1_start', axis=1) #Replace map names with numbers map_to_num = dict(zip(X['_map'].unique(), range(len(X['_map'].unique())))) X['_map'] = X['_map'].replace(map_to_num) X y ###Output _____no_output_____ ###Markdown *** One-Hot Conversion (Encoding Teams) ###Code # # Convert the team names back into strings again # import gc # num_to_team = dict(zip(range(len(set_of_teams)), set_of_teams)) # all_teams = [num_to_team[i] for i in range(len(set_of_teams))] # # Extract the team data from the original DataFrame # team_1_arr = np.array(X["team_1"]) # team_2_arr = np.array(X["team_2"]) # # Create the one-hot vectors for each row of the data, one for each team # one_hot_t1 = np.zeros((team_1_arr.size, len(num_to_team))) # one_hot_t1[np.arange(team_1_arr.size), team_1_arr] = 1 # one_hot_t2 = np.zeros((team_2_arr.size, len(num_to_team))) # one_hot_t2[np.arange(team_2_arr.size), team_2_arr] = 1 # # Combine the matrix of one-hot vectors for team_1 and team_2 together to create a single matrix # one_hot = np.hstack((one_hot_t1, one_hot_t2)) # # Turn the numpy array into a pandas DataFrame and add it to the dataset # one_hot_df = pd.DataFrame(one_hot, columns = ["team_1_%s" %(i) for i in all_teams] + ["team_2_%s" %(i) for i in all_teams]) # X = X.drop(columns = ["team_1", "team_2"]) # X = X.join(one_hot_df) # # Freeing memory for training and test dataset splits # del team_1_arr # del team_2_arr # del num_to_team # del all_teams # del one_hot_t1 # del one_hot_t2 # del one_hot # del 
one_hot_df # gc.collect() ###Output _____no_output_____ ###Markdown *** ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) scaler = StandardScaler() X_train= scaler.fit_transform(X_train) X_test = scaler.transform(X_test) ###Output _____no_output_____ ###Markdown LogReg w/ No Regularization ###Code logreg_ridge_none = linear_model.LogisticRegression(penalty='none') # No regularization logreg_ridge_none.fit(X_train, y_train) yhat_train = logreg_ridge_none.predict(X_train) yhat = logreg_ridge_none.predict(X_test)# the predict method will return 0 or 1 acc = np.mean(yhat == y_test) acc_train = np.mean(yhat_train == y_train) print("Accuracy on training data = %f" % acc_train) print("Accuracy on test data = %f" % acc) print("The regularization parameter:", logreg_ridge_none.C) print('Accuracy on the test data is {0:f}'.format(acc)) W_l1 = logreg_ridge_none.coef_ data = {'feature': xnames, 'slope': np.squeeze(W_l1)} dfslope = pd.DataFrame(data=data) dfslope # For the test data we can see the TP, FP, FN, and TN counts in the confusion matrix from sklearn.metrics import confusion_matrix confusion_matrix(y_test, yhat) from sklearn.datasets import make_classification from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score cv = KFold(n_splits=10, random_state=1, shuffle=True) scores = cross_val_score(logreg_ridge_none, X, y, scoring='accuracy', cv=cv, n_jobs=-1) print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores))) from sklearn.metrics import precision_recall_fscore_support # Find precision, recall and fscore using the precision_recall_fscore_support method of sklearn # using y_test and the test-set predictions yhat prec,recal,fscore,_= precision_recall_fscore_support(y_test,yhat,average='binary') print('prec: ', prec) print('recal: ', recal) print('fscore: ', fscore) ###Output prec: 0.7904057771664375 recal: 0.7936809392265194 fscore: 0.792039972432805 ###Markdown L1 Regularization ###Code logreg_ridge_l1 = linear_model.LogisticRegression(solver='liblinear', penalty='l1',warm_start=True, C = 0.001) # L1 regularization logreg_ridge_l1.fit(X_train, y_train) yhat_l1 = logreg_ridge_l1.predict(X_test) # the predict method will return 0 or 1 acc = np.mean(yhat_l1 == y_test) print("Accuracy on test data = %f" % acc) print("The regularization parameter:", logreg_ridge_l1.C) print('Accuracy on the test data is {0:f}'.format(acc)) W_l1 = logreg_ridge_l1.coef_ data = {'feature': xnames, 'slope': np.squeeze(W_l1)} dfslope = pd.DataFrame(data=data) dfslope # For the test data we can see the TP, FP, FN, and TN counts in the confusion matrix from sklearn.metrics import confusion_matrix confusion_matrix(y_test, yhat_l1) from sklearn.datasets import make_classification from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score cv = KFold(n_splits=10, random_state=1, shuffle=True) scores = cross_val_score(logreg_ridge_l1, X, y, scoring='accuracy', cv=cv, n_jobs=-1) print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores))) from sklearn.metrics import precision_recall_fscore_support # Find precision, recall and fscore using the precision_recall_fscore_support method of sklearn # using y_test and the test-set predictions yhat_l1 prec,recal,fscore,_= precision_recall_fscore_support(y_test,yhat_l1,average='binary') print('prec: ', prec) print('recal: ', recal) print('fscore: ', fscore) ###Output prec: 0.7994685562444641 recal: 0.7791781767955801 fscore: 0.789192970184489 ###Markdown L2 Regularization ###Code logreg_ridge_l2 =
linear_model.LogisticRegression(penalty='l2', C = 0.001) # L2 regularization logreg_ridge_l2.fit(X_train, y_train) yhat_l2 = logreg_ridge_l2.predict(X_test) # the predict method will return 0 or 1 acc = np.mean(yhat_l2 == y_test) print("Accuracy on test data = %f" % acc) print("The regularization parameter:", logreg_ridge_l2.C) print('Accuracy on the test data is {0:f}'.format(acc)) W_l1 = logreg_ridge_l2.coef_ data = {'feature': xnames, 'slope': np.squeeze(W_l1)} dfslope = pd.DataFrame(data=data) dfslope # For the test data we can see the TP, FP, FN, and TN counts in the confusion matrix from sklearn.metrics import confusion_matrix confusion_matrix(y_test, yhat_l2) from sklearn.datasets import make_classification from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score cv = KFold(n_splits=10, random_state=1, shuffle=True) scores = cross_val_score(logreg_ridge_l2, X, y, scoring='accuracy', cv=cv, n_jobs=-1) print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores))) from sklearn.metrics import precision_recall_fscore_support # Find precision, recall and fscore using the precision_recall_fscore_support method of sklearn # using y_test and the test-set predictions yhat_l2 prec,recal,fscore,_= precision_recall_fscore_support(y_test,yhat_l2,average='binary') print('prec: ', prec) print('recal: ', recal) print('fscore: ', fscore) ###Output prec: 0.7829091521995618 recal: 0.8019682320441989 fscore: 0.7923240938166312 ###Markdown *** Polynomial Transformation Fit ###Code # Import useful polynomial library import numpy.polynomial.polynomial as poly ###Output _____no_output_____ ###Markdown Loading Data ###Code df = pd.read_csv('./economy.csv', low_memory=False) winners = df.T.apply(pd.Series.last_valid_index) winners_col = [] winnerarr = np.array(winners) for i in range(df.shape[0]): winners_col.append(df[winnerarr[i]][i]) #winners_col.append(df["team_" + str(int(df[winnerarr[i]][i]))][i]) df['match_winner'] = winners_col round_win_col_names = [ '16_winner', '17_winner', '18_winner', '19_winner', '20_winner', '21_winner', '22_winner', '23_winner', '24_winner', '25_winner', '26_winner', '27_winner', '28_winner', '29_winner', '30_winner'] df = df.drop(columns=round_win_col_names) df = df.drop(columns=["best_of", "date", "t2_start"]) df = df.drop(columns=["match_id", "event_id"]) droprounds = ['16_t1', '17_t1', '18_t1', '19_t1', '20_t1', '21_t1', '22_t1', '23_t1', '24_t1', '25_t1', '26_t1', '27_t1', '28_t1', '29_t1', '30_t1', '16_t2', '17_t2', '18_t2', '19_t2', '20_t2', '21_t2', '22_t2', '23_t2', '24_t2', '25_t2', '26_t2', '27_t2', '28_t2', '29_t2', '30_t2'] df = df.drop(columns=droprounds) df = df.sample(frac = .55) colnames = list(df.columns) #print(colnames) xnames = colnames[:len(colnames) - 1] #print(xnames) X = df[xnames] set_of_teams = set(list(X['team_1']) + list(X['team_2'])) team_to_num = dict(zip(set_of_teams, range(len(set_of_teams)))) # Replace each team's name with its number X['team_1'] = X['team_1'].replace(team_to_num) X['team_2'] = X['team_2'].replace(team_to_num) y = df["match_winner"] #Convert from t1_start to t1_t and convert from t,ct to binary X['t1_t'] = X['t1_start'].apply(lambda x: 1 if x == 't' else 0) X = X.drop('t1_start', axis=1) #Replace map names with numbers map_to_num = dict(zip(X['_map'].unique(), range(len(X['_map'].unique())))) X['_map'] = X['_map'].replace(map_to_num) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) scaler = StandardScaler() X_train= scaler.fit_transform(X_train) X_test = scaler.transform(X_test)
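# Note: the scaler is fit on the training split only and reused to transform the test split, so no test-set statistics leak into the training features.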
from sklearn.metrics import mean_squared_error from sklearn.preprocessing import PolynomialFeatures from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report import gc rmses = [] degrees = [1, 2, 3] min_rmse, min_deg = 1e10, 0 for deg in degrees: # Train features poly_features = PolynomialFeatures(degree=deg, include_bias=False) x_poly_train = poly_features.fit_transform(X_train) # Logistic regression poly_reg = linear_model.LogisticRegression(penalty='l2', C = 0.01) poly_reg.fit(x_poly_train, y_train) # Compare with test data (transform only; the features were already fit on the training data) x_poly_test = poly_features.transform(X_test) print("Fit Deg: ", deg) predict_train = poly_reg.predict(x_poly_train) predict_test = poly_reg.predict(x_poly_test) print("Predict Deg: ", deg) poly_mse = mean_squared_error(y_test, predict_test) poly_rmse = np.sqrt(poly_mse) rmses.append(poly_rmse) print("Training Set Results") print(confusion_matrix(y_train,predict_train)) print(classification_report(y_train,predict_train)) print(confusion_matrix(y_test,predict_test)) print(classification_report(y_test,predict_test)) # Keep track of the degree with the lowest test RMSE if min_rmse > poly_rmse: min_rmse = poly_rmse min_deg = deg del poly_features del x_poly_train del poly_reg del x_poly_test del predict_test del predict_train gc.collect() # Plot and present results print('Best degree {} with RMSE {}'.format(min_deg, min_rmse)) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(degrees, rmses) ax.set_yscale('log') ax.set_xlabel('Degree') ax.set_ylabel('RMSE') # from sklearn.preprocessing import PolynomialFeatures # # create transform # trans = PolynomialFeatures(degree=min_deg) # # fit and transform # X = trans.fit_transform(X) # print('Degree: %d, Features: %d' % (min_deg, X.shape[1])) ###Output _____no_output_____ ###Markdown Logistic Regression ###Code import numpy as np import pandas as pd import matplotlib as mpl import matplotlib.pyplot as plt import seaborn as sns import nltk # for text manipulation from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV, train_test_split # from sklearn.pipeline import Pipeline from sklearn import metrics from sklearn.metrics import classification_report from sklearn.metrics import f1_score, roc_auc_score, roc_curve, average_precision_score, precision_recall_curve # keep the same random state for reproducibility RANDOM_STATE = 12 TRAIN_TEST_SPLIT_SIZE = .1 ###Output _____no_output_____ ###Markdown **Logistic regression**Logistic Regression is a classification algorithm. It is used to predict a binary outcome (1 / 0, Yes / No, True / False) given a set of independent variables. We can also think of logistic regression as a special case of linear regression when the outcome variable is categorical, where we are using the log of odds as the dependent variable.
In simple words, it predicts the probability of occurrence of an event by fitting data to a logit function. For this part I will perform the following steps:- Evaluate the logistic regression- Tune the regularization strength parameter with cross-validated grid-search for each type of embedding- Check the predictions against the actual values *****Utility Functions***** In order to evaluate the performance of the model, I will take a look at the following metrics:- Precision: the fraction of relevant instances among the retrieved instances.- Recall: the fraction of the total amount of relevant instances that were actually retrieved.- F1 Score: the harmonic mean of precision and recall. Since it is difficult to compare two models when one has low precision and high recall or vice versa, the F-score makes them comparable by measuring recall and precision at the same time. It uses the harmonic mean in place of the arithmetic mean, punishing extreme values more. The F1 score is highly influenced by which class is labeled as positive. - Confusion Matrix: a summary table that breaks down the number of correct and incorrect predictions by each class. For imbalanced classification problems, the majority class is typically referred to as the negative outcome (e.g. “no change” or “negative test result”), and the minority class is typically referred to as the positive outcome (e.g. “change” or “positive test result”). The confusion matrix provides more insight into not only the performance of a predictive model, but also which classes are being predicted correctly, which incorrectly, and what type of errors are being made. The simplest confusion matrix is for a two-class classification problem, with negative (class 0) and positive (class 1) classes. I will reverse the positive and the negative labels in the calculation of the confusion matrix to reflect my actual labeling of the classes (the majority class is label 0 and the minority class is label 1).- ROC: a plot that illustrates the true positive rate against the false positive rate at various threshold settings. The area under the curve (AUC) indicates the probability that the classifier will rank a randomly chosen positive observation higher than a randomly chosen negative one.
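As a minimal, self-contained sketch of these metrics (the labels and predictions below are made up for illustration, not taken from the project data): ###Code from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix # Hypothetical labels and predictions, purely to illustrate the metrics above y_true = [0, 0, 0, 1, 1, 1, 1, 0] y_hat = [0, 1, 0, 1, 0, 1, 1, 0] print('precision:', precision_score(y_true, y_hat)) # TP / (TP + FP) print('recall:', recall_score(y_true, y_hat)) # TP / (TP + FN) print('f1:', f1_score(y_true, y_hat)) # harmonic mean of precision and recall print(confusion_matrix(y_true, y_hat)) # rows: actual class, columns: predicted class ###Output _____no_output_____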
###Code mpl.rcParams['figure.figsize'] = (12, 10) colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] def plot_cfm(labels, predictions, p = 0.5): cfm = metrics.confusion_matrix(labels, predictions > p) plt.figure(figsize = (5,5)) sns.heatmap(cfm, annot = True, fmt = "d") plt.title('Confusion matrix @{:.2f}'.format(p)) plt.ylabel('Actual values') plt.xlabel('Predicted values') print('True Negatives: ', cfm[0][0]) print('False Positives: ', cfm[0][1]) print('False Negatives: ', cfm[1][0]) print('True Positives: ', cfm[1][1]) def plot_roc(name, labels, predictions, **kwargs): fp, tp, _ = roc_curve(labels, predictions) roc_auc = metrics.auc(fp, tp) plt.title('Receiver Operating Characteristic') plt.plot(fp, tp, label = name + ' AUC = %0.2f' % roc_auc, linewidth=2, **kwargs) plt.legend(loc = 'lower right') plt.plot([0, 1], [0, 1], linewidth=2, color=colors[6], linestyle=':') plt.xlabel('False positives [%]') plt.ylabel('True positives [%]') plt.grid(True) ax = plt.gca() ax.set_aspect('equal') ###Output _____no_output_____ ###Markdown *****TF-IDF***** **Prepare the datasets** Here we will use the TF-IDF extracted features and combine the train and validation data sets again, since we will use GridSearchCV with built-in cross-validation to look for the best parameters. ###Code train_tfidf_features = np.load('train_dataset_tfidf_features.npy', allow_pickle=True) train_tfidf_labels = np.load('train_dataset_tfidf_labels.npy', allow_pickle=True) test_tfidf_features = np.load('test_dataset_tfidf_features.npy', allow_pickle=True) test_tfidf_labels = np.load('test_dataset_tfidf_labels.npy', allow_pickle=True) val_tfidf_features = np.load('val_dataset_tfidf_features.npy', allow_pickle=True) val_tfidf_labels = np.load('val_dataset_tfidf_labels.npy', allow_pickle=True) # Create X/y arrays x_train_tfidf_u = train_tfidf_features.item() y_train_tfidf_u = train_tfidf_labels print('x:', x_train_tfidf_u.shape, x_train_tfidf_u.dtype) print('y:', y_train_tfidf_u.shape, y_train_tfidf_u.dtype) # Create X/y arrays x_test_tfidf = test_tfidf_features.item() y_test_tfidf = test_tfidf_labels print('x:', x_test_tfidf.shape, x_test_tfidf.dtype) print('y:', y_test_tfidf.shape, y_test_tfidf.dtype) # Create X/y arrays x_val_tfidf = val_tfidf_features.item() y_val_tfidf = val_tfidf_labels print('x:', x_val_tfidf.shape, x_val_tfidf.dtype) print('y:', y_val_tfidf.shape, y_val_tfidf.dtype) import scipy x_train_tfidf = scipy.sparse.vstack([x_train_tfidf_u, x_val_tfidf]) y_train_tfidf = np.hstack((y_train_tfidf_u, y_val_tfidf)) print('x:', x_train_tfidf.shape, x_train_tfidf.dtype) print('y:', y_train_tfidf.shape, y_train_tfidf.dtype) ###Output x: (68383, 25000) float64 y: (68383,) int64 ###Markdown Normally, at this stage I would normalize the input features using the sklearn StandardScaler. This would set the mean to 0 and the standard deviation to 1. However, "smart" vectorizers such as TF-IDF are actually already doing that: the idf transformation is supposed to provide a reasonable kind of data scaling. There is no guarantee which one will be better, but in general, TF-IDF should be enough. **Hyperparameter Tuning** Logistic regression does not really have any critical hyperparameters to tune. The C parameter controls the penalty strength. It is typically a positive floating-point number (1.0 by default) that defines the relative strength of regularization. Smaller values indicate stronger regularization; bigger values indicate weaker regularization.
I will try with a very strong and a very weak regularization strength, and pass ten C values on a log scale between 10^(-2) and 10^4. I will also set the cv strategy to 5-fold cross-validation, which is stratified by default for classification tasks. cv=5 is also the standard value, which has been shown to yield test error rate estimates that suffer neither from excessively high bias nor from very high variance. I also use accuracy and F1 as scoring, but refit on F1. **Solver Options** Scikit-learn ships with five different solvers. Each solver tries to find the parameter weights that minimize a cost function. Here is a brief overview of the five options:- newton-cg — A Newton method. Newton methods use an exact Hessian matrix. It's slow for large datasets, because it computes the second derivatives.- lbfgs — Stands for Limited-memory Broyden–Fletcher–Goldfarb–Shanno. It approximates the second derivative matrix updates with gradient evaluations. It stores only the last few updates, so it saves memory. It isn't super fast with large data sets. It is the default solver as of Scikit-learn version 0.22.0.- liblinear — Library for Large Linear Classification. Uses a coordinate descent algorithm. Coordinate descent is based on minimizing a multivariate function by solving univariate optimization problems in a loop. In other words, it moves toward the minimum in one direction at a time. It is the default solver for Scikit-learn versions earlier than 0.22.0. It performs pretty well with high dimensionality, but it does have a number of drawbacks: it can get stuck, and it is unable to run in parallel.- sag — Stochastic Average Gradient descent. A variation of gradient descent and incremental aggregated gradient approaches that uses a random sample of previous gradient values. Fast for big datasets.- saga — Extension of sag that also allows for L1 regularization. Should generally train faster than sag. ###Code SCORING = {'Accuracy': 'accuracy', 'F1': 'f1'} C_SPACE = np.logspace(-2, 4, num = 10) PARAMETERS = { 'C': C_SPACE, 'solver': ['lbfgs', 'liblinear', 'saga'], } MAX_ITER = 1000 lr = LogisticRegression(max_iter = MAX_ITER) gs = GridSearchCV(lr, PARAMETERS, cv = 5, scoring = SCORING, refit = 'F1', return_train_score = True, verbose = 3, n_jobs = -1) gs.fit(x_train_tfidf, y_train_tfidf) print(gs.best_params_) print(gs.best_score_) print(gs.best_estimator_) logreg = gs.best_estimator_ # Fit it to train data logreg.fit(x_train_tfidf, y_train_tfidf) y_pred_test_tfidf = logreg.predict(x_test_tfidf) y_probas_train_tfidf = logreg.decision_function(x_train_tfidf) y_probas_test_tfidf = logreg.decision_function(x_test_tfidf) plot_cfm(y_test_tfidf, y_pred_test_tfidf) print(classification_report(y_test_tfidf, y_pred_test_tfidf)) f1_tfidf = f1_score(y_test_tfidf, y_pred_test_tfidf, average = 'weighted') print('F1: %.2f' % f1_tfidf) auc_tfidf = roc_auc_score(y_test_tfidf, y_probas_test_tfidf) print('AUC: %.2f' % auc_tfidf) ###Output AUC: 0.99 ###Markdown The AUC-ROC curve is a performance measurement for classification problems at various threshold settings. ROC is a probability curve, and AUC represents the degree of separability: it tells how capable the model is of distinguishing between classes. The higher the AUC, the better the model is at predicting 0's as 0's and 1's as 1's.
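To make the ranking interpretation of AUC concrete, here is a small self-contained sketch (with made-up scores) checking that the AUC equals the fraction of (positive, negative) pairs in which the positive observation receives the higher score: ###Code import numpy as np from sklearn.metrics import roc_auc_score # Made-up labels and scores, purely to illustrate the ranking interpretation labels = np.array([0, 0, 1, 1, 0, 1]) scores = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.7]) pos, neg = scores[labels == 1], scores[labels == 0] # Fraction of positive-negative pairs ranked correctly (ties would count half) pair_fraction = (pos[:, None] > neg[None, :]).mean() + 0.5 * (pos[:, None] == neg[None, :]).mean() print(pair_fraction, roc_auc_score(labels, scores)) # the two values agree ###Output _____no_output_____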
###Code plot_roc("Train", y_train_tfidf, y_probas_train_tfidf, color = colors[0]) plot_roc("Test", y_test_tfidf, y_probas_test_tfidf, color = colors[0], linestyle = '--') ###Output _____no_output_____ ###Markdown The AUC score for the test data set, as well as its AUC-ROC curve, are very consistent with the training data set, and the model performs very well. ###Code import joblib # save joblib.dump(logreg, "./logreg.joblib") # Saving model to current directory # Pickle serializes objects so they can be saved to a file, and loaded in a program again later on. import pickle pickle.dump(logreg, open('logreg.pkl','wb')) #Loading model to compare the results #model = pickle.load(open('model.pkl','rb')) ###Output _____no_output_____ ###Markdown Load Data ###Code import numpy as np, pandas as pd, os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import StratifiedKFold from sklearn.metrics import roc_auc_score train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train.head() ###Output _____no_output_____ ###Markdown Logistic Regression without interactions ###Code cols = [c for c in train.columns if c not in ['id', 'target']] oof = np.zeros(len(train)) skf = StratifiedKFold(n_splits=5, random_state=42) for train_index, test_index in skf.split(train.iloc[:,1:-1], train['target']): clf = LogisticRegression(solver='liblinear',penalty='l2',C=1.0) clf.fit(train.loc[train_index][cols],train.loc[train_index]['target']) oof[test_index] = clf.predict_proba(train.loc[test_index][cols])[:,1] auc = roc_auc_score(train['target'],oof) print('LR without interactions scores CV =',round(auc,5)) # INITIALIZE VARIABLES cols.remove('test-data') interactions = np.zeros((512,255)) oof = np.zeros(len(train)) preds = np.zeros(len(test)) # BUILD 512 SEPARATE MODELS for i in range(512): # ONLY TRAIN WITH DATA WHERE TEST-DATA EQUALS I train2 = train[train['test-data']==i] test2 = test[test['test-data']==i] idx1 = train2.index; idx2 = test2.index train2.reset_index(drop=True,inplace=True) test2.reset_index(drop=True,inplace=True) skf = StratifiedKFold(n_splits=25, random_state=42) for train_index, test_index in skf.split(train2.iloc[:,1:-1], train2['target']): # LOGISTIC REGRESSION MODEL clf = LogisticRegression(solver='liblinear',penalty='l1',C=0.05) clf.fit(train2.loc[train_index][cols],train2.loc[train_index]['target']) oof[idx1[test_index]] = clf.predict_proba(train2.loc[test_index][cols])[:,1] preds[idx2] += clf.predict_proba(test2[cols])[:,1] / 25.0 # RECORD INTERACTIONS for j in range(255): if clf.coef_[0][j]>0: interactions[i,j] = 1 elif clf.coef_[0][j]<0: interactions[i,j] = -1 #if i%25==0: print(i) # PRINT CV AUC auc = roc_auc_score(train['target'],oof) print('LR with interactions scores CV =',round(auc,5)) ###Output _____no_output_____ ###Markdown Submit Predictions ###Code sub = pd.read_csv('../input/sample_submission.csv') sub['target'] = preds sub.to_csv('submission.csv',index=False) ###Output _____no_output_____ ###Markdown Logistic Regression: Introduction: Logistic regression is a linear classifier which belongs to the family of discriminative machine learning models. It tries to learn $P(y|x)$ from data and makes predictions using a linear threshold unit, i.e.$$h(x) = \begin{cases} 1 & w_{1}x_{1}+w_{2}x_{2}+..+w_{d}x_{d}\geqslant 0 \\ 0 & otherwise \end{cases}$$ Logistic regression learns the weight of each feature in the data set and uses the sigmoid function to transform the predicted values into probabilities.
A linear function can have a range of $(-\infty, +\infty)$, but it is transformed to [0,1] using the sigmoid function.$$sigmoid(wx) = \frac{1}{1+ e^{-wx}}$$The log-likelihood for logistic regression is given by:$$\ell(w) = \sum_{i=1}^{N} \left[(1-y_{i})\log(1-p(x_{i},w)) + y_{i}\log p(x_{i},w)\right]$$its gradient is given by:$$\frac{\partial \ell(w)}{\partial w_{j}} = \sum_{i=1}^{N}(y_{i}-p(x_{i},w))x_{ij}$$and the weights are updated by gradient ascent:$$w_{t+1} = w_{t} + \alpha \cdot gradient$$where $\alpha$ is the learning rate. Python Implementation:**Let us first implement a few functions that we will need before implementing logistic regression** ###Code 'Import libraries' from __future__ import division import numpy as np import pandas as pd from collections import defaultdict import copy import time from sklearn.cross_validation import ShuffleSplit from sklearn.preprocessing import StandardScaler from itertools import izip 'Utils functions' def paddingData(data): ''' :param data: Data to be padded :return: Padded data with value 1 in the first column ''' return np.c_[np.ones(data.shape[0]), data] def setWeights(numFeat): ''' :param numFeat: Total number of features in data :return: vector of ones of length equal to number of features in data ''' return np.ones(numFeat).reshape((numFeat, 1)) def splitData(test_size,cv, numpoints): #This function from sklearn takes the length of the data and test size and returns bootstrapped indices #depending on how many bootstraps are required ''' :param test_size: size of the test data required (value between 0 and 1). :param cv: Number of re-shuffling. :param numpoints: Total number of data points. :return: indices of the shuffled splits. ''' ss = ShuffleSplit(n=numpoints, n_iter=cv, test_size=test_size, random_state=32) return ss def calAccuracy(pred,ytest): ''' :param pred: vector containing all the predicted classes :param ytest: vector containing all the true classes :return: accuracy of classification ''' count = 0 for i,j in izip(pred,ytest): if i==j: count +=1 return count/(len(ytest)) def sigmoid(a): ''' :param a: vector (w.x) :return: sigmoid transfer of the value ''' return 1/(1+np.exp(-a)) ###Output _____no_output_____ ###Markdown Logistic Regression: ###Code 'Implements logistic regression' class logisticregression(): def __init__(self,tol=0.0001,alpha = 0.01): ''' :param weights: weight vector :param tol: tolerance with the default value of 0.0001 :param alpha: learning rate with the default value of 0.01 ''' self.weights = None self.tolerance = tol self.alpha = alpha def fit(self,Xtrain,ytrain): 'Start time' start = time.time() 'Padding of input data' Xtrain = paddingData(Xtrain) self.weights = setWeights(Xtrain.shape[1]) 'save the number of passes over data' run = 0 while True: run +=1 'predict using the current weights' predict = np.dot(Xtrain,self.weights) 'calculate the probability of the data point belonging to class 1 (in the binary case)' prob = sigmoid(predict) 'calculate the error' error = ytrain - prob gradient = np.dot(error.T ,Xtrain) / ytrain.shape[0] temp = self.weights + self.alpha* gradient.T step = np.linalg.norm(np.subtract(self.weights, temp)) self.weights = temp if step < self.tolerance: break end = time.time() print 'Time taken to fit data:',end-start print 'Number of passes over data:', run def predict(self,Xtest): 'Pad the test data' Xtest = paddingData(Xtest) 'predict using the learned weights and convert it into a probability' pred = sigmoid(Xtest.dot(self.weights)) pred[pred > 0.5 ] = 1 pred[pred <= 0.5] = 0 return pred ###Output _____no_output_____ ###Markdown Load the data set
###Code from sklearn.datasets import load_breast_cancer ''' Classes 2 Samples per class 212(M),357(B) Samples total 569 Dimensionality 30 Features real, positive ''' breastcancer = load_breast_cancer() numSamples, numFeat = breastcancer.data.shape ss = splitData(test_size=0.25,cv=1, numpoints=numSamples) for train_index, test_index in ss: Xtrain = breastcancer.data[train_index, :] ytrain = breastcancer.target[train_index].reshape((train_index.shape[0], 1)) Xtest = breastcancer.data[test_index, :] ytest = breastcancer.target[test_index].reshape((test_index.shape[0], 1)) 'Normalize the data to zero mean and unit variance' scalar = StandardScaler() Xtrain = scalar.fit_transform(Xtrain) Xtest = scalar.transform(Xtest) ###Output _____no_output_____ ###Markdown Running our algorithm: I have also compared the performance of our logistic regression implementation with sklearn's logistic regression as a sanity check. ###Code clf = logisticregression() clf.fit(Xtrain,ytrain) pred = clf.predict(Xtest) print 'Accuracy:',calAccuracy(pred,ytest)*100 from sklearn.linear_model import LogisticRegression lgr = LogisticRegression() lgr.fit(Xtrain,ytrain) p = lgr.predict(Xtest) print 'Accuracy:',calAccuracy(p,ytest)*100 ###Output Accuracy: 99.3006993007 ###Markdown Download the MNIST data set and load it into these variables ###Code import numpy as np import matplotlib.pyplot as plt import tensorflow as tf # TF1-style API is used below (tf.Session, tf.placeholder) (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() fig, axes= plt.subplots(1,4, figsize=(7,3)) for img, label, ax in zip(x_train[:4], y_train[:4], axes): ax.set_title(label) ax.imshow(img) ax.axis('off') plt.show() ###Output _____no_output_____ ###Markdown We must flatten the images and scale them to the range 0-1 ###Code x_train = x_train.reshape(60000, 784) / 255 x_test = x_test.reshape(10000, 784) / 255 ###Output _____no_output_____ ###Markdown Create a one-hot array for the y-values (an array of 10 elements) ###Code with tf.Session() as sesh: y_train = sesh.run(tf.one_hot(y_train, 10)) y_test = sesh.run(tf.one_hot(y_test, 10)) y_train[:4] # hyper parameters learning_rate = 0.01 epochs = 50 # Divide the total number of pictures by the batch size to get the number of batches batch_size = 100 batches = int(x_train.shape[0] / batch_size) ###Output _____no_output_____ ###Markdown Y is a 10-element list. x is a 784-element list since we flattened it. W is a matrix of size 784 x 10. b is a 10-element vector. y = Wx + b Inputs: X is the "flattened / normalized" images, Y is the "one hot" labels ###Code X = tf.placeholder(tf.float32, [None, 784]) Y = tf.placeholder(tf.float32, [None, 10]) W = tf.Variable(0.1 * np.random.randn(784, 10).astype(np.float32)) B = tf.Variable(0.1 * np.random.randn(10).astype(np.float32)) ###Output _____no_output_____ ###Markdown The softmax function converts all prediction scores to probabilities and makes the sum of the probabilities equal to 1.
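As a quick numpy illustration of softmax with made-up scores (the TensorFlow version used by the model follows below): ###Code import numpy as np scores = np.array([2.0, 1.0, 0.1]) # hypothetical prediction scores probs = np.exp(scores) / np.exp(scores).sum() # softmax; subtract scores.max() first for numerical stability with large scores print(probs, probs.sum()) # probabilities that sum to 1 ###Output _____no_output_____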
###Code pred = tf.nn.softmax(tf.add(tf.matmul(X,W), B)) cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(pred), axis=1)) # tf.log() is the natural log optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) ###Output _____no_output_____ ###Markdown Explanation for Cost Function: $C = \sum -Y\ln(pred)$ What the natural log function looks like ###Code x = np.linspace(1/100, 1, 100) #(start, stop, num) plt.plot(x, np.log(x)) # Natural log plt.show() ###Output _____no_output_____ ###Markdown Example Data: ###Code a = np.log([[0.04, 0.13 ,0.96, 0.12], # Top array is a correct prediction, bottom array is incorrect [0.01, 0.93, 0.06, 0.07]]) # pred b = np.array([[ 0, 0, 1, 0], [ 1, 0, 0, 0]]) # labels - a * b ###Output _____no_output_____ ###Markdown When you take the log of a value close to 1 you get a number close to 0; when that is multiplied by the label array you get a negative value close to 0. If the prediction is incorrect, you'll take the log of a low number and get a big negative number, which, after the multiplication by 1 and the negative sign in the cost function, becomes a big positive number. A correct prediction should give a low value, and an incorrect prediction should give a high value. The code below is the same as -tf.reduce_sum(Y * tf.log(pred), axis=1); tf.reduce_sum is just the sum and tf.reduce_mean is just the mean. We take the average to determine the overall cost. If predictions are correct, they will be closer to 0, so the mean will be lower. More incorrect predictions will raise the mean. With this function, confident wrong predictions are penalized more than confident right predictions are rewarded. ###Code r_sum = np.sum(-a * b, axis =1) r_mean = np.mean(r_sum) print(r_sum) print(r_mean) # Cost with tf.Session() as sesh: sesh.run(tf.global_variables_initializer()) for epoch in range(epochs): for i in range(batches): x = x_train[i * batch_size: (i + 1) * batch_size] # Loop through all the batches every epoch y = y_train[i * batch_size: (i + 1) * batch_size] sesh.run(optimizer, feed_dict = {X: x, Y: y}) c = sesh.run(cost, feed_dict={X: x, Y: y}) if not epoch % 1: print(f'epoch:{epoch} cost={c:.4f}') correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(Y,1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # Loop through test data to determine accuracy acc = accuracy.eval({X: x_test, Y: y_test}) print("Final Accuracy: ",acc * 100) ###Output epoch:0 cost=0.9574 epoch:1 cost=0.7036 epoch:2 cost=0.5961 epoch:3 cost=0.5347 epoch:4 cost=0.4946 epoch:5 cost=0.4661 epoch:6 cost=0.4448 epoch:7 cost=0.4282 epoch:8 cost=0.4149 epoch:9 cost=0.4041 epoch:10 cost=0.3950 epoch:11 cost=0.3873 epoch:12 cost=0.3807 epoch:13 cost=0.3749 epoch:14 cost=0.3699 epoch:15 cost=0.3654 epoch:16 cost=0.3614 epoch:17 cost=0.3577 epoch:18 cost=0.3545 epoch:19 cost=0.3515 epoch:20 cost=0.3487 epoch:21 cost=0.3462 epoch:22 cost=0.3439 epoch:23 cost=0.3417 epoch:24 cost=0.3397 epoch:25 cost=0.3378 epoch:26 cost=0.3360 epoch:27 cost=0.3343 epoch:28 cost=0.3328 epoch:29 cost=0.3313 epoch:30 cost=0.3298 epoch:31 cost=0.3285 epoch:32 cost=0.3272 epoch:33 cost=0.3260 epoch:34 cost=0.3248 epoch:35 cost=0.3237 epoch:36 cost=0.3227 epoch:37 cost=0.3216 epoch:38 cost=0.3206 epoch:39 cost=0.3197 epoch:40 cost=0.3188 epoch:41 cost=0.3179 epoch:42 cost=0.3170 epoch:43 cost=0.3162 epoch:44 cost=0.3154 epoch:45 cost=0.3146 epoch:46 cost=0.3139 epoch:47 cost=0.3132 epoch:48 cost=0.3125 epoch:49 cost=0.3118 Final Accuracy: 91.82000160217285 ###Markdown Logistic regression tutorial ###Code ## Do **not** change this
cell, and do **not** import ## any other modules anywhere in the notebook. import numpy as np import numpy.random as rn from scipy import optimize, stats import scipy.linalg as linalg import matplotlib.pyplot as plt %matplotlib inline ###Output _____no_output_____ ###Markdown In this tutorial we're going to cover the basics behind logistic regression. For simplicity we will only consider the binary classification case, in which target variables are $y \in \{0,1\}$. In logistic regression, the probability of a data point $\boldsymbol x$ being of class 1 is given by$$p(y = 1 | \boldsymbol x, \boldsymbol\theta) = \sigma (\boldsymbol x^\top \boldsymbol\theta) ~ ,$$where $\sigma(z) = 1/(1+\exp(-z))$ is the _sigmoid_ function. Combining this with a Bernoulli likelihood and summing over all datapoints $\{\boldsymbol x_i, y_i\}_{i=1}^N$ we end up with a negative log-likelihood function that looks like this:$$-\log p(\boldsymbol y|\boldsymbol X, \boldsymbol\theta) = -\sum_i\left(y_i \log \sigma(\boldsymbol x_i^\top \boldsymbol\theta) + (1 - y_i) \log ( 1 - \sigma(\boldsymbol x_i^\top \boldsymbol\theta))\right)$$You will see this expression in many other classification problems, especially in deep learning, where it's known as the _cross-entropy loss_. Your goal in this tutorial is to learn how to perform inference over the parameters $\boldsymbol\theta$ in logistic regression, including point estimates $\boldsymbol\theta_{\mathrm{ML}}$ and $\boldsymbol\theta_{\mathrm{MAP}}$ and approximations to the posterior $p(\boldsymbol\theta | \boldsymbol X, \boldsymbol y)$. Let's do it. Maximum likelihood estimate Let's start easy. First, let's generate a toy 1D binary dataset with two parameters:* A **jitter** parameter that controls how noisy the data are; and* An **offset** parameter that controls the separation between the two classes. ###Code # Data generation parameters N = 50 D = 2 jitter = 0.7 offset = 1.2 # Generate the data x = np.vstack([rn.normal(0, jitter, (N//2,1)), rn.normal(offset, jitter, (N//2,1))]) y = np.vstack([np.zeros((N//2, 1)), np.ones((N//2, 1))]) x_test = np.linspace(-2, offset + 2).reshape(-1,1) # Make the augmented data matrix by adding a column of ones x = np.hstack([np.ones((N,1)), x]) x_test = np.hstack([np.ones((N,1)), x_test]) ###Output _____no_output_____ ###Markdown Now on to the regression. First, let's code up the logistic log-likelihood as a separate function. This will come in handy.**Task 1*** Write a function to calculate the log-likelihood of a dataset given a value of $\boldsymbol\theta$. ###Code ## EDIT THIS FUNCTION def log_likelihood(X, y, theta): # X: N x D matrix of training inputs # y: N x 1 vector of training targets/observations # theta: parameters (D x 1) # returns: log likelihood, scalar theta = theta.reshape(-1,1) sigm = 1/(1+ np.exp(- X @ theta)) B = y * np.log(sigm) + (1-y) * (np.log(1-sigm)) L = np.sum(B) ## <-- EDIT THIS LINE return L ###Output _____no_output_____ ###Markdown Now it's time to optimize it to fit the maximum likelihood parameter,$$\boldsymbol\theta_{\mathrm{ML}} = \arg\max_{\boldsymbol\theta} p(\boldsymbol y | \boldsymbol X, \boldsymbol\theta)$$For linear regression, the likelihood function had a closed-form solution, which made our lives easy. Alas, that is not the case for logistic regression. We will have to resort to _numerical optimization_. In the lectures you saw how to derive the gradient and all that jazz. For this tutorial you can do it that way, or any other way you want.
The optimization is convex, so this should be easy peasy.**Task 2*** Write a function to optimize the log-likelihood function you've written above and obtain $\boldsymbol\theta_{\mathrm{ML}}$. Use any optimizer of your choice. ###Code ## EDIT THIS FUNCTION def max_lik_estimate(X, y): # X: N x D matrix of training inputs # y: N x 1 vector of training targets/observations # returns: maximum likelihood parameters (D x 1) N, D = X.shape theta_ml = optimize.minimize(lambda theta: -log_likelihood(X, y, theta) ,np.zeros(D), method ='BFGS') ## <-- EDIT THIS LINE theta_ml = (theta_ml.x).reshape(-1,1) return theta_ml ###Output _____no_output_____ ###Markdown **Task 3*** Write a predict function to evaluate your estimate. ###Code ## EDIT THIS FUNCTION def predict(X, theta): # Xtest: K x D matrix of test inputs # theta: D x 1 vector of parameters # returns: prediction of f(Xtest); K x 1 vector prediction = 1/(1+ np.exp(- X @ theta)) ## <-- EDIT THIS LINE K, D=X.shape for i in range(K): if (prediction[i] >= 0.5): prediction[i]=1 else: prediction[i]=0 return prediction ###Output _____no_output_____ ###Markdown With this we're in a good position to fit a logistic regression to our toy dataset and start visualising the results. Have a go.1. Use the function you wrote above to estimate $\boldsymbol\theta_{\mathrm{ML}}$ on the toy dataset.2. Visualize the results, including: 1. The data $x$ and target labels $y$. 2. The labels predicted by the model. 3. The probability assigned by the model, $\sigma(x\theta)$ as a function of $x$. ###Code ## ADD CODE HERE # Fit and plot the logistic regression plt.figure(figsize=(10,8)) theta_estim = max_lik_estimate(x, y) plt.plot(x[:,1],y,'.', label='data vs target label'); ypredict= predict(x, theta_estim) plt.plot(x[:,1], ypredict,'+', label='data vs predicted label'); sigm = 1/(1+ np.exp(-x @ theta_estim)) plt.plot(x[:,1], sigm, '.', label='probability'); acc= ((y==predict(x,theta_estim)).mean()) print( "The accuracy of the model is :", acc) t=-theta_estim[0]/theta_estim[1] plt.axvline(t, label='Decision boundary'); plt.legend(loc = 'best'); l_fun = lambda th: log_likelihood(x, y, np.array([[0],[th]])) plt.plot(x[:,1], [l_fun(th) for th in x[:,1]], '.', label='log-likelihood') plt.legend(loc = 'best'); ###Output _____no_output_____ ###Markdown There you go! That should be a nice and easy fit. There are a few things we can start playing with at this point:* Evaluate the performance of your model: plot the decision boundary, likelihood and accuracy on held-out test sets, etc.* Write a gradient-based and a non-gradient-based optimizer. Do they arrive at the same result? Which one takes longer? Which one evaluates the likelihood function more times? (Warning: if the plot looks odd and you get several warnings, it may be that the data is linearly separable and the sigmoid is saturating, leading to `np.log(0)` numerical problems. Add more noise and retry.) Bayesian logistic regression MAP estimate Now let's move to Bayesian inference on the parameters $\boldsymbol\theta$. Let's put a prior on them. Because that's what we do. We put priors on things. More specifically, let's use a Gaussian prior parametrized by a mean $\boldsymbol m$ and a variance $\boldsymbol S$:$$\boldsymbol\theta \sim \mathcal{N}(\boldsymbol m, \boldsymbol S)$$Given that $\boldsymbol\theta_{\mathrm{ML}}$ had no analytical solution, it should really come as no surprise that $\boldsymbol\theta_{\mathrm{MAP}}$ doesn't either. That should be no problem for a machine learning expert like you:**Task 4**1.
Write down the equation for the full unnormalized posterior $p(\boldsymbol\theta | \boldsymbol X, \boldsymbol y) \propto p(\boldsymbol y | \boldsymbol\theta, \boldsymbol X) p(\boldsymbol\theta)$.2. Write a separate function for it, as we did with the log-likelihood above.3. Optimize it to find $\boldsymbol\theta_{\mathrm{MAP}}$ and use it to make predictions. ###Code ## EDIT THIS FUNCTION def unnormalized_posterior(X,y,theta,m,s): # X: N x D matrix of training inputs # y: N x 1 vector of training targets/observations # theta: parameters (D x 1) # m : mean (D x1) # S : covariance (D x D) # returns: negative log posterior (up to an additive constant), scalar theta = theta.reshape(-1,1) inv = np.linalg.inv(s) logp = (-0.5 *(theta-m).T @ inv @ (theta-m))+log_likelihood(X, y, theta) return -logp def map_estimate(X, y, m, S): # X: N x D matrix of training inputs # y: N x 1 vector of training targets/observations # m: D x 1 prior mean of parameters # S: D x D prior covariance of parameters # returns: maximum a posteriori parameters (D x 1) N, D = X.shape theta_map = optimize.minimize(lambda theta: unnormalized_posterior(X,y,theta,m,S) ,np.zeros(D), method ='BFGS') ## <-- EDIT THIS LINE theta_map = (theta_map.x).reshape(-1,1) return theta_map ###Output _____no_output_____ ###Markdown Now you can perform a similar model evaluation as you did before. How does your prior influence the MAP estimate and the model's performance? ###Code ## ADD CODE HERE # Fit and plot the MAP logistic regression estimate m = np.zeros((D, 1)) S = 5*np.eye(D) plt.figure(figsize=(10,8)) theta_est = map_estimate(x, y, m, S) plt.plot(x[:,1],y,'.', label='data vs target label'); ytes= predict(x, theta_est) plt.plot(x[:,1], ytes,'+', label='data vs predicted label'); sig = 1/(1+ np.exp(-x @ theta_est)) plt.plot(x[:,1], sig, '.', label='probability'); ac= ((y==predict(x,theta_est.reshape(-1,1))).mean()) print( "The accuracy of the model is :", ac) t1=-theta_est[0]/theta_est[1] plt.axvline(t1, label='Decision boundary'); plt.legend(loc = 'best'); ###Output The accuracy of the model is : 0.82 ###Markdown The Laplace approximation As we have hinted above, in logistic regression the posterior distribution over $\boldsymbol\theta$ doesn't have an analytical solution. This is the first example in the course of _approximate Bayesian inference_: the exact posterior is analytically intractable, so we have to approximate it using one of various techniques. The one we'll use in this part of the tutorial is called the **Laplace approximation**. In brief, **the Laplace approximation is a Gaussian centered at the peak of the pdf of interest with the same curvature**. Let's make this a bit more rigorous below. Let's say we have a probability distribution $p(\boldsymbol z)$ we want to approximate. The distribution $p(\boldsymbol z)$ is of the form$$p(\boldsymbol z) = \frac{1}{Z} \tilde{p}(\boldsymbol z) ~ ,$$where $\tilde{p}(\boldsymbol z)$ is an unnormalized distribution that we can evaluate easily, but $Z$ is unknown.
Formally, the Laplace approximation results from a second-order Taylor expansion of $\log \tilde{p}(\boldsymbol z)$ around $\boldsymbol z_0$:$$\log \tilde{p}(\boldsymbol z) \approx \log \tilde{p}(\boldsymbol z_0) + \frac{d}{d\boldsymbol z}\log \tilde{p}(\boldsymbol z)\Big|_{\boldsymbol z=\boldsymbol z_0}(\boldsymbol z -\boldsymbol z_0) + \frac{1}{2}(\boldsymbol z-\boldsymbol z_0)^\top\frac{d^2}{d\boldsymbol z^2} \log \tilde{p}(\boldsymbol z)\Big|_{\boldsymbol z=\boldsymbol z_0}(\boldsymbol z-\boldsymbol z_0)$$Now let's evaluate this expression at the mode of $p(\boldsymbol z)$ – which is the same as the mode of $\tilde{p}(\boldsymbol z)$. We define the mode $\boldsymbol z^*$ such that$$\frac{d}{d\boldsymbol z} \tilde{p}(\boldsymbol z) \Big|_{\boldsymbol z = \boldsymbol z^*} = \boldsymbol 0 ~ .$$At this point, the first-order term of the expansion vanishes and we are left with$$\log \tilde{p}(\boldsymbol z) \approx \log \tilde{p}(\boldsymbol z^*) - \frac{1}{2}(\boldsymbol z-\boldsymbol z^*)^\top\boldsymbol A(\boldsymbol z-\boldsymbol z^*)$$Or, equivalently,$$\tilde{p}(\boldsymbol z) \approx \tilde{p}(\boldsymbol z^*) \exp\big(-\tfrac{1}{2}(\boldsymbol z - \boldsymbol z^*)^\top\boldsymbol A(\boldsymbol z - \boldsymbol z^*)\big) ~ ,$$where$$\boldsymbol A = - \frac{d^2}{d\boldsymbol z^2} \log \tilde{p}(\boldsymbol z)\Big|_{\boldsymbol z=\boldsymbol z^*} ~ .$$And now this distribution we know how to normalize, because it's one of those Gaussians we know and love. By inspection, we can identify the mean and the covariance, and write down the Laplace approximation of $p(\boldsymbol z)$ as$$q(\boldsymbol z) = \mathcal{N}(\boldsymbol z | \boldsymbol z^*, \boldsymbol A^{-1})$$ As an example, let's use the unnormalized distribution $\tilde{p}(z) = z e^{-z/2}$. When normalized properly, this is in fact the $\chi^2$ distribution with $k=4$ degrees of freedom. Have a go yourself:1. Plot $p(z)$.2. Take the first derivative of $\tilde{p}(z)$ (or the first derivative of its log), and find its maximum $z^*$ analytically.3. In the same plot, draw a vertical line at $z = z^*$ to verify you got the right answer.4. Take the second derivative of $\log \tilde{p}(z)$ and evaluate it at $z^*$.5. Plot the corresponding Gaussian $q(z)$ and verify the approximation looks reasonable.**Task 5*** Write a function that evaluates the Laplace approximation $q(z)$. ###Code ## EDIT THIS FUNCTION def laplace_q(z): # z: double array of size (T,) # returns: array with Laplace approximation q evaluated # at all points in z q = stats.multivariate_normal.pdf(z,2,4) # mean z* = 2, variance A^{-1} = 4 return q ## ADD CODE HERE # Find the Laplace approximation of z*exp(-z/2) with pen and paper and then plot it. z = np.linspace(0,10) p = stats.chi2.pdf(z, 4) plt.plot(z,p, 'r', label= 'chi square distribution') plt.axvline(x=2 , label='z=z*') plt.plot(z, laplace_q(z), 'g', label='Laplace approximation'); plt.legend(loc='best'); ###Output _____no_output_____ ###Markdown Bayesian logistic regression (for real this time) Now that we have obtained the mode (peak) of the posterior through the MAP estimate above, it's time to go all the way and calculate the posterior over $\boldsymbol\theta$.
However, as we mentioned above, the posterior doesn't have an analytical form, so we'll use – you guessed it – the Laplace approximation.**Task 6*** Write a function, based on your previous code, that will calculate the Laplace approximation $q(\boldsymbol\theta)$ of the true posterior $p(\boldsymbol\theta | \boldsymbol X, \boldsymbol y)$ and return the mean and variance of $q$. To visualize the behavior and the diversity of $q$, draw a number $j = 1, ..., J$ of samples $\boldsymbol\theta_j \sim q(\boldsymbol\theta)$. For each sample, plot its predicted class probabilities $\sigma(x \boldsymbol\theta_j)$._Hint_: the extension of the Laplace approximation to multivariate distributions is straightforward, and in this case the variance of the Gaussian is the Hessian of the negative log likelihood $\boldsymbol A = - \nabla_\theta \nabla_\theta \log p(\boldsymbol\theta | \boldsymbol X, \boldsymbol y)$. ###Code ## EDIT THIS FUNCTION def unnormalized_posterior(X,y,theta,m,s): # X: N x D matrix of training inputs # y: N x 1 vector of training targets/observations # theta: parameters (D x 1) # m : mean (D x1) # S : covariance (D x D) # returns: negative log posterior (up to an additive constant), scalar theta = theta.reshape(-1,1) inv = np.linalg.inv(s) logp = (-0.5 *(theta-m).T @ inv @ (theta-m))+log_likelihood(X, y, theta) return -logp def get_posterior(X, y, m, S): # X: N x D matrix of training inputs # y: N x 1 vector of training targets/observations # m: D x 1 prior mean of parameters # S: D x D prior covariance of parameters # returns: maximum a posteriori parameters (D x 1) # covariance of Laplace approximation (D x D) N, D = X.shape opt= optimize.minimize(lambda theta: unnormalized_posterior(X,y,theta,m,S) ,np.zeros(D), method ='BFGS') ## <-- EDIT THIS LINE mu_post = opt.x ## <-- EDIT THESE LINES S_post = opt.hess_inv return mu_post, S_post ## ADD CODE HERE # Calculate the Laplace approximation of the posterior for theta, # draw a few samples and plot the corresponding likelihood functions # for each one. m = np.zeros((D, 1)) S = 5*np.eye(D) nb_samples = 5 theta_map, S_post = get_posterior(x, y, m, S) plt.scatter(x[:,1], y) for i in range(nb_samples): thet=np.random.multivariate_normal(theta_map, S_post) sigmo = 1/(1+ np.exp(- x @ thet)) plt.plot(x[:,1], sigmo, '.') ## <--EDIT THIS LINE plt.show() ###Output _____no_output_____ ###Markdown Comparing posterior approximations The Laplace approximation is part of a family of methods known as _deterministic approximate inference_. In addition, there's another set of methods known as _stochastic approximate inference_ which, as you can guess, includes most of the sampling techniques you have studied. You must be an expert in sampling by now. Let's actually go and check whether this Laplace approximation we just made is legit.* What sampling methods do you know to sample from an unnormalized distribution?For example, let's try the Metropolis algorithm.1. Write a proposal function to move in $\boldsymbol\theta$-space.2. Write a function to accept or reject new proposals based on the Metropolis criterion.3. Write a loop and run the Markov chain for a few thousand iterations.4. Check that the sampling worked: did the Markov chain mix properly? What's the acceptance rate? How does it depend on the proposal function?**Task 7*** Write a function to sample from the true posterior $p(\boldsymbol\theta | \boldsymbol X, \boldsymbol y)$.
###Code ## EDIT THIS FUNCTION def posterior_sample(X, y, m, S, nb_iter): # X: N x D matrix of training inputs # y: N x 1 vector of training targets/observations # m: D x 1 prior mean of parameters # S: D x D prior covariance of parameters # returns: nb_iter x D matrix of posterior samples D = X.shape[1] sigma=np.identity(D) theta0=np.array([0,0]) samples = np.zeros((nb_iter,D)) m = m.ravel() for i in range(nb_iter): theta1= np.random.multivariate_normal(theta0, sigma) q1=stats.multivariate_normal.pdf(theta0,theta1, sigma) mu1=1/(1+ np.exp(- X @ theta1)) p1=(np.exp(sum(np.log(stats.bernoulli.pmf(y[n],mu1[n])) for n in range(N))) * stats.multivariate_normal.pdf(theta1, m, S)) q0=stats.multivariate_normal.pdf(theta1,theta0, sigma) mu0=1/(1+ np.exp(- X @ theta0)) p0=(np.exp(sum(np.log(stats.bernoulli.pmf(y[n],mu0[n])) for n in range(N))) * stats.multivariate_normal.pdf(theta0, m, S)) u=np.random.uniform (0,1) if (q1 * p1 / (q0 * p0)>= u): samples[i,:] = theta1 theta0=theta1 else: samples[i,:]=theta0 return samples ###Output _____no_output_____ ###Markdown Finally, let's plot the results and see if both inference methods arrive at roughly the same posterior. In the same axis, plot* The histogram pdf of the MCMC samples (you may want to look at the `density` option in `plt.hist`); and* The Laplace posterior. Make one plot for the intercept ($\theta_0$) and one for the slope ($\theta_1$). What do they look like? Do they match? What kinds of posteriors do you think the Laplace approximation will be good or bad at approximating? ###Code ## ADD CODE HERE # Plot a histogram of the MCMC posterior samples and the # analytical expression for the Laplace posterior. If # everything went well, the peaks should coincide and # their widths should be comparable. nb_iter = 10000 samples = posterior_sample(x, y, m, S, nb_iter) plt.figure(figsize=(10,8)) plt.hist(samples[:,0],20,density=True); plt.hist(samples[:,1],20,density=True); ###Output _____no_output_____ ###Markdown Et violà! Now you're an expert in logistic regression. (Wait, I think that's a big violin. I meant to say: et voilà!)Now that we can visualize the posterior, we can play around with the data and the inference parameters:* Play around with the data generation process. What happens as you increase/decrease $N$ and the jitter parameter?* What does the joint posterior look like? Make a visualization of the MCMC and Laplace approximations in the $(\theta_0, \theta_1)$ plane.* What happens if the model is misspecified? Take out the intercept term in the model (i.e., remove the column of ones in $\boldsymbol X$), but set the `offset` in the data generation process to non-zero. What happens to the posterior and its Laplace approximation? ###Code plt.figure(figsize=(10,8)) plt.hist(samples[:,0],20,density=True); plt.hist(samples[:,1],20,density=True); z = np.linspace(-4,4,100) pz = stats.multivariate_normal.pdf(z,theta_map[0], S_post[0,0]) pz2 = stats.multivariate_normal.pdf(z,theta_map[1], S_post[1,1]) plt.plot(z,pz); plt.plot(z,pz2); ###Output _____no_output_____
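###Markdown As a sketch of the joint-posterior visualization suggested above, reusing `samples`, `theta_map` and `S_post` from the earlier cells: scatter the MCMC samples in the $(\theta_0, \theta_1)$ plane and overlay contours of the Laplace approximation. ###Code # Scatter the MCMC samples and overlay contours of N(theta_map, S_post) t0 = np.linspace(samples[:,0].min(), samples[:,0].max(), 100) t1 = np.linspace(samples[:,1].min(), samples[:,1].max(), 100) T0, T1 = np.meshgrid(t0, t1) q = stats.multivariate_normal.pdf(np.dstack((T0, T1)), theta_map, S_post) plt.figure(figsize=(8,8)) plt.scatter(samples[:,0], samples[:,1], s=5, alpha=0.2, label='MCMC samples') plt.contour(T0, T1, q, colors='r') plt.xlabel(r'$\theta_0$') plt.ylabel(r'$\theta_1$') plt.legend(loc='best'); ###Output _____no_output_____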
ja/2_upload-csv-data/2_upload-csv-data.ipynb
###Markdown 2. Uploading multiple CSV files with the SDK This tutorial introduces how to upload a large number of CSV files using the **intdash SDK for Python** (hereafter called the intdash SDK). In this case, one measurement is created per CSV file. For the format of the CSV files, see "Preparation" below. 2.0 Preparation Before running this scenario, you need to prepare the following: - an edge for registering the data - multiple CSV files to upload Data used In this tutorial, the following data must be prepared on the server side. This section uses the following data names. |Data item|Name used in this scenario| |:---|:---| |User account| (any username) | |Edge that registers the time-series data|edge1| |CSV containing the time-series data| sampleX.csv (X = any number)| Details of the CSV data to upload The CSVs uploaded here are assumed to satisfy the following conditions: * the first row stores the name of each column as a string * the first column stores timestamps * the second and subsequent columns store the data for each column name The names given in the first row are used as the data names ( `data_id` ). Location of the CSV files to upload Place the CSV files containing the time-series data to be registered under the `csv` directory, which is in the same directory as this Jupyter Notebook. This tutorial uses the sample CSV files that are already in place. Importing packages and creating a client For the `url` given to `intdash.Client`, use the environment information of your intdash server; for `username` and `password`, specify the access credentials issued for your login user account. ###Code import pandas as pd import math import intdash from intdash import timeutils # Create client client = intdash.Client( url = "https://example.intdash.jp", username = "user1", password="password_here" ) ###Output _____no_output_____ ###Markdown This completes the preparation. 2.1 Retrieve the edge used for uploading the data First of all, retrieve the edge that will be used when uploading the CSV files. ###Code edges = client.edges.list(name='edge1') edge1 = edges[0] edge1.name ###Output _____no_output_____ ###Markdown 2.2 Read the CSV files Read each CSV file as a `pandas.DataFrame`. Here, the CSV files stored in the `csv/` directory are used. ###Code import glob csv_files = glob.glob('./csv/*') dfs = [] for csv_path in csv_files: df = pd.read_csv(csv_path, index_col=0).groupby("time").last() dfs.append(df) ###Output _____no_output_____ ###Markdown 2.3 Create a measurement for each CSV file and register the data From here on, the explanation focuses on processing a single CSV file. By repeating these steps, multiple CSV files can be uploaded. Note: if you want to process multiple files right away, skip this section. ###Code # Pick first DataFrame. df = dfs.pop(0) ###Output _____no_output_____ ###Markdown Create a measurement, using the first timestamp of the data as the start time of the measurement. ###Code new_measurements = client.measurements.create( name='csv_data', # Define name of measurement. basetime=timeutils.str2timestamp(df.index[0]), # Use timestamp of the first datapoint as the basetime of the measurement. edge_uuid=edge1.uuid ) ###Output _____no_output_____ ###Markdown After creating the measurement, convert the DataFrame to the `DataPoint` format. ###Code datapoints = [] for data_id, values in df.to_dict().items(): for time, value in values.items(): if math.isnan(value) or value == '': continue datapoints.append( intdash.DataPoint( elapsed_time= timeutils.str2timestamp(time) - new_measurements.basetime, # Time elapsed from the start of measurement. data_type=intdash.DataType.float, channel=1, # fixed at 1. data_payload=intdash.data.Float(data_id=data_id, value=value).to_payload() ) ) ###Output _____no_output_____ ###Markdown Once the conversion is complete, associate the time-series data with the measurement created earlier. ###Code client.data_points.store( measurement_uuid=new_measurements.uuid, data_points=datapoints ) ###Output _____no_output_____ ###Markdown This completes the upload of one CSV file. 2.4 Register multiple CSV files in one batch The following combines the steps from the previous section into a single block and repeats them over multiple DataFrames. ###Code for df in dfs: # Create a measurement. new_measurements = client.measurements.create( name='csv_data', # Define name of measurement. basetime=timeutils.str2timestamp(df.index[0]), # Use timestamp of the first datapoint as the basetime of the measurement. edge_uuid=edge1.uuid ) # Convert DataFrames to DataPoints.
datapoints = [] for data_id, values in df.to_dict().items(): for time, value in values.items(): if math.isnan(value) or value == '': continue datapoints.append( intdash.DataPoint( elapsed_time= timeutils.str2timestamp(time) - new_measurements.basetime, # Time elapsed from the start of measurement. data_type=intdash.DataType.float, channel=1, # fixed at 1. data_payload=intdash.data.Float(data_id=data_id, value=value).to_payload() ) ) # Store datapoints. client.data_points.store( measurement_uuid=new_measurements.uuid, data_points=datapoints ) ###Output _____no_output_____
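###Markdown
If you do not have sample CSV files at hand, the short sketch below generates one file in the format assumed in "Preparation" (first row: column names; first column: timestamps). The file name `sample1.csv` and the column names `ACC_X` and `ACC_Y` are arbitrary examples, not names required by the SDK, so adjust them to your own data.
###Code
# Minimal sketch: write one sample CSV in the assumed format.
# 'sample1.csv', 'ACC_X' and 'ACC_Y' are arbitrary example names.
import os
import numpy as np
import pandas as pd

os.makedirs('./csv', exist_ok=True)
times = pd.date_range('2021-01-01 00:00:00', periods=10, freq='100ms', tz='UTC')
sample = pd.DataFrame(
    {'ACC_X': np.random.randn(10), 'ACC_Y': np.random.randn(10)},
    index=times,
)
sample.index.name = 'time'          # the reading code above groups by an index named 'time'
sample.to_csv('./csv/sample1.csv')  # the first row becomes the column names
###Output
_____no_output_____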
week-4/week-4-1-supervised-learning-class.ipynb
###Markdown
Class 4-1: Supervised learning
###Code
import pandas as pd
from sklearn import tree
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
from sklearn.model_selection import train_test_split
from sklearn import metrics

%matplotlib inline
###Output
_____no_output_____
###Markdown
Today we're going to look at GSS data asking people whether the government is spending too much or too little on various problems. We're going to try to predict one of the answers from all of the others.

Exploring the data
###Code
# load it
gss = pd.read_csv('GSS-spending.csv')
gss.shape
# take a look
gss.head()
gss['Welfare'].value_counts()
# Our usual GSS cleanup
# drop the last two rows, which are just notes and do not contain data
gss = gss.iloc[0:-2,:]

# Drop some columns that don't contain useful information
gss = gss.drop(['Respondent id number', 'Ballot used for interview', 'Gss year for this respondent '], axis=1)

# recode answers on a -1, 0, 1 scale
gss = gss.replace({'Too little':-1, 'About right':0, 'Too much':1})

# Everything else gets NA
gss = gss.replace({'Not applicable' : None, 'No answer' : None, 'Don\'t know' : None})

gss.head()
# For a decision tree to work we can't have any missing values. Drop all rows with any NAs
gss = gss.dropna(how='any')
gss.shape
###Output
_____no_output_____
###Markdown
The simplest way to see if some variables are going to be predictable from others is to check out the overall correlations.
###Code
gss.corr()
###Output
_____no_output_____
###Markdown
Building a decision-tree
We're going to use a machine learning technique called a decision tree to try to predict the answer on one question from the answers on all the others.
###Code
# By changing this line and re-running the workbook, we can see how predictable different variables are or aren't.
predict_col = 'Social security'

gss[predict_col].value_counts()
###Output
_____no_output_____
###Markdown
Actually applying the decision tree is very much like doing a regression.
###Code
# Fit a decision tree to the full data
x = gss.drop(predict_col, axis=1).values
y = gss[[predict_col]].values

dt = tree.DecisionTreeClassifier()
dt.fit(x,y)
###Output
_____no_output_____
###Markdown
Ok, now we need to evaluate the result, by seeing how well it can predict our question from all the others.
###Code
# Get the predictions
y_pred = dt.predict(x)
y_pred[:10]
# What percentage did it predict right?
metrics.accuracy_score(y, y_pred)
# What mistakes did it make?
pd.DataFrame(metrics.confusion_matrix(y, y_pred),
             columns=['Predicted -1','Predicted 0','Predicted 1'],
             index=['True -1', 'True 0', 'True 1'])
###Output
_____no_output_____
###Markdown
So what is this tree thing? We can actually draw it.

To get this working, you will need to install `GraphViz`. On Windows, download it [here](https://graphviz.gitlab.io/download/). On Mac, try `brew install graphviz`
###Code
from sklearn.tree import export_graphviz
import graphviz

feature_names = gss.columns.drop(predict_col)
export_graphviz(dt, feature_names=feature_names, rounded=True, out_file="mytree.dot")

with open("mytree.dot") as f:
    dot_graph = f.read()
graphviz.Source(dot_graph)
###Output
_____no_output_____
###Markdown
A more realistic test
The test of a classifier is not whether it can reproduce the output training data -- it's how well it does on data it's never seen before. So let's split our data into training and test sets, and try this again.
###Code
# Split off a random third of this data to use for testing.
train, test = train_test_split(gss, test_size=0.3)
train.shape
test.shape
# The x values don't contain the column we want to predict. The y value is only that column.
x_train = train.drop(predict_col, axis=1).values
y_train = train[[predict_col]].values

x_test = test.drop(predict_col, axis=1).values
y_test = test[[predict_col]].values
# Fit a decision tree to the training data only
dt = tree.DecisionTreeClassifier()
dt.fit(x_train,y_train)
# What percentage did it predict right on the training data?
y_train_pred = dt.predict(x_train)
metrics.accuracy_score(y_train, y_train_pred)
# What percentage did it predict right on the test data?
y_test_pred = dt.predict(x_test)
metrics.accuracy_score(y_test, y_test_pred)
# What mistakes did it make?
pd.DataFrame(metrics.confusion_matrix(y_test, y_test_pred),
             columns=['Predicted -1','Predicted 0','Predicted 1'],
             index=['True -1', 'True 0', 'True 1'])
###Output
_____no_output_____
###Markdown
Random Forest: Improving accuracy with a better classifier
Sometimes improving accuracy is very difficult. You might just not have a very predictable problem! But we can often do better than just a simple decision tree, by using many trees. This is a "random forest," and for our purposes it's just a drop-in replacement for the classifier.
###Code
from sklearn.ensemble import RandomForestClassifier

rf = RandomForestClassifier()
rf.fit(x_train,y_train.ravel())
# What percentage did it predict right on the test data?
y_test_pred = rf.predict(x_test)
metrics.accuracy_score(y_test, y_test_pred)
# What mistakes did it make?
pd.DataFrame(metrics.confusion_matrix(y_test, y_test_pred),
             columns=['Predicted -1','Predicted 0','Predicted 1'],
             index=['True -1', 'True 0', 'True 1'])
###Output
_____no_output_____
###Markdown
Binary classification metrics
In this section we'll train a basic classifier on our favorite Titanic data, and then calculate some accuracy metrics for a binary predictor where there are only two outcomes: true and false. We'll be using these metrics over and over again in our upcoming discussions of algorithmic accountability.

For your reference, Wikipedia has an [amazing chart](https://en.wikipedia.org/wiki/Confusion_matrix) of the various things you can calculate from a binary confusion matrix.
###Code
# load titanic.csv once more
ti = pd.read_csv('titanic.csv')

# recode the pclass and gender variables so they are numeric
ti.pclass = ti.pclass.replace({'1st':1, '2nd':2, '3rd':3})
ti['female'] = ti.gender.replace({'male':0, 'female':1})

# Set aside a third of the data for testing.
ti_train, ti_test = train_test_split(ti, test_size=0.3, random_state=100)

# Set up x and y variables and train a decision tree on the pclass and sex features, to predict survived
feature_columns = ['pclass','female']
ti_x = ti_train[feature_columns].values
ti_y = ti_train[['survived']].values

dt = tree.DecisionTreeClassifier()
dt.fit(ti_x,ti_y)

# Create ti_x_test and ti_y_test values, and use the classifier to predict ti_y_test_pred
ti_x_test = ti_test[feature_columns].values
ti_y_test = ti_test[['survived']].values
ti_y_test_pred = dt.predict(ti_x_test)

# Print out the confusion matrix for the classifier. Use the DataFrame trick we saw in class to label the axes.
pd.DataFrame(metrics.confusion_matrix(ti_y_test, ti_y_test_pred),
             columns=['Predicted 0','Predicted 1'],
             index=['True 0', 'True 1'])

# Ok! What are the number of true positives, true negatives, false positives, and false negatives?
TP = 72
TN = 255
FP = 2
FN = 65
# Using only the variables TP,TN,FP,FN, calculate and print the overall accuracy
accuracy = (TP+TN)/(TP+TN+FP+FN)
accuracy
# Check that we have the calculation right by comparing to metrics.accuracy_score
metrics.accuracy_score(ti_y_test, ti_y_test_pred)
# Using only the variables TP,TN,FP,FN, calculate and print the false positive rate and the false negative rate
FPR = FP/(FP+TN)
FPR
FNR = FN/(FN+TP)
FNR
# Now compute and print the precision aka positive predictive value, again from these four variables.
precision = TP/(TP+FP)
precision
###Output
_____no_output_____
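###Markdown
The same four counts give several other entries from that Wikipedia chart. As a quick, optional extension, here are recall (also called the true positive rate or sensitivity) and the F1 score, the harmonic mean of precision and recall; the formulas are standard, and the variable names simply continue the ones defined above.
###Code
# Recall aka true positive rate: what fraction of the actual positives did we catch?
recall = TP/(TP+FN)
recall
# F1 score: harmonic mean of precision and recall.
f1 = 2 * precision * recall / (precision + recall)
f1
###Output
_____no_output_____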
ground_truth_labeling_jobs/from_unlabeled_data_to_deployed_machine_learning_model_ground_truth_demo_image_classification/from_unlabeled_data_to_deployed_machine_learning_model_ground_truth_demo_image_classification.ipynb
###Markdown
From Unlabeled Data to a Deployed Machine Learning Model: A SageMaker Ground Truth Demonstration for Image Classification

1. [Introduction](#Introduction)
2. [Run a Ground Truth labeling job (time: about 3h)](#Run-a-Ground-Truth-labeling-job)
    1. [Prepare the data](#Prepare-the-data)
    2. [Specify the categories](#Specify-the-categories)
    3. [Create the instruction template](#Create-the-instruction-template)
    4. [Create a private team to test your task [OPTIONAL]](#Create-a-private-team-to-test-your-task-[OPTIONAL])
    5. [Define pre-built lambda functions for use in the labeling job](#Define-pre-built-lambda-functions-for-use-in-the-labeling-job)
    6. [Submit the Ground Truth job request](#Submit-the-Ground-Truth-job-request)
        1. [Verify your task using a private team [OPTIONAL]](#Verify-your-task-using-a-private-team-[OPTIONAL])
    7. [Monitor job progress](#Monitor-job-progress)
3. [Analyze Ground Truth labeling job results (time: about 20min)](#Analyze-Ground-Truth-labeling-job-results)
    1. [Postprocess the output manifest](#Postprocess-the-output-manifest)
    2. [Plot class histograms](#Plot-class-histograms)
    3. [Plot annotated images](#Plot-annotated-images)
        1. [Plot a small output sample](#Plot-a-small-output-sample)
        2. [Plot the full results](#Plot-the-full-results)
4. [Compare Ground Truth results to standard labels (time: about 5min)](#Compare-Ground-Truth-results-to-standard-labels)
    1. [Compute accuracy](#Compute-accuracy)
    2. [Plot correct and incorrect annotations](#Plot-correct-and-incorrect-annotations)
5. [Train an image classifier using Ground Truth labels (time: about 15min)](#Train-an-image-classifier-using-Ground-Truth-labels)
6. [Deploy the Model (time: about 20min)](#Deploy-the-Model)
    1. [Create Model](#Create-Model)
    2. [Batch Transform](#Batch-Transform)
    3. [Realtime Inference](#Realtime-Inference)
        1. [Create Endpoint Configuration](#Create-Endpoint-Configuration)
        2. [Create Endpoint](#Create-Endpoint)
        3. [Perform Inference](#Perform-Inference)
7. [Review](#Review)

Introduction
This sample notebook takes you through an end-to-end workflow to demonstrate the functionality of SageMaker Ground Truth. We'll start with an unlabeled image data set, acquire labels for all the images using SageMaker Ground Truth, analyze the results of the labeling job, train an image classifier, host the resulting model, and, finally, use it to make predictions. Before you begin, we highly recommend you start a Ground Truth labeling job through the AWS Console first to familiarize yourself with the workflow. The AWS Console offers less flexibility than the API, but is simple to use.

Cost and runtime
You can run this demo in two modes:
1. Set `RUN_FULL_AL_DEMO = True` in the next cell to label 1000 images. This should cost about \$100 given the current [Ground Truth pricing scheme](https://aws.amazon.com/sagemaker/groundtruth/pricing/). In order to reduce the cost, we will use Ground Truth's auto-labeling feature. Auto-labeling uses computer vision to learn from human responses and automatically create labels for the easiest images at a cheap price. The total end-to-end runtime should be about 4h.
2. Set `RUN_FULL_AL_DEMO = False` in the next cell to label only 100 images. This should cost about \$15. **Since Ground Truth's auto-labeling feature only kicks in for datasets of 1000 images or more, this cheaper version of the demo will not use it.
Some of the analysis plots might look awkward, but you should still be able to see good results on the human-annotated 100 images.**

Prerequisites
To run this notebook, you can simply execute each cell one-by-one. To understand what's happening, you'll need:
* An S3 bucket you can write to -- please provide its name in the following cell. The bucket must be in the same region as this SageMaker Notebook instance. You can also change the `EXP_NAME` to any valid S3 prefix. All the files related to this experiment will be stored in that prefix of your bucket.
* Familiarity with Python and [numpy](http://www.numpy.org/).
* Basic familiarity with [AWS S3](https://docs.aws.amazon.com/s3/index.html),
* Basic understanding of [AWS Sagemaker](https://aws.amazon.com/sagemaker/),
* Basic familiarity with [AWS Command Line Interface (CLI)](https://aws.amazon.com/cli/) -- set it up with credentials to access the AWS account you're running this notebook from. This should work out-of-the-box on SageMaker Jupyter Notebook instances.

This notebook is only tested on a SageMaker notebook instance. The runtimes given are approximate; we used an `ml.m4.xlarge` instance in our tests. However, you can likely run it on a local instance by first executing the cell below on SageMaker, and then copying the `role` string to your local copy of the notebook.

NOTE: This notebook will create/remove subdirectories in its working directory. We recommend placing this notebook in its own directory before running it.
###Code
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os
from collections import namedtuple
from collections import defaultdict
from collections import Counter
import itertools
import json
import random
import time
import imageio
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.metrics import confusion_matrix
import boto3
import sagemaker
from urllib.parse import urlparse

BUCKET = '<< YOUR S3 BUCKET NAME >>'
assert BUCKET != '<< YOUR S3 BUCKET NAME >>', 'Please provide a custom S3 bucket name.'
EXP_NAME = 'ground-truth-ic-demo' # Any valid S3 prefix.
RUN_FULL_AL_DEMO = True # See 'Cost and Runtime' in the Markdown cell above!

# Make sure the bucket is in the same region as this notebook.
role = sagemaker.get_execution_role()
region = boto3.session.Session().region_name
s3 = boto3.client('s3')
bucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region']
assert bucket_region == region, "Your S3 bucket {} and this notebook need to be in the same region.".format(BUCKET)
###Output
_____no_output_____
###Markdown
Run a Ground Truth labeling job
**This section should take about 3h to complete.**

We will first run a labeling job. This involves several steps: collecting the images we want labeled, specifying the possible label categories, creating instructions, and writing a labeling job specification. In addition, we highly recommend running a (free) mock job using a private workforce before you submit any job to the public workforce. This notebook will explain how to do that as an optional step. Without using a private workforce, this section until completion of your labeling job should take about 3h. However, this may vary depending on the availability of the public annotation workforce.

Prepare the data
We will first download images and labels of a subset of the [Google Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html).
These labels were [carefully verified](https://storage.googleapis.com/openimages/web/factsfigures.html). Later, we will compare Ground Truth annotations to these labels. Our dataset will include images in the following categories:
* Musical Instrument (500 images)
* Fruit (370 images)
* Cheetah (50 images)
* Tiger (40 images)
* Snowman (40 images)

If you chose `RUN_FULL_AL_DEMO = False`, then we will choose a subset of 100 images in this dataset. This is a diverse dataset of interesting images, and should be fun for the human annotators to work with. You are free to ask the annotators to annotate any images you wish (as long as the images do not contain adult content; in that case, you must adjust the labeling job request that this notebook produces; please check the Ground Truth documentation).

We will copy these images to our local `BUCKET`, and will create the corresponding *input manifest*. The input manifest is a formatted list of the S3 locations of the images we want Ground Truth to annotate. We will upload this manifest to our S3 `BUCKET`.

Disclosure regarding the Open Images Dataset V4:
Open Images Dataset V4 is created by Google Inc. We have not modified the images or the accompanying annotations. You can obtain the images and the annotations [here](https://storage.googleapis.com/openimages/web/download.html). The annotations are licensed by Google Inc. under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license. The images are listed as having a [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/) license. The following paper describes Open Images V4 in depth: from the data collection and annotation to detailed statistics about the data and evaluation of models trained on it.

A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, T. Duerig, and V. Ferrari. *The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale.* arXiv:1811.00982, 2018. ([link to PDF](https://arxiv.org/abs/1811.00982))
###Code
# Download and process the Open Images annotations.
!wget https://storage.googleapis.com/openimages/2018_04/test/test-annotations-human-imagelabels-boxable.csv -O openimgs-annotations.csv
with open('openimgs-annotations.csv', 'r') as f:
    all_labels = [line.strip().split(',') for line in f.readlines()]

# Extract image ids in each of our desired classes.
ims = {}
ims['Musical Instrument'] = [label[0] for label in all_labels if (label[2] == '/m/04szw' and label[3] == '1')][:500]
ims['Fruit'] = [label[0] for label in all_labels if (label[2] == '/m/02xwb' and label[3] == '1')][:371]
ims['Fruit'].remove('02a54f6864478101') # This image contains personal information, let's remove it from our dataset.
ims['Cheetah'] = [label[0] for label in all_labels if (label[2] == '/m/0cd4d' and label[3] == '1')][:50]
ims['Tiger'] = [label[0] for label in all_labels if (label[2] == '/m/07dm6' and label[3] == '1')][:40]
ims['Snowman'] = [label[0] for label in all_labels if (label[2] == '/m/0152hh' and label[3] == '1')][:40]
num_classes = len(ims)

# If running the short version of the demo, reduce each class count 10 times.
for key in ims.keys():
    if RUN_FULL_AL_DEMO is False:
        ims[key] = set(ims[key][:int(len(ims[key]) / 10)])
    else:
        ims[key] = set(ims[key])

# Copy the images to our local bucket.
s3 = boto3.client('s3')
num_images = sum(len(v) for v in ims.values())  # total image count, used for progress printing
for img_id, img in enumerate(itertools.chain.from_iterable(ims.values())):
    if (img_id + 1) % 10 == 0:
        print('Copying image {} / {}'.format((img_id+1), num_images))
    copy_source = {
        'Bucket': 'open-images-dataset',
        'Key': 'test/{}.jpg'.format(img)
    }
    s3.copy(copy_source, BUCKET, '{}/images/{}.jpg'.format(EXP_NAME, img))

# Create and upload the input manifest.
manifest_name = 'input.manifest'
with open(manifest_name, 'w') as f:
    for img in itertools.chain.from_iterable(ims.values()):
        img_path = 's3://{}/{}/images/{}.jpg'.format(BUCKET, EXP_NAME, img)
        f.write('{"source-ref": "' + img_path +'"}\n')
s3.upload_file(manifest_name, BUCKET, EXP_NAME + '/' + manifest_name)
###Output
_____no_output_____
###Markdown
After running the cell above, you should be able to go to `s3://BUCKET/EXP_NAME/images` in [S3 console](https://console.aws.amazon.com/s3/) and see a thousand images. We recommend you inspect the contents of these images! You can download them all to a local machine using the AWS CLI.

Specify the categories
To run an image classification labeling job, you need to decide on a set of classes the annotators can choose from. In our case, this list is `["Musical Instrument", "Fruit", "Cheetah", "Tiger", "Snowman"]`. In your own job you can choose any list of up to 10 classes. We recommend the classes to be as unambiguous and concrete as possible. The categories should be mutually exclusive, with only one correct label per image. In addition, be careful to make the task as *objective* as possible, unless of course your intention is to obtain subjective labels.
* Example good category lists: `["Human", "No Human"]`, `["Golden Retriever", "Labrador", "English Bulldog", "German Shepherd"]`, `["Car", "Train", "Ship", "Pedestrian"]`.
* Example bad category lists: `["Prominent object", "Not prominent"]` (meaning unclear), `["Beautiful", "Ugly"]` (subjective), `["Dog", "Animal", "Car"]` (not mutually exclusive).

To work with Ground Truth, this list needs to be converted to a .json file and uploaded to the S3 `BUCKET`.

*Note: The ordering of the labels or classes in the template governs the class indices that you will see downstream in the output manifest (this numbering is zero-indexed). In other words, the class that appears second in the template will correspond to class "1" in the output. At the end of this demonstration, we will train a model and make predictions, and this class ordering is instrumental to interpreting the results.*
###Code
CLASS_LIST = list(ims.keys())
print("Label space is {}".format(CLASS_LIST))

json_body = {
    'labels': [{'label': label} for label in CLASS_LIST]
}
with open('class_labels.json', 'w') as f:
    json.dump(json_body, f)

s3.upload_file('class_labels.json', BUCKET, EXP_NAME + '/class_labels.json')
###Output
_____no_output_____
###Markdown
You should now see `class_labels.json` in `s3://BUCKET/EXP_NAME/`.

Create the instruction template
Part or all of your images will be annotated by human annotators. It is **essential** to provide good instructions that help the annotators give you the annotations you want. Good instructions are:
1. Concise. We recommend limiting verbal/textual instruction to two sentences, and focusing on clear visuals.
2. Visual. In the case of image classification, we recommend providing one labeled image in each of the classes as part of the instruction.

When used through the AWS Console, Ground Truth helps you create the instructions using a visual wizard. When using the API, you need to create an HTML template for your instructions.
Below, we prepare a very simple but effective template and upload it to your S3 bucket.

NOTE: If you use any images in your template (as we do), they need to be publicly accessible. You can enable public access to files in your S3 bucket through the S3 Console, as described in [S3 Documentation](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/set-object-permissions.html).

Testing your instructions
It is very easy to create broken instructions. This might cause your labeling job to fail. However, it might also cause your job to complete with meaningless results (when the annotators have no idea what to do, or the instructions are plain wrong). We *highly recommend* that you verify that your task is correct in two ways:
1. The following cell creates and uploads a file called `instructions.template` to S3. It also creates `instructions.html` that you can open in a local browser window. Please do so and inspect the resulting web page; it should correspond to what you want your annotators to see (except the actual image to annotate will not be visible).
2. Run your job in a private workforce, which is a way to run a mock labeling job. We describe how to do it in [Verify your task using a private team [OPTIONAL]](#Verify-your-task-using-a-private-team-[OPTIONAL]).
###Code
img_examples = ['https://s3.amazonaws.com/open-images-dataset/test/{}'.format(img_id)
                for img_id in ['0634825fc1dcc96b.jpg', '0415b6a36f3381ed.jpg',
                               '8582cc08068e2d0f.jpg', '8728e9fa662a8921.jpg',
                               '926d31e8cde9055e.jpg']]

def make_template(test_template=False, save_fname='instructions.template'):
    template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
    <crowd-form>
      <crowd-image-classifier
        name="crowd-image-classifier"
        src="{{{{ task.input.taskObject | grant_read_access }}}}"
        header="Dear Annotator, please tell me what you can see in the image. Thank you!"
        categories="{categories_str}"
      >
        <full-instructions header="Image classification instructions">
        </full-instructions>

        <short-instructions>
          <p>Dear Annotator, please tell me what you can see in the image. Thank you!</p>
          <p><img src="{}" style="max-width:100%">
          <br>Example "Musical Instrument". </p>

          <p><img src="{}" style="max-width:100%">
          <br>Example "Fruit".</p>

          <p><img src="{}" style="max-width:100%">
          <br>Example "Cheetah". </p>

          <p><img src="{}" style="max-width:100%">
          <br>Example "Tiger". </p>

          <p><img src="{}" style="max-width:100%">
          <br>Example "Snowman". </p>
        </short-instructions>
      </crowd-image-classifier>
    </crowd-form>""".format(*img_examples,
                            categories_str=str(CLASS_LIST) if test_template
                            else '{{ task.input.labels | to_json | escape }}')

    with open(save_fname, 'w') as f:
        f.write(template)
    if test_template is False:
        print(template)

make_template(test_template=True, save_fname='instructions.html')
make_template(test_template=False, save_fname='instructions.template')
s3.upload_file('instructions.template', BUCKET, EXP_NAME + '/instructions.template')
###Output
_____no_output_____
###Markdown
You should now be able to find your template in `s3://BUCKET/EXP_NAME/instructions.template`.

Create a private team to test your task [OPTIONAL]
This step requires you to use the AWS Console. However, we **highly recommend** that you follow it, especially when creating your own task with a custom dataset, label set, and template.

We will create a `private workteam` and add only one user (you) to it. Then, we will modify the Ground Truth API job request to send the task to that workforce.
You will then be able to see your annotation job exactly as the public annotators would see it. You can even annotate the whole dataset yourself!

To create a private team:
1. Go to `AWS Console > Amazon SageMaker > Labeling workforces`
2. Click "Private" and then "Create private team".
3. Enter the desired name for your private workteam.
4. Enter your own email address in the "Email addresses" section.
5. Enter the name of your organization and a contact email to administrate the private workteam.
6. Click "Create Private Team".
7. The AWS Console should now return to `AWS Console > Amazon SageMaker > Labeling workforces`. Your newly created team should be visible under "Private teams". Next to it you will see an `ARN` which is a long string that looks like `arn:aws:sagemaker:region-name-123456:workteam/private-crowd/team-name`. Copy this ARN into the cell below.
8. You should get an email from `[email protected]` that contains your workforce username and password.
9. In `AWS Console > Amazon SageMaker > Labeling workforces`, click on the URL in `Labeling portal sign-in URL`. Use the email/password combination from Step 8 to log in (you will be asked to create a new, non-default password).

That's it! This is your private worker's interface. When we create a verification task in [Verify your task using a private team](#Verify-your-task-using-a-private-team-[OPTIONAL]) below, your task should appear in this window. You can invite your colleagues to participate in the labeling job by clicking the "Invite new workers" button.

The [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-private.html) has more details on the management of private workteams.
###Code
private_workteam_arn = '<< your private workteam ARN here >>'
###Output
_____no_output_____
###Markdown
Define pre-built lambda functions for use in the labeling job
Before we submit the request, we need to define the ARNs for four key components of the labeling job: 1) the workteam, 2) the annotation consolidation Lambda function, 3) the pre-labeling task Lambda function, and 4) the machine learning algorithm to perform auto-annotation. These functions are defined by strings with region names and AWS service account numbers, so we will define a mapping below that will enable you to run this notebook in any of our supported regions.

See the official documentation for the available ARNs:
* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-public.html) for a discussion of the workteam ARN definition. There is only one valid selection if you choose to use the public workforce; if you elect to use a private workteam, you should check the corresponding ARN for the workteam.
* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_HumanTaskConfig.html#SageMaker-Type-HumanTaskConfig-PreHumanTaskLambdaArn) for available pre-human ARNs for other workflows.
* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_AnnotationConsolidationConfig.html#SageMaker-Type-AnnotationConsolidationConfig-AnnotationConsolidationLambdaArn) for available annotation consolidation ARNs for other workflows.
* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_LabelingJobAlgorithmsConfig.html#SageMaker-Type-LabelingJobAlgorithmsConfig-LabelingJobAlgorithmSpecificationArn) for available auto-labeling ARNs for other workflows.
###Code
# Specify ARNs for resources needed to run an image classification job.
ac_arn_map = {'us-west-2': '081040173940',
              'us-east-1': '432418664414',
              'us-east-2': '266458841044',
              'eu-west-1': '568282634449',
              'ap-northeast-1': '477331159723'}

prehuman_arn = 'arn:aws:lambda:{}:{}:function:PRE-ImageMultiClass'.format(region, ac_arn_map[region])
acs_arn = 'arn:aws:lambda:{}:{}:function:ACS-ImageMultiClass'.format(region, ac_arn_map[region])
labeling_algorithm_specification_arn = 'arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/image-classification'.format(region)
workteam_arn = 'arn:aws:sagemaker:{}:394669845002:workteam/public-crowd/default'.format(region)
###Output
_____no_output_____
###Markdown
Submit the Ground Truth job request
The API starts a Ground Truth job by submitting a request. The request contains the full configuration of the annotation task, and allows you to modify the fine details of the job that are fixed to default values when you use the AWS Console. The parameters that make up the request are described in more detail in the [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateLabelingJob.html).

After you submit the request, you should be able to see the job in your AWS Console, at `Amazon SageMaker > Labeling Jobs`. You can track the progress of the job there. This job will take several hours to complete. If your job is larger (say 100,000 images), the speed and cost benefit of auto-labeling should be larger.

Verify your task using a private team [OPTIONAL]
If you chose to follow the steps in [Create a private team](#Create-a-private-team-to-test-your-task-[OPTIONAL]), then you can first verify that your task runs as expected. To do this:
1. Set VERIFY_USING_PRIVATE_WORKFORCE to True in the cell below.
2. Run the next two cells. This will define the task and submit it to the private workforce (to you).
3. After a few minutes, you should be able to see your task in your private workforce interface [Create a private team](#Create-a-private-team-to-test-your-task-[OPTIONAL]). Please verify that the task appears as you want it to appear.
4. If everything is in order, change `VERIFY_USING_PRIVATE_WORKFORCE` to `False` and rerun the cell below to start the real annotation task!
###Code
VERIFY_USING_PRIVATE_WORKFORCE = False
USE_AUTO_LABELING = True

task_description = 'What do you see: a {}?'.format(' a '.join(CLASS_LIST))
task_keywords = ['image', 'classification', 'humans']
task_title = task_description
job_name = 'ground-truth-demo-' + str(int(time.time()))

human_task_config = {
    "AnnotationConsolidationConfig": {
        "AnnotationConsolidationLambdaArn": acs_arn,
    },
    "PreHumanTaskLambdaArn": prehuman_arn,
    "MaxConcurrentTaskCount": 200, # 200 images will be sent at a time to the workteam.
    "NumberOfHumanWorkersPerDataObject": 3, # 3 separate workers will be required to label each image.
    "TaskAvailabilityLifetimeInSeconds": 21600, # Your workteam has 6 hours to complete all pending tasks.
    "TaskDescription": task_description,
    "TaskKeywords": task_keywords,
    "TaskTimeLimitInSeconds": 300, # Each image must be labeled within 5 minutes.
"TaskTitle": task_title, "UiConfig": { "UiTemplateS3Uri": 's3://{}/{}/instructions.template'.format(BUCKET, EXP_NAME), } } if not VERIFY_USING_PRIVATE_WORKFORCE: human_task_config["PublicWorkforceTaskPrice"] = { "AmountInUsd": { "Dollars": 0, "Cents": 1, "TenthFractionsOfACent": 2, } } human_task_config["WorkteamArn"] = workteam_arn else: human_task_config["WorkteamArn"] = private_workteam_arn ground_truth_request = { "InputConfig" : { "DataSource": { "S3DataSource": { "ManifestS3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, manifest_name), } }, "DataAttributes": { "ContentClassifiers": [ "FreeOfPersonallyIdentifiableInformation", "FreeOfAdultContent" ] }, }, "OutputConfig" : { "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME), }, "HumanTaskConfig" : human_task_config, "LabelingJobName": job_name, "RoleArn": role, "LabelAttributeName": "category", "LabelCategoryConfigS3Uri": 's3://{}/{}/class_labels.json'.format(BUCKET, EXP_NAME), } if USE_AUTO_LABELING and RUN_FULL_AL_DEMO: ground_truth_request[ "LabelingJobAlgorithmsConfig"] = { "LabelingJobAlgorithmSpecificationArn": labeling_algorithm_specification_arn } sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_labeling_job(**ground_truth_request) ###Output _____no_output_____ ###Markdown Monitor job progressA Ground Truth job can take a few hours to complete (if your dataset is larger than 10000 images, it can take much longer than that!). One way to monitor the job's progress is through AWS Console. In this notebook, we will use Ground Truth output files and Cloud Watch logs in order to monitor the progress. You can re-evaluate the next two cells repeatedly. You can re-evaluate the next cell repeatedly. It sends a `describe_labelging_job` request which should tell you whether the job is completed or not. If it is, then 'LabelingJobStatus' will be 'Completed'. ###Code sagemaker_client.describe_labeling_job(LabelingJobName=job_name) ###Output _____no_output_____ ###Markdown The next cell extract detailed information on how your job is doing to-date. You can re-evaluate it at any time. It should give you:* The number of human and machine-annotated images in each category across the iterations of your labeling job.* The training curves of any neural network training jobs launched by Ground Truth **(only if you are running with `RUN_FULL_AL_DEMO=True`)**.* The cost of the human- and machine-annotatoed labels.To understand the pricing, study [the pricing doc](https://aws.amazon.com/sagemaker/groundtruth/pricing/) carefully. In our case, each human label costs `$0.08 + 3 * $0.012 = $0.116` and each auto-label costs `$0.08`. There is also a small added cost of using SageMaker instances for neural net training and inference during auto-labeling. However, this should be insignificant compared the other costs.If `RUN_FULL_AL_DEMO==True`, then the job will proceed in multiple iterations. * Iteration 1: Ground Truth will send out 10 images as 'probes' for human annotation. If these are succesfully annotated, proceed to Iteration 2.* Iteration 2: Send out a batch of `MaxConcurrentTaskCount - 10` (in our case, 190) images for human annotation to obtain an active learning training batch.* Iteration 3: Send out another batch of 200 images for human annotation to obtain an active learning validation set.* Iteration 4a: Train a neural net to do auto-labeling. Auto-label as many datapoints as possible. 
* Iteration 4b: If there is any data left over, send out at most 200 images for human annotation.
* Repeat Iterations 4a and 4b until all data is annotated.

If `RUN_FULL_AL_DEMO==False`, only Iterations 1 and 2 will happen.
###Code
from datetime import datetime
import glob
import shutil

HUMAN_PRICE = 0.116
AUTO_PRICE = 0.08

try:
    os.makedirs('ic_output_data/', exist_ok=False)
except FileExistsError:
    shutil.rmtree('ic_output_data/')

S3_OUTPUT = boto3.client('sagemaker').describe_labeling_job(LabelingJobName=job_name)[
    'OutputConfig']['S3OutputPath'] + job_name

# Download human annotation data.
!aws s3 cp {S3_OUTPUT + '/annotations/worker-response'} ic_output_data/worker-response --recursive --quiet
worker_times = []
worker_ids = []

# Collect the times and worker ids of all the annotation events to-date.
for annot_fname in glob.glob('ic_output_data/worker-response/**', recursive=True):
    if annot_fname.endswith('json'):
        with open(annot_fname, 'r') as f:
            annot_data = json.load(f)
        for answer in annot_data['answers']:
            annot_time = datetime.strptime(
                answer['submissionTime'], '%Y-%m-%dT%H:%M:%SZ')
            annot_id = answer['workerId']
            worker_times.append(annot_time)
            worker_ids.append(annot_id)

sort_ids = np.argsort(worker_times)
worker_times = np.array(worker_times)[sort_ids]
worker_ids = np.array(worker_ids)[sort_ids]
cumulative_n_annots = np.cumsum([1 for _ in worker_times])

# Count the number of annotations per unique worker id.
annots_per_worker = np.zeros(worker_ids.size)
ids_store = set()
for worker_id_id, worker_id in enumerate(worker_ids):
    ids_store.add(worker_id)
    annots_per_worker[worker_id_id] = float(
        cumulative_n_annots[worker_id_id]) / len(ids_store)

# Count the number of human annotations in each class each iteration.
!aws s3 cp {S3_OUTPUT + '/annotations/consolidated-annotation/consolidation-response'} ic_output_data/consolidation-response --recursive --quiet
consolidated_classes = defaultdict(list)
consolidation_times = {}
consolidated_cost_times = []

for consolidated_fname in glob.glob('ic_output_data/consolidation-response/**', recursive=True):
    if consolidated_fname.endswith('json'):
        iter_id = int(consolidated_fname.split('/')[-2][-1])
        # Store the time of the most recent consolidation event as iteration time.
        iter_time = datetime.strptime(consolidated_fname.split('/')[-1], '%Y-%m-%d_%H:%M:%S.json')
        if iter_id in consolidation_times:
            consolidation_times[iter_id] = max(consolidation_times[iter_id], iter_time)
        else:
            consolidation_times[iter_id] = iter_time
        consolidated_cost_times.append(iter_time)
        with open(consolidated_fname, 'r') as f:
            consolidated_data = json.load(f)
        for consolidation in consolidated_data:
            consolidation_class = consolidation['consolidatedAnnotation']['content'][
                'category-metadata']['class-name']
            consolidated_classes[iter_id].append(consolidation_class)
total_human_labels = sum([len(annots) for annots in consolidated_classes.values()])

# Count the number of machine annotations in each class each iteration.
!aws s3 cp {S3_OUTPUT + '/activelearning'} ic_output_data/activelearning --recursive --quiet
auto_classes = defaultdict(list)
auto_times = {}
auto_cost_times = []

for auto_fname in glob.glob('ic_output_data/activelearning/**', recursive=True):
    if auto_fname.endswith('auto_annotator_output.txt'):
        iter_id = int(auto_fname.split('/')[-3])
        with open(auto_fname, 'r') as f:
            annots = [' '.join(l.split()[1:]) for l in f.readlines()]
        for annot in annots:
            annot = json.loads(annot)
            time_str = annot['category-metadata']['creation-date']
            auto_time = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f')
            auto_class = annot['category-metadata']['class-name']
            auto_classes[iter_id].append(auto_class)
            if iter_id in auto_times:
                auto_times[iter_id] = max(auto_times[iter_id], auto_time)
            else:
                auto_times[iter_id] = auto_time
            auto_cost_times.append(auto_time)

total_auto_labels = sum([len(annots) for annots in auto_classes.values()])
n_iters = max(len(auto_times), len(consolidation_times))

def get_training_job_data(training_job_name):
    logclient = boto3.client('logs')
    log_group_name = '/aws/sagemaker/TrainingJobs'
    log_stream_name = logclient.describe_log_streams(logGroupName=log_group_name,
        logStreamNamePrefix=training_job_name)['logStreams'][0]['logStreamName']

    train_log = logclient.get_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        startFromHead=True
    )
    events = train_log['events']
    next_token = train_log['nextForwardToken']
    while True:
        train_log = logclient.get_log_events(
            logGroupName=log_group_name,
            logStreamName=log_stream_name,
            startFromHead=True,
            nextToken=next_token
        )
        if train_log['nextForwardToken'] == next_token:
            break
        events = events + train_log['events']

    errors = []
    for event in events:
        msg = event['message']
        if 'Final configuration' in msg:
            num_samples = int(msg.split('num_training_samples\': u\'')[1].split('\'')[0])
        elif 'Validation-accuracy' in msg:
            errors.append(float(msg.split('Validation-accuracy=')[1]))

    errors = 1 - np.array(errors)
    return num_samples, errors

training_data = !aws s3 ls {S3_OUTPUT + '/training/'} --recursive
training_sizes = []
training_errors = []
training_iters = []
for line in training_data:
    if line.split('/')[-1] == 'model.tar.gz':
        training_job_name = line.split('/')[-3]
        n_samples, errors = get_training_job_data(training_job_name)
        training_sizes.append(n_samples)
        training_errors.append(errors)
        training_iters.append(int(line.split('/')[-5]))

plt.figure(facecolor='white', figsize=(14, 4), dpi=100)
ax = plt.subplot(131)
plt.title('Label counts ({} human, {} auto)'.format(
    total_human_labels, total_auto_labels))
cmap = plt.get_cmap('coolwarm')

for iter_id in consolidated_classes.keys():
    bottom = 0
    class_counter = Counter(consolidated_classes[iter_id])
    for cname_id, cname in enumerate(CLASS_LIST):
        if iter_id == 1:
            plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom,
                    label=cname, color=cmap(cname_id / float(len(CLASS_LIST)-1)))
        else:
            plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom,
                    color=cmap(cname_id / float(len(CLASS_LIST)-1)))
        bottom += class_counter[cname]

for iter_id in auto_classes.keys():
    bottom = 0
    class_counter = Counter(auto_classes[iter_id])
    for cname_id, cname in enumerate(CLASS_LIST):
        plt.bar(iter_id + .4, class_counter[cname], width=.4, bottom=bottom,
                color=cmap(cname_id / float(len(CLASS_LIST)-1)))
        bottom += class_counter[cname]

tick_labels_human = ['Iter {}, human'.format(iter_id + 1) for iter_id in range(n_iters)]
tick_labels_auto = ['Iter {}, auto'.format(iter_id + 1) for iter_id in range(n_iters)]
tick_locations_human = np.arange(n_iters) + 1
tick_locations_auto = tick_locations_human + .4
tick_labels = np.concatenate([[tick_labels_human[idx], tick_labels_auto[idx]] for idx in range(n_iters)])
tick_locations = np.concatenate([[tick_locations_human[idx], tick_locations_auto[idx]] for idx in range(n_iters)])
plt.xticks(tick_locations, tick_labels, rotation=90)
plt.legend()
plt.ylabel('Count')

ax = plt.subplot(132)
total_human = 0
total_auto = 0
for iter_id in range(1, n_iters + 1):
    cost_human = len(consolidated_classes[iter_id]) * HUMAN_PRICE
    cost_auto = len(auto_classes[iter_id]) * AUTO_PRICE
    total_human += cost_human
    total_auto += cost_auto
    plt.bar(iter_id, cost_human, width=.8, color='gray', hatch='/', edgecolor='k',
            label='human' if iter_id==1 else None)
    plt.bar(iter_id, cost_auto, bottom=cost_human, width=.8, color='gray', edgecolor='k',
            label='auto' if iter_id==1 else None)
plt.title('Annotation costs (\${:.2f} human, \${:.2f} auto)'.format(
    total_human, total_auto))
plt.xlabel('Iter')
plt.ylabel('Cost in dollars')
plt.legend()

if len(training_sizes) > 0:
    plt.subplot(133)
    plt.title('Active learning training curves')
    plt.grid(True)
    cmap = plt.get_cmap('coolwarm')
    n_all = len(training_sizes)
    for iter_id_id, (iter_id, size, errs) in enumerate(zip(training_iters, training_sizes, training_errors)):
        plt.plot(errs, label='Iter {}, auto'.format(iter_id + 1),
                 color=cmap(iter_id_id / max(1, (n_all-1))))
    plt.legend()
    plt.xscale('log')
    plt.xlabel('Training epoch')
    plt.ylabel('Validation error')
###Output
_____no_output_____
###Markdown
Analyze Ground Truth labeling job results
**This section should take about 20min to complete.**

After the job finishes running (**make sure `sagemaker_client.describe_labeling_job` shows the job is complete!**), it is time to analyze the results. The plots in the [Monitor job progress](#Monitor-job-progress) section form part of the analysis. In this section, we will gain additional insights into the results, all contained in the `output manifest`. You can find the location of the output manifest under `AWS Console > SageMaker > Labeling Jobs > [name of your job]`. We will obtain it programmatically in the cell below.

Postprocess the output manifest
Now that the job is complete, we will download the output manifest and postprocess it to form four arrays:
* `img_uris` contains the S3 URIs of all the images that Ground Truth annotated.
* `groundtruth_labels` contains Ground Truth's labels for each image in `img_uris`.
* `confidences` contains the confidence of each label in `groundtruth_labels`.
* `human` is a flag array that contains 1 at indices corresponding to images annotated by human annotators, and 0 at indices corresponding to images annotated by Ground Truth's automated data labeling.
###Code
# Load the output manifest's annotations.
OUTPUT_MANIFEST = 's3://{}/{}/output/{}/manifests/output/output.manifest'.format(BUCKET, EXP_NAME, job_name)
!aws s3 cp {OUTPUT_MANIFEST} 'output.manifest'

with open('output.manifest', 'r') as f:
    output = [json.loads(line.strip()) for line in f.readlines()]

# Create data arrays.
img_uris = [None] * len(output)
confidences = np.zeros(len(output))
groundtruth_labels = [None] * len(output)
human = np.zeros(len(output))

# Find the job name the manifest corresponds to.
keys = list(output[0].keys())
metakey = keys[np.where([('-metadata' in k) for k in keys])[0][0]]
jobname = metakey[:-9]

# Extract the data.
for datum_id, datum in enumerate(output):
    img_uris[datum_id] = datum['source-ref']
    groundtruth_labels[datum_id] = str(datum[metakey]['class-name'])
    confidences[datum_id] = datum[metakey]['confidence']
    human[datum_id] = int(datum[metakey]['human-annotated'] == 'yes')
groundtruth_labels = np.array(groundtruth_labels)
###Output
_____no_output_____
###Markdown
Plot class histograms
Now, let's plot the class histograms. The next cell should produce three subplots:
* The Left subplot shows the number of images annotated as belonging to each visual category. The categories will be sorted from the most to the least numerous. Each bar is divided into a 'human' and 'machine' part which shows how many images were annotated as a given category by human annotators and by the automated data labeling mechanism.
* The Middle subplot is the same as Left, except the y-axis is in log-scale. This helps visualize unbalanced datasets where some categories contain orders of magnitude more images than others.
* The Right subplot shows the average confidence of images in each category, separately for human and auto-annotated images.
###Code
# Compute the number of annotations in each class.
n_classes = len(set(groundtruth_labels))
sorted_clnames, class_sizes = zip(*Counter(groundtruth_labels).most_common(n_classes))

# Find ids of human-annotated images.
human_sizes = [human[groundtruth_labels == clname].sum() for clname in sorted_clnames]
class_sizes = np.array(class_sizes)
human_sizes = np.array(human_sizes)

# Compute the average annotation confidence per class.
human_confidences = np.array([confidences[np.logical_and(groundtruth_labels == clname, human)]
                              for clname in sorted_clnames])
machine_confidences = [confidences[np.logical_and(groundtruth_labels == clname, 1-human)]
                       for clname in sorted_clnames]

# If there are no images annotated as a specific class, set the average class confidence to 0.
for class_id in range(n_classes):
    if human_confidences[class_id].size == 0:
        human_confidences[class_id] = np.array([0])
    if machine_confidences[class_id].size == 0:
        machine_confidences[class_id] = np.array([0])

plt.figure(figsize=(9, 3), facecolor='white', dpi=100)
plt.subplot(1, 3, 1)
plt.title('Annotation histogram')
plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human')
plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes, color='gray', edgecolor='k', label='machine')
plt.xticks(range(n_classes), sorted_clnames, rotation=90)
plt.ylabel('Annotation Count')
plt.legend()

plt.subplot(1, 3, 2)
plt.title('Annotation histogram (logscale)')
plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human')
plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes, color='gray', edgecolor='k', label='machine')
plt.xticks(range(n_classes), sorted_clnames, rotation=90)
plt.yscale('log')

plt.subplot(1, 3, 3)
plt.title('Mean confidences')
plt.bar(np.arange(n_classes), [conf.mean() for conf in human_confidences],
        color='gray', hatch='/', edgecolor='k', width=.4)
plt.bar(np.arange(n_classes) + .4, [conf.mean() for conf in machine_confidences],
        color='gray', edgecolor='k', width=.4)
plt.xticks(range(n_classes), sorted_clnames, rotation=90);
###Output
_____no_output_____
###Markdown
Plot annotated images
In any data science task, it is crucial to plot and inspect the results to check they make sense. In order to do this, we will
1. Download the input images that Ground Truth annotated.
2. Split them by annotated category and whether the annotation was done by a human or the auto-labeling mechanism.
3. Plot images in each category and human/auto-annotated class.

We will download the input images to the `LOCAL_IMG_DIR` you can choose in the next cell. Note that if this directory already contains images with the same filenames as your Ground Truth input images, we will not re-download the images.

If your dataset is large and you do not wish to download and plot **all** the images, simply set `DATASET_SIZE` to a small number. We will pick a random subset of your data for plotting.
###Code
LOCAL_IMG_DIR = '<< choose a local directory name to download the images to >>' # Replace with the name of a local directory to store images.
assert LOCAL_IMG_DIR != '<< choose a local directory name to download the images to >>', 'Please provide a local directory name'
DATASET_SIZE = len(img_uris) # Change this to a reasonable number if your dataset is much larger than 10K images.

subset_ids = np.random.choice(range(len(img_uris)), DATASET_SIZE, replace=False)
img_uris = [img_uris[idx] for idx in subset_ids]
groundtruth_labels = groundtruth_labels[subset_ids]
confidences = confidences[subset_ids]
human = human[subset_ids]

img_fnames = [None] * len(img_uris)
for img_uri_id, img_uri in enumerate(img_uris):
    target_fname = os.path.join(
        LOCAL_IMG_DIR, img_uri.split('/')[-1])
    if not os.path.isfile(target_fname):
        !aws s3 cp {img_uri} {target_fname}
    img_fnames[img_uri_id] = target_fname
###Output
_____no_output_____
###Markdown
Plot a small output sample
The following cell will create two figures. The first plots `N_SHOW` images in each category, as annotated by humans. The second plots `N_SHOW` images in each category, as annotated by the auto-labeling mechanism. If any category contains fewer than `N_SHOW` images, that row will not be displayed. By default, `N_SHOW = 10`, but feel free to change this to any other small number.
###Code
N_SHOW = 10

plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=60)
for class_name_id, class_name in enumerate(sorted_clnames):
    class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0]
    try:
        show_ids = class_ids[:N_SHOW]
    except ValueError:
        print('Not enough human annotations to show for class: {}'.format(class_name))
        continue
    for show_id_id, show_id in enumerate(show_ids):
        plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id))
        plt.title('Human Label: ' + class_name)
        plt.imshow(imageio.imread(img_fnames[show_id]))
        plt.axis('off')
plt.tight_layout()

plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=100)
for class_name_id, class_name in enumerate(sorted_clnames):
    class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0]
    try:
        show_ids = np.random.choice(class_ids, N_SHOW, replace=False)
    except ValueError:
        print('Not enough machine annotations to show for class: {}'.format(class_name))
        continue
    for show_id_id, show_id in enumerate(show_ids):
        plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id))
        plt.title('Auto Label: ' + class_name)
        plt.imshow(imageio.imread(img_fnames[show_id]))
        plt.axis('off')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Plot the full results
Finally, we plot all the results to a large pdf file. The pdf (called `ground_truth.pdf`) will display 100 images per page. Each page will contain images belonging to the same category, and annotated either by human annotators or by the auto-labeling mechanism.
You can use this pdf to investigate exactly which images were annotated as which class at a glance. This might take a while, and the resulting pdf might be very large. For a dataset of 1K images, the process takes only a minute and creates a 10MB-large pdf. You can set `N_SHOW_PER_CLASS` to a small number if you want to limit the max number of examples shown in each category.

###Code
N_SHOW_PER_CLASS = np.inf
plt.figure(figsize=(10, 10), facecolor='white', dpi=100)
with PdfPages('ground_truth.pdf') as pdf:
    for class_name in sorted_clnames:
        # Plot images annotated as class_name by humans.
        plt.clf()
        plt.text(0.1, 0.5, s='Images annotated as {} by humans'.format(class_name), fontsize=20)
        plt.axis('off')
        class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0]
        for img_id_id, img_id in enumerate(class_ids):
            if img_id_id == N_SHOW_PER_CLASS:
                break
            if img_id_id % 100 == 0:
                pdf.savefig()
                plt.clf()
                print('Plotting human annotations of {}, {}/{}...'.format(
                    class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS)))
            plt.subplot(10, 10, (img_id_id % 100) + 1)
            plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto')
            plt.axis('off')
        pdf.savefig()

        # Plot images annotated as class_name by machines.
        plt.clf()
        plt.text(0.1, 0.5, s='Images annotated as {} by machines'.format(class_name), fontsize=20)
        plt.axis('off')
        class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0]
        for img_id_id, img_id in enumerate(class_ids):
            if img_id_id == N_SHOW_PER_CLASS:
                break
            if img_id_id % 100 == 0:
                pdf.savefig()
                plt.clf()
                print('Plotting machine annotations of {}, {}/{}...'.format(
                    class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS)))
            plt.subplot(10, 10, (img_id_id % 100) + 1)
            plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto')
            plt.axis('off')
        pdf.savefig()
    plt.clf()

###Output
_____no_output_____

###Markdown
Compare Ground Truth results to known, pre-labeled data**This section should take about 5 minutes to complete.**Sometimes (for example, when benchmarking the system) we have an alternative set of data labels available. For example, the Open Images data has already been carefully annotated by a professional annotation workforce.This allows us to perform additional analysis that compares Ground Truth labels to the known, pre-labeled data.When doing so, it is important to bear in mind that any image labels created by humans will most likely not be 100% accurate. For this reason, it is better to think of labeling accuracy as "adherence to a particular standard / set of labels" rather than "how good (in absolute terms) are the Ground Truth labels." Compute accuracyIn this cell, we will calculate the accuracy of Ground Truth labels with respect to the standard labels. In [Prepare the data](Prepare-the-data), we created the `ims` dictionary that specifies which image belongs to each category.We will convert it to an array `standard_labels` such that `standard_labels[i]` contains the label of the `i-th` image, and should ideally correspond to `groundtruth_labels[i]`.This will allow us to plot confusion matrices to assess how well the Ground Truth labels adhere to the standard labels. We plot a confusion matrix for the total dataset, and separate matrices for human annotations and auto-annotations.
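Before running it, here is the lookup in miniature (a small illustrative sketch, not part of the original analysis; it assumes `ims` and `img_uris` from the earlier cells). The next cell simply repeats this lookup over the whole dataset:

###Code
# Find the standard (Open Images) label of the first image from its image id.
img_key = img_uris[0].split('/')[-1].split('.')[0]
print([cname for cname, ids_in_class in ims.items() if img_key in ids_in_class][0])

###Output
_____no_output_____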
###Code
def plot_confusion_matrix(cm, classes, title='Confusion matrix', normalize=False, cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)

    # Use two decimals for normalized (fractional) matrices, plain integers otherwise.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()

# Convert the 'ims' dictionary (which maps class names to images) to a list of image classes.
standard_labels = []
for img_uri in img_uris:
    img_uri = img_uri.split('/')[-1].split('.')[0]
    standard_label = [cname for cname, imgs_in_cname in ims.items() if img_uri in imgs_in_cname][0]
    standard_labels.append(standard_label)
standard_labels = np.array(standard_labels)

# Plot a confusion matrix for the full dataset.
plt.figure(facecolor='white', figsize=(12, 4), dpi=100)
plt.subplot(131)
mean_err = 100 - np.mean(standard_labels == groundtruth_labels) * 100
cnf_matrix = confusion_matrix(standard_labels, groundtruth_labels)
np.set_printoptions(precision=2)
plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()),
                      title='Full annotation set error {:.2f}%'.format(
                          mean_err), normalize=False)

# Plot a confusion matrix for human-annotated Ground Truth labels.
plt.subplot(132)
mean_err = 100 - np.mean(standard_labels[human==1.] == groundtruth_labels[human==1.]) * 100
cnf_matrix = confusion_matrix(standard_labels[human==1.], groundtruth_labels[human==1.])
np.set_printoptions(precision=2)
plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()),
                      title='Human annotation set (size {}) error {:.2f}%'.format(
                          int(sum(human)), mean_err), normalize=False)

# Plot a confusion matrix for auto-annotated Ground Truth labels.
if sum(human==0.) > 0:
    plt.subplot(133)
    mean_err = 100 - np.mean(standard_labels[human==0.] == groundtruth_labels[human==0.]) * 100
    cnf_matrix = confusion_matrix(standard_labels[human==0.], groundtruth_labels[human==0.])
    np.set_printoptions(precision=2)
    plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()),
                          title='Auto-annotation set (size {}) error {:.2f}%'.format(
                              int(len(human) - sum(human)), mean_err), normalize=False)

###Output
_____no_output_____

###Markdown
Plot correct and incorrect annotationsThis cell repeats the plot from Plot the full results. However, it sorts the predictions into correct and incorrect, and indicates the standard label of all the incorrect predictions.
###Code N_SHOW_PER_CLASS = np.inf plt.figure(figsize=(10, 10), facecolor='white', dpi=100) with PdfPages('ground_truth_benchmark.pdf') as pdf: for class_name in sorted_clnames: human_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] auto_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] for class_ids_id, class_ids in enumerate([human_ids, auto_ids]): plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by {}'.format(class_name, 'humans' if class_ids_id == 0 else 'machines'), fontsize=20) plt.axis('off') good_ids = class_ids[np.where(standard_labels[class_ids] == groundtruth_labels[class_ids])[0]] bad_ids = class_ids[np.where(standard_labels[class_ids] != groundtruth_labels[class_ids])[0]] for img_id_id, img_id in enumerate(np.concatenate([good_ids, bad_ids])): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print('Plotting annotations of {}, {}/{}...'.format( class_name, img_id_id, min(len(class_ids), N_SHOW_PER_CLASS))) ax = plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') if img_id_id < len(good_ids): # Draw a green border around the image. rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='green', fill=False, transform=ax.transAxes) else: # Draw a red border around the image. rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='red', fill=False, transform=ax.transAxes) ax.add_patch(rec) pdf.savefig() plt.clf() ###Output _____no_output_____ ###Markdown Train an image classifier using Ground Truth labelsAt this stage, we have fully labeled our dataset and we can train a machine learning model to classify images based on the categories we previously defined. We'll do so using the **augmented manifest** output of our labeling job - no additional file translation or manipulation required! For a more complete description of the augmented manifest, see our other [example notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/ground_truth_labeling_jobs/object_detection_augmented_manifest_training/object_detection_augmented_manifest_training.ipynb).**NOTE:** Training neural networks to high accuracy often requires a careful choice of hyperparameters. In this case, we hand-picked hyperparameters that work reasonably well for this dataset. The neural net should have accuracy of about **60% if you're using 100 datapoints, and over 95% if you're using 1000 datapoints.**. To train neural networks on novel data, consider using [SageMaker's model tuning / hyperparameter optimization algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html).First, we'll split our augmented manifest into a training set and a validation set using an 80/20 split. ###Code with open('output.manifest', 'r') as f: output = [json.loads(line) for line in f.readlines()] # Shuffle output in place. 
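# (Optional) call np.random.seed(0) first if you want the train/validation split below to be reproducible.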
np.random.shuffle(output)

dataset_size = len(output)
train_test_split_index = round(dataset_size*0.8)

train_data = output[:train_test_split_index]
validation_data = output[train_test_split_index:]

num_training_samples = 0
with open('train.manifest', 'w') as f:
    for line in train_data:
        f.write(json.dumps(line))
        f.write('\n')
        num_training_samples += 1

with open('validation.manifest', 'w') as f:
    for line in validation_data:
        f.write(json.dumps(line))
        f.write('\n')

###Output
_____no_output_____

###Markdown
Next, we'll upload these manifest files to the previously defined S3 bucket so that they can be used in the training job.

###Code
s3.upload_file('train.manifest', BUCKET, EXP_NAME + '/train.manifest')
s3.upload_file('validation.manifest', BUCKET, EXP_NAME + '/validation.manifest')

# Create unique job name
nn_job_name_prefix = 'groundtruth-augmented-manifest-demo'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
nn_job_name = nn_job_name_prefix + timestamp

training_image = sagemaker.amazon.amazon_estimator.get_image_uri(boto3.Session().region_name, 'image-classification', repo_version='latest')

training_params = \
{
    "AlgorithmSpecification": {
        "TrainingImage": training_image,
        "TrainingInputMode": "Pipe"
    },
    "RoleArn": role,
    "OutputDataConfig": {
        "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME)
    },
    "ResourceConfig": {
        "InstanceCount": 1,
        "InstanceType": "ml.p3.2xlarge",
        "VolumeSizeInGB": 50
    },
    "TrainingJobName": nn_job_name,
    "HyperParameters": {
        "epochs": "30",
        "image_shape": "3,224,224",
        "learning_rate": "0.01",
        "lr_scheduler_step": "10,20",
        "mini_batch_size": "32",
        "num_classes": str(num_classes),
        "num_layers": "18",
        "num_training_samples": str(num_training_samples),
        "resize": "224",
        "use_pretrained_model": "1"
    },
    "StoppingCondition": {
        "MaxRuntimeInSeconds": 86400
    },
    "InputDataConfig": [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "AugmentedManifestFile",
                    "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'train.manifest'),
                    "S3DataDistributionType": "FullyReplicated",
                    "AttributeNames": ["source-ref", "category"]
                }
            },
            "ContentType": "application/x-recordio",
            "RecordWrapperType": "RecordIO",
            "CompressionType": "None"
        },
        {
            "ChannelName": "validation",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "AugmentedManifestFile",
                    "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'validation.manifest'),
                    "S3DataDistributionType": "FullyReplicated",
                    "AttributeNames": ["source-ref", "category"]
                }
            },
            "ContentType": "application/x-recordio",
            "RecordWrapperType": "RecordIO",
            "CompressionType": "None"
        }
    ]
}

###Output
_____no_output_____

###Markdown
Now we create the SageMaker training job.

###Code
sagemaker_client = boto3.client('sagemaker')
sagemaker_client.create_training_job(**training_params)

# Confirm that the training job has started
print('Training job started')
while(True):
    response = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name)
    status = response['TrainingJobStatus']
    if status == 'Completed':
        print("Training job ended with status: " + status)
        break
    if status == 'Failed':
        message = response['FailureReason']
        print('Training failed with the following error: {}'.format(message))
        raise Exception('Training job failed')
    time.sleep(30)

###Output
_____no_output_____

###Markdown
Deploy the Model Now that we've fully labeled our dataset and have a trained model, we want to use the model to perform inference. Image classification only supports encoded .jpg and .png image formats as inference input for now.
The output is the probability values for all classes encoded in JSON format, or in JSON Lines format for batch transform. This section involves several steps: Create Model - Create model for the training output Batch Transform - Create a transform job to perform batch inference. Host the model for realtime inference - Create an inference endpoint and perform realtime inference. Create Model

###Code
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
model_name = "groundtruth-demo-ic-model" + timestamp
print(model_name)
info = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)

primary_container = {
    'Image': training_image,
    'ModelDataUrl': model_data,
}

create_model_response = sagemaker_client.create_model(
    ModelName = model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = primary_container)

print(create_model_response['ModelArn'])

###Output
_____no_output_____

###Markdown
Batch TransformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction. Download Test DataFirst, let's download a few test images that have been held out from the training and validation data.

###Code
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
batch_job_name = "image-classification-model" + timestamp
batch_input = 's3://{}/{}/test/'.format(BUCKET, EXP_NAME)
batch_output = 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name)

# Copy two images from each class, unseen by the neural net, to a local bucket.
test_images = []
for class_id in ['/m/04szw', '/m/02xwb', '/m/0cd4d', '/m/07dm6', '/m/0152hh']:
    test_images.extend([label[0] + '.jpg' for label in all_labels
                        if (label[2] == class_id and label[3] == '1')][-2:])

!aws s3 rm $batch_input --recursive
for test_img in test_images:
    !aws s3 cp s3://open-images-dataset/test/{test_img} {batch_input}

request = \
{
    "TransformJobName": batch_job_name,
    "ModelName": model_name,
    "MaxConcurrentTransforms": 16,
    "MaxPayloadInMB": 6,
    "BatchStrategy": "SingleRecord",
    "TransformOutput": {
        "S3OutputPath": 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name)
    },
    "TransformInput": {
        "DataSource": {
            "S3DataSource": {
                "S3DataType": "S3Prefix",
                "S3Uri": batch_input
            }
        },
        "ContentType": "application/x-image",
        "SplitType": "None",
        "CompressionType": "None"
    },
    "TransformResources": {
        "InstanceType": "ml.p2.xlarge",
        "InstanceCount": 1
    }
}

print('Transform job name: {}'.format(batch_job_name))

sagemaker_client = boto3.client('sagemaker')
sagemaker_client.create_transform_job(**request)

print("Created Transform job with name: ", batch_job_name)

while(True):
    response = sagemaker_client.describe_transform_job(TransformJobName=batch_job_name)
    status = response['TransformJobStatus']
    if status == 'Completed':
        print("Transform job ended with status: " + status)
        break
    if status == 'Failed':
        message = response['FailureReason']
        print('Transform failed with the following error: {}'.format(message))
        raise Exception('Transform job failed')
    time.sleep(30)

###Output
_____no_output_____

###Markdown
After the job completes, let's inspect the prediction results.

###Code
def get_label(out_fname):
    !aws s3 cp {out_fname} .
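    # the batch-transform output for this image is now a local JSON file; parse it below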
    print(out_fname)
    with open(out_fname.split('/')[-1]) as f:
        data = json.load(f)
        index = np.argmax(data['prediction'])
        probability = data['prediction'][index]
    print("Result: label - " + CLASS_LIST[index] + ", probability - " + str(probability))
    input_fname = out_fname.split('/')[-1][:-4]
    return CLASS_LIST[index], probability, input_fname

# Show prediction results.
!rm test_inputs/*
plt.figure(facecolor='white', figsize=(7, 15), dpi=100)
outputs = !aws s3 ls {batch_output}
outputs = [get_label(batch_output + prefix.split()[-1]) for prefix in outputs]
outputs.sort(key=lambda pred: pred[1], reverse=True)

for fname_id, (pred_cname, pred_conf, pred_fname) in enumerate(outputs):
    !aws s3 cp {batch_input}{pred_fname} test_inputs/{pred_fname}
    plt.subplot(5, 2, fname_id+1)
    img = imageio.imread('test_inputs/{}'.format(pred_fname))
    plt.imshow(img)
    plt.axis('off')
    plt.title('{}\nconfidence={:.2f}'.format(pred_cname, pred_conf))

if RUN_FULL_AL_DEMO:
    warning = ''
else:
    warning = ('\nNOTE: In this small demo we only used 80 images to train the neural network.\n'
               'The predictions will be far from perfect! Set RUN_FULL_AL_DEMO=True to see properly trained results.')
plt.suptitle('Predictions sorted by confidence.{}'.format(warning))

###Output
_____no_output_____

###Markdown
Realtime InferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps: Create endpoint configuration - Create a configuration defining an endpoint. Create endpoint - Use the configuration to create an inference endpoint. Perform inference - Perform inference on some input data using the endpoint. Clean up - Delete the endpoint and model Create Endpoint Configuration

###Code
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name + '-epc' + timestamp
endpoint_config_response = sagemaker_client.create_endpoint_config(
    EndpointConfigName = endpoint_config_name,
    ProductionVariants=[{
        'InstanceType':'ml.m4.xlarge',
        'InitialInstanceCount':1,
        'ModelName':model_name,
        'VariantName':'AllTraffic'}])

print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn:  {}'.format(endpoint_config_response['EndpointConfigArn']))

###Output
_____no_output_____

###Markdown
Create EndpointLastly, we create the endpoint that serves up the model, by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes about 10 minutes to complete.
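While you wait, you can also poll the endpoint status from the command line. This is just a convenience sketch (it assumes the AWS CLI is configured for the same region, and that `endpoint_name` has been defined by the next cell):

###Code
# Hypothetical one-off status check, equivalent to the waiter used below.
!aws sagemaker describe-endpoint --endpoint-name {endpoint_name} --query EndpointStatus

###Output
_____no_output_____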
###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) endpoint_name = job_name + '-ep' + timestamp print('Endpoint name: {}'.format(endpoint_name)) endpoint_params = { 'EndpointName': endpoint_name, 'EndpointConfigName': endpoint_config_name, } endpoint_response = sagemaker_client.create_endpoint(**endpoint_params) print('EndpointArn = {}'.format(endpoint_response['EndpointArn'])) # get the status of the endpoint response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = response['EndpointStatus'] print('EndpointStatus = {}'.format(status)) # wait until the status has changed sagemaker_client.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name) # print the status of the endpoint endpoint_response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = endpoint_response['EndpointStatus'] print('Endpoint creation ended with EndpointStatus = {}'.format(status)) if status != 'InService': raise Exception('Endpoint creation failed.') with open('test_inputs/{}'.format(test_images[0]), 'rb') as f: payload = f.read() payload = bytearray(payload) client = boto3.client('sagemaker-runtime') response = client.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/x-image', Body=payload) # `response` comes in a json format, let's unpack it. result = json.loads(response['Body'].read()) # The result outputs the probabilities for all classes. # Find the class with maximum probability and print the class name. print('Model prediction is: {}'.format(CLASS_LIST[np.argmax(result)])) ###Output _____no_output_____ ###Markdown Finally, let's clean up and delete this endpoint. ###Code sagemaker_client.delete_endpoint(EndpointName=endpoint_name) ###Output _____no_output_____ ###Markdown From Unlabeled Data to a Deployed Machine Learning Model: A SageMaker Ground Truth Demonstration for Image Classification1. [Introduction](Introduction)2. [Run a Ground Truth labeling job (time: about 3h)](Run-a-Ground-Truth-labeling-job) 1. [Prepare the data](Prepare-the-data) 2. [Specify the categories](Specify-the-categories) 3. [Create the instruction template](Create-the-instruction-template) 4. [Create a private team to test your task [OPTIONAL]](Create-a-private-team-to-test-your-task-[OPTIONAL]) 5. [Define pre-built lambda functions for use in the labeling job](Define-pre-built-lambda-functions-for-use-in-the-labeling-job) 6. [Submit the Ground Truth job request](Submit-the-Ground-Truth-job-request) 1. [Verify your task using a private team [OPTIONAL]](Verify-your-task-using-a-private-team-[OPTIONAL]) 7. [Monitor job progress](Monitor-job-progress)3. [Analyze Ground Truth labeling job results (time: about 20min)](Analyze-Ground-Truth-labeling-job-results) 1. [Postprocess the output manifest](Postprocess-the-output-manifest) 2. [Plot class histograms](Plot-class-histograms) 3. [Plot annotated images](Plot-annotated-images) 1. [Plot a small output sample](Plot-a-small-output-sample) 2. [Plot the full results](Plot-the-full-results)4. [Compare Ground Truth results to standard labels (time: about 5min)](Compare-Ground-Truth-results-to-standard-labels) 1. [Compute accuracy](Compute-accuracy) 2. [Plot correct and incorrect annotations](Plot-correct-and-incorrect-annotations)5. [Train an image classifier using Ground Truth labels (time: about 15min)](Train-an-image-classifier-using-Ground-Truth-labels)6. [Deploy the Model (time: about 20min)](Deploy-the-Model) 1. [Create Model](Create-Model) 2. [Batch Transform](Batch-Transform) 3. 
[Realtime Inference](Realtime-Inference) 1. [Create Endpoint Configuration](Create-Endpoint-Configuration) 2. [Create Endpoint](Create-Endpoint) 3. [Perform Inference](Perform-Inference)7. [Review](Review) IntroductionThis sample notebook takes you through an end-to-end workflow to demonstrate the functionality of SageMaker Ground Truth. We'll start with an unlabeled image data set, acquire labels for all the images using SageMaker Ground Truth, analyze the results of the labeling job, train an image classifier, host the resulting model, and, finally, use it to make predictions. Before you begin, we highly recommend you start a Ground Truth labeling job through the AWS Console first to familiarize yourself with the workflow. The AWS Console offers less flexibility than the API, but is simple to use. Cost and runtimeYou can run this demo in two modes:1. Set `RUN_FULL_AL_DEMO = True` in the next cell to label 1000 images. This should cost about \$100 given current [Ground Truth pricing scheme](https://aws.amazon.com/sagemaker/groundtruth/pricing/). In order to reduce the cost, we will use Ground Truth's auto-labeling feature. Auto-labeling uses computer vision to learn from human responses and automatically create labels for the easiest images at a cheap price. The total end-to-end runtime should be about 4h.1. Set `RUN_FULL_AL_DEMO = False` in the next cell to label only 100 images. This should cost about \$15. **Since Ground Truth's auto-labeling feature only kicks in for datasets of 1000 images or more, this cheaper version of the demo will not use it. Some of the analysis plots might look awkward, but you should still be able to see good results on the human-annotated 100 images.** PrerequisitesTo run this notebook, you can simply execute each cell one-by-one. To understand what's happening, you'll need:* An S3 bucket you can write to -- please provide its name in the following cell. The bucket must be in the same region as this SageMaker Notebook instance. You can also change the `EXP_NAME` to any valid S3 prefix. All the files related to this experiment will be stored in that prefix of your bucket.* The S3 bucket that you use for this demo must have a CORS policy attached. To learn more about this requirement, and how to attach a CORS policy to an S3 bucket, see [CORS Permission Requirement](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-cors-update.html).* Familiarity with Python and [numpy](http://www.numpy.org/).* Basic familiarity with [AWS S3](https://docs.aws.amazon.com/s3/index.html),* Basic understanding of [AWS Sagemaker](https://aws.amazon.com/sagemaker/),* Basic familiarity with [AWS Command Line Interface (CLI)](https://aws.amazon.com/cli/) -- set it up with credentials to access the AWS account you're running this notebook from. This should work out-of-the-box on SageMaker Jupyter Notebook instances.This notebook is only tested on a SageMaker notebook instance. The runtimes given are approximate, we used an `ml.m4.xlarge` instance in our tests. However, you can likely run it on a local instance by first executing the cell below on SageMaker, and then copying the `role` string to your local copy of the notebook.NOTE: This notebook will create/remove subdirectories in its working directory. We recommend to place this notebook in its own directory before running it. 
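Regarding the CORS prerequisite above: the exact rules to attach are described in the linked documentation. Purely as an illustrative sketch (run it after the setup cell below defines `BUCKET`; the rule contents here are an assumption, so defer to the official docs), attaching a policy with `boto3` could look like:

###Code
# Hypothetical example of attaching a CORS configuration to the demo bucket.
cors_rules = {"CORSRules": [{"AllowedMethods": ["GET"], "AllowedOrigins": ["*"], "ExposeHeaders": []}]}
boto3.client("s3").put_bucket_cors(Bucket=BUCKET, CORSConfiguration=cors_rules)

###Output
_____no_output_____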
###Code
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os
from collections import namedtuple
from collections import defaultdict
from collections import Counter
import itertools
import json
import random
import time
import imageio
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.metrics import confusion_matrix
import boto3
import sagemaker
from urllib.parse import urlparse

BUCKET = "<< YOUR S3 BUCKET NAME >>"
assert BUCKET != "<< YOUR S3 BUCKET NAME >>", "Please provide a custom S3 bucket name."
EXP_NAME = "ground-truth-ic-demo"  # Any valid S3 prefix.
RUN_FULL_AL_DEMO = True  # See 'Cost and Runtime' in the Markdown cell above!

# Make sure the bucket is in the same region as this notebook.
role = sagemaker.get_execution_role()
region = boto3.session.Session().region_name
s3 = boto3.client("s3")
bucket_region = s3.head_bucket(Bucket=BUCKET)["ResponseMetadata"]["HTTPHeaders"][
    "x-amz-bucket-region"
]
assert (
    bucket_region == region
), "Your S3 bucket {} and this notebook need to be in the same region.".format(BUCKET)

###Output
_____no_output_____

###Markdown
Run a Ground Truth labeling job**This section should take about 3h to complete.**We will first run a labeling job. This involves several steps: collecting the images we want labeled, specifying the possible label categories, creating instructions, and writing a labeling job specification. In addition, we highly recommend running a (free) mock job using a private workforce before you submit any job to the public workforce. This notebook will explain how to do that as an optional step. Without using a private workforce, this section until completion of your labeling job should take about 3h. However, this may vary depending on the availability of the public annotation workforce. Prepare the dataWe will first download images and labels of a subset of the [Google Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html). These labels were [carefully verified](https://storage.googleapis.com/openimages/web/factsfigures.html). Later, we will compare Ground Truth annotations to these labels. Our dataset will include images in the following categories:* Musical Instrument (500 images)* Fruit (370 images)* Cheetah (50 images)* Tiger (40 images)* Snowman (40 images)If you chose `RUN_FULL_AL_DEMO = False`, then we will choose a subset of 100 images in this dataset. This is a diverse dataset of interesting images, and should be fun for the human annotators to work with. You are free to ask the annotators to annotate any images you wish (as long as the images do not contain adult content; in which case, you must adjust the labeling job request this job produces, please check the Ground Truth documentation).We will copy these images to our local `BUCKET`, and will create the corresponding *input manifest*. The input manifest is a formatted list of the S3 locations of the images we want Ground Truth to annotate. We will upload this manifest to our S3 `BUCKET`. Disclosure regarding the Open Images Dataset V4:Open Images Dataset V4 is created by Google Inc. We have not modified the images or the accompanying annotations. You can obtain the images and the annotations [here](https://storage.googleapis.com/openimages/web/download.html). The annotations are licensed by Google Inc. under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license.
The images are listed as having a [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/) license. The following paper describes Open Images V4 in depth: from the data collection and annotation to detailed statistics about the data and evaluation of models trained on it.A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, T. Duerig, and V. Ferrari.*The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale.* arXiv:1811.00982, 2018. ([link to PDF](https://arxiv.org/abs/1811.00982)) ###Code # Download and process the Open Images annotations. !wget https://storage.googleapis.com/openimages/2018_04/test/test-annotations-human-imagelabels-boxable.csv -O openimgs-annotations.csv with open("openimgs-annotations.csv", "r") as f: all_labels = [line.strip().split(",") for line in f.readlines()] # Extract image ids in each of our desired classes. ims = {} ims["Musical Instrument"] = [ label[0] for label in all_labels if (label[2] == "/m/04szw" and label[3] == "1") ][:500] ims["Fruit"] = [label[0] for label in all_labels if (label[2] == "/m/02xwb" and label[3] == "1")][ :371 ] ims["Fruit"].remove( "02a54f6864478101" ) # This image contains personal information, let's remove it from our dataset. ims["Cheetah"] = [label[0] for label in all_labels if (label[2] == "/m/0cd4d" and label[3] == "1")][ :50 ] ims["Tiger"] = [label[0] for label in all_labels if (label[2] == "/m/07dm6" and label[3] == "1")][ :40 ] ims["Snowman"] = [ label[0] for label in all_labels if (label[2] == "/m/0152hh" and label[3] == "1") ][:40] num_classes = len(ims) # If running the short version of the demo, reduce each class count 10 times. for key in ims.keys(): if RUN_FULL_AL_DEMO is False: ims[key] = set(ims[key][: int(len(ims[key]) / 10)]) else: ims[key] = set(ims[key]) # Copy the images to our local bucket. s3 = boto3.client("s3") for img_id, img in enumerate(itertools.chain.from_iterable(ims.values())): if (img_id + 1) % 10 == 0: print("Copying image {} / {}".format((img_id + 1), 1000)) copy_source = {"Bucket": "open-images-dataset", "Key": "test/{}.jpg".format(img)} s3.copy(copy_source, BUCKET, "{}/images/{}.jpg".format(EXP_NAME, img)) # Create and upload the input manifest. manifest_name = "input.manifest" with open(manifest_name, "w") as f: for img in itertools.chain.from_iterable(ims.values()): img_path = "s3://{}/{}/images/{}.jpg".format(BUCKET, EXP_NAME, img) f.write('{"source-ref": "' + img_path + '"}\n') s3.upload_file(manifest_name, BUCKET, EXP_NAME + "/" + manifest_name) ###Output _____no_output_____ ###Markdown After running the cell above, you should be able to go to `s3://BUCKET/EXP_NAME/images` in [S3 console](https://console.aws.amazon.com/s3/) and see a thousand images. We recommend you inspect the contents of these images! You can download them all to a local machine using the AWS CLI. Specify the categoriesTo run an image classification labeling job, you need to decide on a set of classes the annotators can choose from. In our case, this list is `["Musical Instrument", "Fruit", "Cheetah", "Tiger", "Snowman"]`. In your own job you can choose any list of up to 10 classes. We recommend the classes to be as unambiguous and concrete as possible. The categories should be mutually exclusive, with only one correct label per image. 
In addition, be careful to make the task as *objective* as possible, unless of course your intention is to obtain subjective labels.* Example good category lists: `["Human", "No Human"]`, `["Golden Retriever", "Labrador", "English Bulldog", "German Shepherd"]`, `["Car", "Train", "Ship", "Pedestrian"]`.* Example bad category lists: `["Prominent object", "Not prominent"]` (meaning unclear), `["Beautiful", "Ugly"]` (subjective), `["Dog", "Animal", "Car"]` (not mutually exclusive). To work with Ground Truth, this list needs to be converted to a .json file and uploaded to the S3 `BUCKET`.*Note: The ordering of the labels or classes in the template governs the class indices that you will see downstream in the output manifest (this numbering is zero-indexed). In other words, the class that appears second in the template will correspond to class "1" in the output. At the end of this demonstration, we will train a model and make predictions, and this class ordering is instrumental to interpreting the results.* ###Code CLASS_LIST = list(ims.keys()) print("Label space is {}".format(CLASS_LIST)) json_body = {"labels": [{"label": label} for label in CLASS_LIST]} with open("class_labels.json", "w") as f: json.dump(json_body, f) s3.upload_file("class_labels.json", BUCKET, EXP_NAME + "/class_labels.json") ###Output _____no_output_____ ###Markdown You should now see `class_labels.json` in `s3://BUCKET/EXP_NAME/`. Create the instruction templatePart or all of your images will be annotated by human annotators. It is **essential** to provide good instructions that help the annotators give you the annotations you want. Good instructions are:1. Concise. We recommend limiting verbal/textual instruction to two sentences, and focusing on clear visuals.2. Visual. In the case of image classification, we recommend providing one labeled image in each of the classes as part of the instruction.When used through the AWS Console, Ground Truth helps you create the instructions using a visual wizard. When using the API, you need to create an HTML template for your instructions. Below, we prepare a very simple but effective template and upload it to your S3 bucket.NOTE: If you use any images in your template (as we do), they need to be publicly accessible. You can enable public access to files in your S3 bucket through the S3 Console, as described in [S3 Documentation](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/set-object-permissions.html). Testing your instructionsIt is very easy to create broken instructions. This might cause your labeling job to fail. However, it might also cause your job to complete with meaningless results (when the annotators have no idea what to do, or the instructions are plain wrong). We *highly recommend* that you verify that your task is correct in two ways:1. The following cell creates and uploads a file called `instructions.template` to S3. It also creates `instructions.html` that you can open in a local browser window. Please do so and inspect the resulting web page; it should correspond to what you want your annotators to see (except the actual image to annotate will not be visible).2. Run your job in a private workforce, which is a way to run a mock labeling job. We describe how to do it in [Verify your task using a private team [OPTIONAL]](Verify-your-task-using-a-private-team-[OPTIONAL]). 
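As a convenience for check 1 (a small sketch, not part of the original workflow), once the next cell has written `instructions.html` you can also render it inline in the notebook instead of opening a separate browser window:

###Code
from IPython.display import IFrame

IFrame('instructions.html', width=900, height=400)  # preview the generated instruction page

###Output
_____no_output_____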
###Code
img_examples = [
    "https://s3.amazonaws.com/open-images-dataset/test/{}".format(img_id)
    for img_id in [
        "0634825fc1dcc96b.jpg",
        "0415b6a36f3381ed.jpg",
        "8582cc08068e2d0f.jpg",
        "8728e9fa662a8921.jpg",
        "926d31e8cde9055e.jpg",
    ]
]


def make_template(test_template=False, save_fname="instructions.template"):
    template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<crowd-form>
  <crowd-image-classifier
    name="crowd-image-classifier"
    src="{{{{ task.input.taskObject | grant_read_access }}}}"
    header="Dear Annotator, please tell me what you can see in the image. Thank you!"
    categories="{categories_str}"
  >
    <full-instructions header="Image classification instructions">
    </full-instructions>

    <short-instructions>
      <p>Dear Annotator, please tell me what you can see in the image. Thank you!</p>
      <p><img src="{}" style="max-width:100%">
      <br>Example "Musical Instrument". </p>

      <p><img src="{}" style="max-width:100%">
      <br>Example "Fruit".</p>

      <p><img src="{}" style="max-width:100%">
      <br>Example "Cheetah". </p>

      <p><img src="{}" style="max-width:100%">
      <br>Example "Tiger". </p>

      <p><img src="{}" style="max-width:100%">
      <br>Example "Snowman". </p>
    </short-instructions>
  </crowd-image-classifier>
</crowd-form>""".format(
        *img_examples,
        categories_str=str(CLASS_LIST)
        if test_template
        else "{{ task.input.labels | to_json | escape }}",
    )

    with open(save_fname, "w") as f:
        f.write(template)
    if test_template is False:
        print(template)


make_template(test_template=True, save_fname="instructions.html")
make_template(test_template=False, save_fname="instructions.template")
s3.upload_file("instructions.template", BUCKET, EXP_NAME + "/instructions.template")

###Output
_____no_output_____

###Markdown
You should now be able to find your template in `s3://BUCKET/EXP_NAME/instructions.template`. Create a private team to test your task [OPTIONAL]This step requires you to use the AWS Console. However, we **highly recommend** that you follow it, especially when creating your own task with a custom dataset, label set, and template.We will create a `private workteam` and add only one user (you) to it. Then, we will modify the Ground Truth API job request to send the task to that workforce. You will then be able to see your annotation job exactly as the public annotators would see it. You can even annotate the whole dataset yourself! To create a private team:1. Go to `AWS Console > Amazon SageMaker > Labeling workforces`2. Click "Private" and then "Create private team". 3. Enter the desired name for your private workteam.4. Enter your own email address in the "Email addresses" section. 5. Enter the name of your organization and a contact email to administrate the private workteam.6. Click "Create Private Team".7. The AWS Console should now return to `AWS Console > Amazon SageMaker > Labeling workforces`. Your newly created team should be visible under "Private teams". Next to it you will see an `ARN` which is a long string that looks like `arn:aws:sagemaker:region-name-123456:workteam/private-crowd/team-name`. Copy this ARN into the cell below.8. You should get an email from `[email protected]` that contains your workforce username and password. 9. In `AWS Console > Amazon SageMaker > Labeling workforces`, click on the URL in `Labeling portal sign-in URL`. Use the email/password combination from Step 8 to log in (you will be asked to create a new, non-default password).That's it! This is your private worker's interface. When we create a verification task in [Verify your task using a private team](Verify-your-task-using-a-private-team-[OPTIONAL]) below, your task should appear in this window. You can invite your colleagues to participate in the labeling job by clicking the "Invite new workers" button. The [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-private.html) has more details on the management of private workteams.
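If you would rather script this step too, a private team can in principle be created through the API as well. The sketch below is illustrative only: the Cognito user pool, group, and client IDs are placeholders you would obtain from your own Cognito setup, not values from this demo.

###Code
# Hypothetical API-based alternative to the console steps above.
response = boto3.client("sagemaker").create_workteam(
    WorkteamName="my-private-team",  # placeholder name
    Description="Private team for testing Ground Truth tasks",
    MemberDefinitions=[{
        "CognitoMemberDefinition": {
            "UserPool": "<< your Cognito user pool id >>",
            "UserGroup": "<< your Cognito user group >>",
            "ClientId": "<< your Cognito app client id >>",
        }
    }],
)
print(response["WorkteamArn"])

###Output
_____no_output_____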
###Code
private_workteam_arn = "<< your private workteam ARN here >>"

###Output
_____no_output_____

###Markdown
Define pre-built lambda functions for use in the labeling job Before we submit the request, we need to define the ARNs for four key components of the labeling job: 1) the workteam, 2) the annotation consolidation Lambda function, 3) the pre-labeling task Lambda function, and 4) the machine learning algorithm to perform auto-annotation. These functions are defined by strings with region names and AWS service account numbers, so we will define a mapping below that will enable you to run this notebook in any of our supported regions. See the official documentation for the available ARNs:* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-public.html) for a discussion of the workteam ARN definition. There is only one valid selection if you choose to use the public workforce; if you elect to use a private workteam, you should check the corresponding ARN for the workteam.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_HumanTaskConfig.html#SageMaker-Type-HumanTaskConfig-PreHumanTaskLambdaArn) for available pre-human ARNs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_AnnotationConsolidationConfig.html#SageMaker-Type-AnnotationConsolidationConfig-AnnotationConsolidationLambdaArn) for available annotation consolidation ARNs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_LabelingJobAlgorithmsConfig.html#SageMaker-Type-LabelingJobAlgorithmsConfig-LabelingJobAlgorithmSpecificationArn) for available auto-labeling ARNs for other workflows.

###Code
# Specify ARNs for resources needed to run an image classification job.
ac_arn_map = {
    "us-west-2": "081040173940",
    "us-east-1": "432418664414",
    "us-east-2": "266458841044",
    "eu-west-1": "568282634449",
    "ap-northeast-1": "477331159723",
}

prehuman_arn = "arn:aws:lambda:{}:{}:function:PRE-ImageMultiClass".format(
    region, ac_arn_map[region]
)
acs_arn = "arn:aws:lambda:{}:{}:function:ACS-ImageMultiClass".format(region, ac_arn_map[region])
labeling_algorithm_specification_arn = "arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/image-classification".format(
    region
)
workteam_arn = "arn:aws:sagemaker:{}:394669845002:workteam/public-crowd/default".format(region)

###Output
_____no_output_____

###Markdown
Submit the Ground Truth job requestThe API starts a Ground Truth job by submitting a request. The request contains the full configuration of the annotation task, and allows you to modify the fine details of the job that are fixed to default values when you use the AWS Console. The parameters that make up the request are described in more detail in the [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateLabelingJob.html).After you submit the request, you should be able to see the job in your AWS Console, at `Amazon SageMaker > Labeling Jobs`.You can track the progress of the job there.
This job will take several hours to complete. If your job is larger (say 100,000 images), the speed and cost benefit of auto-labeling should be larger. Verify your task using a private team [OPTIONAL]If you chose to follow the steps in [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]), then you can first verify that your task runs as expected. To do this:1. Set VERIFY_USING_PRIVATE_WORKFORCE to True in the cell below.2. Run the next two cells. This will define the task and submit it to the private workforce (to you).3. After a few minutes, you should be able to see your task in your private workforce interface [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]).Please verify that the task appears as you want it to appear.4. If everything is in order, change `VERIFY_USING_PRIVATE_WORKFORCE` to `False` and rerun the cell below to start the real annotation task!

###Code
VERIFY_USING_PRIVATE_WORKFORCE = False
USE_AUTO_LABELING = True

task_description = "What do you see: a {}?".format(" a ".join(CLASS_LIST))
task_keywords = ["image", "classification", "humans"]
task_title = task_description
job_name = "ground-truth-demo-" + str(int(time.time()))

human_task_config = {
    "AnnotationConsolidationConfig": {
        "AnnotationConsolidationLambdaArn": acs_arn,
    },
    "PreHumanTaskLambdaArn": prehuman_arn,
    "MaxConcurrentTaskCount": 200,  # 200 images will be sent at a time to the workteam.
    "NumberOfHumanWorkersPerDataObject": 3,  # 3 separate workers will be required to label each image.
    "TaskAvailabilityLifetimeInSeconds": 21600,  # Your workteam has 6 hours to complete all pending tasks.
    "TaskDescription": task_description,
    "TaskKeywords": task_keywords,
    "TaskTimeLimitInSeconds": 300,  # Each image must be labeled within 5 minutes.
    "TaskTitle": task_title,
    "UiConfig": {
        "UiTemplateS3Uri": "s3://{}/{}/instructions.template".format(BUCKET, EXP_NAME),
    },
}

if not VERIFY_USING_PRIVATE_WORKFORCE:
    human_task_config["PublicWorkforceTaskPrice"] = {
        "AmountInUsd": {
            "Dollars": 0,
            "Cents": 1,
            "TenthFractionsOfACent": 2,
        }
    }
    human_task_config["WorkteamArn"] = workteam_arn
else:
    human_task_config["WorkteamArn"] = private_workteam_arn

ground_truth_request = {
    "InputConfig": {
        "DataSource": {
            "S3DataSource": {
                "ManifestS3Uri": "s3://{}/{}/{}".format(BUCKET, EXP_NAME, manifest_name),
            }
        },
        "DataAttributes": {
            "ContentClassifiers": ["FreeOfPersonallyIdentifiableInformation", "FreeOfAdultContent"]
        },
    },
    "OutputConfig": {
        "S3OutputPath": "s3://{}/{}/output/".format(BUCKET, EXP_NAME),
    },
    "HumanTaskConfig": human_task_config,
    "LabelingJobName": job_name,
    "RoleArn": role,
    "LabelAttributeName": "category",
    "LabelCategoryConfigS3Uri": "s3://{}/{}/class_labels.json".format(BUCKET, EXP_NAME),
}

if USE_AUTO_LABELING and RUN_FULL_AL_DEMO:
    ground_truth_request["LabelingJobAlgorithmsConfig"] = {
        "LabelingJobAlgorithmSpecificationArn": labeling_algorithm_specification_arn
    }

sagemaker_client = boto3.client("sagemaker")
sagemaker_client.create_labeling_job(**ground_truth_request)

###Output
_____no_output_____

###Markdown
Monitor job progressA Ground Truth job can take a few hours to complete (if your dataset is larger than 10000 images, it can take much longer than that!). One way to monitor the job's progress is through the AWS Console. In this notebook, we will use Ground Truth output files and CloudWatch logs in order to monitor the progress. You can re-evaluate the next two cells repeatedly. The first sends a `describe_labeling_job` request, which should tell you whether the job is complete: if it is, 'LabelingJobStatus' will be 'Completed'.
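If you prefer not to re-run that cell by hand, here is a small optional sketch that simply blocks until the job leaves the 'InProgress' state (assuming `job_name` from the request above):

###Code
# Optional convenience: poll the labeling job status once a minute.
while sagemaker_client.describe_labeling_job(LabelingJobName=job_name)["LabelingJobStatus"] == "InProgress":
    time.sleep(60)

###Output
_____no_output_____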
###Code
sagemaker_client.describe_labeling_job(LabelingJobName=job_name)

###Output
_____no_output_____

###Markdown
The next cell extracts detailed information on how your job is doing to-date. You can re-evaluate it at any time. It should give you:* The number of human and machine-annotated images in each category across the iterations of your labeling job.* The training curves of any neural network training jobs launched by Ground Truth **(only if you are running with `RUN_FULL_AL_DEMO=True`)**.* The cost of the human- and machine-annotated labels.To understand the pricing, study [the pricing doc](https://aws.amazon.com/sagemaker/groundtruth/pricing/) carefully. In our case, each human label costs `$0.08 + 3 * $0.012 = $0.116` and each auto-label costs `$0.08`. There is also a small added cost of using SageMaker instances for neural net training and inference during auto-labeling. However, this should be insignificant compared to the other costs.If `RUN_FULL_AL_DEMO==True`, then the job will proceed in multiple iterations. * Iteration 1: Ground Truth will send out 10 images as 'probes' for human annotation. If these are successfully annotated, proceed to Iteration 2.* Iteration 2: Send out a batch of `MaxConcurrentTaskCount - 10` (in our case, 190) images for human annotation to obtain an active learning training batch.* Iteration 3: Send out another batch of 200 images for human annotation to obtain an active learning validation set.* Iteration 4a: Train a neural net to do auto-labeling. Auto-label as many datapoints as possible. * Iteration 4b: If there is any data leftover, send out at most 200 images for human annotation.* Repeat Iteration 4a and 4b until all data is annotated.If `RUN_FULL_AL_DEMO==False`, only Iterations 1 and 2 will happen.

###Code
from datetime import datetime
import glob
import shutil

HUMAN_PRICE = 0.116
AUTO_PRICE = 0.08

try:
    os.makedirs('ic_output_data/', exist_ok=False)
except FileExistsError:
    shutil.rmtree('ic_output_data/')

S3_OUTPUT = boto3.client('sagemaker').describe_labeling_job(LabelingJobName=job_name)[
    'OutputConfig']['S3OutputPath'] + job_name

# Download human annotation data.
!aws s3 cp {S3_OUTPUT + '/annotations/worker-response'} ic_output_data/worker-response --recursive --quiet

worker_times = []
worker_ids = []

# Collect the times and worker ids of all the annotation events to-date.
for annot_fname in glob.glob('ic_output_data/worker-response/**', recursive=True):
    if annot_fname.endswith('json'):
        with open(annot_fname, 'r') as f:
            annot_data = json.load(f)
        for answer in annot_data['answers']:
            annot_time = datetime.strptime(
                answer['submissionTime'], '%Y-%m-%dT%H:%M:%SZ')
            annot_id = answer['workerId']
            worker_times.append(annot_time)
            worker_ids.append(annot_id)

sort_ids = np.argsort(worker_times)
worker_times = np.array(worker_times)[sort_ids]
worker_ids = np.array(worker_ids)[sort_ids]
cumulative_n_annots = np.cumsum([1 for _ in worker_times])

# Count the number of annotations per unique worker id.
annots_per_worker = np.zeros(worker_ids.size)
ids_store = set()
for worker_id_id, worker_id in enumerate(worker_ids):
    ids_store.add(worker_id)
    annots_per_worker[worker_id_id] = float(
        cumulative_n_annots[worker_id_id]) / len(ids_store)

# Count number of human annotations in each class each iteration.
!aws s3 cp {S3_OUTPUT + '/annotations/consolidated-annotation/consolidation-response'} ic_output_data/consolidation-response --recursive --quiet consolidated_classes = defaultdict(list) consolidation_times = {} consolidated_cost_times = [] for consolidated_fname in glob.glob('ic_output_data/consolidation-response/**', recursive=True): if consolidated_fname.endswith('json'): iter_id = int(consolidated_fname.split('/')[-2][-1]) # Store the time of the most recent consolidation event as iteration time. iter_time = datetime.strptime(consolidated_fname.split('/')[-1], '%Y-%m-%d_%H:%M:%S.json') if iter_id in consolidation_times: consolidation_times[iter_id] = max(consolidation_times[iter_id], iter_time) else: consolidation_times[iter_id] = iter_time consolidated_cost_times.append(iter_time) with open(consolidated_fname, 'r') as f: consolidated_data = json.load(f) for consolidation in consolidated_data: consolidation_class = consolidation['consolidatedAnnotation']['content'][ 'category-metadata']['class-name'] consolidated_classes[iter_id].append(consolidation_class) total_human_labels = sum([len(annots) for annots in consolidated_classes.values()]) # Count the number of machine iterations in each class each iteration. !aws s3 cp {S3_OUTPUT + '/activelearning'} ic_output_data/activelearning --recursive --quiet auto_classes = defaultdict(list) auto_times = {} auto_cost_times = [] for auto_fname in glob.glob('ic_output_data/activelearning/**', recursive=True): if auto_fname.endswith('auto_annotator_output.txt'): iter_id = int(auto_fname.split('/')[-3]) with open(auto_fname, 'r') as f: annots = [' '.join(l.split()[1:]) for l in f.readlines()] for annot in annots: annot = json.loads(annot) time_str = annot['category-metadata']['creation-date'] auto_time = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f') auto_class = annot['category-metadata']['class-name'] auto_classes[iter_id].append(auto_class) if iter_id in auto_times: auto_times[iter_id] = max(auto_times[iter_id], auto_time) else: auto_times[iter_id] = auto_time auto_cost_times.append(auto_time) total_auto_labels = sum([len(annots) for annots in auto_classes.values()]) n_iters = max(len(auto_times), len(consolidation_times)) def get_training_job_data(training_job_name): logclient = boto3.client('logs') log_group_name = '/aws/sagemaker/TrainingJobs' log_stream_name = logclient.describe_log_streams(logGroupName=log_group_name, logStreamNamePrefix=training_job_name)['logStreams'][0]['logStreamName'] train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True ) events = train_log['events'] next_token = train_log['nextForwardToken'] while True: train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True, nextToken=next_token ) if train_log['nextForwardToken'] == next_token: break events = events + train_log['events'] errors = [] for event in events: msg = event['message'] if 'Final configuration' in msg: num_samples = int(msg.split('num_training_samples\': u\'')[1].split('\'')[0]) elif 'Validation-accuracy' in msg: errors.append(float(msg.split('Validation-accuracy=')[1])) errors = 1 - np.array(errors) return num_samples, errors training_data = !aws s3 ls {S3_OUTPUT + '/training/'} --recursive training_sizes = [] training_errors = [] training_iters = [] for line in training_data: if line.split('/')[-1] == 'model.tar.gz': training_job_name = line.split('/')[-3] n_samples, errors = get_training_job_data(training_job_name) 
training_sizes.append(n_samples) training_errors.append(errors) training_iters.append(int(line.split('/')[-5])) plt.figure(facecolor='white', figsize=(14, 4), dpi=100) ax = plt.subplot(131) plt.title('Label counts ({} human, {} auto)'.format( total_human_labels, total_auto_labels)) cmap = plt.get_cmap('coolwarm') for iter_id in consolidated_classes.keys(): bottom = 0 class_counter = Counter(consolidated_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): if iter_id == 1: plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom, label=cname, color=cmap(cname_id / float(len(CLASS_LIST)-1))) else: plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] for iter_id in auto_classes.keys(): bottom = 0 class_counter = Counter(auto_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): plt.bar(iter_id + .4, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] tick_labels_human = ['Iter {}, human'.format(iter_id + 1) for iter_id in range(n_iters)] tick_labels_auto = ['Iter {}, auto'.format(iter_id + 1) for iter_id in range(n_iters)] tick_locations_human = np.arange(n_iters) + 1 tick_locations_auto = tick_locations_human + .4 tick_labels = np.concatenate([[tick_labels_human[idx], tick_labels_auto[idx]] for idx in range(n_iters)]) tick_locations = np.concatenate([[tick_locations_human[idx], tick_locations_auto[idx]] for idx in range(n_iters)]) plt.xticks(tick_locations, tick_labels, rotation=90) plt.legend() plt.ylabel('Count') ax = plt.subplot(132) total_human = 0 total_auto = 0 for iter_id in range(1, n_iters + 1): cost_human = len(consolidated_classes[iter_id]) * HUMAN_PRICE cost_auto = len(auto_classes[iter_id]) * AUTO_PRICE total_human += cost_human total_auto += cost_auto plt.bar(iter_id, cost_human, width=.8, color='gray', hatch='/', edgecolor='k', label='human' if iter_id==1 else None) plt.bar(iter_id, cost_auto, bottom=cost_human, width=.8, color='gray', edgecolor='k', label='auto' if iter_id==1 else None) plt.title('Annotation costs (\${:.2f} human, \${:.2f} auto)'.format( total_human, total_auto)) plt.xlabel('Iter') plt.ylabel('Cost in dollars') plt.legend() if len(training_sizes) > 0: plt.subplot(133) plt.title('Active learning training curves') plt.grid(True) cmap = plt.get_cmap('coolwarm') n_all = len(training_sizes) for iter_id_id, (iter_id, size, errs) in enumerate(zip(training_iters, training_sizes, training_errors)): plt.plot(errs, label='Iter {}, auto'.format(iter_id + 1), color=cmap(iter_id_id / max(1, (n_all-1)))) plt.legend() plt.xscale('log') plt.xlabel('Training epoch') plt.ylabel('Validation error') ###Output _____no_output_____ ###Markdown Analyze Ground Truth labeling job results**This section should take about 20min to complete.**After the job finishes running (**make sure `sagemaker_client.describe_labeling_job` shows the job is complete!**), it is time to analyze the results. The plots in the [Monitor job progress](Monitor-job-progress) section form part of the analysis. In this section, we will gain additional insights into the results, all contained in the `output manifest`. You can find the location of the output manifest under `AWS Console > SageMaker > Labeling Jobs > [name of your job]`. We will obtain it programmatically in the cell below. 
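Equivalently (a small sketch, not the notebook's method), the manifest URI can also be read straight from the completed job's description:

###Code
# The output manifest URI, taken from the completed labeling job's description.
desc = sagemaker_client.describe_labeling_job(LabelingJobName=job_name)
print(desc["LabelingJobOutput"]["OutputDatasetS3Uri"])

###Output
_____no_output_____

###Markdown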
Postprocess the output manifestNow that the job is complete, we will download the output manifest and postprocess it to form four arrays:* `img_uris` contains the S3 URIs of all the images that Ground Truth annotated.* `groundtruth_labels` contains Ground Truth's labels for each image in `img_uris`.* `confidences` contains the confidence of each label in `groundtruth_labels`.* `human` is a flag array that contains 1 at indices corresponding to images annotated by human annotators, and 0 at indices corresponding to images annotated by Ground Truth's automated data labeling.

###Code
# Load the output manifest's annotations.
OUTPUT_MANIFEST = "s3://{}/{}/output/{}/manifests/output/output.manifest".format(
    BUCKET, EXP_NAME, job_name
)
!aws s3 cp {OUTPUT_MANIFEST} 'output.manifest'

with open("output.manifest", "r") as f:
    output = [json.loads(line.strip()) for line in f.readlines()]

# Create data arrays.
img_uris = [None] * len(output)
confidences = np.zeros(len(output))
groundtruth_labels = [None] * len(output)
human = np.zeros(len(output))

# Find the job name the manifest corresponds to.
keys = list(output[0].keys())
metakey = keys[np.where([("-metadata" in k) for k in keys])[0][0]]
jobname = metakey[:-9]

# Extract the data.
for datum_id, datum in enumerate(output):
    img_uris[datum_id] = datum["source-ref"]
    groundtruth_labels[datum_id] = str(datum[metakey]["class-name"])
    confidences[datum_id] = datum[metakey]["confidence"]
    human[datum_id] = int(datum[metakey]["human-annotated"] == "yes")
groundtruth_labels = np.array(groundtruth_labels)

###Output
_____no_output_____

###Markdown
Plot class histogramsNow, let's plot the class histograms. The next cell should produce three subplots:* The Left subplot shows the number of images annotated as belonging to each visual category. The categories will be sorted from the most to the least numerous. Each bar is divided into a 'human' and 'machine' part which shows how many images were annotated as a given category by human annotators and by the automated data labeling mechanism.* The Middle subplot is the same as Left, except y-axis is in log-scale. This helps visualize unbalanced datasets where some categories contain orders of magnitude more images than others.* The Right subplot shows the average confidence of images in each category, separately for human and auto-annotated images.

###Code
# Compute the number of annotations in each class.
n_classes = len(set(groundtruth_labels))
sorted_clnames, class_sizes = zip(*Counter(groundtruth_labels).most_common(n_classes))

# Find ids of human-annotated images.
human_sizes = [human[groundtruth_labels == clname].sum() for clname in sorted_clnames]
class_sizes = np.array(class_sizes)
human_sizes = np.array(human_sizes)

# Compute the average annotation confidence per class.
human_confidences = np.array(
    [confidences[np.logical_and(groundtruth_labels == clname, human)] for clname in sorted_clnames]
)
machine_confidences = [
    confidences[np.logical_and(groundtruth_labels == clname, 1 - human)]
    for clname in sorted_clnames
]

# If there are no images annotated as a specific class, set the average class confidence to 0.
for class_id in range(n_classes):
    if human_confidences[class_id].size == 0:
        human_confidences[class_id] = np.array([0])
    if machine_confidences[class_id].size == 0:
        machine_confidences[class_id] = np.array([0])

plt.figure(figsize=(9, 3), facecolor="white", dpi=100)

plt.subplot(1, 3, 1)
plt.title("Annotation histogram")
plt.bar(range(n_classes), human_sizes, color="gray", hatch="/", edgecolor="k", label="human")
plt.bar(
    range(n_classes),
    class_sizes - human_sizes,
    bottom=human_sizes,
    color="gray",
    edgecolor="k",
    label="machine",
)
plt.xticks(range(n_classes), sorted_clnames, rotation=90)
plt.ylabel("Annotation Count")
plt.legend()

plt.subplot(1, 3, 2)
plt.title("Annotation histogram (logscale)")
plt.bar(range(n_classes), human_sizes, color="gray", hatch="/", edgecolor="k", label="human")
plt.bar(
    range(n_classes),
    class_sizes - human_sizes,
    bottom=human_sizes,
    color="gray",
    edgecolor="k",
    label="machine",
)
plt.xticks(range(n_classes), sorted_clnames, rotation=90)
plt.yscale("log")

plt.subplot(1, 3, 3)
plt.title("Mean confidences")
plt.bar(
    np.arange(n_classes),
    [conf.mean() for conf in human_confidences],
    color="gray",
    hatch="/",
    edgecolor="k",
    width=0.4,
)
plt.bar(
    np.arange(n_classes) + 0.4,
    [conf.mean() for conf in machine_confidences],
    color="gray",
    edgecolor="k",
    width=0.4,
)
plt.xticks(range(n_classes), sorted_clnames, rotation=90);
###Output _____no_output_____ ###Markdown Plot annotated images In any data science task, it is crucial to plot and inspect the results to check they make sense. In order to do this, we will 1. Download the input images that Ground Truth annotated. 2. Split them by annotated category and whether the annotation was done by a human or the auto-labeling mechanism. 3. Plot images in each category and human/auto-annotated class. We will download the input images to `LOCAL_IMG_DIR`, which you can choose in the next cell. Note that if this directory already contains images with the same filenames as your Ground Truth input images, we will not re-download the images. If your dataset is large and you do not wish to download and plot **all** the images, simply set `DATASET_SIZE` to a small number. We will pick a random subset of your data for plotting. ###Code
LOCAL_IMG_DIR = '<< choose a local directory name to download the images to >>' # Replace with the name of a local directory to store images.
assert LOCAL_IMG_DIR != '<< choose a local directory name to download the images to >>', 'Please provide a local directory name'
DATASET_SIZE = len(img_uris) # Change this to a reasonable number if your dataset is much larger than 10K images.

subset_ids = np.random.choice(range(len(img_uris)), DATASET_SIZE, replace=False)
img_uris = [img_uris[idx] for idx in subset_ids]
groundtruth_labels = groundtruth_labels[subset_ids]
confidences = confidences[subset_ids]
human = human[subset_ids]

# Size the filename list to the (possibly subsampled) list of image URIs.
img_fnames = [None] * len(img_uris)
for img_uri_id, img_uri in enumerate(img_uris):
    target_fname = os.path.join(
        LOCAL_IMG_DIR, img_uri.split('/')[-1])
    if not os.path.isfile(target_fname):
        !aws s3 cp {img_uri} {target_fname}
    img_fnames[img_uri_id] = target_fname
###Output _____no_output_____ ###Markdown Plot a small output sample The following cell will create two figures. The first plots `N_SHOW` images in each category, as annotated by humans. The second plots `N_SHOW` images in each category, as annotated by the auto-labeling mechanism. If any category contains fewer than `N_SHOW` images, that row will not be displayed. By default, `N_SHOW = 10`, but feel free to change this to any other small number.
###Code
N_SHOW = 10

plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor="white", dpi=60)
for class_name_id, class_name in enumerate(sorted_clnames):
    class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0]
    try:
        # Sample N_SHOW images; np.random.choice raises ValueError when the
        # class has fewer than N_SHOW images, which triggers the skip below.
        show_ids = np.random.choice(class_ids, N_SHOW, replace=False)
    except ValueError:
        print("Not enough human annotations to show for class: {}".format(class_name))
        continue
    for show_id_id, show_id in enumerate(show_ids):
        plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id))
        plt.title("Human Label: " + class_name)
        plt.imshow(imageio.imread(img_fnames[show_id]))
        plt.axis("off")
plt.tight_layout()

plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor="white", dpi=100)
for class_name_id, class_name in enumerate(sorted_clnames):
    class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1 - human))[0]
    try:
        show_ids = np.random.choice(class_ids, N_SHOW, replace=False)
    except ValueError:
        print("Not enough machine annotations to show for class: {}".format(class_name))
        continue
    for show_id_id, show_id in enumerate(show_ids):
        plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id))
        plt.title("Auto Label: " + class_name)
        plt.imshow(imageio.imread(img_fnames[show_id]))
        plt.axis("off")
plt.tight_layout()
###Output _____no_output_____ ###Markdown Plot the full results Finally, we plot all the results to a large pdf file. The pdf (called `ground_truth.pdf`) will display 100 images per page. Each page will contain images belonging to the same category, and annotated either by human annotators or by the auto-labeling mechanism. You can use this pdf to investigate exactly which images were annotated as which class at a glance. This might take a while, and the resulting pdf might be very large. For a dataset of 1K images, the process takes only a minute and creates a 10MB-large pdf. You can set `N_SHOW_PER_CLASS` to a small number if you want to limit the max number of examples shown in each category. ###Code
N_SHOW_PER_CLASS = np.inf
plt.figure(figsize=(10, 10), facecolor="white", dpi=100)
with PdfPages("ground_truth.pdf") as pdf:
    for class_name in sorted_clnames:
        # Plot images annotated as class_name by humans.
        plt.clf()
        plt.text(0.1, 0.5, s="Images annotated as {} by humans".format(class_name), fontsize=20)
        plt.axis("off")
        class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0]
        for img_id_id, img_id in enumerate(class_ids):
            if img_id_id == N_SHOW_PER_CLASS:
                break
            if img_id_id % 100 == 0:
                pdf.savefig()
                plt.clf()
                print(
                    "Plotting human annotations of {}, {}/{}...".format(
                        class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS)
                    )
                )
            plt.subplot(10, 10, (img_id_id % 100) + 1)
            plt.imshow(imageio.imread(img_fnames[img_id]), aspect="auto")
            plt.axis("off")
        pdf.savefig()

        # Plot images annotated as class_name by machines.
        plt.clf()
        plt.text(0.1, 0.5, s="Images annotated as {} by machines".format(class_name), fontsize=20)
        plt.axis("off")
        class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1 - human))[0]
        for img_id_id, img_id in enumerate(class_ids):
            if img_id_id == N_SHOW_PER_CLASS:
                break
            if img_id_id % 100 == 0:
                pdf.savefig()
                plt.clf()
                print(
                    "Plotting machine annotations of {}, {}/{}...".format(
                        class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS)
                    )
                )
            plt.subplot(10, 10, (img_id_id % 100) + 1)
            plt.imshow(imageio.imread(img_fnames[img_id]), aspect="auto")
            plt.axis("off")
        pdf.savefig()
    plt.clf()
###Output _____no_output_____ ###Markdown Compare Ground Truth results to known, pre-labeled data **This section should take about 5 minutes to complete.** Sometimes (for example, when benchmarking the system) we have an alternative set of data labels available. For example, the Open Images data has already been carefully annotated by a professional annotation workforce. This allows us to perform additional analysis that compares Ground Truth labels to the known, pre-labeled data. When doing so, it is important to bear in mind that any image labels created by humans will most likely not be 100% accurate. For this reason, it is better to think of labeling accuracy as "adherence to a particular standard / set of labels" rather than "how good (in absolute terms) are the Ground Truth labels." Compute accuracy In this cell, we will calculate the accuracy of Ground Truth labels with respect to the standard labels. In [Prepare the data](Prepare-the-data), we created the `ims` dictionary that specifies which image belongs to each category. We will convert it to an array `standard_labels` such that `standard_labels[i]` contains the label of the `i-th` image, and should ideally correspond to `groundtruth_labels[i]`. This will allow us to plot confusion matrices to assess how well the Ground Truth labels adhere to the standard labels. We plot a confusion matrix for the total dataset, and separate matrices for human annotations and auto-annotations. ###Code
def plot_confusion_matrix(
    cm, classes, title="Confusion matrix", normalize=False, cmap=plt.cm.Blues
):
    if normalize:
        cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation="nearest", cmap=cmap)
    plt.title(title)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)

    # Use two decimals for normalized (fractional) matrices, integer counts otherwise.
    fmt = ".2f" if normalize else "d"
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(
            j,
            i,
            format(cm[i, j] if normalize else cm[i, j].astype(int), fmt),
            horizontalalignment="center",
            color="white" if cm[i, j] > thresh else "black",
        )

    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.tight_layout()


# Convert the 'ims' dictionary (which maps class names to images) to a list of image classes.
standard_labels = []
for img_uri in img_uris:
    img_uri = img_uri.split("/")[-1].split(".")[0]
    standard_label = [cname for cname, imgs_in_cname in ims.items() if img_uri in imgs_in_cname][0]
    standard_labels.append(standard_label)
standard_labels = np.array(standard_labels)

# Plot a confusion matrix for the full dataset.
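# (Added sketch) As a quick numeric summary before plotting, print the overall
# agreement between Ground Truth's labels and the standard Open Images labels.
print(
    "Overall agreement with the standard labels: {:.2f}%".format(
        100 * np.mean(standard_labels == groundtruth_labels)
    )
)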
plt.figure(facecolor="white", figsize=(12, 4), dpi=100) plt.subplot(131) mean_err = 100 - np.mean(standard_labels == groundtruth_labels) * 100 cnf_matrix = confusion_matrix(standard_labels, groundtruth_labels) np.set_printoptions(precision=2) plot_confusion_matrix( cnf_matrix, classes=sorted(ims.keys()), title="Full annotation set error {:.2f}%".format(mean_err), normalize=False, ) # Plot a confusion matrix for human-annotated Ground Truth labels. plt.subplot(132) mean_err = 100 - np.mean(standard_labels[human == 1.0] == groundtruth_labels[human == 1.0]) * 100 cnf_matrix = confusion_matrix(standard_labels[human == 1.0], groundtruth_labels[human == 1.0]) np.set_printoptions(precision=2) plot_confusion_matrix( cnf_matrix, classes=sorted(ims.keys()), title="Human annotation set (size {}) error {:.2f}%".format(int(sum(human)), mean_err), normalize=False, ) # Plot a confusion matrix for auto-annotated Ground Truth labels. if sum(human == 0.0) > 0: plt.subplot(133) mean_err = ( 100 - np.mean(standard_labels[human == 0.0] == groundtruth_labels[human == 0.0]) * 100 ) cnf_matrix = confusion_matrix(standard_labels[human == 0.0], groundtruth_labels[human == 0.0]) np.set_printoptions(precision=2) plot_confusion_matrix( cnf_matrix, classes=sorted(ims.keys()), title="Auto-annotation set (size {}) error {:.2f}%".format( int(len(human) - sum(human)), mean_err ), normalize=False, ) ###Output _____no_output_____ ###Markdown Plot correct and incorrect annotationsThis cell repeats the plot from Plot the full results. However, it sorts the predictions into correct and incorrect, and indicates the standard label of all the incorrect predictions. ###Code N_SHOW_PER_CLASS = np.inf plt.figure(figsize=(10, 10), facecolor="white", dpi=100) with PdfPages("ground_truth_benchmark.pdf") as pdf: for class_name in sorted_clnames: human_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] auto_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1 - human))[ 0 ] for class_ids_id, class_ids in enumerate([human_ids, auto_ids]): plt.clf() plt.text( 0.1, 0.5, s="Images annotated as {} by {}".format( class_name, "humans" if class_ids_id == 0 else "machines" ), fontsize=20, ) plt.axis("off") good_ids = class_ids[ np.where(standard_labels[class_ids] == groundtruth_labels[class_ids])[0] ] bad_ids = class_ids[ np.where(standard_labels[class_ids] != groundtruth_labels[class_ids])[0] ] for img_id_id, img_id in enumerate(np.concatenate([good_ids, bad_ids])): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print( "Plotting annotations of {}, {}/{}...".format( class_name, img_id_id, min(len(class_ids), N_SHOW_PER_CLASS) ) ) ax = plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect="auto") plt.axis("off") if img_id_id < len(good_ids): # Draw a green border around the image. rec = matplotlib.patches.Rectangle( (0, 0), 1, 1, lw=10, edgecolor="green", fill=False, transform=ax.transAxes ) else: # Draw a red border around the image. rec = matplotlib.patches.Rectangle( (0, 0), 1, 1, lw=10, edgecolor="red", fill=False, transform=ax.transAxes ) ax.add_patch(rec) pdf.savefig() plt.clf() ###Output _____no_output_____ ###Markdown Train an image classifier using Ground Truth labelsAt this stage, we have fully labeled our dataset and we can train a machine learning model to classify images based on the categories we previously defined. 
We'll do so using the **augmented manifest** output of our labeling job - no additional file translation or manipulation required! For a more complete description of the augmented manifest, see our other [example notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/ground_truth_labeling_jobs/object_detection_augmented_manifest_training/object_detection_augmented_manifest_training.ipynb). **NOTE:** Training neural networks to high accuracy often requires a careful choice of hyperparameters. In this case, we hand-picked hyperparameters that work reasonably well for this dataset. The neural net should have accuracy of about **60% if you're using 100 datapoints, and over 95% if you're using 1000 datapoints**. To train neural networks on novel data, consider using [SageMaker's model tuning / hyperparameter optimization algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). First, we'll split our augmented manifest into a training set and a validation set using an 80/20 split. ###Code
with open("output.manifest", "r") as f:
    output = [json.loads(line) for line in f.readlines()]

# Shuffle output in place.
np.random.shuffle(output)

dataset_size = len(output)
train_test_split_index = round(dataset_size * 0.8)

train_data = output[:train_test_split_index]
validation_data = output[train_test_split_index:]

num_training_samples = 0
with open("train.manifest", "w") as f:
    for line in train_data:
        f.write(json.dumps(line))
        f.write("\n")
        num_training_samples += 1

with open("validation.manifest", "w") as f:
    for line in validation_data:
        f.write(json.dumps(line))
        f.write("\n")
###Output _____no_output_____ ###Markdown Next, we'll upload these manifest files to the previously defined S3 bucket so that they can be used in the training job.
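###Markdown Before uploading, it is easy to sanity-check what the training job will consume: each manifest line is a self-contained JSON object, and the channel definitions below read the "source-ref" and "category" attributes out of it. A quick, optional inspection sketch: ###Code
# Sketch: print the first record of the freshly written training manifest.
# The exact metadata fields depend on the labeling job, but "source-ref"
# (the image URI) and "category" (the label) are the attributes consumed
# by the training channels configured below.
with open("train.manifest", "r") as f:
    first_record = json.loads(f.readline())
print(json.dumps(first_record, indent=2))
###Output _____no_output_____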
###Code s3.upload_file("train.manifest", BUCKET, EXP_NAME + "/train.manifest") s3.upload_file("validation.manifest", BUCKET, EXP_NAME + "/validation.manifest") # Create unique job name nn_job_name_prefix = "groundtruth-augmented-manifest-demo" timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime()) nn_job_name = nn_job_name_prefix + timestamp training_image = sagemaker.amazon.amazon_estimator.get_image_uri( boto3.Session().region_name, "image-classification", repo_version="latest" ) training_params = { "AlgorithmSpecification": {"TrainingImage": training_image, "TrainingInputMode": "Pipe"}, "RoleArn": role, "OutputDataConfig": {"S3OutputPath": "s3://{}/{}/output/".format(BUCKET, EXP_NAME)}, "ResourceConfig": {"InstanceCount": 1, "InstanceType": "ml.p3.2xlarge", "VolumeSizeInGB": 50}, "TrainingJobName": nn_job_name, "HyperParameters": { "epochs": "30", "image_shape": "3,224,224", "learning_rate": "0.01", "lr_scheduler_step": "10,20", "mini_batch_size": "32", "num_classes": str(num_classes), "num_layers": "18", "num_training_samples": str(num_training_samples), "resize": "224", "use_pretrained_model": "1", }, "StoppingCondition": {"MaxRuntimeInSeconds": 86400}, "InputDataConfig": [ { "ChannelName": "train", "DataSource": { "S3DataSource": { "S3DataType": "AugmentedManifestFile", "S3Uri": "s3://{}/{}/{}".format(BUCKET, EXP_NAME, "train.manifest"), "S3DataDistributionType": "FullyReplicated", "AttributeNames": ["source-ref", "category"], } }, "ContentType": "application/x-recordio", "RecordWrapperType": "RecordIO", "CompressionType": "None", }, { "ChannelName": "validation", "DataSource": { "S3DataSource": { "S3DataType": "AugmentedManifestFile", "S3Uri": "s3://{}/{}/{}".format(BUCKET, EXP_NAME, "validation.manifest"), "S3DataDistributionType": "FullyReplicated", "AttributeNames": ["source-ref", "category"], } }, "ContentType": "application/x-recordio", "RecordWrapperType": "RecordIO", "CompressionType": "None", }, ], } ###Output _____no_output_____ ###Markdown Now we create the SageMaker training job. ###Code sagemaker_client = boto3.client("sagemaker") sagemaker_client.create_training_job(**training_params) # Confirm that the training job has started print("Transform job started") while True: status = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name)[ "TrainingJobStatus" ] if status == "Completed": print("Transform job ended with status: " + status) break if status == "Failed": message = response["FailureReason"] print("Transform failed with the following error: {}".format(message)) raise Exception("Transform job failed") time.sleep(30) ###Output _____no_output_____ ###Markdown Deploy the Model Now that we've fully labeled our dataset and have a trained model, we want to use the model to perform inference.Image classification only supports encoded .jpg and .png image formats as inference input for now. The output is the probability values for all classes encoded in JSON format, or in JSON Lines format for batch transform.This section involves several steps, Create Model - Create model for the training output Batch Transform - Create a transform job to perform batch inference. Host the model for realtime inference - Create an inference endpoint and perform realtime inference. 
Create Model ###Code
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
model_name = "groundtruth-demo-ic-model" + timestamp
print(model_name)
info = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name)
model_data = info["ModelArtifacts"]["S3ModelArtifacts"]
print(model_data)

primary_container = {
    "Image": training_image,
    "ModelDataUrl": model_data,
}

create_model_response = sagemaker_client.create_model(
    ModelName=model_name, ExecutionRoleArn=role, PrimaryContainer=primary_container
)

print(create_model_response["ModelArn"])
###Output _____no_output_____ ###Markdown Batch Transform We now create a SageMaker Batch Transform job using the model created above to perform batch prediction. Download Test Data First, let's download a set of test images that were held out from the training and validation data. ###Code
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
batch_job_name = "image-classification-model" + timestamp
batch_input = 's3://{}/{}/test/'.format(BUCKET, EXP_NAME)
batch_output = 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name)

# Copy two images from each class, unseen by the neural net, to a local bucket.
test_images = []
for class_id in ['/m/04szw', '/m/02xwb', '/m/0cd4d', '/m/07dm6', '/m/0152hh']:
    test_images.extend([label[0] + '.jpg' for label in all_labels
                        if (label[2] == class_id and label[3] == '1')][-2:])

!aws s3 rm $batch_input --recursive
for test_img in test_images:
    !aws s3 cp s3://open-images-dataset/test/{test_img} {batch_input}

request = {
    "TransformJobName": batch_job_name,
    "ModelName": model_name,
    "MaxConcurrentTransforms": 16,
    "MaxPayloadInMB": 6,
    "BatchStrategy": "SingleRecord",
    "TransformOutput": {
        "S3OutputPath": "s3://{}/{}/{}/output/".format(BUCKET, EXP_NAME, batch_job_name)
    },
    "TransformInput": {
        "DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": batch_input}},
        "ContentType": "application/x-image",
        "SplitType": "None",
        "CompressionType": "None",
    },
    "TransformResources": {"InstanceType": "ml.p2.xlarge", "InstanceCount": 1},
}

print("Transform job name: {}".format(batch_job_name))

sagemaker_client = boto3.client("sagemaker")
sagemaker_client.create_transform_job(**request)

print("Created Transform job with name: ", batch_job_name)

while True:
    response = sagemaker_client.describe_transform_job(TransformJobName=batch_job_name)
    status = response["TransformJobStatus"]
    if status == "Completed":
        print("Transform job ended with status: " + status)
        break
    if status == "Failed":
        message = response["FailureReason"]
        print("Transform failed with the following error: {}".format(message))
        raise Exception("Transform job failed")
    time.sleep(30)
###Output _____no_output_____ ###Markdown After the job completes, let's inspect the prediction results. ###Code
def get_label(out_fname):
    !aws s3 cp {out_fname} .
    print(out_fname)
    with open(out_fname.split('/')[-1]) as f:
        data = json.load(f)
        index = np.argmax(data['prediction'])
        probability = data['prediction'][index]
    print("Result: label - " + CLASS_LIST[index] + ", probability - " + str(probability))
    input_fname = out_fname.split('/')[-1][:-4]
    return CLASS_LIST[index], probability, input_fname

# Show prediction results.
!mkdir -p test_inputs
!rm -f test_inputs/*
plt.figure(facecolor='white', figsize=(7, 15), dpi=100)

outputs = !aws s3 ls {batch_output}
outputs = [get_label(batch_output + prefix.split()[-1]) for prefix in outputs]
outputs.sort(key=lambda pred: pred[1], reverse=True)

for fname_id, (pred_cname, pred_conf, pred_fname) in enumerate(outputs):
    !aws s3 cp {batch_input}{pred_fname} test_inputs/{pred_fname}
    plt.subplot(5, 2, fname_id+1)
    img = imageio.imread('test_inputs/{}'.format(pred_fname))
    plt.imshow(img)
    plt.axis('off')
    plt.title('{}\nconfidence={:.2f}'.format(pred_cname, pred_conf))

if RUN_FULL_AL_DEMO:
    warning = ''
else:
    warning = ('\nNOTE: In this small demo we only used 80 images to train the neural network.\n'
               'The predictions will be far from perfect! Set RUN_FULL_AL_DEMO=True to see properly trained results.')
plt.suptitle('Predictions sorted by confidence.{}'.format(warning))
###Output _____no_output_____ ###Markdown Realtime Inference We now host the model with an endpoint and perform realtime inference. This section involves several steps: Create endpoint configuration - Create a configuration defining an endpoint. Create endpoint - Use the configuration to create an inference endpoint. Perform inference - Perform inference on some input data using the endpoint. Clean up - Delete the endpoint and model. Create Endpoint Configuration ###Code
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_config_name = job_name + "-epc" + timestamp
endpoint_config_response = sagemaker_client.create_endpoint_config(
    EndpointConfigName=endpoint_config_name,
    ProductionVariants=[
        {
            "InstanceType": "ml.m4.xlarge",
            "InitialInstanceCount": 1,
            "ModelName": model_name,
            "VariantName": "AllTraffic",
        }
    ],
)

print("Endpoint configuration name: {}".format(endpoint_config_name))
print("Endpoint configuration arn: {}".format(endpoint_config_response["EndpointConfigArn"]))
###Output _____no_output_____ ###Markdown Create Endpoint Lastly, we create the endpoint that serves up the model by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes about 10 minutes to complete. ###Code
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
endpoint_name = job_name + "-ep" + timestamp
print("Endpoint name: {}".format(endpoint_name))

endpoint_params = {
    "EndpointName": endpoint_name,
    "EndpointConfigName": endpoint_config_name,
}
endpoint_response = sagemaker_client.create_endpoint(**endpoint_params)
print("EndpointArn = {}".format(endpoint_response["EndpointArn"]))

# get the status of the endpoint
response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)
status = response["EndpointStatus"]
print("EndpointStatus = {}".format(status))

# wait until the status has changed
sagemaker_client.get_waiter("endpoint_in_service").wait(EndpointName=endpoint_name)

# print the status of the endpoint
endpoint_response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)
status = endpoint_response["EndpointStatus"]
print("Endpoint creation ended with EndpointStatus = {}".format(status))

if status != "InService":
    raise Exception("Endpoint creation failed.")

with open("test_inputs/{}".format(test_images[0]), "rb") as f:
    payload = f.read()
    payload = bytearray(payload)

client = boto3.client("sagemaker-runtime")
response = client.invoke_endpoint(
    EndpointName=endpoint_name, ContentType="application/x-image", Body=payload
)

# `response` comes in a json format, let's unpack it.
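# (Added note) For the built-in image-classification algorithm, the response
# body is a JSON list of per-class probabilities whose order follows the label
# indices from the labeling job, i.e. the ordering of CLASS_LIST established earlier.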
result = json.loads(response["Body"].read())
# The result outputs the probabilities for all classes.
# Find the class with maximum probability and print the class name.
print("Model prediction is: {}".format(CLASS_LIST[np.argmax(result)]))
###Output _____no_output_____ ###Markdown Finally, let's clean up and delete this endpoint. ###Code
sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
###Output _____no_output_____
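###Markdown The section overview above also lists the model among the resources to clean up. A minimal teardown sketch for the remaining artifacts, assuming the `endpoint_config_name` and `model_name` variables from the earlier cells are still in scope: ###Code
# Sketch: remove the endpoint configuration and the model created above, so
# that no deployment artifacts are left behind (the endpoint itself was
# deleted in the previous cell).
sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sagemaker_client.delete_model(ModelName=model_name)
###Output _____no_output_____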
Set `RUN_FULL_AL_DEMO = False` in the next cell to label only 100 images. This should cost about \$15. **Since Ground Truth's auto-labeling feature only kicks in for datasets of 1000 images or more, this cheaper version of the demo will not use it. Some of the analysis plots might look awkward, but you should still be able to see good results on the human-annotated 100 images.** PrerequisitesTo run this notebook, you can simply execute each cell one-by-one. To understand what's happening, you'll need:* An S3 bucket you can write to -- please provide its name in the following cell. The bucket must be in the same region as this SageMaker Notebook instance. You can also change the `EXP_NAME` to any valid S3 prefix. All the files related to this experiment will be stored in that prefix of your bucket.* Familiarity with Python and [numpy](http://www.numpy.org/).* Basic familiarity with [AWS S3](https://docs.aws.amazon.com/s3/index.html),* Basic understanding of [AWS Sagemaker](https://aws.amazon.com/sagemaker/),* Basic familiarity with [AWS Command Line Interface (CLI)](https://aws.amazon.com/cli/) -- set it up with credentials to access the AWS account you're running this notebook from. This should work out-of-the-box on SageMaker Jupyter Notebook instances.This notebook is only tested on a SageMaker notebook instance. The runtimes given are approximate, we used an `ml.m4.xlarge` instance in our tests. However, you can likely run it on a local instance by first executing the cell below on SageMaker, and then copying the `role` string to your local copy of the notebook.NOTE: This notebook will create/remove subdirectories in its working directory. We recommend to place this notebook in its own directory before running it. ###Code %matplotlib inline %load_ext autoreload %autoreload 2 import os from collections import namedtuple from collections import defaultdict from collections import Counter import itertools import json import random import time import imageio import numpy as np import matplotlib import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from sklearn.metrics import confusion_matrix import boto3 import sagemaker from urllib.parse import urlparse BUCKET = '<< YOUR S3 BUCKET NAME >>' assert BUCKET != '<< YOUR S3 BUCKET NAME >>', 'Please provide a custom S3 bucket name.' EXP_NAME = 'ground-truth-ic-demo' # Any valid S3 prefix. RUN_FULL_AL_DEMO = True # See 'Cost and Runtime' in the Markdown cell above! # Make sure the bucket is in the same region as this notebook. role = sagemaker.get_execution_role() region = boto3.session.Session().region_name s3 = boto3.client('s3') bucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region'] assert bucket_region == region, "You S3 bucket {} and this notebook need to be in the same region.".format(BUCKET) ###Output _____no_output_____ ###Markdown Run a Ground Truth labeling job**This section should take about 3h to complete.**We will first run a labeling job. This involves several steps: collecting the images we want labeled, specifying the possible label categories, creating instructions, and writing a labeling job specification. In addition, we highly recommend to run a (free) mock job using a private workforce before you submit any job to the public workforce. This notebook will explain how to do that as an optional step. Without using a private workforce, this section until completion of your labeling job should take about 3h. 
However, this may vary depending on the availability of the public annotation workforce. Prepare the dataWe will first download images and labels of a subset of the [Google Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html). These labels were [carefully verified](https://storage.googleapis.com/openimages/web/factsfigures.html). Later, will compare Ground Truth annotations to these labels. Our dataset will include images in the following categories:* Musical Instrument (500 images)* Fruit (370 images)* Cheetah (50 images)* Tiger (40 images)* Snowman (40 images)If you chose `RUN_FULL_AL_DEMO = False`, then we will choose a subset of 100 images in this dataset. This is a diverse dataset of interesting images, and should be fun for the human annotators to work with. You are free to ask the annotators to annotate any images you wish (as long as the images do not contain adult content; in which case, you must adjust the labeling job request this job produces, please check the Ground Truth documentation).We will copy these images to our local `BUCKET`, and will create the corresponding *input manifest*. The input manifest is a formatted list of the S3 locations of the images we want Ground Truth to annotate. We will upload this manifest to our S3 `BUCKET`. Disclosure regarding the Open Images Dataset V4:Open Images Dataset V4 is created by Google Inc. We have not modified the images or the accompanying annotations. You can obtain the images and the annotations [here](https://storage.googleapis.com/openimages/web/download.html). The annotations are licensed by Google Inc. under [CC BY 4.0](https://creativecommons.org/licenses/by/2.0/) license. The images are listed as having a [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/) license. The following paper describes Open Images V4 in depth: from the data collection and annotation to detailed statistics about the data and evaluation of models trained on it.A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, T. Duerig, and V. Ferrari.*The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale.* arXiv:1811.00982, 2018. ([link to PDF](https://arxiv.org/abs/1811.00982)) ###Code # Download and process the Open Images annotations. !wget https://storage.googleapis.com/openimages/2018_04/test/test-annotations-human-imagelabels-boxable.csv -O openimgs-annotations.csv with open('openimgs-annotations.csv', 'r') as f: all_labels = [line.strip().split(',') for line in f.readlines()] # Extract image ids in each of our desired classes. ims = {} ims['Musical Instrument'] = [label[0] for label in all_labels if (label[2] == '/m/04szw' and label[3] == '1')][:500] ims['Fruit'] = [label[0] for label in all_labels if (label[2] == '/m/02xwb' and label[3] == '1')][:371] ims['Fruit'].remove('02a54f6864478101') # This image contains personal information, let's remove it from our dataset. ims['Cheetah'] = [label[0] for label in all_labels if (label[2] == '/m/0cd4d' and label[3] == '1')][:50] ims['Tiger'] = [label[0] for label in all_labels if (label[2] == '/m/07dm6' and label[3] == '1')][:40] ims['Snowman'] = [label[0] for label in all_labels if (label[2] == '/m/0152hh' and label[3] == '1')][:40] num_classes = len(ims) # If running the short version of the demo, reduce each class count 10 times. 
for key in ims.keys(): if RUN_FULL_AL_DEMO is False: ims[key] = set(ims[key][:int(len(ims[key]) / 10)]) else: ims[key] = set(ims[key]) # Copy the images to our local bucket. s3 = boto3.client('s3') for img_id, img in enumerate(itertools.chain.from_iterable(ims.values())): if (img_id + 1) % 10 == 0: print('Copying image {} / {}'.format((img_id+1), 1000)) copy_source = { 'Bucket': 'open-images-dataset', 'Key': 'test/{}.jpg'.format(img) } s3.copy(copy_source, BUCKET, '{}/images/{}.jpg'.format(EXP_NAME, img)) # Create and upload the input manifest. manifest_name = 'input.manifest' with open(manifest_name, 'w') as f: for img in itertools.chain.from_iterable(ims.values()): img_path = 's3://{}/{}/images/{}.jpg'.format(BUCKET, EXP_NAME, img) f.write('{"source-ref": "' + img_path +'"}\n') s3.upload_file(manifest_name, BUCKET, EXP_NAME + '/' + manifest_name) ###Output _____no_output_____ ###Markdown After running the cell above, you should be able to go to `s3://BUCKET/EXP_NAME/images` in [S3 console](https://console.aws.amazon.com/s3/) and see a thousand images. We recommend you inspect the contents of these images! You can download them all to a local machine using the AWS CLI. Specify the categoriesTo run an image classification labeling job, you need to decide on a set of classes the annotators can choose from. In our case, this list is `["Musical Instrument", "Fruit", "Cheetah", "Tiger", "Snowman"]`. In your own job you can choose any list of up to 10 classes. We recommend the classes to be as unambiguous and concrete as possible. The categories should be mutually exclusive, with only one correct label per image. In addition, be careful to make the task as *objective* as possible, unless of course your intention is to obtain subjective labels.* Example good category lists: `["Human", "No Human"]`, `["Golden Retriever", "Labrador", "English Bulldog", "German Shepherd"]`, `["Car", "Train", "Ship", "Pedestrian"]`.* Example bad category lists: `["Prominent object", "Not prominent"]` (meaning unclear), `["Beautiful", "Ugly"]` (subjective), `["Dog", "Animal", "Car"]` (not mutually exclusive). To work with Ground Truth, this list needs to be converted to a .json file and uploaded to the S3 `BUCKET`.*Note: The ordering of the labels or classes in the template governs the class indices that you will see downstream in the output manifest (this numbering is zero-indexed). In other words, the class that appears second in the template will correspond to class "1" in the output. At the end of this demonstration, we will train a model and make predictions, and this class ordering is instrumental to interpreting the results.* ###Code CLASS_LIST = list(ims.keys()) print("Label space is {}".format(CLASS_LIST)) json_body = { 'labels': [{'label': label} for label in CLASS_LIST] } with open('class_labels.json', 'w') as f: json.dump(json_body, f) s3.upload_file('class_labels.json', BUCKET, EXP_NAME + '/class_labels.json') ###Output _____no_output_____ ###Markdown You should now see `class_labels.json` in `s3://BUCKET/EXP_NAME/`. Create the instruction templatePart or all of your images will be annotated by human annotators. It is **essential** to provide good instructions that help the annotators give you the annotations you want. Good instructions are:1. Concise. We recommend limiting verbal/textual instruction to two sentences, and focusing on clear visuals.2. Visual. 
In the case of image classification, we recommend providing one labeled image in each of the classes as part of the instruction.When used through the AWS Console, Ground Truth helps you create the instructions using a visual wizard. When using the API, you need to create an HTML template for your instructions. Below, we prepare a very simple but effective template and upload it to your S3 bucket.NOTE: If you use any images in your template (as we do), they need to be publicly accessible. You can enable public access to files in your S3 bucket through the S3 Console, as described in [S3 Documentation](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/set-object-permissions.html). Testing your instructionsIt is very easy to create broken instructions. This might cause your labeling job to fail. However, it might also cause your job to complete with meaningless results (when the annotators have no idea what to do, or the instructions are plain wrong). We *highly recommend* that you verify that your task is correct in two ways:1. The following cell creates and uploads a file called `instructions.template` to S3. It also creates `instructions.html` that you can open in a local browser window. Please do so and inspect the resulting web page; it should correspond to what you want your annotators to see (except the actual image to annotate will not be visible).2. Run your job in a private workforce, which is a way to run a mock labeling job. We describe how to do it in [Verify your task using a private team [OPTIONAL]](Verify-your-task-using-a-private-team-[OPTIONAL]). ###Code img_examples = ['https://s3.amazonaws.com/open-images-dataset/test/{}'.format(img_id) for img_id in ['0634825fc1dcc96b.jpg', '0415b6a36f3381ed.jpg', '8582cc08068e2d0f.jpg', '8728e9fa662a8921.jpg', '926d31e8cde9055e.jpg']] def make_template(test_template=False, save_fname='instructions.template'): template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script> <crowd-form> <crowd-image-classifier name="crowd-image-classifier" src="{{{{ task.input.taskObject | grant_read_access }}}}" header="Dear Annotator, please tell me what you can see in the image. Thank you!" categories="{categories_str}" > <full-instructions header="Image classification instructions"> </full-instructions> <short-instructions> <p>Dear Annotator, please tell me whether what you can see in the image. Thank you!</p> <p><img src="{}" style="max-width:100%"> <br>Example "Musical Instrument". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Fruit".</p> <p><img src="{}" style="max-width:100%"> <br>Example "Cheetah". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Tiger". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Snowman". </p> </short-instructions> </crowd-image-classifier> </crowd-form>""".format(*img_examples, categories_str=str(CLASS_LIST) if test_template else '{{ task.input.labels | to_json | escape }}') with open(save_fname, 'w') as f: f.write(template) if test_template is False: print(template) make_template(test_template=True, save_fname='instructions.html') make_template(test_template=False, save_fname='instructions.template') s3.upload_file('instructions.template', BUCKET, EXP_NAME + '/instructions.template') ###Output _____no_output_____ ###Markdown You should now be able to find your template in `s3://BUCKET/EXP_NAME/instructions.template`. Create a private team to test your task [OPTIONAL]This step requires you to use the AWS Console. 
However, we **highly recommend** that you follow it, especially when creating your own task with a custom dataset, label set, and template.We will create a `private workteam` and add only one user (you) to it. Then, we will modify the Ground Truth API job request to send the task to that workforce. You will then be able to see your annotation job exactly as the public annotators would see it. You can even annotate the whole dataset yourself! To create a private team:1. Go to `AWS Console > Amazon SageMaker > Labeling workforces`2. Click "Private" and then "Create private team". 3. Enter the desired name for your private workteam.4. Enter your own email address in the "Email addresses" section. 5. Enter the name of your organization and a contact email to administrate the private workteam.6. Click "Create Private Team".7. The AWS Console should now return to `AWS Console > Amazon SageMaker > Labeling workforces`. Your newly created team should be visible under "Private teams". Next to it you will see an `ARN` which is a long string that looks like `arn:aws:sagemaker:region-name-123456:workteam/private-crowd/team-name`. Copy this ARN in the cell below.8. You should get an email from `[email protected]` that contains your workforce username and password. 9. In `AWS Console > Amazon SageMaker > Labeling workforces`, click on the URL in `Labeling portal sign-in URL`. Use the email/password combination from Step 8 to log in (you will be asked to create a new, non-default password).That's it! This is your private worker's interface. When we create a verification task in [Verify your task using a private team](Verify-your-task-using-a-private-team-[OPTIONAL]) below, your task should appear in this window. You can invite your colleagues to participate in the labeling job by clicking the "Invite new workers" button.The [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-private.html) has more details on the management of private workteams. ###Code private_workteam_arn = '<< your private workteam ARN here >>' ###Output _____no_output_____ ###Markdown Define pre-built lambda functions for use in the labeling job Before we submit the request, we need to define the ARNs for four key components of the labeling job: 1) the workteam, 2) the annotation consolidation Lambda function, 3) the pre-labeling task Lambda function, and 4) the machine learning algorithm to perform auto-annotation. These functions are defined by strings with region names and AWS service account numbers, so we will define a mapping below that will enable you to run this notebook in any of our supported regions. See the official documentation for the available ARNs:* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-public.html) for a discussion of the workteam ARN definition. 
There is only one valid selection if you choose to use the public workfofce; if you elect to use a private workteam, you should check the corresponding ARN for the workteam.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_HumanTaskConfig.htmlSageMaker-Type-HumanTaskConfig-PreHumanTaskLambdaArn) for available pre-human ARNs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_AnnotationConsolidationConfig.htmlSageMaker-Type-AnnotationConsolidationConfig-AnnotationConsolidationLambdaArn) for available annotation consolidation ANRs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_LabelingJobAlgorithmsConfig.htmlSageMaker-Type-LabelingJobAlgorithmsConfig-LabelingJobAlgorithmSpecificationArn) for available auto-labeling ARNs for other workflows. ###Code # Specify ARNs for resources needed to run an image classification job. ac_arn_map = {'us-west-2': '081040173940', 'us-east-1': '432418664414', 'us-east-2': '266458841044', 'eu-west-1': '568282634449', 'ap-northeast-1': '477331159723'} prehuman_arn = 'arn:aws:lambda:{}:{}:function:PRE-ImageMultiClass'.format(region, ac_arn_map[region]) acs_arn = 'arn:aws:lambda:{}:{}:function:ACS-ImageMultiClass'.format(region, ac_arn_map[region]) labeling_algorithm_specification_arn = 'arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/image-classification'.format(region) workteam_arn = 'arn:aws:sagemaker:{}:394669845002:workteam/public-crowd/default'.format(region) ###Output _____no_output_____ ###Markdown Submit the Ground Truth job requestThe API starts a Ground Truth job by submitting a request. The request contains the full configuration of the annotation task, and allows you to modify the fine details ofthe job that are fixed to default values when you use the AWS Console. The parameters that make up the request are described in more detail in the [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateLabelingJob.html).After you submit the request, you should be able to see the job in your AWS Console, at `Amazon SageMaker > Labeling Jobs`.You can track the progress of the job there. This job will take several hours to complete. If your jobis larger (say 100,000 images), the speed and cost benefit of auto-labeling should be larger. Verify your task using a private team [OPTIONAL]If you chose to follow the steps in [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]), then you can first verify that your task runs as expected. To do this:1. Set VERIFY_USING_PRIVATE_WORKFORCE to True in the cell below.2. Run the next two cells. This will define the task and submit it to the private workforce (to you).3. After a few minutes, you should be able to see your task in your private workforce interface [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]).Please verify that the task appears as you want it to appear.4. If everything is in order, change `VERIFY_USING_PRIVATE_WORKFORCE` to `False` and rerun the cell below to start the real annotation task! 
###Code VERIFY_USING_PRIVATE_WORKFORCE = False USE_AUTO_LABELING = True task_description = 'What do you see: a {}?'.format(' a '.join(CLASS_LIST)) task_keywords = ['image', 'classification', 'humans'] task_title = task_description job_name = 'ground-truth-demo-' + str(int(time.time())) human_task_config = { "AnnotationConsolidationConfig": { "AnnotationConsolidationLambdaArn": acs_arn, }, "PreHumanTaskLambdaArn": prehuman_arn, "MaxConcurrentTaskCount": 200, # 200 images will be sent at a time to the workteam. "NumberOfHumanWorkersPerDataObject": 3, # 3 separate workers will be required to label each image. "TaskAvailabilityLifetimeInSeconds": 21600, # Your worteam has 6 hours to complete all pending tasks. "TaskDescription": task_description, "TaskKeywords": task_keywords, "TaskTimeLimitInSeconds": 300, # Each image must be labeled within 5 minutes. "TaskTitle": task_title, "UiConfig": { "UiTemplateS3Uri": 's3://{}/{}/instructions.template'.format(BUCKET, EXP_NAME), } } if not VERIFY_USING_PRIVATE_WORKFORCE: human_task_config["PublicWorkforceTaskPrice"] = { "AmountInUsd": { "Dollars": 0, "Cents": 1, "TenthFractionsOfACent": 2, } } human_task_config["WorkteamArn"] = workteam_arn else: human_task_config["WorkteamArn"] = private_workteam_arn ground_truth_request = { "InputConfig" : { "DataSource": { "S3DataSource": { "ManifestS3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, manifest_name), } }, "DataAttributes": { "ContentClassifiers": [ "FreeOfPersonallyIdentifiableInformation", "FreeOfAdultContent" ] }, }, "OutputConfig" : { "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME), }, "HumanTaskConfig" : human_task_config, "LabelingJobName": job_name, "RoleArn": role, "LabelAttributeName": "category", "LabelCategoryConfigS3Uri": 's3://{}/{}/class_labels.json'.format(BUCKET, EXP_NAME), } if USE_AUTO_LABELING and RUN_FULL_AL_DEMO: ground_truth_request[ "LabelingJobAlgorithmsConfig"] = { "LabelingJobAlgorithmSpecificationArn": labeling_algorithm_specification_arn } sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_labeling_job(**ground_truth_request) ###Output _____no_output_____ ###Markdown Monitor job progressA Ground Truth job can take a few hours to complete (if your dataset is larger than 10000 images, it can take much longer than that!). One way to monitor the job's progress is through AWS Console. In this notebook, we will use Ground Truth output files and Cloud Watch logs in order to monitor the progress. You can re-evaluate the next two cells repeatedly. You can re-evaluate the next cell repeatedly. It sends a `describe_labelging_job` request which should tell you whether the job is completed or not. If it is, then 'LabelingJobStatus' will be 'Completed'. ###Code sagemaker_client.describe_labeling_job(LabelingJobName=job_name) ###Output _____no_output_____ ###Markdown The next cell extract detailed information on how your job is doing to-date. You can re-evaluate it at any time. It should give you:* The number of human and machine-annotated images in each category across the iterations of your labeling job.* The training curves of any neural network training jobs launched by Ground Truth **(only if you are running with `RUN_FULL_AL_DEMO=True`)**.* The cost of the human- and machine-annotatoed labels.To understand the pricing, study [the pricing doc](https://aws.amazon.com/sagemaker/groundtruth/pricing/) carefully. In our case, each human label costs `$0.08 + 3 * $0.012 = $0.116` and each auto-label costs `$0.08`. 
There is also a small added cost of using SageMaker instances for neural net training and inference during auto-labeling. However, this should be insignificant compared the other costs.If `RUN_FULL_AL_DEMO==True`, then the job will proceed in multiple iterations. * Iteration 1: Ground Truth will send out 10 images as 'probes' for human annotation. If these are succesfully annotated, proceed to Iteration 2.* Iteration 2: Send out a batch of `MaxConcurrentTaskCount - 10` (in our case, 190) images for human annotation to obtain an active learning training batch.* Iteration 3: Send out another batch of 200 images for human annotation to obtain an active learning validation set.* Iteration 4a: Train a neural net to do auto-labeling. Auto-label as many datapoints as possible. * Iteration 4b: If there is any data leftover, send out at most 200 images for human annotation.* Repeat Iteration 4a and 4b until all data is annotated.If `RUN_FULL_AL_DEMO==False`, only Iterations 1 and 2 will happen. ###Code from datetime import datetime import glob import shutil HUMAN_PRICE = 0.116 AUTO_PRICE = 0.08 try: os.makedirs('ic_output_data/', exist_ok=False) except FileExistsError: shutil.rmtree('ic_output_data/') S3_OUTPUT = boto3.client('sagemaker').describe_labeling_job(LabelingJobName=job_name)[ 'OutputConfig']['S3OutputPath'] + job_name # Download human annotation data. !aws s3 cp {S3_OUTPUT + '/annotations/worker-response'} ic_output_data/worker-response --recursive --quiet worker_times = [] worker_ids = [] # Collect the times and worker ids of all the annotation events to-date. for annot_fname in glob.glob('ic_output_data/worker-response/**', recursive=True): if annot_fname.endswith('json'): with open(annot_fname, 'r') as f: annot_data = json.load(f) for answer in annot_data['answers']: annot_time = datetime.strptime( answer['submissionTime'], '%Y-%m-%dT%H:%M:%SZ') annot_id = answer['workerId'] worker_times.append(annot_time) worker_ids.append(annot_id) sort_ids = np.argsort(worker_times) worker_times = np.array(worker_times)[sort_ids] worker_ids = np.array(worker_ids)[sort_ids] cumulative_n_annots = np.cumsum([1 for _ in worker_times]) # Count the number of annotations per unique worker id. annots_per_worker = np.zeros(worker_ids.size) ids_store = set() for worker_id_id, worker_id in enumerate(worker_ids): ids_store.add(worker_id) annots_per_worker[worker_id_id] = float( cumulative_n_annots[worker_id_id]) / len(ids_store) # Count number of human annotations in each class each iteration. !aws s3 cp {S3_OUTPUT + '/annotations/consolidated-annotation/consolidation-response'} ic_output_data/consolidation-response --recursive --quiet consolidated_classes = defaultdict(list) consolidation_times = {} consolidated_cost_times = [] for consolidated_fname in glob.glob('ic_output_data/consolidation-response/**', recursive=True): if consolidated_fname.endswith('json'): iter_id = int(consolidated_fname.split('/')[-2][-1]) # Store the time of the most recent consolidation event as iteration time. 
iter_time = datetime.strptime(consolidated_fname.split('/')[-1], '%Y-%m-%d_%H:%M:%S.json') if iter_id in consolidation_times: consolidation_times[iter_id] = max(consolidation_times[iter_id], iter_time) else: consolidation_times[iter_id] = iter_time consolidated_cost_times.append(iter_time) with open(consolidated_fname, 'r') as f: consolidated_data = json.load(f) for consolidation in consolidated_data: consolidation_class = consolidation['consolidatedAnnotation']['content'][ 'category-metadata']['class-name'] consolidated_classes[iter_id].append(consolidation_class) total_human_labels = sum([len(annots) for annots in consolidated_classes.values()]) # Count the number of machine iterations in each class each iteration. !aws s3 cp {S3_OUTPUT + '/activelearning'} ic_output_data/activelearning --recursive --quiet auto_classes = defaultdict(list) auto_times = {} auto_cost_times = [] for auto_fname in glob.glob('ic_output_data/activelearning/**', recursive=True): if auto_fname.endswith('auto_annotator_output.txt'): iter_id = int(auto_fname.split('/')[-3]) with open(auto_fname, 'r') as f: annots = [' '.join(l.split()[1:]) for l in f.readlines()] for annot in annots: annot = json.loads(annot) time_str = annot['category-metadata']['creation-date'] auto_time = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f') auto_class = annot['category-metadata']['class-name'] auto_classes[iter_id].append(auto_class) if iter_id in auto_times: auto_times[iter_id] = max(auto_times[iter_id], auto_time) else: auto_times[iter_id] = auto_time auto_cost_times.append(auto_time) total_auto_labels = sum([len(annots) for annots in auto_classes.values()]) n_iters = max(len(auto_times), len(consolidation_times)) def get_training_job_data(training_job_name): logclient = boto3.client('logs') log_group_name = '/aws/sagemaker/TrainingJobs' log_stream_name = logclient.describe_log_streams(logGroupName=log_group_name, logStreamNamePrefix=training_job_name)['logStreams'][0]['logStreamName'] train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True ) events = train_log['events'] next_token = train_log['nextForwardToken'] while True: train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True, nextToken=next_token ) if train_log['nextForwardToken'] == next_token: break events = events + train_log['events'] errors = [] for event in events: msg = event['message'] if 'Final configuration' in msg: num_samples = int(msg.split('num_training_samples\': u\'')[1].split('\'')[0]) elif 'Validation-accuracy' in msg: errors.append(float(msg.split('Validation-accuracy=')[1])) errors = 1 - np.array(errors) return num_samples, errors training_data = !aws s3 ls {S3_OUTPUT + '/training/'} --recursive training_sizes = [] training_errors = [] training_iters = [] for line in training_data: if line.split('/')[-1] == 'model.tar.gz': training_job_name = line.split('/')[-3] n_samples, errors = get_training_job_data(training_job_name) training_sizes.append(n_samples) training_errors.append(errors) training_iters.append(int(line.split('/')[-5])) plt.figure(facecolor='white', figsize=(14, 4), dpi=100) ax = plt.subplot(131) plt.title('Label counts ({} human, {} auto)'.format( total_human_labels, total_auto_labels)) cmap = plt.get_cmap('coolwarm') for iter_id in consolidated_classes.keys(): bottom = 0 class_counter = Counter(consolidated_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): if iter_id == 1: plt.bar(iter_id, 
class_counter[cname], width=.4, bottom=bottom, label=cname, color=cmap(cname_id / float(len(CLASS_LIST)-1))) else: plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] for iter_id in auto_classes.keys(): bottom = 0 class_counter = Counter(auto_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): plt.bar(iter_id + .4, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] tick_labels_human = ['Iter {}, human'.format(iter_id + 1) for iter_id in range(n_iters)] tick_labels_auto = ['Iter {}, auto'.format(iter_id + 1) for iter_id in range(n_iters)] tick_locations_human = np.arange(n_iters) + 1 tick_locations_auto = tick_locations_human + .4 tick_labels = np.concatenate([[tick_labels_human[idx], tick_labels_auto[idx]] for idx in range(n_iters)]) tick_locations = np.concatenate([[tick_locations_human[idx], tick_locations_auto[idx]] for idx in range(n_iters)]) plt.xticks(tick_locations, tick_labels, rotation=90) plt.legend() plt.ylabel('Count') ax = plt.subplot(132) total_human = 0 total_auto = 0 for iter_id in range(1, n_iters + 1): cost_human = len(consolidated_classes[iter_id]) * HUMAN_PRICE cost_auto = len(auto_classes[iter_id]) * AUTO_PRICE total_human += cost_human total_auto += cost_auto plt.bar(iter_id, cost_human, width=.8, color='gray', hatch='/', edgecolor='k', label='human' if iter_id==1 else None) plt.bar(iter_id, cost_auto, bottom=cost_human, width=.8, color='gray', edgecolor='k', label='auto' if iter_id==1 else None) plt.title('Annotation costs (\${:.2f} human, \${:.2f} auto)'.format( total_human, total_auto)) plt.xlabel('Iter') plt.ylabel('Cost in dollars') plt.legend() if len(training_sizes) > 0: plt.subplot(133) plt.title('Active learning training curves') plt.grid(True) cmap = plt.get_cmap('coolwarm') n_all = len(training_sizes) for iter_id_id, (iter_id, size, errs) in enumerate(zip(training_iters, training_sizes, training_errors)): plt.plot(errs, label='Iter {}, auto'.format(iter_id + 1), color=cmap(iter_id_id / max(1, (n_all-1)))) plt.legend() plt.xscale('log') plt.xlabel('Training epoch') plt.ylabel('Validation error') ###Output _____no_output_____ ###Markdown Analyze Ground Truth labeling job results **This section should take about 20min to complete.** After the job finishes running (**make sure `sagemaker_client.describe_labeling_job` shows the job is complete!**), it is time to analyze the results. The plots in the [Monitor job progress](Monitor-job-progress) section form part of the analysis. In this section, we will gain additional insights into the results, all contained in the `output manifest`. You can find the location of the output manifest under `AWS Console > SageMaker > Labeling Jobs > [name of your job]`. We will obtain it programmatically in the cell below. Postprocess the output manifest Now that the job is complete, we will download the output manifest and postprocess it to form four arrays:* `img_uris` contains the S3 URIs of all the images that Ground Truth annotated.* `groundtruth_labels` contains Ground Truth's labels for each image in `img_uris`.* `confidences` contains the confidence of each label in `groundtruth_labels`.* `human` is a flag array that contains 1 at indices corresponding to images annotated by human annotators, and 0 at indices corresponding to images annotated by Ground Truth's automated data labeling. ###Code # Load the output manifest's annotations.
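# A sketch of one manifest line (field values are illustrative; the label attribute matches this job's 'category'):
# {"source-ref": "s3://.../images/abc123.jpg", "category": 0,
#  "category-metadata": {"class-name": "Musical Instrument", "confidence": 0.94, "human-annotated": "yes", ...}}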
OUTPUT_MANIFEST = 's3://{}/{}/output/{}/manifests/output/output.manifest'.format(BUCKET, EXP_NAME, job_name) !aws s3 cp {OUTPUT_MANIFEST} 'output.manifest' with open('output.manifest', 'r') as f: output = [json.loads(line.strip()) for line in f.readlines()] # Create data arrays. img_uris = [None] * len(output) confidences = np.zeros(len(output)) groundtruth_labels = [None] * len(output) human = np.zeros(len(output)) # Find the job name the manifest corresponds to. keys = list(output[0].keys()) metakey = keys[np.where([('-metadata' in k) for k in keys])[0][0]] jobname = metakey[:-9] # Extract the data. for datum_id, datum in enumerate(output): img_uris[datum_id] = datum['source-ref'] groundtruth_labels[datum_id] = str(datum[metakey]['class-name']) confidences[datum_id] = datum[metakey]['confidence'] human[datum_id] = int(datum[metakey]['human-annotated'] == 'yes') groundtruth_labels = np.array(groundtruth_labels) ###Output _____no_output_____ ###Markdown Plot class histograms Now, let's plot the class histograms. The next cell should produce three subplots:* The Left subplot shows the number of images annotated as belonging to each visual category. The categories will be sorted from the most to the least numerous. Each bar is divided into a 'human' and 'machine' part which shows how many images were annotated as a given category by human annotators and by the automated data labeling mechanism.* The Middle subplot is the same as Left, except the y-axis is in log-scale. This helps visualize unbalanced datasets where some categories contain orders of magnitude more images than others.* The Right subplot shows the average confidence of images in each category, separately for human and auto-annotated images. ###Code # Compute the number of annotations in each class. n_classes = len(set(groundtruth_labels)) sorted_clnames, class_sizes = zip(*Counter(groundtruth_labels).most_common(n_classes)) # Find ids of human-annotated images. human_sizes = [human[groundtruth_labels == clname].sum() for clname in sorted_clnames] class_sizes = np.array(class_sizes) human_sizes = np.array(human_sizes) # Compute the average annotation confidence per class. human_confidences = np.array([confidences[np.logical_and(groundtruth_labels == clname, human)] for clname in sorted_clnames]) machine_confidences = [confidences[np.logical_and(groundtruth_labels == clname, 1-human)] for clname in sorted_clnames] # If there are no images annotated as a specific class, set the average class confidence to 0.
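# (Without this guard, taking .mean() of an empty confidence array below would produce NaN and break the bar plot.)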
for class_id in range(n_classes): if human_confidences[class_id].size == 0: human_confidences[class_id] = np.array([0]) if machine_confidences[class_id].size == 0: machine_confidences[class_id] = np.array([0]) plt.figure(figsize=(9, 3), facecolor='white', dpi=100) plt.subplot(1, 3, 1) plt.title('Annotation histogram') plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human') plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes, color='gray', edgecolor='k', label='machine') plt.xticks(range(n_classes), sorted_clnames, rotation=90) plt.ylabel('Annotation Count') plt.legend() plt.subplot(1, 3, 2) plt.title('Annotation histogram (logscale)') plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human') plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes, color='gray', edgecolor='k', label='machine') plt.xticks(range(n_classes), sorted_clnames, rotation=90) plt.yscale('log') plt.subplot(1, 3, 3) plt.title('Mean confidences') plt.bar(np.arange(n_classes), [conf.mean() for conf in human_confidences], color='gray', hatch='/', edgecolor='k', width=.4) plt.bar(np.arange(n_classes) + .4, [conf.mean() for conf in machine_confidences], color='gray', edgecolor='k', width=.4) plt.xticks(range(n_classes), sorted_clnames, rotation=90); ###Output _____no_output_____ ###Markdown Plot annotated images In any data science task, it is crucial to plot and inspect the results to check they make sense. In order to do this, we will 1. Download the input images that Ground Truth annotated. 2. Split them by annotated category and whether the annotation was done by a human or the auto-labeling mechanism. 3. Plot images in each category and human/auto-annotated class. We will download the input images to `LOCAL_IMG_DIR`, which you can choose in the next cell. Note that if this directory already contains images with the same filenames as your Ground Truth input images, we will not re-download the images. If your dataset is large and you do not wish to download and plot **all** the images, simply set `DATASET_SIZE` to a small number. We will pick a random subset of your data for plotting. ###Code LOCAL_IMG_DIR = '<< choose a local directory name to download the images to >>' # Replace with the name of a local directory to store images. assert LOCAL_IMG_DIR != '<< choose a local directory name to download the images to >>', 'Please provide a local directory name' DATASET_SIZE = len(img_uris) # Change this to a reasonable number if your dataset is much larger than 10K images. subset_ids = np.random.choice(range(len(img_uris)), DATASET_SIZE, replace=False) img_uris = [img_uris[idx] for idx in subset_ids] groundtruth_labels = groundtruth_labels[subset_ids] confidences = confidences[subset_ids] human = human[subset_ids] img_fnames = [None] * len(output) for img_uri_id, img_uri in enumerate(img_uris): target_fname = os.path.join( LOCAL_IMG_DIR, img_uri.split('/')[-1]) if not os.path.isfile(target_fname): !aws s3 cp {img_uri} {target_fname} img_fnames[img_uri_id] = target_fname ###Output _____no_output_____ ###Markdown Plot a small output sample The following cell will create two figures. The first plots `N_SHOW` images in each category, as annotated by humans. The second plots `N_SHOW` images in each category, as annotated by the auto-labeling mechanism. If any category contains fewer than `N_SHOW` images, that row will not be displayed. By default, `N_SHOW = 10`, but feel free to change this to any other small number.
###Code N_SHOW = 10 plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=60) for class_name_id, class_name in enumerate(sorted_clnames): class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] try: show_ids = np.random.choice(class_ids, N_SHOW, replace=False) # Sample N_SHOW images; raises ValueError if the class has fewer. except ValueError: print('Not enough human annotations to show for class: {}'.format(class_name)) continue for show_id_id, show_id in enumerate(show_ids): plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id)) plt.title('Human Label: ' + class_name) plt.imshow(imageio.imread(img_fnames[show_id])) plt.axis('off') plt.tight_layout() plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=100) for class_name_id, class_name in enumerate(sorted_clnames): class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] try: show_ids = np.random.choice(class_ids, N_SHOW, replace=False) except ValueError: print('Not enough machine annotations to show for class: {}'.format(class_name)) continue for show_id_id, show_id in enumerate(show_ids): plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id)) plt.title('Auto Label: ' + class_name) plt.imshow(imageio.imread(img_fnames[show_id])) plt.axis('off') plt.tight_layout() ###Output _____no_output_____ ###Markdown Plot the full results Finally, we plot all the results to a large pdf file. The pdf (called `ground_truth.pdf`) will display 100 images per page. Each page will contain images belonging to the same category, and annotated either by human annotators or by the auto-labeling mechanism. You can use this pdf to investigate exactly which images were annotated as which class at a glance. This might take a while, and the resulting pdf might be very large. For a dataset of 1K images, the process takes only a minute and creates a 10MB-large pdf. You can set `N_SHOW_PER_CLASS` to a small number if you want to limit the max number of examples shown in each category. ###Code N_SHOW_PER_CLASS = np.inf plt.figure(figsize=(10, 10), facecolor='white', dpi=100) with PdfPages('ground_truth.pdf') as pdf: for class_name in sorted_clnames: # Plot images annotated as class_name by humans. plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by humans'.format(class_name), fontsize=20) plt.axis('off') class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] for img_id_id, img_id in enumerate(class_ids): if img_id_id == N_SHOW_PER_CLASS: break if (img_id_id + 1) % 100 == 0 or (img_id_id + 1) == min(len(class_ids), N_SHOW_PER_CLASS): pdf.savefig() plt.clf() print('Plotting human annotations of {}, {}/{}...'.format( class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS))) plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') pdf.savefig() # Plot images annotated as class_name by machines.
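# (Same pagination pattern as the human-annotated pages above: a new pdf page is saved after every 100 images.)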
plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by machines'.format(class_name), fontsize=20) plt.axis('off') class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] for img_id_id, img_id in enumerate(class_ids): if img_id_id == N_SHOW_PER_CLASS: break if (img_id_id + 1) % 100 == 0 or (img_id_id + 1) == min(len(class_ids), N_SHOW_PER_CLASS): pdf.savefig() plt.clf() print('Plotting machine annotations of {}, {}/{}...'.format( class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS))) plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') pdf.savefig() plt.clf() ###Output _____no_output_____ ###Markdown Compare Ground Truth results to known, pre-labeled data **This section should take about 5 minutes to complete.** Sometimes (for example, when benchmarking the system) we have an alternative set of data labels available. For example, the Open Images data has already been carefully annotated by a professional annotation workforce. This allows us to perform additional analysis that compares Ground Truth labels to the known, pre-labeled data. When doing so, it is important to bear in mind that any image labels created by humans will most likely not be 100% accurate. For this reason, it is better to think of labeling accuracy as "adherence to a particular standard / set of labels" rather than "how good (in absolute terms) are the Ground Truth labels." Compute accuracy In this cell, we will calculate the accuracy of Ground Truth labels with respect to the standard labels. In [Prepare the data](Prepare-the-data), we created the `ims` dictionary that specifies which image belongs to each category. We will convert it to an array `standard_labels` such that `standard_labels[i]` contains the label of the `i-th` image, and should ideally correspond to `groundtruth_labels[i]`. This will allow us to plot confusion matrices to assess how well the Ground Truth labels adhere to the standard labels. We plot a confusion matrix for the total dataset, and separate matrices for human annotations and auto-annotations. ###Code def plot_confusion_matrix(cm, classes, title='Confusion matrix', normalize=False, cmap=plt.cm.Blues): if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' # Float format for normalized matrices, integer counts otherwise. thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j] if normalize else int(cm[i, j]), fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.ylabel('True label') plt.xlabel('Predicted label') plt.tight_layout() # Convert the 'ims' dictionary (which maps class names to images) to a list of image classes. standard_labels = [] for img_uri in img_uris: img_uri = img_uri.split('/')[-1].split('.')[0] standard_label = [cname for cname, imgs_in_cname in ims.items() if img_uri in imgs_in_cname][0] standard_labels.append(standard_label) standard_labels = np.array(standard_labels) # Plot a confusion matrix for the full dataset.
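# (Three panels follow: all annotations, human-only, and auto-only; each title reports that subset's disagreement rate with the standard labels.)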
plt.figure(facecolor='white', figsize=(12, 4), dpi=100) plt.subplot(131) mean_err = 100 - np.mean(standard_labels == groundtruth_labels) * 100 cnf_matrix = confusion_matrix(standard_labels, groundtruth_labels) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()), title='Full annotation set error {:.2f}%'.format( mean_err), normalize=False) # Plot a confusion matrix for human-annotated Ground Truth labels. plt.subplot(132) mean_err = 100 - np.mean(standard_labels[human==1.] == groundtruth_labels[human==1.]) * 100 cnf_matrix = confusion_matrix(standard_labels[human==1.], groundtruth_labels[human==1.]) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()), title='Human annotation set (size {}) error {:.2f}%'.format( int(sum(human)), mean_err), normalize=False) # Plot a confusion matrix for auto-annotated Ground Truth labels. if sum(human==0.) > 0: plt.subplot(133) mean_err = 100 - np.mean(standard_labels[human==0.] == groundtruth_labels[human==0.]) * 100 cnf_matrix = confusion_matrix(standard_labels[human==0.], groundtruth_labels[human==0.]) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()), title='Auto-annotation set (size {}) error {:.2f}%'.format( int(len(human) - sum(human)), mean_err), normalize=False) ###Output _____no_output_____ ###Markdown Plot correct and incorrect annotationsThis cell repeats the plot from Plot the full results. However, it sorts the predictions into correct and incorrect, and indicates the standard label of all the incorrect predictions. ###Code N_SHOW_PER_CLASS = np.inf plt.figure(figsize=(10, 10), facecolor='white', dpi=100) with PdfPages('ground_truth_benchmark.pdf') as pdf: for class_name in sorted_clnames: human_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] auto_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] for class_ids_id, class_ids in enumerate([human_ids, auto_ids]): plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by {}'.format(class_name, 'humans' if class_ids_id == 0 else 'machines'), fontsize=20) plt.axis('off') good_ids = class_ids[np.where(standard_labels[class_ids] == groundtruth_labels[class_ids])[0]] bad_ids = class_ids[np.where(standard_labels[class_ids] != groundtruth_labels[class_ids])[0]] for img_id_id, img_id in enumerate(np.concatenate([good_ids, bad_ids])): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print('Plotting annotations of {}, {}/{}...'.format( class_name, img_id_id, min(len(class_ids), N_SHOW_PER_CLASS))) ax = plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') if img_id_id < len(good_ids): # Draw a green border around the image. rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='green', fill=False, transform=ax.transAxes) else: # Draw a red border around the image. rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='red', fill=False, transform=ax.transAxes) ax.add_patch(rec) pdf.savefig() plt.clf() ###Output _____no_output_____ ###Markdown Train an image classifier using Ground Truth labelsAt this stage, we have fully labeled our dataset and we can train a machine learning model to classify images based on the categories we previously defined. We'll do so using the **augmented manifest** output of our labeling job - no additional file translation or manipulation required! 
For a more complete description of the augmented manifest, see our other [example notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/ground_truth_labeling_jobs/object_detection_augmented_manifest_training/object_detection_augmented_manifest_training.ipynb). **NOTE:** Training neural networks to high accuracy often requires a careful choice of hyperparameters. In this case, we hand-picked hyperparameters that work reasonably well for this dataset. The neural net should have an accuracy of about **60% if you're using 100 datapoints, and over 95% if you're using 1000 datapoints**. To train neural networks on novel data, consider using [SageMaker's model tuning / hyperparameter optimization algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). First, we'll split our augmented manifest into a training set and a validation set using an 80/20 split. ###Code with open('output.manifest', 'r') as f: output = [json.loads(line) for line in f.readlines()] # Shuffle output in place. np.random.shuffle(output) dataset_size = len(output) train_test_split_index = round(dataset_size*0.8) train_data = output[:train_test_split_index] validation_data = output[train_test_split_index:] num_training_samples = 0 with open('train.manifest', 'w') as f: for line in train_data: f.write(json.dumps(line)) f.write('\n') num_training_samples += 1 with open('validation.manifest', 'w') as f: for line in validation_data: f.write(json.dumps(line)) f.write('\n') ###Output _____no_output_____ ###Markdown Next, we'll upload these manifest files to the previously defined S3 bucket so that they can be used in the training job. ###Code s3.upload_file('train.manifest',BUCKET, EXP_NAME + '/train.manifest') s3.upload_file('validation.manifest',BUCKET, EXP_NAME + '/validation.manifest') # Create unique job name nn_job_name_prefix = 'groundtruth-augmented-manifest-demo' timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) nn_job_name = nn_job_name_prefix + timestamp training_image = sagemaker.amazon.amazon_estimator.get_image_uri(boto3.Session().region_name, 'image-classification', repo_version='latest') training_params = \ { "AlgorithmSpecification": { "TrainingImage": training_image, "TrainingInputMode": "Pipe" }, "RoleArn": role, "OutputDataConfig": { "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME) }, "ResourceConfig": { "InstanceCount": 1, "InstanceType": "ml.p3.2xlarge", "VolumeSizeInGB": 50 }, "TrainingJobName": nn_job_name, "HyperParameters": { "epochs": "30", "image_shape": "3,224,224", "learning_rate": "0.01", "lr_scheduler_step": "10,20", "mini_batch_size": "32", "num_classes": str(num_classes), "num_layers": "18", "num_training_samples": str(num_training_samples), "resize": "224", "use_pretrained_model": "1" }, "StoppingCondition": { "MaxRuntimeInSeconds": 86400 }, "InputDataConfig": [ { "ChannelName": "train", "DataSource": { "S3DataSource": { "S3DataType": "AugmentedManifestFile", "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'train.manifest'), "S3DataDistributionType": "FullyReplicated", "AttributeNames": ["source-ref","category"] } }, "ContentType": "application/x-recordio", "RecordWrapperType": "RecordIO", "CompressionType": "None" }, { "ChannelName": "validation", "DataSource": { "S3DataSource": { "S3DataType": "AugmentedManifestFile", "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'validation.manifest'), "S3DataDistributionType": "FullyReplicated", "AttributeNames": ["source-ref","category"] } }, "ContentType":
"application/x-recordio", "RecordWrapperType": "RecordIO", "CompressionType": "None" } ] } ###Output _____no_output_____ ###Markdown Now we create the SageMaker training job. ###Code sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_training_job(**training_params) # Confirm that the training job has started print('Transform job started') while(True): status = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name)['TrainingJobStatus'] if status == 'Completed': print("Transform job ended with status: " + status) break if status == 'Failed': message = response['FailureReason'] print('Transform failed with the following error: {}'.format(message)) raise Exception('Transform job failed') time.sleep(30) ###Output _____no_output_____ ###Markdown Deploy the Model Now that we've fully labeled our dataset and have a trained model, we want to use the model to perform inference.Image classification only supports encoded .jpg and .png image formats as inference input for now. The output is the probability values for all classes encoded in JSON format, or in JSON Lines format for batch transform.This section involves several steps, Create Model - Create model for the training output Batch Transform - Create a transform job to perform batch inference. Host the model for realtime inference - Create an inference endpoint and perform realtime inference. Create Model ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) model_name="groundtruth-demo-ic-model" + timestamp print(model_name) info = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name) model_data = info['ModelArtifacts']['S3ModelArtifacts'] print(model_data) primary_container = { 'Image': training_image, 'ModelDataUrl': model_data, } create_model_response = sagemaker_client.create_model( ModelName = model_name, ExecutionRoleArn = role, PrimaryContainer = primary_container) print(create_model_response['ModelArn']) ###Output _____no_output_____ ###Markdown Batch TransformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction. Download Test DataFirst, let's download a test image that has been held out from the training and validation data. ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) batch_job_name = "image-classification-model" + timestamp batch_input = 's3://{}/{}/test/'.format(BUCKET, EXP_NAME) batch_output = 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name) # Copy two images from each class, unseen by the neural net, to a local bucket. 
test_images = [] for class_id in ['/m/04szw', '/m/02xwb', '/m/0cd4d', '/m/07dm6', '/m/0152hh']: test_images.extend([label[0] + '.jpg' for label in all_labels if (label[2] == class_id and label[3] == '1')][-2:]) !aws s3 rm $batch_input --recursive for test_img in test_images: !aws s3 cp s3://open-images-dataset/test/{test_img} {batch_input} request = \ { "TransformJobName": batch_job_name, "ModelName": model_name, "MaxConcurrentTransforms": 16, "MaxPayloadInMB": 6, "BatchStrategy": "SingleRecord", "TransformOutput": { "S3OutputPath": 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name) }, "TransformInput": { "DataSource": { "S3DataSource": { "S3DataType": "S3Prefix", "S3Uri": batch_input } }, "ContentType": "application/x-image", "SplitType": "None", "CompressionType": "None" }, "TransformResources": { "InstanceType": "ml.p2.xlarge", "InstanceCount": 1 } } print('Transform job name: {}'.format(batch_job_name)) sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_transform_job(**request) print("Created Transform job with name: ", batch_job_name) while(True): response = sagemaker_client.describe_transform_job(TransformJobName=batch_job_name) status = response['TransformJobStatus'] if status == 'Completed': print("Transform job ended with status: " + status) break if status == 'Failed': message = response['FailureReason'] print('Transform failed with the following error: {}'.format(message)) raise Exception('Transform job failed') time.sleep(30) ###Output _____no_output_____ ###Markdown After the job completes, let's inspect the prediction results. ###Code def get_label(s3_client, bucket, prefix): filename = prefix.split('/')[-1] s3_client.download_file(bucket, prefix, filename) with open(filename) as f: data = json.load(f) index = np.argmax(data['prediction']) probability = data['prediction'][index] print("Result: label - " + CLASS_LIST[index] + ", probability - " + str(probability)) input_fname = prefix.split('/')[-1][:-4] return CLASS_LIST[index], probability, input_fname # Show prediction results. !rm test_inputs/* plt.figure(facecolor='white', figsize=(7, 15), dpi=100) outputs = list_objects(s3, BUCKET, urlparse(batch_output).path.lstrip('/')) outputs = [get_label(s3, BUCKET, prefix) for prefix in outputs] outputs.sort(key=lambda pred: pred[1], reverse=True) for fname_id, (pred_cname, pred_conf, pred_fname) in enumerate(outputs): !aws s3 cp {batch_input}{pred_fname} test_inputs/{pred_fname} plt.subplot(5, 2, fname_id+1) img = imageio.imread('test_inputs/{}'.format(pred_fname)) plt.imshow(img) plt.axis('off') plt.title('{}\nconfidence={:.2f}'.format(pred_cname, pred_conf)) if RUN_FULL_AL_DEMO: warning = '' else: warning = ('\nNOTE: In this small demo we only used 80 images to train the neural network.\n' 'The predictions will be far from perfect! Set RUN_FULL_AL_DEMO=True to see properly trained results.') plt.suptitle('Predictions sorted by confidence.{}'.format(warning)) ###Output _____no_output_____ ###Markdown Realtime InferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps, Create endpoint configuration - Create a configuration defining an endpoint. Create endpoint - Use the configuration to create an inference endpoint. Perform inference - Perform inference on some input data using the endpoint. 
Clean up - Delete the endpoint and model Create Endpoint Configuration ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) endpoint_config_name = job_name + '-epc' + timestamp endpoint_config_response = sagemaker_client.create_endpoint_config( EndpointConfigName = endpoint_config_name, ProductionVariants=[{ 'InstanceType':'ml.m4.xlarge', 'InitialInstanceCount':1, 'ModelName':model_name, 'VariantName':'AllTraffic'}]) print('Endpoint configuration name: {}'.format(endpoint_config_name)) print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn'])) ###Output _____no_output_____ ###Markdown Create EndpointLastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes about 10 minutes to complete. ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) endpoint_name = job_name + '-ep' + timestamp print('Endpoint name: {}'.format(endpoint_name)) endpoint_params = { 'EndpointName': endpoint_name, 'EndpointConfigName': endpoint_config_name, } endpoint_response = sagemaker_client.create_endpoint(**endpoint_params) print('EndpointArn = {}'.format(endpoint_response['EndpointArn'])) # get the status of the endpoint response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = response['EndpointStatus'] print('EndpointStatus = {}'.format(status)) # wait until the status has changed sagemaker_client.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name) # print the status of the endpoint endpoint_response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = endpoint_response['EndpointStatus'] print('Endpoint creation ended with EndpointStatus = {}'.format(status)) if status != 'InService': raise Exception('Endpoint creation failed.') with open('test_inputs/{}'.format(test_images[0]), 'rb') as f: payload = f.read() payload = bytearray(payload) client = boto3.client('sagemaker-runtime') response = client.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/x-image', Body=payload) # `response` comes in a json format, let's unpack it. result = json.loads(response['Body'].read()) # The result outputs the probabilities for all classes. # Find the class with maximum probability and print the class name. print('Model prediction is: {}'.format(CLASS_LIST[np.argmax(result)])) ###Output _____no_output_____ ###Markdown Finally, let's clean up and delete this endpoint. ###Code sagemaker_client.delete_endpoint(EndpointName=endpoint_name) ###Output _____no_output_____ ###Markdown From Unlabeled Data to a Deployed Machine Learning Model: A SageMaker Ground Truth Demonstration for Image Classification1. [Introduction](Introduction)2. [Run a Ground Truth labeling job (time: about 3h)](Run-a-Ground-Truth-labeling-job) 1. [Prepare the data](Prepare-the-data) 2. [Specify the categories](Specify-the-categories) 3. [Create the instruction template](Create-the-instruction-template) 4. [Create a private team to test your task [OPTIONAL]](Create-a-private-team-to-test-your-task-[OPTIONAL]) 5. [Define pre-built lambda functions for use in the labeling job](Define-pre-built-lambda-functions-for-use-in-the-labeling-job) 6. [Submit the Ground Truth job request](Submit-the-Ground-Truth-job-request) 1. [Verify your task using a private team [OPTIONAL]](Verify-your-task-using-a-private-team-[OPTIONAL]) 7. 
[Monitor job progress](Monitor-job-progress)3. [Analyze Ground Truth labeling job results (time: about 20min)](Analyze-Ground-Truth-labeling-job-results) 1. [Postprocess the output manifest](Postprocess-the-output-manifest) 2. [Plot class histograms](Plot-class-histograms) 3. [Plot annotated images](Plot-annotated-images) 1. [Plot a small output sample](Plot-a-small-output-sample) 2. [Plot the full results](Plot-the-full-results)4. [Compare Ground Truth results to standard labels (time: about 5min)](Compare-Ground-Truth-results-to-standard-labels) 1. [Compute accuracy](Compute-accuracy) 2. [Plot correct and incorrect annotations](Plot-correct-and-incorrect-annotations)5. [Train an image classifier using Ground Truth labels (time: about 15min)](Train-an-image-classifier-using-Ground-Truth-labels)6. [Deploy the Model (time: about 20min)](Deploy-the-Model) 1. [Create Model](Create-Model) 2. [Batch Transform](Batch-Transform) 3. [Realtime Inference](Realtime-Inference) 1. [Create Endpoint Configuration](Create-Endpoint-Configuration) 2. [Create Endpoint](Create-Endpoint) 3. [Perform Inference](Perform-Inference)7. [Review](Review) IntroductionThis sample notebook takes you through an end-to-end workflow to demonstrate the functionality of SageMaker Ground Truth. We'll start with an unlabeled image data set, acquire labels for all the images using SageMaker Ground Truth, analyze the results of the labeling job, train an image classifier, host the resulting model, and, finally, use it to make predictions. Before you begin, we highly recommend you start a Ground Truth labeling job through the AWS Console first to familiarize yourself with the workflow. The AWS Console offers less flexibility than the API, but is simple to use. Cost and runtimeYou can run this demo in two modes:1. Set `RUN_FULL_AL_DEMO = True` in the next cell to label 1000 images. This should cost about \$100 given current [Ground Truth pricing scheme](https://aws.amazon.com/sagemaker/groundtruth/pricing/). In order to reduce the cost, we will use Ground Truth's auto-labeling feature. Auto-labeling uses computer vision to learn from human responses and automatically create labels for the easiest images at a cheap price. The total end-to-end runtime should be about 4h.1. Set `RUN_FULL_AL_DEMO = False` in the next cell to label only 100 images. This should cost about \$15. **Since Ground Truth's auto-labeling feature only kicks in for datasets of 1000 images or more, this cheaper version of the demo will not use it. Some of the analysis plots might look awkward, but you should still be able to see good results on the human-annotated 100 images.** PrerequisitesTo run this notebook, you can simply execute each cell one-by-one. To understand what's happening, you'll need:* An S3 bucket you can write to -- please provide its name in the following cell. The bucket must be in the same region as this SageMaker Notebook instance. You can also change the `EXP_NAME` to any valid S3 prefix. All the files related to this experiment will be stored in that prefix of your bucket.* Familiarity with Python and [numpy](http://www.numpy.org/).* Basic familiarity with [AWS S3](https://docs.aws.amazon.com/s3/index.html),* Basic understanding of [AWS Sagemaker](https://aws.amazon.com/sagemaker/),* Basic familiarity with [AWS Command Line Interface (CLI)](https://aws.amazon.com/cli/) -- set it up with credentials to access the AWS account you're running this notebook from. 
This should work out-of-the-box on SageMaker Jupyter Notebook instances. This notebook is only tested on a SageMaker notebook instance. The runtimes given are approximate; we used an `ml.m4.xlarge` instance in our tests. However, you can likely run it on a local instance by first executing the cell below on SageMaker, and then copying the `role` string to your local copy of the notebook. NOTE: This notebook will create/remove subdirectories in its working directory. We recommend placing this notebook in its own directory before running it. ###Code %matplotlib inline %load_ext autoreload %autoreload 2 import os from collections import namedtuple from collections import defaultdict from collections import Counter import itertools import json import random import time import imageio import numpy as np import matplotlib import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from sklearn.metrics import confusion_matrix import boto3 import sagemaker from urllib.parse import urlparse BUCKET = '<< YOUR S3 BUCKET NAME >>' assert BUCKET != '<< YOUR S3 BUCKET NAME >>', 'Please provide a custom S3 bucket name.' EXP_NAME = 'ground-truth-ic-demo' # Any valid S3 prefix. RUN_FULL_AL_DEMO = True # See 'Cost and Runtime' in the Markdown cell above! # Make sure the bucket is in the same region as this notebook. role = sagemaker.get_execution_role() region = boto3.session.Session().region_name s3 = boto3.client('s3') bucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region'] assert bucket_region == region, "Your S3 bucket {} and this notebook need to be in the same region.".format(BUCKET) ###Output _____no_output_____ ###Markdown Run a Ground Truth labeling job **This section should take about 3h to complete.** We will first run a labeling job. This involves several steps: collecting the images we want labeled, specifying the possible label categories, creating instructions, and writing a labeling job specification. In addition, we highly recommend running a (free) mock job using a private workforce before you submit any job to the public workforce. This notebook will explain how to do that as an optional step. Without using a private workforce, this section until completion of your labeling job should take about 3h. However, this may vary depending on the availability of the public annotation workforce. Prepare the data We will first download images and labels of a subset of the [Google Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html). These labels were [carefully verified](https://storage.googleapis.com/openimages/web/factsfigures.html). Later, we will compare Ground Truth annotations to these labels. Our dataset will include images in the following categories: * Musical Instrument (500 images) * Fruit (370 images) * Cheetah (50 images) * Tiger (40 images) * Snowman (40 images) If you chose `RUN_FULL_AL_DEMO = False`, then we will choose a subset of 100 images in this dataset. This is a diverse dataset of interesting images, and should be fun for the human annotators to work with. You are free to ask the annotators to annotate any images you wish (as long as the images do not contain adult content; if they do, you must adjust the labeling job request this notebook produces; please check the Ground Truth documentation). We will copy these images to our local `BUCKET`, and will create the corresponding *input manifest*.
The input manifest is a formatted list of the S3 locations of the images we want Ground Truth to annotate. We will upload this manifest to our S3 `BUCKET`. Disclosure regarding the Open Images Dataset V4: Open Images Dataset V4 is created by Google Inc. We have not modified the images or the accompanying annotations. You can obtain the images and the annotations [here](https://storage.googleapis.com/openimages/web/download.html). The annotations are licensed by Google Inc. under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license. The images are listed as having a [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/) license. The following paper describes Open Images V4 in depth: from the data collection and annotation to detailed statistics about the data and evaluation of models trained on it. A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, T. Duerig, and V. Ferrari. *The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale.* arXiv:1811.00982, 2018. ([link to PDF](https://arxiv.org/abs/1811.00982)) ###Code # Download and process the Open Images annotations. !wget https://storage.googleapis.com/openimages/2018_04/test/test-annotations-human-imagelabels-boxable.csv -O openimgs-annotations.csv with open('openimgs-annotations.csv', 'r') as f: all_labels = [line.strip().split(',') for line in f.readlines()] # Extract image ids in each of our desired classes. ims = {} ims['Musical Instrument'] = [label[0] for label in all_labels if (label[2] == '/m/04szw' and label[3] == '1')][:500] ims['Fruit'] = [label[0] for label in all_labels if (label[2] == '/m/02xwb' and label[3] == '1')][:371] ims['Fruit'].remove('02a54f6864478101') # This image contains personal information, let's remove it from our dataset. ims['Cheetah'] = [label[0] for label in all_labels if (label[2] == '/m/0cd4d' and label[3] == '1')][:50] ims['Tiger'] = [label[0] for label in all_labels if (label[2] == '/m/07dm6' and label[3] == '1')][:40] ims['Snowman'] = [label[0] for label in all_labels if (label[2] == '/m/0152hh' and label[3] == '1')][:40] num_classes = len(ims) # If running the short version of the demo, reduce each class count 10 times. for key in ims.keys(): if RUN_FULL_AL_DEMO is False: ims[key] = set(ims[key][:int(len(ims[key]) / 10)]) else: ims[key] = set(ims[key]) # Copy the images to our local bucket. s3 = boto3.client('s3') for img_id, img in enumerate(itertools.chain.from_iterable(ims.values())): if (img_id + 1) % 10 == 0: print('Copying image {} / {}'.format((img_id+1), 1000)) copy_source = { 'Bucket': 'open-images-dataset', 'Key': 'test/{}.jpg'.format(img) } s3.copy(copy_source, BUCKET, '{}/images/{}.jpg'.format(EXP_NAME, img)) # Create and upload the input manifest. manifest_name = 'input.manifest' with open(manifest_name, 'w') as f: for img in itertools.chain.from_iterable(ims.values()): img_path = 's3://{}/{}/images/{}.jpg'.format(BUCKET, EXP_NAME, img) f.write('{"source-ref": "' + img_path +'"}\n') s3.upload_file(manifest_name, BUCKET, EXP_NAME + '/' + manifest_name) ###Output _____no_output_____ ###Markdown After running the cell above, you should be able to go to `s3://BUCKET/EXP_NAME/images` in [S3 console](https://console.aws.amazon.com/s3/) and see a thousand images. We recommend you inspect the contents of these images! You can download them all to a local machine using the AWS CLI.
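If you want to spot-check the data locally, here is a minimal sketch (it assumes the AWS CLI is configured on this instance; the local folder name `local_images/` is just an illustrative choice): ###Code # Peek at the first few manifest lines; each is a JSON object of the form {"source-ref": "s3://.../images/<id>.jpg"}. !head -3 input.manifest # Mirror the uploaded images to a local folder for visual inspection. !aws s3 sync s3://{BUCKET}/{EXP_NAME}/images local_images/ --quiet ###Output _____no_output_____ ###Markdown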
Specify the categoriesTo run an image classification labeling job, you need to decide on a set of classes the annotators can choose from. In our case, this list is `["Musical Instrument", "Fruit", "Cheetah", "Tiger", "Snowman"]`. In your own job you can choose any list of up to 10 classes. We recommend the classes to be as unambiguous and concrete as possible. The categories should be mutually exclusive, with only one correct label per image. In addition, be careful to make the task as *objective* as possible, unless of course your intention is to obtain subjective labels.* Example good category lists: `["Human", "No Human"]`, `["Golden Retriever", "Labrador", "English Bulldog", "German Shepherd"]`, `["Car", "Train", "Ship", "Pedestrian"]`.* Example bad category lists: `["Prominent object", "Not prominent"]` (meaning unclear), `["Beautiful", "Ugly"]` (subjective), `["Dog", "Animal", "Car"]` (not mutually exclusive). To work with Ground Truth, this list needs to be converted to a .json file and uploaded to the S3 `BUCKET`.*Note: The ordering of the labels or classes in the template governs the class indices that you will see downstream in the output manifest (this numbering is zero-indexed). In other words, the class that appears second in the template will correspond to class "1" in the output. At the end of this demonstration, we will train a model and make predictions, and this class ordering is instrumental to interpreting the results.* ###Code CLASS_LIST = list(ims.keys()) print("Label space is {}".format(CLASS_LIST)) json_body = { 'labels': [{'label': label} for label in CLASS_LIST] } with open('class_labels.json', 'w') as f: json.dump(json_body, f) s3.upload_file('class_labels.json', BUCKET, EXP_NAME + '/class_labels.json') ###Output _____no_output_____ ###Markdown You should now see `class_labels.json` in `s3://BUCKET/EXP_NAME/`. Create the instruction templatePart or all of your images will be annotated by human annotators. It is **essential** to provide good instructions that help the annotators give you the annotations you want. Good instructions are:1. Concise. We recommend limiting verbal/textual instruction to two sentences, and focusing on clear visuals.2. Visual. In the case of image classification, we recommend providing one labeled image in each of the classes as part of the instruction.When used through the AWS Console, Ground Truth helps you create the instructions using a visual wizard. When using the API, you need to create an HTML template for your instructions. Below, we prepare a very simple but effective template and upload it to your S3 bucket.NOTE: If you use any images in your template (as we do), they need to be publicly accessible. You can enable public access to files in your S3 bucket through the S3 Console, as described in [S3 Documentation](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/set-object-permissions.html). Testing your instructionsIt is very easy to create broken instructions. This might cause your labeling job to fail. However, it might also cause your job to complete with meaningless results (when the annotators have no idea what to do, or the instructions are plain wrong). We *highly recommend* that you verify that your task is correct in two ways:1. The following cell creates and uploads a file called `instructions.template` to S3. It also creates `instructions.html` that you can open in a local browser window. 
Please do so and inspect the resulting web page; it should correspond to what you want your annotators to see (except the actual image to annotate will not be visible). 2. Run your job in a private workforce, which is a way to run a mock labeling job. We describe how to do it in [Verify your task using a private team [OPTIONAL]](Verify-your-task-using-a-private-team-[OPTIONAL]). ###Code img_examples = ['https://s3.amazonaws.com/open-images-dataset/test/{}'.format(img_id) for img_id in ['0634825fc1dcc96b.jpg', '0415b6a36f3381ed.jpg', '8582cc08068e2d0f.jpg', '8728e9fa662a8921.jpg', '926d31e8cde9055e.jpg']] def make_template(test_template=False, save_fname='instructions.template'): template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script> <crowd-form> <crowd-image-classifier name="crowd-image-classifier" src="{{{{ task.input.taskObject | grant_read_access }}}}" header="Dear Annotator, please tell me what you can see in the image. Thank you!" categories="{categories_str}" > <full-instructions header="Image classification instructions"> </full-instructions> <short-instructions> <p>Dear Annotator, please tell me what you can see in the image. Thank you!</p> <p><img src="{}" style="max-width:100%"> <br>Example "Musical Instrument". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Fruit".</p> <p><img src="{}" style="max-width:100%"> <br>Example "Cheetah". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Tiger". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Snowman". </p> </short-instructions> </crowd-image-classifier> </crowd-form>""".format(*img_examples, categories_str=str(CLASS_LIST) if test_template else '{{ task.input.labels | to_json | escape }}') with open(save_fname, 'w') as f: f.write(template) if test_template is False: print(template) make_template(test_template=True, save_fname='instructions.html') make_template(test_template=False, save_fname='instructions.template') s3.upload_file('instructions.template', BUCKET, EXP_NAME + '/instructions.template') ###Output _____no_output_____ ###Markdown You should now be able to find your template in `s3://BUCKET/EXP_NAME/instructions.template`. Create a private team to test your task [OPTIONAL] This step requires you to use the AWS Console. However, we **highly recommend** that you follow it, especially when creating your own task with a custom dataset, label set, and template. We will create a `private workteam` and add only one user (you) to it. Then, we will modify the Ground Truth API job request to send the task to that workforce. You will then be able to see your annotation job exactly as the public annotators would see it. You can even annotate the whole dataset yourself! To create a private team: 1. Go to `AWS Console > Amazon SageMaker > Labeling workforces`. 2. Click "Private" and then "Create private team". 3. Enter the desired name for your private workteam. 4. Enter your own email address in the "Email addresses" section. 5. Enter the name of your organization and a contact email to administrate the private workteam. 6. Click "Create Private Team". 7. The AWS Console should now return to `AWS Console > Amazon SageMaker > Labeling workforces`. Your newly created team should be visible under "Private teams". Next to it you will see an `ARN` which is a long string that looks like `arn:aws:sagemaker:region-name-123456:workteam/private-crowd/team-name`. Copy this ARN in the cell below. 8.
You should get an email from `[email protected]` that contains your workforce username and password. 9. In `AWS Console > Amazon SageMaker > Labeling workforces`, click on the URL in `Labeling portal sign-in URL`. Use the email/password combination from Step 8 to log in (you will be asked to create a new, non-default password). That's it! This is your private worker's interface. When we create a verification task in [Verify your task using a private team](Verify-your-task-using-a-private-team-[OPTIONAL]) below, your task should appear in this window. You can invite your colleagues to participate in the labeling job by clicking the "Invite new workers" button. The [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-private.html) has more details on the management of private workteams. ###Code private_workteam_arn = '<< your private workteam ARN here >>' ###Output _____no_output_____ ###Markdown Define pre-built lambda functions for use in the labeling job Before we submit the request, we need to define the ARNs for four key components of the labeling job: 1) the workteam, 2) the annotation consolidation Lambda function, 3) the pre-labeling task Lambda function, and 4) the machine learning algorithm to perform auto-annotation. These functions are defined by strings with region names and AWS service account numbers, so we will define a mapping below that will enable you to run this notebook in any of our supported regions. See the official documentation for the available ARNs:* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-public.html) for a discussion of the workteam ARN definition. There is only one valid selection if you choose to use the public workforce; if you elect to use a private workteam, you should check the corresponding ARN for the workteam.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_HumanTaskConfig.htmlSageMaker-Type-HumanTaskConfig-PreHumanTaskLambdaArn) for available pre-human ARNs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_AnnotationConsolidationConfig.htmlSageMaker-Type-AnnotationConsolidationConfig-AnnotationConsolidationLambdaArn) for available annotation consolidation ARNs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_LabelingJobAlgorithmsConfig.htmlSageMaker-Type-LabelingJobAlgorithmsConfig-LabelingJobAlgorithmSpecificationArn) for available auto-labeling ARNs for other workflows. ###Code # Specify ARNs for resources needed to run an image classification job. ac_arn_map = {'us-west-2': '081040173940', 'us-east-1': '432418664414', 'us-east-2': '266458841044', 'eu-west-1': '568282634449', 'ap-northeast-1': '477331159723'} prehuman_arn = 'arn:aws:lambda:{}:{}:function:PRE-ImageMultiClass'.format(region, ac_arn_map[region]) acs_arn = 'arn:aws:lambda:{}:{}:function:ACS-ImageMultiClass'.format(region, ac_arn_map[region]) labeling_algorithm_specification_arn = 'arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/image-classification'.format(region) workteam_arn = 'arn:aws:sagemaker:{}:394669845002:workteam/public-crowd/default'.format(region) ###Output _____no_output_____ ###Markdown Submit the Ground Truth job request The API starts a Ground Truth job by submitting a request.
The request contains the full configuration of the annotation task, and allows you to modify the fine details of the job that are fixed to default values when you use the AWS Console. The parameters that make up the request are described in more detail in the [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateLabelingJob.html). After you submit the request, you should be able to see the job in your AWS Console, at `Amazon SageMaker > Labeling Jobs`. You can track the progress of the job there. This job will take several hours to complete. If your job is larger (say 100,000 images), the speed and cost benefit of auto-labeling should be larger. Verify your task using a private team [OPTIONAL] If you chose to follow the steps in [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]), then you can first verify that your task runs as expected. To do this: 1. Set VERIFY_USING_PRIVATE_WORKFORCE to True in the cell below. 2. Run the next two cells. This will define the task and submit it to the private workforce (to you). 3. After a few minutes, you should be able to see your task in your private workforce interface [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]). Please verify that the task appears as you want it to appear. 4. If everything is in order, change `VERIFY_USING_PRIVATE_WORKFORCE` to `False` and rerun the cell below to start the real annotation task! ###Code VERIFY_USING_PRIVATE_WORKFORCE = False USE_AUTO_LABELING = True task_description = 'What do you see: a {}?'.format(' a '.join(CLASS_LIST)) task_keywords = ['image', 'classification', 'humans'] task_title = task_description job_name = 'ground-truth-demo-' + str(int(time.time())) human_task_config = { "AnnotationConsolidationConfig": { "AnnotationConsolidationLambdaArn": acs_arn, }, "PreHumanTaskLambdaArn": prehuman_arn, "MaxConcurrentTaskCount": 200, # 200 images will be sent at a time to the workteam. "NumberOfHumanWorkersPerDataObject": 3, # 3 separate workers will be required to label each image. "TaskAvailabilityLifetimeInSeconds": 21600, # Your workteam has 6 hours to complete all pending tasks. "TaskDescription": task_description, "TaskKeywords": task_keywords, "TaskTimeLimitInSeconds": 300, # Each image must be labeled within 5 minutes.
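# The title below is what workers see when browsing available tasks in the labeling portal.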
"TaskTitle": task_title, "UiConfig": { "UiTemplateS3Uri": 's3://{}/{}/instructions.template'.format(BUCKET, EXP_NAME), } } if not VERIFY_USING_PRIVATE_WORKFORCE: human_task_config["PublicWorkforceTaskPrice"] = { "AmountInUsd": { "Dollars": 0, "Cents": 1, "TenthFractionsOfACent": 2, } } human_task_config["WorkteamArn"] = workteam_arn else: human_task_config["WorkteamArn"] = private_workteam_arn ground_truth_request = { "InputConfig" : { "DataSource": { "S3DataSource": { "ManifestS3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, manifest_name), } }, "DataAttributes": { "ContentClassifiers": [ "FreeOfPersonallyIdentifiableInformation", "FreeOfAdultContent" ] }, }, "OutputConfig" : { "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME), }, "HumanTaskConfig" : human_task_config, "LabelingJobName": job_name, "RoleArn": role, "LabelAttributeName": "category", "LabelCategoryConfigS3Uri": 's3://{}/{}/class_labels.json'.format(BUCKET, EXP_NAME), } if USE_AUTO_LABELING and RUN_FULL_AL_DEMO: ground_truth_request[ "LabelingJobAlgorithmsConfig"] = { "LabelingJobAlgorithmSpecificationArn": labeling_algorithm_specification_arn } sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_labeling_job(**ground_truth_request) ###Output _____no_output_____ ###Markdown Monitor job progressA Ground Truth job can take a few hours to complete (if your dataset is larger than 10000 images, it can take much longer than that!). One way to monitor the job's progress is through AWS Console. In this notebook, we will use Ground Truth output files and Cloud Watch logs in order to monitor the progress. You can re-evaluate the next two cells repeatedly. You can re-evaluate the next cell repeatedly. It sends a `describe_labelging_job` request which should tell you whether the job is completed or not. If it is, then 'LabelingJobStatus' will be 'Completed'. ###Code sagemaker_client.describe_labeling_job(LabelingJobName=job_name) ###Output _____no_output_____ ###Markdown The next cell extract detailed information on how your job is doing to-date. You can re-evaluate it at any time. It should give you:* The number of human and machine-annotated images in each category across the iterations of your labeling job.* The training curves of any neural network training jobs launched by Ground Truth **(only if you are running with `RUN_FULL_AL_DEMO=True`)**.* The cost of the human- and machine-annotatoed labels.To understand the pricing, study [the pricing doc](https://aws.amazon.com/sagemaker/groundtruth/pricing/) carefully. In our case, each human label costs `$0.08 + 3 * $0.012 = $0.116` and each auto-label costs `$0.08`. There is also a small added cost of using SageMaker instances for neural net training and inference during auto-labeling. However, this should be insignificant compared the other costs.If `RUN_FULL_AL_DEMO==True`, then the job will proceed in multiple iterations. * Iteration 1: Ground Truth will send out 10 images as 'probes' for human annotation. If these are succesfully annotated, proceed to Iteration 2.* Iteration 2: Send out a batch of `MaxConcurrentTaskCount - 10` (in our case, 190) images for human annotation to obtain an active learning training batch.* Iteration 3: Send out another batch of 200 images for human annotation to obtain an active learning validation set.* Iteration 4a: Train a neural net to do auto-labeling. Auto-label as many datapoints as possible. 
* Iteration 4b: If there is any data left over, send out at most 200 images for human annotation.* Repeat Iterations 4a and 4b until all data is annotated.If `RUN_FULL_AL_DEMO==False`, only Iterations 1 and 2 will happen. ###Code from datetime import datetime import glob import shutil HUMAN_PRICE = 0.116 AUTO_PRICE = 0.08 try: os.makedirs('ic_output_data/', exist_ok=False) except FileExistsError: shutil.rmtree('ic_output_data/') os.makedirs('ic_output_data/') # Recreate the directory after clearing stale results. S3_OUTPUT = boto3.client('sagemaker').describe_labeling_job(LabelingJobName=job_name)[ 'OutputConfig']['S3OutputPath'] + job_name # Download human annotation data. !aws s3 cp {S3_OUTPUT + '/annotations/worker-response'} ic_output_data/worker-response --recursive --quiet worker_times = [] worker_ids = [] # Collect the times and worker ids of all the annotation events to-date. for annot_fname in glob.glob('ic_output_data/worker-response/**', recursive=True): if annot_fname.endswith('json'): with open(annot_fname, 'r') as f: annot_data = json.load(f) for answer in annot_data['answers']: annot_time = datetime.strptime( answer['submissionTime'], '%Y-%m-%dT%H:%M:%S.%fZ') annot_id = answer['workerId'] worker_times.append(annot_time) worker_ids.append(annot_id) sort_ids = np.argsort(worker_times) worker_times = np.array(worker_times)[sort_ids] worker_ids = np.array(worker_ids)[sort_ids] cumulative_n_annots = np.cumsum([1 for _ in worker_times]) # Count the number of annotations per unique worker id. annots_per_worker = np.zeros(worker_ids.size) ids_store = set() for worker_id_id, worker_id in enumerate(worker_ids): ids_store.add(worker_id) annots_per_worker[worker_id_id] = float( cumulative_n_annots[worker_id_id]) / len(ids_store) # Count number of human annotations in each class each iteration. !aws s3 cp {S3_OUTPUT + '/annotations/consolidated-annotation/consolidation-response'} ic_output_data/consolidation-response --recursive --quiet consolidated_classes = defaultdict(list) consolidation_times = {} consolidated_cost_times = [] for consolidated_fname in glob.glob('ic_output_data/consolidation-response/**', recursive=True): if consolidated_fname.endswith('json'): iter_id = int(consolidated_fname.split('/')[-2][-1]) # Store the time of the most recent consolidation event as iteration time. iter_time = datetime.strptime(consolidated_fname.split('/')[-1], '%Y-%m-%d_%H:%M:%S.json') if iter_id in consolidation_times: consolidation_times[iter_id] = max(consolidation_times[iter_id], iter_time) else: consolidation_times[iter_id] = iter_time consolidated_cost_times.append(iter_time) with open(consolidated_fname, 'r') as f: consolidated_data = json.load(f) for consolidation in consolidated_data: consolidation_class = consolidation['consolidatedAnnotation']['content'][ 'category-metadata']['class-name'] consolidated_classes[iter_id].append(consolidation_class) total_human_labels = sum([len(annots) for annots in consolidated_classes.values()]) # Count the number of machine annotations in each class each iteration. 
!aws s3 cp {S3_OUTPUT + '/activelearning'} ic_output_data/activelearning --recursive --quiet auto_classes = defaultdict(list) auto_times = {} auto_cost_times = [] for auto_fname in glob.glob('ic_output_data/activelearning/**', recursive=True): if auto_fname.endswith('auto_annotator_output.txt'): iter_id = int(auto_fname.split('/')[-3]) with open(auto_fname, 'r') as f: annots = [' '.join(l.split()[1:]) for l in f.readlines()] for annot in annots: annot = json.loads(annot) time_str = annot['category-metadata']['creation-date'] auto_time = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f') auto_class = annot['category-metadata']['class-name'] auto_classes[iter_id].append(auto_class) if iter_id in auto_times: auto_times[iter_id] = max(auto_times[iter_id], auto_time) else: auto_times[iter_id] = auto_time auto_cost_times.append(auto_time) total_auto_labels = sum([len(annots) for annots in auto_classes.values()]) n_iters = max(len(auto_times), len(consolidation_times)) def get_training_job_data(training_job_name): logclient = boto3.client('logs') log_group_name = '/aws/sagemaker/TrainingJobs' log_stream_name = logclient.describe_log_streams(logGroupName=log_group_name, logStreamNamePrefix=training_job_name)['logStreams'][0]['logStreamName'] train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True ) events = train_log['events'] next_token = train_log['nextForwardToken'] while True: train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True, nextToken=next_token ) if train_log['nextForwardToken'] == next_token: break events = events + train_log['events'] errors = [] for event in events: msg = event['message'] if 'Final configuration' in msg: num_samples = int(msg.split('num_training_samples\': u\'')[1].split('\'')[0]) elif 'Validation-accuracy' in msg: errors.append(float(msg.split('Validation-accuracy=')[1])) errors = 1 - np.array(errors) return num_samples, errors training_data = !aws s3 ls {S3_OUTPUT + '/training/'} --recursive training_sizes = [] training_errors = [] training_iters = [] for line in training_data: if line.split('/')[-1] == 'model.tar.gz': training_job_name = line.split('/')[-3] n_samples, errors = get_training_job_data(training_job_name) training_sizes.append(n_samples) training_errors.append(errors) training_iters.append(int(line.split('/')[-5])) plt.figure(facecolor='white', figsize=(14, 4), dpi=100) ax = plt.subplot(131) plt.title('Label counts ({} human, {} auto)'.format( total_human_labels, total_auto_labels)) cmap = plt.get_cmap('coolwarm') for iter_id in consolidated_classes.keys(): bottom = 0 class_counter = Counter(consolidated_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): if iter_id == 1: plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom, label=cname, color=cmap(cname_id / float(len(CLASS_LIST)-1))) else: plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] for iter_id in auto_classes.keys(): bottom = 0 class_counter = Counter(auto_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): plt.bar(iter_id + .4, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] tick_labels_human = ['Iter {}, human'.format(iter_id + 1) for iter_id in range(n_iters)] tick_labels_auto = ['Iter {}, auto'.format(iter_id + 1) for iter_id in range(n_iters)] 
tick_locations_human = np.arange(n_iters) + 1 tick_locations_auto = tick_locations_human + .4 tick_labels = np.concatenate([[tick_labels_human[idx], tick_labels_auto[idx]] for idx in range(n_iters)]) tick_locations = np.concatenate([[tick_locations_human[idx], tick_locations_auto[idx]] for idx in range(n_iters)]) plt.xticks(tick_locations, tick_labels, rotation=90) plt.legend() plt.ylabel('Count') ax = plt.subplot(132) total_human = 0 total_auto = 0 for iter_id in range(1, n_iters + 1): cost_human = len(consolidated_classes[iter_id]) * HUMAN_PRICE cost_auto = len(auto_classes[iter_id]) * AUTO_PRICE total_human += cost_human total_auto += cost_auto plt.bar(iter_id, cost_human, width=.8, color='gray', hatch='/', edgecolor='k', label='human' if iter_id==1 else None) plt.bar(iter_id, cost_auto, bottom=cost_human, width=.8, color='gray', edgecolor='k', label='auto' if iter_id==1 else None) plt.title('Annotation costs (\${:.2f} human, \${:.2f} auto)'.format( total_human, total_auto)) plt.xlabel('Iter') plt.ylabel('Cost in dollars') plt.legend() if len(training_sizes) > 0: plt.subplot(133) plt.title('Active learning training curves') plt.grid(True) cmap = plt.get_cmap('coolwarm') n_all = len(training_sizes) for iter_id_id, (iter_id, size, errs) in enumerate(zip(training_iters, training_sizes, training_errors)): plt.plot(errs, label='Iter {}, auto'.format(iter_id + 1), color=cmap(iter_id_id / max(1, (n_all-1)))) plt.legend() plt.xscale('log') plt.xlabel('Training epoch') plt.ylabel('Validation error') ###Output _____no_output_____ ###Markdown Analyze Ground Truth labeling job results**This section should take about 20min to complete.**After the job finishes running (**make sure `sagemaker_client.describe_labeling_job` shows the job is complete!**), it is time to analyze the results. The plots in the [Monitor job progress](Monitor-job-progress) section form part of the analysis. In this section, we will gain additional insights into the results, all contained in the `output manifest`. You can find the location of the output manifest under `AWS Console > SageMaker > Labeling Jobs > [name of your job]`. We will obtain it programmatically in the cell below. Postprocess the output manifestNow that the job is complete, we will download the output manifest and postprocess it to form four arrays:* `img_uris` contains the S3 URIs of all the images that Ground Truth annotated.* `groundtruth_labels` contains Ground Truth's labels for each image in `img_uris`.* `confidences` contains the confidence of each label in `groundtruth_labels`.* `human` is a flag array that contains 1 at indices corresponding to images annotated by human annotators, and 0 at indices corresponding to images annotated by Ground Truth's automated data labeling. ###Code # Load the output manifest's annotations. OUTPUT_MANIFEST = 's3://{}/{}/output/{}/manifests/output/output.manifest'.format(BUCKET, EXP_NAME, job_name) !aws s3 cp {OUTPUT_MANIFEST} 'output.manifest' with open('output.manifest', 'r') as f: output = [json.loads(line.strip()) for line in f.readlines()] # Create data arrays. img_uris = [None] * len(output) confidences = np.zeros(len(output)) groundtruth_labels = [None] * len(output) human = np.zeros(len(output)) # Find the job name the manifest corresponds to. keys = list(output[0].keys()) metakey = keys[np.where([('-metadata' in k) for k in keys])[0][0]] jobname = metakey[:-9] # Extract the data. 
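# Each manifest line is a JSON object: 'source-ref' holds the image URI, and the '<job-name>-metadata' dict holds the consolidated 'class-name', its 'confidence', and a 'human-annotated' yes/no flag.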
for datum_id, datum in enumerate(output): img_uris[datum_id] = datum['source-ref'] groundtruth_labels[datum_id] = str(datum[metakey]['class-name']) confidences[datum_id] = datum[metakey]['confidence'] human[datum_id] = int(datum[metakey]['human-annotated'] == 'yes') groundtruth_labels = np.array(groundtruth_labels) ###Output _____no_output_____ ###Markdown Plot class histogramsNow, let's plot the class histograms. The next cell should produce three subplots:* The left subplot shows the number of images annotated as belonging to each visual category. The categories will be sorted from the most to the least numerous. Each bar is divided into a 'human' and 'machine' part which shows how many images were annotated as a given category by human annotators and by the automated data labeling mechanism.* The middle subplot is the same as the left, except the y-axis is in log scale. This helps visualize unbalanced datasets where some categories contain orders of magnitude more images than others.* The right subplot shows the average confidence of images in each category, separately for human and auto-annotated images. ###Code # Compute the number of annotations in each class. n_classes = len(set(groundtruth_labels)) sorted_clnames, class_sizes = zip(*Counter(groundtruth_labels).most_common(n_classes)) # Find ids of human-annotated images. human_sizes = [human[groundtruth_labels == clname].sum() for clname in sorted_clnames] class_sizes = np.array(class_sizes) human_sizes = np.array(human_sizes) # Compute the average annotation confidence per class. human_confidences = np.array([confidences[np.logical_and(groundtruth_labels == clname, human)] for clname in sorted_clnames]) machine_confidences = [confidences[np.logical_and(groundtruth_labels == clname, 1-human)] for clname in sorted_clnames] # If there are no images annotated as a specific class, set the average class confidence to 0. for class_id in range(n_classes): if human_confidences[class_id].size == 0: human_confidences[class_id] = np.array([0]) if machine_confidences[class_id].size == 0: machine_confidences[class_id] = np.array([0]) plt.figure(figsize=(9, 3), facecolor='white', dpi=100) plt.subplot(1, 3, 1) plt.title('Annotation histogram') plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human') plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes, color='gray', edgecolor='k', label='machine') plt.xticks(range(n_classes), sorted_clnames, rotation=90) plt.ylabel('Annotation Count') plt.legend() plt.subplot(1, 3, 2) plt.title('Annotation histogram (logscale)') plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human') plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes, color='gray', edgecolor='k', label='machine') plt.xticks(range(n_classes), sorted_clnames, rotation=90) plt.yscale('log') plt.subplot(1, 3, 3) plt.title('Mean confidences') plt.bar(np.arange(n_classes), [conf.mean() for conf in human_confidences], color='gray', hatch='/', edgecolor='k', width=.4) plt.bar(np.arange(n_classes) + .4, [conf.mean() for conf in machine_confidences], color='gray', edgecolor='k', width=.4) plt.xticks(range(n_classes), sorted_clnames, rotation=90); ###Output _____no_output_____ ###Markdown Plot annotated imagesIn any data science task, it is crucial to plot and inspect the results to check they make sense. In order to do this, we will 1. Download the input images that Ground Truth annotated.2. 
Split them by annotated category and whether the annotation was done by a human or the auto-labeling mechanism.3. Plot images in each category and human/auto-annotated class.We will download the input images to the `LOCAL_IMG_DIR` directory, which you can choose in the next cell. Note that if this directory already contains images with the same filenames as your Ground Truth input images, we will not re-download the images.If your dataset is large and you do not wish to download and plot **all** the images, simply set `DATASET_SIZE` to a small number. We will pick a random subset of your data for plotting. ###Code LOCAL_IMG_DIR = '<< choose a local directory name to download the images to >>' # Replace with the name of a local directory to store images. assert LOCAL_IMG_DIR != '<< choose a local directory name to download the images to >>', 'Please provide a local directory name' DATASET_SIZE = len(img_uris) # Change this to a reasonable number if your dataset is much larger than 10K images. subset_ids = np.random.choice(range(len(img_uris)), DATASET_SIZE, replace=False) img_uris = [img_uris[idx] for idx in subset_ids] groundtruth_labels = groundtruth_labels[subset_ids] confidences = confidences[subset_ids] human = human[subset_ids] img_fnames = [None] * len(img_uris) for img_uri_id, img_uri in enumerate(img_uris): target_fname = os.path.join( LOCAL_IMG_DIR, img_uri.split('/')[-1]) if not os.path.isfile(target_fname): !aws s3 cp {img_uri} {target_fname} img_fnames[img_uri_id] = target_fname ###Output _____no_output_____ ###Markdown Plot a small output sampleThe following cell will create two figures. The first plots `N_SHOW` images in each category, as annotated by humans. The second plots `N_SHOW` images in each category, as annotated by the auto-labeling mechanism. If any category contains fewer than `N_SHOW` images, that row will not be displayed. By default, `N_SHOW = 10`, but feel free to change this to any other small number. ###Code N_SHOW = 10 plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=60) for class_name_id, class_name in enumerate(sorted_clnames): class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] try: show_ids = class_ids[:N_SHOW] except ValueError: print('Not enough human annotations to show for class: {}'.format(class_name)) continue for show_id_id, show_id in enumerate(show_ids): plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id)) plt.title('Human Label: ' + class_name) plt.imshow(imageio.imread(img_fnames[show_id])) plt.axis('off') plt.tight_layout() plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=100) for class_name_id, class_name in enumerate(sorted_clnames): class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] try: show_ids = np.random.choice(class_ids, N_SHOW, replace=False) except ValueError: print('Not enough machine annotations to show for class: {}'.format(class_name)) continue for show_id_id, show_id in enumerate(show_ids): plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id)) plt.title('Auto Label: ' + class_name) plt.imshow(imageio.imread(img_fnames[show_id])) plt.axis('off') plt.tight_layout() ###Output _____no_output_____ ###Markdown Plot the full resultsFinally, we plot all the results to a large pdf file. The pdf (called `ground_truth.pdf`) will display 100 images per page. Each page will contain images belonging to the same category, and annotated either by human annotators or by the auto-labeling mechanism. 
You can use this pdf to investigate exactly which images were annotated as which class at a glance.This might take a while, and the resulting pdf might be very large. For a dataset of 1K images, the process takes only a minute and creates a 10MB pdf. You can set `N_SHOW_PER_CLASS` to a small number if you want to limit the max number of examples shown in each category. ###Code N_SHOW_PER_CLASS = np.inf plt.figure(figsize=(10, 10), facecolor='white', dpi=100) with PdfPages('ground_truth.pdf') as pdf: for class_name in sorted_clnames: # Plot images annotated as class_name by humans. plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by humans'.format(class_name), fontsize=20) plt.axis('off') class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] for img_id_id, img_id in enumerate(class_ids): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print('Plotting human annotations of {}, {}/{}...'.format( class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS))) plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') pdf.savefig() # Plot images annotated as class_name by machines. plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by machines'.format(class_name), fontsize=20) plt.axis('off') class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] for img_id_id, img_id in enumerate(class_ids): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print('Plotting machine annotations of {}, {}/{}...'.format( class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS))) plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') pdf.savefig() plt.clf() ###Output _____no_output_____ ###Markdown Compare Ground Truth results to known, pre-labeled data**This section should take about 5 minutes to complete.**Sometimes (for example, when benchmarking the system) we have an alternative set of data labels available. For example, the Open Images data has already been carefully annotated by a professional annotation workforce.This allows us to perform additional analysis that compares Ground Truth labels to the known, pre-labeled data.When doing so, it is important to bear in mind that any image labels created by humans will most likely not be 100% accurate. For this reason, it is better to think of labeling accuracy as "adherence to a particular standard / set of labels" rather than "how good (in absolute terms) are the Ground Truth labels." Compute accuracyIn this cell, we will calculate the accuracy of Ground Truth labels with respect to the standard labels. In [Prepare the data](Prepare-the-data), we created the `ims` dictionary that specifies which image belongs to each category.We will convert it to an array `standard_labels` such that `standard_labels[i]` contains the label of the `i-th` image, and should ideally correspond to `groundtruth_labels[i]`.This will allow us to plot confusion matrices to assess how well the Ground Truth labels adhere to the standard labels. We plot a confusion matrix for the total dataset, and separate matrices for human annotations and auto-annotations. 
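To make the matrices easier to read, here is a small added illustration (a toy example on made-up labels, not part of the original analysis) of how `sklearn`'s `confusion_matrix` arranges its output: rows index the true (standard) labels and columns index the predicted (Ground Truth) labels, so a perfect labeling job puts all of the mass on the diagonal. ###Code
# Toy illustration on made-up labels -- not the real data.
from sklearn.metrics import confusion_matrix
toy_truth = ['Fruit', 'Fruit', 'Tiger', 'Snowman']
toy_predicted = ['Fruit', 'Tiger', 'Tiger', 'Snowman']
# Row i / column j counts items whose true label is class i and predicted label is class j.
print(confusion_matrix(toy_truth, toy_predicted, labels=['Fruit', 'Snowman', 'Tiger']))
###Output _____no_output_____ ###Markdown With that reading in mind, the next cell builds the real matrices. 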
###Code def plot_confusion_matrix(cm, classes, title='Confusion matrix', normalize=False, cmap=plt.cm.Blues): if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.ylabel('True label') plt.xlabel('Predicted label') plt.tight_layout() # Convert the 'ims' dictionary (which maps class names to images) to a list of image classes. standard_labels = [] for img_uri in img_uris: img_uri = img_uri.split('/')[-1].split('.')[0] standard_label = [cname for cname, imgs_in_cname in ims.items() if img_uri in imgs_in_cname][0] standard_labels.append(standard_label) standard_labels = np.array(standard_labels) # Plot a confusion matrix for the full dataset. plt.figure(facecolor='white', figsize=(12, 4), dpi=100) plt.subplot(131) mean_err = 100 - np.mean(standard_labels == groundtruth_labels) * 100 cnf_matrix = confusion_matrix(standard_labels, groundtruth_labels) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()), title='Full annotation set error {:.2f}%'.format( mean_err), normalize=False) # Plot a confusion matrix for human-annotated Ground Truth labels. plt.subplot(132) mean_err = 100 - np.mean(standard_labels[human==1.] == groundtruth_labels[human==1.]) * 100 cnf_matrix = confusion_matrix(standard_labels[human==1.], groundtruth_labels[human==1.]) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()), title='Human annotation set (size {}) error {:.2f}%'.format( int(sum(human)), mean_err), normalize=False) # Plot a confusion matrix for auto-annotated Ground Truth labels. if sum(human==0.) > 0: plt.subplot(133) mean_err = 100 - np.mean(standard_labels[human==0.] == groundtruth_labels[human==0.]) * 100 cnf_matrix = confusion_matrix(standard_labels[human==0.], groundtruth_labels[human==0.]) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()), title='Auto-annotation set (size {}) error {:.2f}%'.format( int(len(human) - sum(human)), mean_err), normalize=False) ###Output _____no_output_____ ###Markdown Plot correct and incorrect annotationsThis cell repeats the plot from Plot the full results. However, it sorts the predictions into correct and incorrect, and indicates the standard label of all the incorrect predictions. 
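If you want concrete file names rather than a picture, the following small sketch (added here; it only uses the `img_uris`, `standard_labels`, and `groundtruth_labels` arrays defined above) prints the first few disagreements directly. ###Code
# List a handful of images where Ground Truth disagrees with the standard labels.
mismatch_ids = np.where(standard_labels != groundtruth_labels)[0]
print('{} disagreements out of {} images'.format(mismatch_ids.size, len(img_uris)))
for idx in mismatch_ids[:10]:
    print('{}: standard={}, ground truth={}'.format(
        img_uris[idx], standard_labels[idx], groundtruth_labels[idx]))
###Output _____no_output_____ ###Markdown The pdf produced below shows the same disagreements visually. 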
###Code N_SHOW_PER_CLASS = np.inf plt.figure(figsize=(10, 10), facecolor='white', dpi=100) with PdfPages('ground_truth_benchmark.pdf') as pdf: for class_name in sorted_clnames: human_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] auto_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] for class_ids_id, class_ids in enumerate([human_ids, auto_ids]): plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by {}'.format(class_name, 'humans' if class_ids_id == 0 else 'machines'), fontsize=20) plt.axis('off') good_ids = class_ids[np.where(standard_labels[class_ids] == groundtruth_labels[class_ids])[0]] bad_ids = class_ids[np.where(standard_labels[class_ids] != groundtruth_labels[class_ids])[0]] for img_id_id, img_id in enumerate(np.concatenate([good_ids, bad_ids])): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print('Plotting annotations of {}, {}/{}...'.format( class_name, img_id_id, min(len(class_ids), N_SHOW_PER_CLASS))) ax = plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') if img_id_id < len(good_ids): # Draw a green border around the image. rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='green', fill=False, transform=ax.transAxes) else: # Draw a red border around the image. rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='red', fill=False, transform=ax.transAxes) ax.add_patch(rec) pdf.savefig() plt.clf() ###Output _____no_output_____ ###Markdown Train an image classifier using Ground Truth labelsAt this stage, we have fully labeled our dataset and we can train a machine learning model to classify images based on the categories we previously defined. We'll do so using the **augmented manifest** output of our labeling job - no additional file translation or manipulation required! For a more complete description of the augmented manifest, see our other [example notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/ground_truth_labeling_jobs/object_detection_augmented_manifest_training/object_detection_augmented_manifest_training.ipynb).**NOTE:** Training neural networks to high accuracy often requires a careful choice of hyperparameters. In this case, we hand-picked hyperparameters that work reasonably well for this dataset. The neural net should have an accuracy of about **60% if you're using 100 datapoints, and over 95% if you're using 1000 datapoints**. To train neural networks on novel data, consider using [SageMaker's model tuning / hyperparameter optimization algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html).First, we'll split our augmented manifest into a training set and a validation set using an 80/20 split. ###Code with open('output.manifest', 'r') as f: output = [json.loads(line) for line in f.readlines()] # Shuffle output in place. 
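# Added note: for a reproducible train/validation split, fix the seed first, e.g. np.random.seed(0).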
np.random.shuffle(output) dataset_size = len(output) train_test_split_index = round(dataset_size*0.8) train_data = output[:train_test_split_index] validation_data = output[train_test_split_index:] num_training_samples = 0 with open('train.manifest', 'w') as f: for line in train_data: f.write(json.dumps(line)) f.write('\n') num_training_samples += 1 with open('validation.manifest', 'w') as f: for line in validation_data: f.write(json.dumps(line)) f.write('\n') ###Output _____no_output_____ ###Markdown Next, we'll upload these manifest files to the previously defined S3 bucket so that they can be used in the training job. ###Code s3.upload_file('train.manifest',BUCKET, EXP_NAME + '/train.manifest') s3.upload_file('validation.manifest',BUCKET, EXP_NAME + '/validation.manifest') # Create unique job name nn_job_name_prefix = 'groundtruth-augmented-manifest-demo' timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) nn_job_name = nn_job_name_prefix + timestamp training_image = sagemaker.amazon.amazon_estimator.get_image_uri(boto3.Session().region_name, 'image-classification', repo_version='latest') training_params = \ { "AlgorithmSpecification": { "TrainingImage": training_image, "TrainingInputMode": "Pipe" }, "RoleArn": role, "OutputDataConfig": { "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME) }, "ResourceConfig": { "InstanceCount": 1, "InstanceType": "ml.p3.2xlarge", "VolumeSizeInGB": 50 }, "TrainingJobName": nn_job_name, "HyperParameters": { "epochs": "30", "image_shape": "3,224,224", "learning_rate": "0.01", "lr_scheduler_step": "10,20", "mini_batch_size": "32", "num_classes": str(num_classes), "num_layers": "18", "num_training_samples": str(num_training_samples), "resize": "224", "use_pretrained_model": "1" }, "StoppingCondition": { "MaxRuntimeInSeconds": 86400 }, "InputDataConfig": [ { "ChannelName": "train", "DataSource": { "S3DataSource": { "S3DataType": "AugmentedManifestFile", "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'train.manifest'), "S3DataDistributionType": "FullyReplicated", "AttributeNames": ["source-ref","category"] } }, "ContentType": "application/x-recordio", "RecordWrapperType": "RecordIO", "CompressionType": "None" }, { "ChannelName": "validation", "DataSource": { "S3DataSource": { "S3DataType": "AugmentedManifestFile", "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'validation.manifest'), "S3DataDistributionType": "FullyReplicated", "AttributeNames": ["source-ref","category"] } }, "ContentType": "application/x-recordio", "RecordWrapperType": "RecordIO", "CompressionType": "None" } ] } ###Output _____no_output_____ ###Markdown Now we create the SageMaker training job. ###Code sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_training_job(**training_params) # Confirm that the training job has started print('Training job started') while(True): response = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name) status = response['TrainingJobStatus'] if status == 'Completed': print("Training job ended with status: " + status) break if status == 'Failed': message = response['FailureReason'] print('Training failed with the following error: {}'.format(message)) raise Exception('Training job failed') time.sleep(30) ###Output _____no_output_____ ###Markdown Deploy the Model Now that we've fully labeled our dataset and have a trained model, we want to use the model to perform inference. Image classification only supports encoded .jpg and .png image formats as inference input for now. 
The output is the probability values for all classes encoded in JSON format, or in JSON Lines format for batch transform.This section involves several steps: Create Model - Create a model from the training output. Batch Transform - Create a transform job to perform batch inference. Host the model for realtime inference - Create an inference endpoint and perform realtime inference. Create Model ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) model_name="groundtruth-demo-ic-model" + timestamp print(model_name) info = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name) model_data = info['ModelArtifacts']['S3ModelArtifacts'] print(model_data) primary_container = { 'Image': training_image, 'ModelDataUrl': model_data, } create_model_response = sagemaker_client.create_model( ModelName = model_name, ExecutionRoleArn = role, PrimaryContainer = primary_container) print(create_model_response['ModelArn']) ###Output _____no_output_____ ###Markdown Batch TransformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction. Download Test DataFirst, let's download test images that have been held out from the training and validation data. ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) batch_job_name = "image-classification-model" + timestamp batch_input = 's3://{}/{}/test/'.format(BUCKET, EXP_NAME) batch_output = 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name) # Copy two images from each class, unseen by the neural net, to a local bucket. test_images = [] for class_id in ['/m/04szw', '/m/02xwb', '/m/0cd4d', '/m/07dm6', '/m/0152hh']: test_images.extend([label[0] + '.jpg' for label in all_labels if (label[2] == class_id and label[3] == '1')][-2:]) !aws s3 rm $batch_input --recursive for test_img in test_images: !aws s3 cp s3://open-images-dataset/test/{test_img} {batch_input} request = \ { "TransformJobName": batch_job_name, "ModelName": model_name, "MaxConcurrentTransforms": 16, "MaxPayloadInMB": 6, "BatchStrategy": "SingleRecord", "TransformOutput": { "S3OutputPath": 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name) }, "TransformInput": { "DataSource": { "S3DataSource": { "S3DataType": "S3Prefix", "S3Uri": batch_input } }, "ContentType": "application/x-image", "SplitType": "None", "CompressionType": "None" }, "TransformResources": { "InstanceType": "ml.p2.xlarge", "InstanceCount": 1 } } print('Transform job name: {}'.format(batch_job_name)) sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_transform_job(**request) print("Created Transform job with name: ", batch_job_name) while(True): response = sagemaker_client.describe_transform_job(TransformJobName=batch_job_name) status = response['TransformJobStatus'] if status == 'Completed': print("Transform job ended with status: " + status) break if status == 'Failed': message = response['FailureReason'] print('Transform failed with the following error: {}'.format(message)) raise Exception('Transform job failed') time.sleep(30) ###Output _____no_output_____ ###Markdown After the job completes, let's inspect the prediction results. ###Code def get_label(out_fname): !aws s3 cp {out_fname} . 
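# Parse the downloaded JSON prediction: 'prediction' holds one probability per class, in the order of class_labels.json (our CLASS_LIST).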
print(out_fname) with open(out_fname.split('/')[-1]) as f: data = json.load(f) index = np.argmax(data['prediction']) probability = data['prediction'][index] print("Result: label - " + CLASS_LIST[index] + ", probability - " + str(probability)) input_fname = out_fname.split('/')[-1][:-4] return CLASS_LIST[index], probability, input_fname # Show prediction results. !rm test_inputs/* plt.figure(facecolor='white', figsize=(7, 15), dpi=100) outputs = !aws s3 ls {batch_output} outputs = [get_label(batch_output + prefix.split()[-1]) for prefix in outputs] outputs.sort(key=lambda pred: pred[1], reverse=True) for fname_id, (pred_cname, pred_conf, pred_fname) in enumerate(outputs): !aws s3 cp {batch_input}{pred_fname} test_inputs/{pred_fname} plt.subplot(5, 2, fname_id+1) img = imageio.imread('test_inputs/{}'.format(pred_fname)) plt.imshow(img) plt.axis('off') plt.title('{}\nconfidence={:.2f}'.format(pred_cname, pred_conf)) if RUN_FULL_AL_DEMO: warning = '' else: warning = ('\nNOTE: In this small demo we only used 80 images to train the neural network.\n' 'The predictions will be far from perfect! Set RUN_FULL_AL_DEMO=True to see properly trained results.') plt.suptitle('Predictions sorted by confidence.{}'.format(warning)) ###Output _____no_output_____ ###Markdown Realtime InferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps, Create endpoint configuration - Create a configuration defining an endpoint. Create endpoint - Use the configuration to create an inference endpoint. Perform inference - Perform inference on some input data using the endpoint. Clean up - Delete the endpoint and model Create Endpoint Configuration ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) endpoint_config_name = job_name + '-epc' + timestamp endpoint_config_response = sagemaker_client.create_endpoint_config( EndpointConfigName = endpoint_config_name, ProductionVariants=[{ 'InstanceType':'ml.m4.xlarge', 'InitialInstanceCount':1, 'ModelName':model_name, 'VariantName':'AllTraffic'}]) print('Endpoint configuration name: {}'.format(endpoint_config_name)) print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn'])) ###Output _____no_output_____ ###Markdown Create EndpointLastly, we create the endpoint that serves up the model by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes about 10 minutes to complete. 
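The next cell relies on boto3's built-in `endpoint_in_service` waiter; under the hood that is simply polling `describe_endpoint`. If you prefer to watch the transition yourself, here is a small added sketch of the equivalent hand-rolled loop (run it only after the next cell has defined `endpoint_name`): ###Code
# Hand-rolled equivalent of the waiter used below: poll until the endpoint leaves 'Creating'.
import time
status = 'Creating'
while status == 'Creating':
    status = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)['EndpointStatus']
    print('EndpointStatus:', status)
    time.sleep(60)
###Output _____no_output_____ ###Markdown Either way, wait for `InService` before invoking the endpoint. 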
###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) endpoint_name = job_name + '-ep' + timestamp print('Endpoint name: {}'.format(endpoint_name)) endpoint_params = { 'EndpointName': endpoint_name, 'EndpointConfigName': endpoint_config_name, } endpoint_response = sagemaker_client.create_endpoint(**endpoint_params) print('EndpointArn = {}'.format(endpoint_response['EndpointArn'])) # get the status of the endpoint response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = response['EndpointStatus'] print('EndpointStatus = {}'.format(status)) # wait until the status has changed sagemaker_client.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name) # print the status of the endpoint endpoint_response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = endpoint_response['EndpointStatus'] print('Endpoint creation ended with EndpointStatus = {}'.format(status)) if status != 'InService': raise Exception('Endpoint creation failed.') with open('test_inputs/{}'.format(test_images[0]), 'rb') as f: payload = f.read() payload = bytearray(payload) client = boto3.client('sagemaker-runtime') response = client.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/x-image', Body=payload) # `response` comes in a json format, let's unpack it. result = json.loads(response['Body'].read()) # The result outputs the probabilities for all classes. # Find the class with maximum probability and print the class name. print('Model prediction is: {}'.format(CLASS_LIST[np.argmax(result)])) ###Output _____no_output_____ ###Markdown Finally, let's clean up and delete this endpoint. ###Code sagemaker_client.delete_endpoint(EndpointName=endpoint_name) ###Output _____no_output_____ ###Markdown From Unlabeled Data to a Deployed Machine Learning Model: A SageMaker Ground Truth Demonstration for Image Classification1. [Introduction](Introduction)2. [Run a Ground Truth labeling job (time: about 3h)](Run-a-Ground-Truth-labeling-job) 1. [Prepare the data](Prepare-the-data) 2. [Specify the categories](Specify-the-categories) 3. [Create the instruction template](Create-the-instruction-template) 4. [Create a private team to test your task [OPTIONAL]](Create-a-private-team-to-test-your-task-[OPTIONAL]) 5. [Define pre-built lambda functions for use in the labeling job](Define-pre-built-lambda-functions-for-use-in-the-labeling-job) 6. [Submit the Ground Truth job request](Submit-the-Ground-Truth-job-request) 1. [Verify your task using a private team [OPTIONAL]](Verify-your-task-using-a-private-team-[OPTIONAL]) 7. [Monitor job progress](Monitor-job-progress)3. [Analyze Ground Truth labeling job results (time: about 20min)](Analyze-Ground-Truth-labeling-job-results) 1. [Postprocess the output manifest](Postprocess-the-output-manifest) 2. [Plot class histograms](Plot-class-histograms) 3. [Plot annotated images](Plot-annotated-images) 1. [Plot a small output sample](Plot-a-small-output-sample) 2. [Plot the full results](Plot-the-full-results)4. [Compare Ground Truth results to standard labels (time: about 5min)](Compare-Ground-Truth-results-to-standard-labels) 1. [Compute accuracy](Compute-accuracy) 2. [Plot correct and incorrect annotations](Plot-correct-and-incorrect-annotations)5. [Train an image classifier using Ground Truth labels (time: about 15min)](Train-an-image-classifier-using-Ground-Truth-labels)6. [Deploy the Model (time: about 20min)](Deploy-the-Model) 1. [Create Model](Create-Model) 2. [Batch Transform](Batch-Transform) 3. 
[Realtime Inference](Realtime-Inference) 1. [Create Endpoint Configuration](Create-Endpoint-Configuration) 2. [Create Endpoint](Create-Endpoint) 3. [Perform Inference](Perform-Inference)7. [Review](Review) IntroductionThis sample notebook takes you through an end-to-end workflow to demonstrate the functionality of SageMaker Ground Truth. We'll start with an unlabeled image data set, acquire labels for all the images using SageMaker Ground Truth, analyze the results of the labeling job, train an image classifier, host the resulting model, and, finally, use it to make predictions. Before you begin, we highly recommend you start a Ground Truth labeling job through the AWS Console first to familiarize yourself with the workflow. The AWS Console offers less flexibility than the API, but is simple to use. Cost and runtimeYou can run this demo in two modes:1. Set `RUN_FULL_AL_DEMO = True` in the next cell to label 1000 images. This should cost about \$100 given the current [Ground Truth pricing scheme](https://aws.amazon.com/sagemaker/groundtruth/pricing/). In order to reduce the cost, we will use Ground Truth's auto-labeling feature. Auto-labeling uses computer vision to learn from human responses and automatically create labels for the easiest images at a cheap price. The total end-to-end runtime should be about 4h.1. Set `RUN_FULL_AL_DEMO = False` in the next cell to label only 100 images. This should cost about \$15. **Since Ground Truth's auto-labeling feature only kicks in for datasets of 1000 images or more, this cheaper version of the demo will not use it. Some of the analysis plots might look awkward, but you should still be able to see good results on the human-annotated 100 images.** PrerequisitesTo run this notebook, you can simply execute each cell one-by-one. To understand what's happening, you'll need:* An S3 bucket you can write to -- please provide its name in the following cell. The bucket must be in the same region as this SageMaker Notebook instance. You can also change the `EXP_NAME` to any valid S3 prefix. All the files related to this experiment will be stored in that prefix of your bucket.* Familiarity with Python and [numpy](http://www.numpy.org/).* Basic familiarity with [AWS S3](https://docs.aws.amazon.com/s3/index.html).* Basic understanding of [AWS Sagemaker](https://aws.amazon.com/sagemaker/).* Basic familiarity with [AWS Command Line Interface (CLI)](https://aws.amazon.com/cli/) -- set it up with credentials to access the AWS account you're running this notebook from. This should work out-of-the-box on SageMaker Jupyter Notebook instances.This notebook is only tested on a SageMaker notebook instance. The runtimes given are approximate; we used an `ml.m4.xlarge` instance in our tests. However, you can likely run it on a local instance by first executing the cell below on SageMaker, and then copying the `role` string to your local copy of the notebook.NOTE: This notebook will create/remove subdirectories in its working directory. We recommend placing this notebook in its own directory before running it. 
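As a quick added sanity check (optional, not part of the original setup), you can confirm which AWS identity your credentials resolve to before running anything else: ###Code
import boto3
# Print the ARN of the identity that boto3 and the AWS CLI will act as.
print(boto3.client('sts').get_caller_identity()['Arn'])
###Output _____no_output_____ ###Markdown If this fails or prints an unexpected identity, fix your credentials before continuing. 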
###Code %matplotlib inline %load_ext autoreload %autoreload 2 import os from collections import namedtuple from collections import defaultdict from collections import Counter import itertools import json import random import time import imageio import numpy as np import matplotlib import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from sklearn.metrics import confusion_matrix import boto3 import sagemaker from urllib.parse import urlparse BUCKET = '<< YOUR S3 BUCKET NAME >>' assert BUCKET != '<< YOUR S3 BUCKET NAME >>', 'Please provide a custom S3 bucket name.' EXP_NAME = 'ground-truth-ic-demo' # Any valid S3 prefix. RUN_FULL_AL_DEMO = True # See 'Cost and Runtime' in the Markdown cell above! # Make sure the bucket is in the same region as this notebook. role = sagemaker.get_execution_role() region = boto3.session.Session().region_name s3 = boto3.client('s3') bucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region'] assert bucket_region == region, "Your S3 bucket {} and this notebook need to be in the same region.".format(BUCKET) ###Output _____no_output_____ ###Markdown Run a Ground Truth labeling job**This section should take about 3h to complete.**We will first run a labeling job. This involves several steps: collecting the images we want labeled, specifying the possible label categories, creating instructions, and writing a labeling job specification. In addition, we highly recommend running a (free) mock job using a private workforce before you submit any job to the public workforce. This notebook will explain how to do that as an optional step. Without using a private workforce, this section until completion of your labeling job should take about 3h. However, this may vary depending on the availability of the public annotation workforce. Prepare the dataWe will first download images and labels of a subset of the [Google Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html). These labels were [carefully verified](https://storage.googleapis.com/openimages/web/factsfigures.html). Later, we will compare Ground Truth annotations to these labels. Our dataset will include images in the following categories:* Musical Instrument (500 images)* Fruit (370 images)* Cheetah (50 images)* Tiger (40 images)* Snowman (40 images)If you chose `RUN_FULL_AL_DEMO = False`, then we will choose a subset of 100 images in this dataset. This is a diverse dataset of interesting images, and should be fun for the human annotators to work with. You are free to ask the annotators to annotate any images you wish (as long as the images do not contain adult content; if they do, you must adjust the labeling job request this notebook produces -- please check the Ground Truth documentation).We will copy these images to our local `BUCKET`, and will create the corresponding *input manifest*. The input manifest is a formatted list of the S3 locations of the images we want Ground Truth to annotate; each line looks like `{"source-ref": "s3://BUCKET/EXP_NAME/images/<image-id>.jpg"}`. We will upload this manifest to our S3 `BUCKET`. Disclosure regarding the Open Images Dataset V4:Open Images Dataset V4 is created by Google Inc. We have not modified the images or the accompanying annotations. You can obtain the images and the annotations [here](https://storage.googleapis.com/openimages/web/download.html). The annotations are licensed by Google Inc. under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license. The images are listed as having a [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/) license. 
The following paper describes Open Images V4 in depth: from the data collection and annotation to detailed statistics about the data and evaluation of models trained on it. A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, T. Duerig, and V. Ferrari.*The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale.* arXiv:1811.00982, 2018. ([link to PDF](https://arxiv.org/abs/1811.00982)) ###Code # Download and process the Open Images annotations. !wget https://storage.googleapis.com/openimages/2018_04/test/test-annotations-human-imagelabels-boxable.csv -O openimgs-annotations.csv with open('openimgs-annotations.csv', 'r') as f: all_labels = [line.strip().split(',') for line in f.readlines()] # Extract image ids in each of our desired classes. ims = {} ims['Musical Instrument'] = [label[0] for label in all_labels if (label[2] == '/m/04szw' and label[3] == '1')][:500] ims['Fruit'] = [label[0] for label in all_labels if (label[2] == '/m/02xwb' and label[3] == '1')][:371] ims['Fruit'].remove('02a54f6864478101') # This image contains personal information, let's remove it from our dataset. ims['Cheetah'] = [label[0] for label in all_labels if (label[2] == '/m/0cd4d' and label[3] == '1')][:50] ims['Tiger'] = [label[0] for label in all_labels if (label[2] == '/m/07dm6' and label[3] == '1')][:40] ims['Snowman'] = [label[0] for label in all_labels if (label[2] == '/m/0152hh' and label[3] == '1')][:40] num_classes = len(ims) # If running the short version of the demo, reduce each class count tenfold. for key in ims.keys(): if RUN_FULL_AL_DEMO is False: ims[key] = set(ims[key][:int(len(ims[key]) / 10)]) else: ims[key] = set(ims[key]) # Copy the images to our local bucket. s3 = boto3.client('s3') for img_id, img in enumerate(itertools.chain.from_iterable(ims.values())): if (img_id + 1) % 10 == 0: print('Copying image {} / {}'.format((img_id+1), 1000)) copy_source = { 'Bucket': 'open-images-dataset', 'Key': 'test/{}.jpg'.format(img) } s3.copy(copy_source, BUCKET, '{}/images/{}.jpg'.format(EXP_NAME, img)) # Create and upload the input manifest. manifest_name = 'input.manifest' with open(manifest_name, 'w') as f: for img in itertools.chain.from_iterable(ims.values()): img_path = 's3://{}/{}/images/{}.jpg'.format(BUCKET, EXP_NAME, img) f.write('{"source-ref": "' + img_path +'"}\n') s3.upload_file(manifest_name, BUCKET, EXP_NAME + '/' + manifest_name) ###Output _____no_output_____ ###Markdown After running the cell above, you should be able to go to `s3://BUCKET/EXP_NAME/images` in [S3 console](https://console.aws.amazon.com/s3/) and see a thousand images. We recommend you inspect the contents of these images! You can download them all to a local machine using the AWS CLI. Specify the categoriesTo run an image classification labeling job, you need to decide on a set of classes the annotators can choose from. In our case, this list is `["Musical Instrument", "Fruit", "Cheetah", "Tiger", "Snowman"]`. In your own job you can choose any list of up to 10 classes. We recommend the classes to be as unambiguous and concrete as possible. The categories should be mutually exclusive, with only one correct label per image. 
In addition, be careful to make the task as *objective* as possible, unless of course your intention is to obtain subjective labels.* Example good category lists: `["Human", "No Human"]`, `["Golden Retriever", "Labrador", "English Bulldog", "German Shepherd"]`, `["Car", "Train", "Ship", "Pedestrian"]`.* Example bad category lists: `["Prominent object", "Not prominent"]` (meaning unclear), `["Beautiful", "Ugly"]` (subjective), `["Dog", "Animal", "Car"]` (not mutually exclusive). To work with Ground Truth, this list needs to be converted to a .json file and uploaded to the S3 `BUCKET`.*Note: The ordering of the labels or classes in the template governs the class indices that you will see downstream in the output manifest (this numbering is zero-indexed). In other words, the class that appears second in the template will correspond to class "1" in the output. At the end of this demonstration, we will train a model and make predictions, and this class ordering is instrumental to interpreting the results.* ###Code CLASS_LIST = list(ims.keys()) print("Label space is {}".format(CLASS_LIST)) json_body = { 'labels': [{'label': label} for label in CLASS_LIST] } with open('class_labels.json', 'w') as f: json.dump(json_body, f) s3.upload_file('class_labels.json', BUCKET, EXP_NAME + '/class_labels.json') ###Output _____no_output_____ ###Markdown You should now see `class_labels.json` in `s3://BUCKET/EXP_NAME/`. Create the instruction templatePart or all of your images will be annotated by human annotators. It is **essential** to provide good instructions that help the annotators give you the annotations you want. Good instructions are:1. Concise. We recommend limiting verbal/textual instruction to two sentences, and focusing on clear visuals.2. Visual. In the case of image classification, we recommend providing one labeled image in each of the classes as part of the instruction.When used through the AWS Console, Ground Truth helps you create the instructions using a visual wizard. When using the API, you need to create an HTML template for your instructions. Below, we prepare a very simple but effective template and upload it to your S3 bucket.NOTE: If you use any images in your template (as we do), they need to be publicly accessible. You can enable public access to files in your S3 bucket through the S3 Console, as described in [S3 Documentation](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/set-object-permissions.html). Testing your instructionsIt is very easy to create broken instructions. This might cause your labeling job to fail. However, it might also cause your job to complete with meaningless results (when the annotators have no idea what to do, or the instructions are plain wrong). We *highly recommend* that you verify that your task is correct in two ways:1. The following cell creates and uploads a file called `instructions.template` to S3. It also creates `instructions.html` that you can open in a local browser window. Please do so and inspect the resulting web page; it should correspond to what you want your annotators to see (except the actual image to annotate will not be visible).2. Run your job in a private workforce, which is a way to run a mock labeling job. We describe how to do it in [Verify your task using a private team [OPTIONAL]](Verify-your-task-using-a-private-team-[OPTIONAL]). 
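After the next cell has written `instructions.html`, you can also preview it without leaving Jupyter; a small convenience sketch (added here, assuming the file sits next to this notebook): ###Code
from IPython.display import IFrame
# Render the generated instruction page inline; run this only after the next cell has created the file.
IFrame('instructions.html', width=900, height=600)
###Output _____no_output_____ ###Markdown The inline preview shows the same page your annotators will see, minus the actual image to label. 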
###Code img_examples = ['https://s3.amazonaws.com/open-images-dataset/test/{}'.format(img_id) for img_id in ['0634825fc1dcc96b.jpg', '0415b6a36f3381ed.jpg', '8582cc08068e2d0f.jpg', '8728e9fa662a8921.jpg', '926d31e8cde9055e.jpg']] def make_template(test_template=False, save_fname='instructions.template'): template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script> <crowd-form> <crowd-image-classifier name="crowd-image-classifier" src="{{{{ task.input.taskObject | grant_read_access }}}}" header="Dear Annotator, please tell me what you can see in the image. Thank you!" categories="{categories_str}" > <full-instructions header="Image classification instructions"> </full-instructions> <short-instructions> <p>Dear Annotator, please tell me what you can see in the image. Thank you!</p> <p><img src="{}" style="max-width:100%"> <br>Example "Musical Instrument". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Fruit".</p> <p><img src="{}" style="max-width:100%"> <br>Example "Cheetah". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Tiger". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Snowman". </p> </short-instructions> </crowd-image-classifier> </crowd-form>""".format(*img_examples, categories_str=str(CLASS_LIST) if test_template else '{{ task.input.labels | to_json | escape }}') with open(save_fname, 'w') as f: f.write(template) if test_template is False: print(template) make_template(test_template=True, save_fname='instructions.html') make_template(test_template=False, save_fname='instructions.template') s3.upload_file('instructions.template', BUCKET, EXP_NAME + '/instructions.template') ###Output _____no_output_____ ###Markdown You should now be able to find your template in `s3://BUCKET/EXP_NAME/instructions.template`. Create a private team to test your task [OPTIONAL]This step requires you to use the AWS Console. However, we **highly recommend** that you follow it, especially when creating your own task with a custom dataset, label set, and template.We will create a `private workteam` and add only one user (you) to it. Then, we will modify the Ground Truth API job request to send the task to that workforce. You will then be able to see your annotation job exactly as the public annotators would see it. You can even annotate the whole dataset yourself! To create a private team:1. Go to `AWS Console > Amazon SageMaker > Labeling workforces`2. Click "Private" and then "Create private team". 3. Enter the desired name for your private workteam.4. Enter your own email address in the "Email addresses" section. 5. Enter the name of your organization and a contact email to administrate the private workteam.6. Click "Create Private Team".7. The AWS Console should now return to `AWS Console > Amazon SageMaker > Labeling workforces`. Your newly created team should be visible under "Private teams". Next to it you will see an `ARN` which is a long string that looks like `arn:aws:sagemaker:region-name-123456:workteam/private-crowd/team-name`. Copy this ARN into the cell below.8. You should get an email from `[email protected]` that contains your workforce username and password. 9. In `AWS Console > Amazon SageMaker > Labeling workforces`, click on the URL in `Labeling portal sign-in URL`. Use the email/password combination from Step 8 to log in (you will be asked to create a new, non-default password).That's it! This is your private worker's interface. 
When we create a verification task in [Verify your task using a private team](Verify-your-task-using-a-private-team-[OPTIONAL]) below, your task should appear in this window. You can invite your colleagues to participate in the labeling job by clicking the "Invite new workers" button.The [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-private.html) has more details on the management of private workteams. ###Code private_workteam_arn = '<< your private workteam ARN here >>' ###Output _____no_output_____ ###Markdown Define pre-built lambda functions for use in the labeling job Before we submit the request, we need to define the ARNs for four key components of the labeling job: 1) the workteam, 2) the annotation consolidation Lambda function, 3) the pre-labeling task Lambda function, and 4) the machine learning algorithm to perform auto-annotation. These functions are defined by strings with region names and AWS service account numbers, so we will define a mapping below that will enable you to run this notebook in any of our supported regions. See the official documentation for the available ARNs:* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-public.html) for a discussion of the workteam ARN definition. There is only one valid selection if you choose to use the public workforce; if you elect to use a private workteam, you should check the corresponding ARN for the workteam.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_HumanTaskConfig.htmlSageMaker-Type-HumanTaskConfig-PreHumanTaskLambdaArn) for available pre-human ARNs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_AnnotationConsolidationConfig.htmlSageMaker-Type-AnnotationConsolidationConfig-AnnotationConsolidationLambdaArn) for available annotation consolidation ARNs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_LabelingJobAlgorithmsConfig.htmlSageMaker-Type-LabelingJobAlgorithmsConfig-LabelingJobAlgorithmSpecificationArn) for available auto-labeling ARNs for other workflows. ###Code # Specify ARNs for resources needed to run an image classification job. ac_arn_map = {'us-west-2': '081040173940', 'us-east-1': '432418664414', 'us-east-2': '266458841044', 'eu-west-1': '568282634449', 'ap-northeast-1': '477331159723'} prehuman_arn = 'arn:aws:lambda:{}:{}:function:PRE-ImageMultiClass'.format(region, ac_arn_map[region]) acs_arn = 'arn:aws:lambda:{}:{}:function:ACS-ImageMultiClass'.format(region, ac_arn_map[region]) labeling_algorithm_specification_arn = 'arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/image-classification'.format(region) workteam_arn = 'arn:aws:sagemaker:{}:394669845002:workteam/public-crowd/default'.format(region) ###Output _____no_output_____ ###Markdown Submit the Ground Truth job requestThe API starts a Ground Truth job by submitting a request. The request contains the full configuration of the annotation task, and allows you to modify the fine details of the job that are fixed to default values when you use the AWS Console. The parameters that make up the request are described in more detail in the [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateLabelingJob.html).After you submit the request, you should be able to see the job in your AWS Console, at `Amazon SageMaker > Labeling Jobs`.You can track the progress of the job there. 
This job will take several hours to complete. If your job is larger (say 100,000 images), the speed and cost benefit of auto-labeling should be larger.

Verify your task using a private team [OPTIONAL]

If you chose to follow the steps in [Create a private team](#Create-a-private-team-to-test-your-task-[OPTIONAL]), then you can first verify that your task runs as expected. To do this:
1. Set VERIFY_USING_PRIVATE_WORKFORCE to True in the cell below.
2. Run the next two cells. This will define the task and submit it to the private workforce (to you).
3. After a few minutes, you should be able to see your task in your private workforce interface from [Create a private team](#Create-a-private-team-to-test-your-task-[OPTIONAL]). Please verify that the task appears as you want it to appear.
4. If everything is in order, change `VERIFY_USING_PRIVATE_WORKFORCE` to `False` and rerun the cell below to start the real annotation task!
###Code
VERIFY_USING_PRIVATE_WORKFORCE = False
USE_AUTO_LABELING = True

task_description = 'What do you see: a {}?'.format(' a '.join(CLASS_LIST))
task_keywords = ['image', 'classification', 'humans']
task_title = task_description
job_name = 'ground-truth-demo-' + str(int(time.time()))

human_task_config = {
    "AnnotationConsolidationConfig": {
        "AnnotationConsolidationLambdaArn": acs_arn,
    },
    "PreHumanTaskLambdaArn": prehuman_arn,
    "MaxConcurrentTaskCount": 200,  # 200 images will be sent at a time to the workteam.
    "NumberOfHumanWorkersPerDataObject": 3,  # 3 separate workers will be required to label each image.
    "TaskAvailabilityLifetimeInSeconds": 21600,  # Your workteam has 6 hours to complete all pending tasks.
    "TaskDescription": task_description,
    "TaskKeywords": task_keywords,
    "TaskTimeLimitInSeconds": 300,  # Each image must be labeled within 5 minutes.
    "TaskTitle": task_title,
    "UiConfig": {
        "UiTemplateS3Uri": 's3://{}/{}/instructions.template'.format(BUCKET, EXP_NAME),
    }
}

if not VERIFY_USING_PRIVATE_WORKFORCE:
    human_task_config["PublicWorkforceTaskPrice"] = {
        "AmountInUsd": {
            "Dollars": 0,
            "Cents": 1,
            "TenthFractionsOfACent": 2,
        }
    }
    human_task_config["WorkteamArn"] = workteam_arn
else:
    human_task_config["WorkteamArn"] = private_workteam_arn

ground_truth_request = {
    "InputConfig": {
        "DataSource": {
            "S3DataSource": {
                "ManifestS3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, manifest_name),
            }
        },
        "DataAttributes": {
            "ContentClassifiers": [
                "FreeOfPersonallyIdentifiableInformation",
                "FreeOfAdultContent"
            ]
        },
    },
    "OutputConfig": {
        "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME),
    },
    "HumanTaskConfig": human_task_config,
    "LabelingJobName": job_name,
    "RoleArn": role,
    "LabelAttributeName": "category",
    "LabelCategoryConfigS3Uri": 's3://{}/{}/class_labels.json'.format(BUCKET, EXP_NAME),
}

if USE_AUTO_LABELING and RUN_FULL_AL_DEMO:
    ground_truth_request["LabelingJobAlgorithmsConfig"] = {
        "LabelingJobAlgorithmSpecificationArn": labeling_algorithm_specification_arn
    }

sagemaker_client = boto3.client('sagemaker')
sagemaker_client.create_labeling_job(**ground_truth_request)
###Output
_____no_output_____
###Markdown
Monitor job progress

A Ground Truth job can take a few hours to complete (if your dataset is larger than 10000 images, it can take much longer than that!). One way to monitor the job's progress is through the AWS Console. In this notebook, we will use the Ground Truth output files and CloudWatch logs to monitor the progress. You can re-evaluate the next cells repeatedly. The `describe_labeling_job` request below should tell you whether the job is complete or not; once it is, 'LabelingJobStatus' will be 'Completed'.
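If you would rather block until the job reaches a terminal state than re-evaluate the request by hand, here is a minimal polling sketch (it assumes `sagemaker_client`, `job_name`, and the `time` import from the cells above):
###Code
# Poll the labeling job status once a minute until it reaches a terminal state.
while True:
    status = sagemaker_client.describe_labeling_job(
        LabelingJobName=job_name)['LabelingJobStatus']
    print('LabelingJobStatus:', status)
    if status in ('Completed', 'Failed', 'Stopped'):
        break
    time.sleep(60)
###Output
_____no_output_____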
###Code
sagemaker_client.describe_labeling_job(LabelingJobName=job_name)
###Output
_____no_output_____
###Markdown
The next cell extracts detailed information on how your job is doing to-date. You can re-evaluate it at any time. It should give you:
* The number of human- and machine-annotated images in each category across the iterations of your labeling job.
* The training curves of any neural network training jobs launched by Ground Truth **(only if you are running with `RUN_FULL_AL_DEMO=True`)**.
* The cost of the human- and machine-annotated labels.

To understand the pricing, study [the pricing doc](https://aws.amazon.com/sagemaker/groundtruth/pricing/) carefully. In our case, each human label costs `$0.08 + 3 * $0.012 = $0.116` and each auto-label costs `$0.08`. There is also a small added cost of using SageMaker instances for neural net training and inference during auto-labeling. However, this should be insignificant compared to the other costs.

If `RUN_FULL_AL_DEMO==True`, then the job will proceed in multiple iterations.
* Iteration 1: Ground Truth will send out 10 images as 'probes' for human annotation. If these are successfully annotated, proceed to Iteration 2.
* Iteration 2: Send out a batch of `MaxConcurrentTaskCount - 10` (in our case, 190) images for human annotation to obtain an active learning training batch.
* Iteration 3: Send out another batch of 200 images for human annotation to obtain an active learning validation set.
* Iteration 4a: Train a neural net to do auto-labeling. Auto-label as many datapoints as possible.
* Iteration 4b: If there is any data left over, send out at most 200 images for human annotation.
* Repeat Iterations 4a and 4b until all data is annotated.

If `RUN_FULL_AL_DEMO==False`, only Iterations 1 and 2 will happen.
###Code
from datetime import datetime
import glob
import shutil

HUMAN_PRICE = 0.116
AUTO_PRICE = 0.08

# Start from a clean local output directory.
try:
    os.makedirs('ic_output_data/', exist_ok=False)
except FileExistsError:
    shutil.rmtree('ic_output_data/')
    os.makedirs('ic_output_data/')

S3_OUTPUT = boto3.client('sagemaker').describe_labeling_job(LabelingJobName=job_name)[
    'OutputConfig']['S3OutputPath'] + job_name

# Download human annotation data.
!aws s3 cp {S3_OUTPUT + '/annotations/worker-response'} ic_output_data/worker-response --recursive --quiet

worker_times = []
worker_ids = []

# Collect the times and worker ids of all the annotation events to-date.
for annot_fname in glob.glob('ic_output_data/worker-response/**', recursive=True):
    if annot_fname.endswith('json'):
        with open(annot_fname, 'r') as f:
            annot_data = json.load(f)
        for answer in annot_data['answers']:
            annot_time = datetime.strptime(
                answer['submissionTime'], '%Y-%m-%dT%H:%M:%SZ')
            annot_id = answer['workerId']
            worker_times.append(annot_time)
            worker_ids.append(annot_id)

sort_ids = np.argsort(worker_times)
worker_times = np.array(worker_times)[sort_ids]
worker_ids = np.array(worker_ids)[sort_ids]
cumulative_n_annots = np.cumsum([1 for _ in worker_times])

# Count the number of annotations per unique worker id.
annots_per_worker = np.zeros(worker_ids.size)
ids_store = set()
for worker_id_id, worker_id in enumerate(worker_ids):
    ids_store.add(worker_id)
    annots_per_worker[worker_id_id] = float(
        cumulative_n_annots[worker_id_id]) / len(ids_store)

# Count the number of human annotations in each class in each iteration.
!aws s3 cp {S3_OUTPUT + '/annotations/consolidated-annotation/consolidation-response'} ic_output_data/consolidation-response --recursive --quiet

consolidated_classes = defaultdict(list)
consolidation_times = {}
consolidated_cost_times = []

for consolidated_fname in glob.glob('ic_output_data/consolidation-response/**', recursive=True):
    if consolidated_fname.endswith('json'):
        iter_id = int(consolidated_fname.split('/')[-2][-1])
        # Store the time of the most recent consolidation event as iteration time.
        iter_time = datetime.strptime(consolidated_fname.split('/')[-1], '%Y-%m-%d_%H:%M:%S.json')
        if iter_id in consolidation_times:
            consolidation_times[iter_id] = max(consolidation_times[iter_id], iter_time)
        else:
            consolidation_times[iter_id] = iter_time
        consolidated_cost_times.append(iter_time)

        with open(consolidated_fname, 'r') as f:
            consolidated_data = json.load(f)
        for consolidation in consolidated_data:
            consolidation_class = consolidation['consolidatedAnnotation']['content'][
                'category-metadata']['class-name']
            consolidated_classes[iter_id].append(consolidation_class)

total_human_labels = sum([len(annots) for annots in consolidated_classes.values()])

# Count the number of machine annotations in each class in each iteration.
!aws s3 cp {S3_OUTPUT + '/activelearning'} ic_output_data/activelearning --recursive --quiet

auto_classes = defaultdict(list)
auto_times = {}
auto_cost_times = []

for auto_fname in glob.glob('ic_output_data/activelearning/**', recursive=True):
    if auto_fname.endswith('auto_annotator_output.txt'):
        iter_id = int(auto_fname.split('/')[-3])
        with open(auto_fname, 'r') as f:
            annots = [' '.join(l.split()[1:]) for l in f.readlines()]
        for annot in annots:
            annot = json.loads(annot)
            time_str = annot['category-metadata']['creation-date']
            auto_time = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f')
            auto_class = annot['category-metadata']['class-name']
            auto_classes[iter_id].append(auto_class)
            if iter_id in auto_times:
                auto_times[iter_id] = max(auto_times[iter_id], auto_time)
            else:
                auto_times[iter_id] = auto_time
            auto_cost_times.append(auto_time)

total_auto_labels = sum([len(annots) for annots in auto_classes.values()])
n_iters = max(len(auto_times), len(consolidation_times))

def get_training_job_data(training_job_name):
    logclient = boto3.client('logs')
    log_group_name = '/aws/sagemaker/TrainingJobs'
    log_stream_name = logclient.describe_log_streams(
        logGroupName=log_group_name,
        logStreamNamePrefix=training_job_name)['logStreams'][0]['logStreamName']

    train_log = logclient.get_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        startFromHead=True
    )
    events = train_log['events']
    next_token = train_log['nextForwardToken']
    while True:
        train_log = logclient.get_log_events(
            logGroupName=log_group_name,
            logStreamName=log_stream_name,
            startFromHead=True,
            nextToken=next_token
        )
        if train_log['nextForwardToken'] == next_token:
            break
        events = events + train_log['events']
        # Advance the pagination token; without this the loop never terminates.
        next_token = train_log['nextForwardToken']

    errors = []
    for event in events:
        msg = event['message']
        if 'Final configuration' in msg:
            num_samples = int(msg.split('num_training_samples\': u\'')[1].split('\'')[0])
        elif 'Validation-accuracy' in msg:
            errors.append(float(msg.split('Validation-accuracy=')[1]))
    errors = 1 - np.array(errors)
    return num_samples, errors

training_data = !aws s3 ls {S3_OUTPUT + '/training/'} --recursive
training_sizes = []
training_errors = []
training_iters = []
for line in training_data:
    if line.split('/')[-1] == 'model.tar.gz':
        training_job_name = line.split('/')[-3]
        n_samples, errors = get_training_job_data(training_job_name)
        training_sizes.append(n_samples)
        training_errors.append(errors)
        training_iters.append(int(line.split('/')[-5]))

plt.figure(facecolor='white', figsize=(14, 4), dpi=100)
ax = plt.subplot(131)
plt.title('Label counts ({} human, {} auto)'.format(
    total_human_labels, total_auto_labels))
cmap = plt.get_cmap('coolwarm')

for iter_id in consolidated_classes.keys():
    bottom = 0
    class_counter = Counter(consolidated_classes[iter_id])
    for cname_id, cname in enumerate(CLASS_LIST):
        if iter_id == 1:
            plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom,
                    label=cname, color=cmap(cname_id / float(len(CLASS_LIST)-1)))
        else:
            plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom,
                    color=cmap(cname_id / float(len(CLASS_LIST)-1)))
        bottom += class_counter[cname]

for iter_id in auto_classes.keys():
    bottom = 0
    class_counter = Counter(auto_classes[iter_id])
    for cname_id, cname in enumerate(CLASS_LIST):
        plt.bar(iter_id + .4, class_counter[cname], width=.4, bottom=bottom,
                color=cmap(cname_id / float(len(CLASS_LIST)-1)))
        bottom += class_counter[cname]

tick_labels_human = ['Iter {}, human'.format(iter_id + 1) for iter_id in range(n_iters)]
tick_labels_auto = ['Iter {}, auto'.format(iter_id + 1) for iter_id in range(n_iters)]
tick_locations_human = np.arange(n_iters) + 1
tick_locations_auto = tick_locations_human + .4
tick_labels = np.concatenate([[tick_labels_human[idx], tick_labels_auto[idx]]
                              for idx in range(n_iters)])
tick_locations = np.concatenate([[tick_locations_human[idx], tick_locations_auto[idx]]
                                 for idx in range(n_iters)])
plt.xticks(tick_locations, tick_labels, rotation=90)
plt.legend()
plt.ylabel('Count')

ax = plt.subplot(132)
total_human = 0
total_auto = 0
for iter_id in range(1, n_iters + 1):
    cost_human = len(consolidated_classes[iter_id]) * HUMAN_PRICE
    cost_auto = len(auto_classes[iter_id]) * AUTO_PRICE
    total_human += cost_human
    total_auto += cost_auto
    plt.bar(iter_id, cost_human, width=.8, color='gray', hatch='/', edgecolor='k',
            label='human' if iter_id==1 else None)
    plt.bar(iter_id, cost_auto, bottom=cost_human, width=.8, color='gray', edgecolor='k',
            label='auto' if iter_id==1 else None)
plt.title('Annotation costs (\${:.2f} human, \${:.2f} auto)'.format(
    total_human, total_auto))
plt.xlabel('Iter')
plt.ylabel('Cost in dollars')
plt.legend()

if len(training_sizes) > 0:
    plt.subplot(133)
    plt.title('Active learning training curves')
    plt.grid(True)
    cmap = plt.get_cmap('coolwarm')
    n_all = len(training_sizes)
    for iter_id_id, (iter_id, size, errs) in enumerate(zip(training_iters, training_sizes, training_errors)):
        plt.plot(errs, label='Iter {}, auto'.format(iter_id + 1),
                 color=cmap(iter_id_id / max(1, (n_all-1))))
    plt.legend()
    plt.xscale('log')
    plt.xlabel('Training epoch')
    plt.ylabel('Validation error')
###Output
_____no_output_____
###Markdown
Analyze Ground Truth labeling job results

**This section should take about 20 minutes to complete.**

After the job finishes running (**make sure `sagemaker_client.describe_labeling_job` shows the job is complete!**), it is time to analyze the results. The plots in the [Monitor job progress](#Monitor-job-progress) section form part of the analysis. In this section, we will gain additional insights into the results, all contained in the `output manifest`. You can find the location of the output manifest under `AWS Console > SageMaker > Labeling Jobs > [name of your job]`. We will obtain it programmatically in the cell below.
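To make the parsing below concrete, here is the shape of a single output-manifest line. This is a hypothetical example: the bucket, values, and job name are illustrative, and the metadata key is derived from the `LabelAttributeName` ("category") that we set when submitting the job.
###Code
# A hypothetical output-manifest line (all values illustrative).
example_line = json.loads(
    '{"source-ref": "s3://YOUR_BUCKET/ground-truth-ic-demo/images/0634825fc1dcc96b.jpg",'
    ' "category": 0,'
    ' "category-metadata": {"class-name": "Musical Instrument", "confidence": 0.94,'
    ' "type": "groundtruth/image-classification", "human-annotated": "yes",'
    ' "creation-date": "2019-05-25T08:54:54.133410",'
    ' "job-name": "labeling-job/ground-truth-demo-1558426362"}}')
print(example_line['category-metadata']['class-name'])
###Output
_____no_output_____
###Markdown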
Postprocess the output manifest

Now that the job is complete, we will download the output manifest and postprocess it to form four arrays:
* `img_uris` contains the S3 URIs of all the images that Ground Truth annotated.
* `groundtruth_labels` contains Ground Truth's labels for each image in `img_uris`.
* `confidences` contains the confidence of each label in `groundtruth_labels`.
* `human` is a flag array that contains 1 at indices corresponding to images annotated by human annotators, and 0 at indices corresponding to images annotated by Ground Truth's automated data labeling.
###Code
# Load the output manifest's annotations.
OUTPUT_MANIFEST = 's3://{}/{}/output/{}/manifests/output/output.manifest'.format(BUCKET, EXP_NAME, job_name)
!aws s3 cp {OUTPUT_MANIFEST} 'output.manifest'

with open('output.manifest', 'r') as f:
    output = [json.loads(line.strip()) for line in f.readlines()]

# Create data arrays.
img_uris = [None] * len(output)
confidences = np.zeros(len(output))
groundtruth_labels = [None] * len(output)
human = np.zeros(len(output))

# Find the job name the manifest corresponds to.
keys = list(output[0].keys())
metakey = keys[np.where([('-metadata' in k) for k in keys])[0][0]]
jobname = metakey[:-9]

# Extract the data.
for datum_id, datum in enumerate(output):
    img_uris[datum_id] = datum['source-ref']
    groundtruth_labels[datum_id] = str(datum[metakey]['class-name'])
    confidences[datum_id] = datum[metakey]['confidence']
    human[datum_id] = int(datum[metakey]['human-annotated'] == 'yes')
groundtruth_labels = np.array(groundtruth_labels)
###Output
_____no_output_____
###Markdown
Plot class histograms

Now, let's plot the class histograms. The next cell should produce three subplots:
* The left subplot shows the number of images annotated as belonging to each visual category. The categories will be sorted from the most to the least numerous. Each bar is divided into a 'human' and 'machine' part, which shows how many images were annotated as a given category by human annotators and by the automated data labeling mechanism.
* The middle subplot is the same as the left, except the y-axis is in log scale. This helps visualize unbalanced datasets where some categories contain orders of magnitude more images than others.
* The right subplot shows the average confidence of images in each category, separately for human- and auto-annotated images.
###Code
# Compute the number of annotations in each class.
n_classes = len(set(groundtruth_labels))
sorted_clnames, class_sizes = zip(*Counter(groundtruth_labels).most_common(n_classes))

# Find ids of human-annotated images.
human_sizes = [human[groundtruth_labels == clname].sum() for clname in sorted_clnames]
class_sizes = np.array(class_sizes)
human_sizes = np.array(human_sizes)

# Compute the average annotation confidence per class.
human_confidences = np.array([confidences[np.logical_and(groundtruth_labels == clname, human)]
                              for clname in sorted_clnames])
machine_confidences = [confidences[np.logical_and(groundtruth_labels == clname, 1-human)]
                       for clname in sorted_clnames]

# If there are no images annotated as a specific class, set the average class confidence to 0.
for class_id in range(n_classes):
    if human_confidences[class_id].size == 0:
        human_confidences[class_id] = np.array([0])
    if machine_confidences[class_id].size == 0:
        machine_confidences[class_id] = np.array([0])

plt.figure(figsize=(9, 3), facecolor='white', dpi=100)
plt.subplot(1, 3, 1)
plt.title('Annotation histogram')
plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human')
plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes,
        color='gray', edgecolor='k', label='machine')
plt.xticks(range(n_classes), sorted_clnames, rotation=90)
plt.ylabel('Annotation Count')
plt.legend()

plt.subplot(1, 3, 2)
plt.title('Annotation histogram (logscale)')
plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human')
plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes,
        color='gray', edgecolor='k', label='machine')
plt.xticks(range(n_classes), sorted_clnames, rotation=90)
plt.yscale('log')

plt.subplot(1, 3, 3)
plt.title('Mean confidences')
plt.bar(np.arange(n_classes), [conf.mean() for conf in human_confidences],
        color='gray', hatch='/', edgecolor='k', width=.4)
plt.bar(np.arange(n_classes) + .4, [conf.mean() for conf in machine_confidences],
        color='gray', edgecolor='k', width=.4)
plt.xticks(range(n_classes), sorted_clnames, rotation=90);
###Output
_____no_output_____
###Markdown
Plot annotated images

In any data science task, it is crucial to plot and inspect the results to check they make sense. In order to do this, we will
1. Download the input images that Ground Truth annotated.
2. Split them by annotated category and whether the annotation was done by a human or the auto-labeling mechanism.
3. Plot images in each category and human/auto-annotated class.

We will download the input images to the `LOCAL_IMG_DIR` you can choose in the next cell. Note that if this directory already contains images with the same filenames as your Ground Truth input images, we will not re-download the images.

If your dataset is large and you do not wish to download and plot **all** the images, simply set `DATASET_SIZE` to a small number. We will pick a random subset of your data for plotting.
###Code
LOCAL_IMG_DIR = '<< choose a local directory name to download the images to >>'  # Replace with the name of a local directory to store images.
assert LOCAL_IMG_DIR != '<< choose a local directory name to download the images to >>', 'Please provide a local directory name'
DATASET_SIZE = len(img_uris)  # Change this to a reasonable number if your dataset is much larger than 10K images.

subset_ids = np.random.choice(range(len(img_uris)), DATASET_SIZE, replace=False)
img_uris = [img_uris[idx] for idx in subset_ids]
groundtruth_labels = groundtruth_labels[subset_ids]
confidences = confidences[subset_ids]
human = human[subset_ids]

img_fnames = [None] * len(img_uris)
for img_uri_id, img_uri in enumerate(img_uris):
    target_fname = os.path.join(
        LOCAL_IMG_DIR, img_uri.split('/')[-1])
    if not os.path.isfile(target_fname):
        !aws s3 cp {img_uri} {target_fname}
    img_fnames[img_uri_id] = target_fname
###Output
_____no_output_____
###Markdown
Plot a small output sample

The following cell will create two figures. The first plots `N_SHOW` images in each category, as annotated by humans. The second plots `N_SHOW` images in each category, as annotated by the auto-labeling mechanism. If any category contains fewer than `N_SHOW` images, that row will not be displayed. By default, `N_SHOW = 10`, but feel free to change this to any other small number.
###Code
N_SHOW = 10

plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=60)
for class_name_id, class_name in enumerate(sorted_clnames):
    class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0]
    if len(class_ids) < N_SHOW:
        print('Not enough human annotations to show for class: {}'.format(class_name))
        continue
    show_ids = class_ids[:N_SHOW]
    for show_id_id, show_id in enumerate(show_ids):
        plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id))
        plt.title('Human Label: ' + class_name)
        plt.imshow(imageio.imread(img_fnames[show_id]))
        plt.axis('off')
plt.tight_layout()

plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=100)
for class_name_id, class_name in enumerate(sorted_clnames):
    class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0]
    try:
        show_ids = np.random.choice(class_ids, N_SHOW, replace=False)
    except ValueError:
        print('Not enough machine annotations to show for class: {}'.format(class_name))
        continue
    for show_id_id, show_id in enumerate(show_ids):
        plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id))
        plt.title('Auto Label: ' + class_name)
        plt.imshow(imageio.imread(img_fnames[show_id]))
        plt.axis('off')
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Plot the full results

Finally, we plot all the results to a large pdf file. The pdf (called `ground_truth.pdf`) will display 100 images per page. Each page will contain images belonging to the same category, and annotated either by human annotators or by the auto-labeling mechanism. You can use this pdf to investigate exactly which images were annotated as which class at a glance.

This might take a while, and the resulting pdf might be very large. For a dataset of 1K images, the process takes only a minute and creates a 10MB pdf. You can set `N_SHOW_PER_CLASS` to a small number if you want to limit the max number of examples shown in each category.
###Code
N_SHOW_PER_CLASS = np.inf
plt.figure(figsize=(10, 10), facecolor='white', dpi=100)

with PdfPages('ground_truth.pdf') as pdf:
    for class_name in sorted_clnames:
        # Plot images annotated as class_name by humans.
        plt.clf()
        plt.text(0.1, 0.5, s='Images annotated as {} by humans'.format(class_name), fontsize=20)
        plt.axis('off')
        class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0]
        for img_id_id, img_id in enumerate(class_ids):
            if img_id_id == N_SHOW_PER_CLASS:
                break
            if img_id_id % 100 == 0:
                pdf.savefig()
                plt.clf()
                print('Plotting human annotations of {}, {}/{}...'.format(
                    class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS)))
            plt.subplot(10, 10, (img_id_id % 100) + 1)
            plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto')
            plt.axis('off')
        pdf.savefig()

        # Plot images annotated as class_name by machines.
        plt.clf()
        plt.text(0.1, 0.5, s='Images annotated as {} by machines'.format(class_name), fontsize=20)
        plt.axis('off')
        class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0]
        for img_id_id, img_id in enumerate(class_ids):
            if img_id_id == N_SHOW_PER_CLASS:
                break
            if img_id_id % 100 == 0:
                pdf.savefig()
                plt.clf()
                print('Plotting machine annotations of {}, {}/{}...'.format(
                    class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS)))
            plt.subplot(10, 10, (img_id_id % 100) + 1)
            plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto')
            plt.axis('off')
        pdf.savefig()
        plt.clf()
###Output
_____no_output_____
###Markdown
Compare Ground Truth results to known, pre-labeled data

**This section should take about 5 minutes to complete.**

Sometimes (for example, when benchmarking the system) we have an alternative set of data labels available. For example, the Open Images data has already been carefully annotated by a professional annotation workforce. This allows us to perform additional analysis that compares Ground Truth labels to the known, pre-labeled data.

When doing so, it is important to bear in mind that any image labels created by humans will most likely not be 100% accurate. For this reason, it is better to think of labeling accuracy as "adherence to a particular standard / set of labels" rather than "how good (in absolute terms) are the Ground Truth labels."

Compute accuracy

In this cell, we will calculate the accuracy of Ground Truth labels with respect to the standard labels. In [Prepare the data](#Prepare-the-data), we created the `ims` dictionary that specifies which image belongs to each category. We will convert it to an array `standard_labels` such that `standard_labels[i]` contains the label of the `i-th` image, and should ideally correspond to `groundtruth_labels[i]`. This will allow us to plot confusion matrices to assess how well the Ground Truth labels adhere to the standard labels. We plot a confusion matrix for the total dataset, and separate matrices for human annotations and auto-annotations.
###Code
def plot_confusion_matrix(cm, classes, title='Confusion matrix', normalize=False, cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)

    # Use a float format when the matrix is normalized, an integer format otherwise.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()

# Convert the 'ims' dictionary (which maps class names to images) to a list of image classes.
standard_labels = []
for img_uri in img_uris:
    img_uri = img_uri.split('/')[-1].split('.')[0]
    standard_label = [cname for cname, imgs_in_cname in ims.items() if img_uri in imgs_in_cname][0]
    standard_labels.append(standard_label)
standard_labels = np.array(standard_labels)

# Plot a confusion matrix for the full dataset.
plt.figure(facecolor='white', figsize=(12, 4), dpi=100)
plt.subplot(131)
mean_err = 100 - np.mean(standard_labels == groundtruth_labels) * 100
cnf_matrix = confusion_matrix(standard_labels, groundtruth_labels)
np.set_printoptions(precision=2)
plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()),
                      title='Full annotation set error {:.2f}%'.format(
                          mean_err),
                      normalize=False)

# Plot a confusion matrix for human-annotated Ground Truth labels.
plt.subplot(132)
mean_err = 100 - np.mean(standard_labels[human==1.] == groundtruth_labels[human==1.]) * 100
cnf_matrix = confusion_matrix(standard_labels[human==1.], groundtruth_labels[human==1.])
np.set_printoptions(precision=2)
plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()),
                      title='Human annotation set (size {}) error {:.2f}%'.format(
                          int(sum(human)), mean_err),
                      normalize=False)

# Plot a confusion matrix for auto-annotated Ground Truth labels.
if sum(human==0.) > 0:
    plt.subplot(133)
    mean_err = 100 - np.mean(standard_labels[human==0.] == groundtruth_labels[human==0.]) * 100
    cnf_matrix = confusion_matrix(standard_labels[human==0.], groundtruth_labels[human==0.])
    np.set_printoptions(precision=2)
    plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()),
                          title='Auto-annotation set (size {}) error {:.2f}%'.format(
                              int(len(human) - sum(human)), mean_err),
                          normalize=False)
###Output
_____no_output_____
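###Markdown
Beyond the aggregate error rate, a per-class breakdown is often informative. A minimal sketch using scikit-learn (it assumes the `standard_labels` and `groundtruth_labels` arrays computed above):
###Code
from sklearn.metrics import classification_report

# Per-class precision, recall, and F1 of the Ground Truth labels,
# measured against the standard labels.
print(classification_report(standard_labels, groundtruth_labels))
###Output
_____no_output_____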
###Markdown
Plot correct and incorrect annotations

This cell repeats the plot from [Plot the full results](#Plot-the-full-results). However, it sorts the predictions into correct and incorrect, drawing a green border around images whose Ground Truth label matches the standard label and a red border around those where it does not.
###Code
N_SHOW_PER_CLASS = np.inf
plt.figure(figsize=(10, 10), facecolor='white', dpi=100)

with PdfPages('ground_truth_benchmark.pdf') as pdf:
    for class_name in sorted_clnames:
        human_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0]
        auto_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0]
        for class_ids_id, class_ids in enumerate([human_ids, auto_ids]):
            plt.clf()
            plt.text(0.1, 0.5, s='Images annotated as {} by {}'.format(class_name,
                     'humans' if class_ids_id == 0 else 'machines'), fontsize=20)
            plt.axis('off')

            good_ids = class_ids[np.where(standard_labels[class_ids] == groundtruth_labels[class_ids])[0]]
            bad_ids = class_ids[np.where(standard_labels[class_ids] != groundtruth_labels[class_ids])[0]]
            for img_id_id, img_id in enumerate(np.concatenate([good_ids, bad_ids])):
                if img_id_id == N_SHOW_PER_CLASS:
                    break
                if img_id_id % 100 == 0:
                    pdf.savefig()
                    plt.clf()
                    print('Plotting annotations of {}, {}/{}...'.format(
                        class_name, img_id_id, min(len(class_ids), N_SHOW_PER_CLASS)))

                ax = plt.subplot(10, 10, (img_id_id % 100) + 1)
                plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto')
                plt.axis('off')
                if img_id_id < len(good_ids):
                    # Draw a green border around the image.
                    rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='green',
                                                       fill=False, transform=ax.transAxes)
                else:
                    # Draw a red border around the image.
                    rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='red',
                                                       fill=False, transform=ax.transAxes)
                ax.add_patch(rec)
            pdf.savefig()
    plt.clf()
###Output
_____no_output_____
###Markdown
Train an image classifier using Ground Truth labels

At this stage, we have fully labeled our dataset and we can train a machine learning model to classify images based on the categories we previously defined. We'll do so using the **augmented manifest** output of our labeling job - no additional file translation or manipulation required!
For a more complete description of the augmented manifest, see our other [example notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/ground_truth_labeling_jobs/object_detection_augmented_manifest_training/object_detection_augmented_manifest_training.ipynb).

**NOTE:** Training neural networks to high accuracy often requires a careful choice of hyperparameters. In this case, we hand-picked hyperparameters that work reasonably well for this dataset. The neural net should have an accuracy of about **60% if you're using 100 datapoints, and over 95% if you're using 1000 datapoints**. To train neural networks on novel data, consider using [SageMaker's model tuning / hyperparameter optimization algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html).

First, we'll split our augmented manifest into a training set and a validation set using an 80/20 split.
###Code
with open('output.manifest', 'r') as f:
    output = [json.loads(line) for line in f.readlines()]

# Shuffle output in place.
np.random.shuffle(output)

dataset_size = len(output)
train_test_split_index = round(dataset_size*0.8)

train_data = output[:train_test_split_index]
validation_data = output[train_test_split_index:]

num_training_samples = 0
with open('train.manifest', 'w') as f:
    for line in train_data:
        f.write(json.dumps(line))
        f.write('\n')
        num_training_samples += 1

with open('validation.manifest', 'w') as f:
    for line in validation_data:
        f.write(json.dumps(line))
        f.write('\n')
###Output
_____no_output_____
###Markdown
Next, we'll upload these manifest files to the previously defined S3 bucket so that they can be used in the training job.
###Code
s3.upload_file('train.manifest', BUCKET, EXP_NAME + '/train.manifest')
s3.upload_file('validation.manifest', BUCKET, EXP_NAME + '/validation.manifest')

# Create a unique job name.
nn_job_name_prefix = 'groundtruth-augmented-manifest-demo'
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
nn_job_name = nn_job_name_prefix + timestamp

training_image = sagemaker.amazon.amazon_estimator.get_image_uri(
    boto3.Session().region_name, 'image-classification', repo_version='latest')

training_params = \
{
    "AlgorithmSpecification": {
        "TrainingImage": training_image,
        "TrainingInputMode": "Pipe"
    },
    "RoleArn": role,
    "OutputDataConfig": {
        "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME)
    },
    "ResourceConfig": {
        "InstanceCount": 1,
        "InstanceType": "ml.p3.2xlarge",
        "VolumeSizeInGB": 50
    },
    "TrainingJobName": nn_job_name,
    "HyperParameters": {
        "epochs": "30",
        "image_shape": "3,224,224",
        "learning_rate": "0.01",
        "lr_scheduler_step": "10,20",
        "mini_batch_size": "32",
        "num_classes": str(num_classes),
        "num_layers": "18",
        "num_training_samples": str(num_training_samples),
        "resize": "224",
        "use_pretrained_model": "1"
    },
    "StoppingCondition": {
        "MaxRuntimeInSeconds": 86400
    },
    "InputDataConfig": [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "AugmentedManifestFile",
                    "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'train.manifest'),
                    "S3DataDistributionType": "FullyReplicated",
                    "AttributeNames": ["source-ref", "category"]
                }
            },
            "ContentType": "application/x-recordio",
            "RecordWrapperType": "RecordIO",
            "CompressionType": "None"
        },
        {
            "ChannelName": "validation",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "AugmentedManifestFile",
                    "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'validation.manifest'),
                    "S3DataDistributionType": "FullyReplicated",
                    "AttributeNames": ["source-ref", "category"]
                }
            },
            "ContentType": "application/x-recordio",
"application/x-recordio", "RecordWrapperType": "RecordIO", "CompressionType": "None" } ] } ###Output _____no_output_____ ###Markdown Now we create the SageMaker training job. ###Code sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_training_job(**training_params) # Confirm that the training job has started print('Transform job started') while(True): status = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name)['TrainingJobStatus'] if status == 'Completed': print("Transform job ended with status: " + status) break if status == 'Failed': message = response['FailureReason'] print('Transform failed with the following error: {}'.format(message)) raise Exception('Transform job failed') time.sleep(30) ###Output _____no_output_____ ###Markdown Deploy the Model Now that we've fully labeled our dataset and have a trained model, we want to use the model to perform inference.Image classification only supports encoded .jpg and .png image formats as inference input for now. The output is the probability values for all classes encoded in JSON format, or in JSON Lines format for batch transform.This section involves several steps, Create Model - Create model for the training output Batch Transform - Create a transform job to perform batch inference. Host the model for realtime inference - Create an inference endpoint and perform realtime inference. Create Model ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) model_name="groundtruth-demo-ic-model" + timestamp print(model_name) info = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name) model_data = info['ModelArtifacts']['S3ModelArtifacts'] print(model_data) primary_container = { 'Image': training_image, 'ModelDataUrl': model_data, } create_model_response = sagemaker_client.create_model( ModelName = model_name, ExecutionRoleArn = role, PrimaryContainer = primary_container) print(create_model_response['ModelArn']) ###Output _____no_output_____ ###Markdown Batch TransformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction. Download Test DataFirst, let's download a test image that has been held out from the training and validation data. ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) batch_job_name = "image-classification-model" + timestamp batch_input = 's3://{}/{}/test/'.format(BUCKET, EXP_NAME) batch_output = 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name) # Copy two images from each class, unseen by the neural net, to a local bucket. 
test_images = []
for class_id in ['/m/04szw', '/m/02xwb', '/m/0cd4d', '/m/07dm6', '/m/0152hh']:
    test_images.extend([label[0] + '.jpg' for label in all_labels
                        if (label[2] == class_id and label[3] == '1')][-2:])

!aws s3 rm $batch_input --recursive
for test_img in test_images:
    !aws s3 cp s3://open-images-dataset/test/{test_img} {batch_input}

request = \
{
    "TransformJobName": batch_job_name,
    "ModelName": model_name,
    "MaxConcurrentTransforms": 16,
    "MaxPayloadInMB": 6,
    "BatchStrategy": "SingleRecord",
    "TransformOutput": {
        "S3OutputPath": batch_output
    },
    "TransformInput": {
        "DataSource": {
            "S3DataSource": {
                "S3DataType": "S3Prefix",
                "S3Uri": batch_input
            }
        },
        "ContentType": "application/x-image",
        "SplitType": "None",
        "CompressionType": "None"
    },
    "TransformResources": {
        "InstanceType": "ml.p2.xlarge",
        "InstanceCount": 1
    }
}

print('Transform job name: {}'.format(batch_job_name))

sagemaker_client = boto3.client('sagemaker')
sagemaker_client.create_transform_job(**request)

print("Created Transform job with name: ", batch_job_name)

while(True):
    response = sagemaker_client.describe_transform_job(TransformJobName=batch_job_name)
    status = response['TransformJobStatus']
    if status == 'Completed':
        print("Transform job ended with status: " + status)
        break
    if status == 'Failed':
        message = response['FailureReason']
        print('Transform failed with the following error: {}'.format(message))
        raise Exception('Transform job failed')
    time.sleep(30)
###Output
_____no_output_____
###Markdown
After the job completes, let's inspect the prediction results.
###Code
def get_label(out_fname):
    !aws s3 cp {out_fname} .
    print(out_fname)
    with open(out_fname.split('/')[-1]) as f:
        data = json.load(f)
        index = np.argmax(data['prediction'])
        probability = data['prediction'][index]
    print("Result: label - " + CLASS_LIST[index] + ", probability - " + str(probability))
    input_fname = out_fname.split('/')[-1][:-4]
    return CLASS_LIST[index], probability, input_fname

# Show prediction results.
!rm test_inputs/*
plt.figure(facecolor='white', figsize=(7, 15), dpi=100)
outputs = !aws s3 ls {batch_output}
outputs = [get_label(batch_output + prefix.split()[-1]) for prefix in outputs]
outputs.sort(key=lambda pred: pred[1], reverse=True)

for fname_id, (pred_cname, pred_conf, pred_fname) in enumerate(outputs):
    !aws s3 cp {batch_input}{pred_fname} test_inputs/{pred_fname}
    plt.subplot(5, 2, fname_id+1)
    img = imageio.imread('test_inputs/{}'.format(pred_fname))
    plt.imshow(img)
    plt.axis('off')
    plt.title('{}\nconfidence={:.2f}'.format(pred_cname, pred_conf))

if RUN_FULL_AL_DEMO:
    warning = ''
else:
    warning = ('\nNOTE: In this small demo we only used 80 images to train the neural network.\n'
               'The predictions will be far from perfect! Set RUN_FULL_AL_DEMO=True to see properly trained results.')
plt.suptitle('Predictions sorted by confidence.{}'.format(warning))
###Output
_____no_output_____
###Markdown
Realtime Inference

We now host the model with an endpoint and perform realtime inference.

This section involves several steps:
* Create endpoint configuration - Create a configuration defining an endpoint.
* Create endpoint - Use the configuration to create an inference endpoint.
* Perform inference - Perform inference on some input data using the endpoint.
* Clean up - Delete the endpoint and model.

Create Endpoint Configuration
###Code
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name + '-epc' + timestamp

endpoint_config_response = sagemaker_client.create_endpoint_config(
    EndpointConfigName = endpoint_config_name,
    ProductionVariants=[{
        'InstanceType':'ml.m4.xlarge',
        'InitialInstanceCount':1,
        'ModelName':model_name,
        'VariantName':'AllTraffic'}])

print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
###Output
_____no_output_____
###Markdown
Create Endpoint

Lastly, we create the endpoint that serves up the model by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes about 10 minutes to complete.
###Code
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name + '-ep' + timestamp
print('Endpoint name: {}'.format(endpoint_name))

endpoint_params = {
    'EndpointName': endpoint_name,
    'EndpointConfigName': endpoint_config_name,
}
endpoint_response = sagemaker_client.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))

# Get the status of the endpoint.
response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))

# Wait until the endpoint is in service.
sagemaker_client.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)

# Print the final status of the endpoint.
endpoint_response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)
status = endpoint_response['EndpointStatus']
print('Endpoint creation ended with EndpointStatus = {}'.format(status))

if status != 'InService':
    raise Exception('Endpoint creation failed.')

# Perform realtime inference on one of the held-out test images.
with open('test_inputs/{}'.format(test_images[0]), 'rb') as f:
    payload = f.read()
    payload = bytearray(payload)

client = boto3.client('sagemaker-runtime')
response = client.invoke_endpoint(EndpointName=endpoint_name,
                                  ContentType='application/x-image',
                                  Body=payload)

# `response` comes in a json format, let's unpack it.
result = json.loads(response['Body'].read())

# The result outputs the probabilities for all classes.
# Find the class with maximum probability and print the class name.
print('Model prediction is: {}'.format(CLASS_LIST[np.argmax(result)]))
###Output
_____no_output_____
###Markdown
Finally, let's clean up and delete this endpoint.
###Code
sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
###Output
_____no_output_____
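###Markdown
Deleting the endpoint stops the hosting charges. The step list above also mentions deleting the model; if you want to remove the remaining resources as well, a minimal sketch (it assumes the names defined earlier in this section):
###Code
# Remove the endpoint configuration and the model created in this section.
sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sagemaker_client.delete_model(ModelName=model_name)
###Output
_____no_output_____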
[Monitor job progress](Monitor-job-progress)3. [Analyze Ground Truth labeling job results (time: about 20min)](Analyze-Ground-Truth-labeling-job-results) 1. [Postprocess the output manifest](Postprocess-the-output-manifest) 2. [Plot class histograms](Plot-class-histograms) 3. [Plot annotated images](Plot-annotated-images) 1. [Plot a small output sample](Plot-a-small-output-sample) 2. [Plot the full results](Plot-the-full-results)4. [Compare Ground Truth results to standard labels (time: about 5min)](Compare-Ground-Truth-results-to-standard-labels) 1. [Compute accuracy](Compute-accuracy) 2. [Plot correct and incorrect annotations](Plot-correct-and-incorrect-annotations)5. [Train an image classifier using Ground Truth labels (time: about 15min)](Train-an-image-classifier-using-Ground-Truth-labels)6. [Deploy the Model (time: about 20min)](Deploy-the-Model) 1. [Create Model](Create-Model) 2. [Batch Transform](Batch-Transform) 3. [Realtime Inference](Realtime-Inference) 1. [Create Endpoint Configuration](Create-Endpoint-Configuration) 2. [Create Endpoint](Create-Endpoint) 3. [Perform Inference](Perform-Inference)7. [Review](Review) IntroductionThis sample notebook takes you through an end-to-end workflow to demonstrate the functionality of SageMaker Ground Truth. We'll start with an unlabeled image data set, acquire labels for all the images using SageMaker Ground Truth, analyze the results of the labeling job, train an image classifier, host the resulting model, and, finally, use it to make predictions. Before you begin, we highly recommend you start a Ground Truth labeling job through the AWS Console first to familiarize yourself with the workflow. The AWS Console offers less flexibility than the API, but is simple to use. Cost and runtimeYou can run this demo in two modes:1. Set `RUN_FULL_AL_DEMO = True` in the next cell to label 1000 images. This should cost about \$100 given current [Ground Truth pricing scheme](https://aws.amazon.com/sagemaker/groundtruth/pricing/). In order to reduce the cost, we will use Ground Truth's auto-labeling feature. Auto-labeling uses computer vision to learn from human responses and automatically create labels for the easiest images at a cheap price. The total end-to-end runtime should be about 4h.1. Set `RUN_FULL_AL_DEMO = False` in the next cell to label only 100 images. This should cost about \$15. **Since Ground Truth's auto-labeling feature only kicks in for datasets of 1000 images or more, this cheaper version of the demo will not use it. Some of the analysis plots might look awkward, but you should still be able to see good results on the human-annotated 100 images.** PrerequisitesTo run this notebook, you can simply execute each cell one-by-one. To understand what's happening, you'll need:* An S3 bucket you can write to -- please provide its name in the following cell. The bucket must be in the same region as this SageMaker Notebook instance. You can also change the `EXP_NAME` to any valid S3 prefix. All the files related to this experiment will be stored in that prefix of your bucket.* The S3 bucket that you use for this demo must have a CORS policy attached. 
To learn more about this requirement, and how to attach a CORS policy to an S3 bucket, see [CORS Permission Requirement](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-cors-update.html).* Familiarity with Python and [numpy](http://www.numpy.org/).* Basic familiarity with [AWS S3](https://docs.aws.amazon.com/s3/index.html),* Basic understanding of [AWS Sagemaker](https://aws.amazon.com/sagemaker/),* Basic familiarity with [AWS Command Line Interface (CLI)](https://aws.amazon.com/cli/) -- set it up with credentials to access the AWS account you're running this notebook from. This should work out-of-the-box on SageMaker Jupyter Notebook instances.This notebook is only tested on a SageMaker notebook instance. The runtimes given are approximate, we used an `ml.m4.xlarge` instance in our tests. However, you can likely run it on a local instance by first executing the cell below on SageMaker, and then copying the `role` string to your local copy of the notebook.NOTE: This notebook will create/remove subdirectories in its working directory. We recommend to place this notebook in its own directory before running it. ###Code %matplotlib inline %load_ext autoreload %autoreload 2 import os from collections import namedtuple from collections import defaultdict from collections import Counter import itertools import json import random import time import imageio import numpy as np import matplotlib import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from sklearn.metrics import confusion_matrix import boto3 import sagemaker from urllib.parse import urlparse BUCKET = '<< YOUR S3 BUCKET NAME >>' assert BUCKET != '<< YOUR S3 BUCKET NAME >>', 'Please provide a custom S3 bucket name.' EXP_NAME = 'ground-truth-ic-demo' # Any valid S3 prefix. RUN_FULL_AL_DEMO = True # See 'Cost and Runtime' in the Markdown cell above! # Make sure the bucket is in the same region as this notebook. role = sagemaker.get_execution_role() region = boto3.session.Session().region_name s3 = boto3.client('s3') bucket_region = s3.head_bucket(Bucket=BUCKET)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region'] assert bucket_region == region, "You S3 bucket {} and this notebook need to be in the same region.".format(BUCKET) ###Output _____no_output_____ ###Markdown Run a Ground Truth labeling job**This section should take about 3h to complete.**We will first run a labeling job. This involves several steps: collecting the images we want labeled, specifying the possible label categories, creating instructions, and writing a labeling job specification. In addition, we highly recommend to run a (free) mock job using a private workforce before you submit any job to the public workforce. This notebook will explain how to do that as an optional step. Without using a private workforce, this section until completion of your labeling job should take about 3h. However, this may vary depending on the availability of the public annotation workforce. Prepare the dataWe will first download images and labels of a subset of the [Google Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html). These labels were [carefully verified](https://storage.googleapis.com/openimages/web/factsfigures.html). Later, will compare Ground Truth annotations to these labels. 
Our dataset will include images in the following categories:* Musical Instrument (500 images)* Fruit (370 images)* Cheetah (50 images)* Tiger (40 images)* Snowman (40 images)If you chose `RUN_FULL_AL_DEMO = False`, then we will choose a subset of 100 images in this dataset. This is a diverse dataset of interesting images, and should be fun for the human annotators to work with. You are free to ask the annotators to annotate any images you wish (as long as the images do not contain adult content; in which case, you must adjust the labeling job request this job produces, please check the Ground Truth documentation).We will copy these images to our local `BUCKET`, and will create the corresponding *input manifest*. The input manifest is a formatted list of the S3 locations of the images we want Ground Truth to annotate. We will upload this manifest to our S3 `BUCKET`. Disclosure regarding the Open Images Dataset V4:Open Images Dataset V4 is created by Google Inc. We have not modified the images or the accompanying annotations. You can obtain the images and the annotations [here](https://storage.googleapis.com/openimages/web/download.html). The annotations are licensed by Google Inc. under [CC BY 4.0](https://creativecommons.org/licenses/by/2.0/) license. The images are listed as having a [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/) license. The following paper describes Open Images V4 in depth: from the data collection and annotation to detailed statistics about the data and evaluation of models trained on it.A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, T. Duerig, and V. Ferrari.*The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale.* arXiv:1811.00982, 2018. ([link to PDF](https://arxiv.org/abs/1811.00982)) ###Code # Download and process the Open Images annotations. !wget https://storage.googleapis.com/openimages/2018_04/test/test-annotations-human-imagelabels-boxable.csv -O openimgs-annotations.csv with open('openimgs-annotations.csv', 'r') as f: all_labels = [line.strip().split(',') for line in f.readlines()] # Extract image ids in each of our desired classes. ims = {} ims['Musical Instrument'] = [label[0] for label in all_labels if (label[2] == '/m/04szw' and label[3] == '1')][:500] ims['Fruit'] = [label[0] for label in all_labels if (label[2] == '/m/02xwb' and label[3] == '1')][:371] ims['Fruit'].remove('02a54f6864478101') # This image contains personal information, let's remove it from our dataset. ims['Cheetah'] = [label[0] for label in all_labels if (label[2] == '/m/0cd4d' and label[3] == '1')][:50] ims['Tiger'] = [label[0] for label in all_labels if (label[2] == '/m/07dm6' and label[3] == '1')][:40] ims['Snowman'] = [label[0] for label in all_labels if (label[2] == '/m/0152hh' and label[3] == '1')][:40] num_classes = len(ims) # If running the short version of the demo, reduce each class count 10 times. for key in ims.keys(): if RUN_FULL_AL_DEMO is False: ims[key] = set(ims[key][:int(len(ims[key]) / 10)]) else: ims[key] = set(ims[key]) # Copy the images to our local bucket. s3 = boto3.client('s3') for img_id, img in enumerate(itertools.chain.from_iterable(ims.values())): if (img_id + 1) % 10 == 0: print('Copying image {} / {}'.format((img_id+1), 1000)) copy_source = { 'Bucket': 'open-images-dataset', 'Key': 'test/{}.jpg'.format(img) } s3.copy(copy_source, BUCKET, '{}/images/{}.jpg'.format(EXP_NAME, img)) # Create and upload the input manifest. 
manifest_name = 'input.manifest' with open(manifest_name, 'w') as f: for img in itertools.chain.from_iterable(ims.values()): img_path = 's3://{}/{}/images/{}.jpg'.format(BUCKET, EXP_NAME, img) f.write('{"source-ref": "' + img_path +'"}\n') s3.upload_file(manifest_name, BUCKET, EXP_NAME + '/' + manifest_name) ###Output _____no_output_____ ###Markdown After running the cell above, you should be able to go to `s3://BUCKET/EXP_NAME/images` in [S3 console](https://console.aws.amazon.com/s3/) and see a thousand images. We recommend you inspect the contents of these images! You can download them all to a local machine using the AWS CLI. Specify the categoriesTo run an image classification labeling job, you need to decide on a set of classes the annotators can choose from. In our case, this list is `["Musical Instrument", "Fruit", "Cheetah", "Tiger", "Snowman"]`. In your own job you can choose any list of up to 10 classes. We recommend the classes to be as unambiguous and concrete as possible. The categories should be mutually exclusive, with only one correct label per image. In addition, be careful to make the task as *objective* as possible, unless of course your intention is to obtain subjective labels.* Example good category lists: `["Human", "No Human"]`, `["Golden Retriever", "Labrador", "English Bulldog", "German Shepherd"]`, `["Car", "Train", "Ship", "Pedestrian"]`.* Example bad category lists: `["Prominent object", "Not prominent"]` (meaning unclear), `["Beautiful", "Ugly"]` (subjective), `["Dog", "Animal", "Car"]` (not mutually exclusive). To work with Ground Truth, this list needs to be converted to a .json file and uploaded to the S3 `BUCKET`.*Note: The ordering of the labels or classes in the template governs the class indices that you will see downstream in the output manifest (this numbering is zero-indexed). In other words, the class that appears second in the template will correspond to class "1" in the output. At the end of this demonstration, we will train a model and make predictions, and this class ordering is instrumental to interpreting the results.* ###Code CLASS_LIST = list(ims.keys()) print("Label space is {}".format(CLASS_LIST)) json_body = { 'labels': [{'label': label} for label in CLASS_LIST] } with open('class_labels.json', 'w') as f: json.dump(json_body, f) s3.upload_file('class_labels.json', BUCKET, EXP_NAME + '/class_labels.json') ###Output _____no_output_____ ###Markdown You should now see `class_labels.json` in `s3://BUCKET/EXP_NAME/`. Create the instruction templatePart or all of your images will be annotated by human annotators. It is **essential** to provide good instructions that help the annotators give you the annotations you want. Good instructions are:1. Concise. We recommend limiting verbal/textual instruction to two sentences, and focusing on clear visuals.2. Visual. In the case of image classification, we recommend providing one labeled image in each of the classes as part of the instruction.When used through the AWS Console, Ground Truth helps you create the instructions using a visual wizard. When using the API, you need to create an HTML template for your instructions. Below, we prepare a very simple but effective template and upload it to your S3 bucket.NOTE: If you use any images in your template (as we do), they need to be publicly accessible. You can enable public access to files in your S3 bucket through the S3 Console, as described in [S3 Documentation](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/set-object-permissions.html). 
Testing your instructionsIt is very easy to create broken instructions. This might cause your labeling job to fail. However, it might also cause your job to complete with meaningless results (when the annotators have no idea what to do, or the instructions are plain wrong). We *highly recommend* that you verify that your task is correct in two ways:1. The following cell creates and uploads a file called `instructions.template` to S3. It also creates `instructions.html` that you can open in a local browser window. Please do so and inspect the resulting web page; it should correspond to what you want your annotators to see (except the actual image to annotate will not be visible).2. Run your job in a private workforce, which is a way to run a mock labeling job. We describe how to do it in [Verify your task using a private team [OPTIONAL]](Verify-your-task-using-a-private-team-[OPTIONAL]). ###Code img_examples = ['https://s3.amazonaws.com/open-images-dataset/test/{}'.format(img_id) for img_id in ['0634825fc1dcc96b.jpg', '0415b6a36f3381ed.jpg', '8582cc08068e2d0f.jpg', '8728e9fa662a8921.jpg', '926d31e8cde9055e.jpg']] def make_template(test_template=False, save_fname='instructions.template'): template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script> <crowd-form> <crowd-image-classifier name="crowd-image-classifier" src="{{{{ task.input.taskObject | grant_read_access }}}}" header="Dear Annotator, please tell me what you can see in the image. Thank you!" categories="{categories_str}" > <full-instructions header="Image classification instructions"> </full-instructions> <short-instructions> <p>Dear Annotator, please tell me whether what you can see in the image. Thank you!</p> <p><img src="{}" style="max-width:100%"> <br>Example "Musical Instrument". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Fruit".</p> <p><img src="{}" style="max-width:100%"> <br>Example "Cheetah". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Tiger". </p> <p><img src="{}" style="max-width:100%"> <br>Example "Snowman". </p> </short-instructions> </crowd-image-classifier> </crowd-form>""".format(*img_examples, categories_str=str(CLASS_LIST) if test_template else '{{ task.input.labels | to_json | escape }}') with open(save_fname, 'w') as f: f.write(template) if test_template is False: print(template) make_template(test_template=True, save_fname='instructions.html') make_template(test_template=False, save_fname='instructions.template') s3.upload_file('instructions.template', BUCKET, EXP_NAME + '/instructions.template') ###Output _____no_output_____ ###Markdown You should now be able to find your template in `s3://BUCKET/EXP_NAME/instructions.template`. Create a private team to test your task [OPTIONAL]This step requires you to use the AWS Console. However, we **highly recommend** that you follow it, especially when creating your own task with a custom dataset, label set, and template.We will create a `private workteam` and add only one user (you) to it. Then, we will modify the Ground Truth API job request to send the task to that workforce. You will then be able to see your annotation job exactly as the public annotators would see it. You can even annotate the whole dataset yourself! To create a private team:1. Go to `AWS Console > Amazon SageMaker > Labeling workforces`2. Click "Private" and then "Create private team". 3. Enter the desired name for your private workteam.4. Enter your own email address in the "Email addresses" section. 5. 
Enter the name of your organization and a contact email to administer the private workteam.6. Click "Create Private Team".7. The AWS Console should now return to `AWS Console > Amazon SageMaker > Labeling workforces`. Your newly created team should be visible under "Private teams". Next to it you will see an `ARN` which is a long string that looks like `arn:aws:sagemaker:region-name-123456:workteam/private-crowd/team-name`. Copy this ARN in the cell below.8. You should get an email from `[email protected]` that contains your workforce username and password. 9. In `AWS Console > Amazon SageMaker > Labeling workforces`, click on the URL in `Labeling portal sign-in URL`. Use the email/password combination from Step 8 to log in (you will be asked to create a new, non-default password).That's it! This is your private worker's interface. When we create a verification task in [Verify your task using a private team](Verify-your-task-using-a-private-team-[OPTIONAL]) below, your task should appear in this window. You can invite your colleagues to participate in the labeling job by clicking the "Invite new workers" button.The [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-private.html) has more details on the management of private workteams. ###Code private_workteam_arn = '<< your private workteam ARN here >>' ###Output _____no_output_____ ###Markdown Define pre-built lambda functions for use in the labeling job Before we submit the request, we need to define the ARNs for four key components of the labeling job: 1) the workteam, 2) the annotation consolidation Lambda function, 3) the pre-labeling task Lambda function, and 4) the machine learning algorithm to perform auto-annotation. These functions are defined by strings with region names and AWS service account numbers, so we will define a mapping below that will enable you to run this notebook in any of our supported regions. See the official documentation for the available ARNs:* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-public.html) for a discussion of the workteam ARN definition. There is only one valid selection if you choose to use the public workforce; if you elect to use a private workteam, you should check the corresponding ARN for the workteam.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_HumanTaskConfig.htmlSageMaker-Type-HumanTaskConfig-PreHumanTaskLambdaArn) for available pre-human ARNs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_AnnotationConsolidationConfig.htmlSageMaker-Type-AnnotationConsolidationConfig-AnnotationConsolidationLambdaArn) for available annotation consolidation ARNs for other workflows.* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_LabelingJobAlgorithmsConfig.htmlSageMaker-Type-LabelingJobAlgorithmsConfig-LabelingJobAlgorithmSpecificationArn) for available auto-labeling ARNs for other workflows. ###Code # Specify ARNs for resources needed to run an image classification job.
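# The account IDs in the mapping below are AWS service accounts that host the
# managed pre-labeling and annotation-consolidation Lambda functions per region
# (see the ARN patterns constructed underneath).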
ac_arn_map = {'us-west-2': '081040173940', 'us-east-1': '432418664414', 'us-east-2': '266458841044', 'eu-west-1': '568282634449', 'ap-northeast-1': '477331159723'} prehuman_arn = 'arn:aws:lambda:{}:{}:function:PRE-ImageMultiClass'.format(region, ac_arn_map[region]) acs_arn = 'arn:aws:lambda:{}:{}:function:ACS-ImageMultiClass'.format(region, ac_arn_map[region]) labeling_algorithm_specification_arn = 'arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/image-classification'.format(region) workteam_arn = 'arn:aws:sagemaker:{}:394669845002:workteam/public-crowd/default'.format(region) ###Output _____no_output_____ ###Markdown Submit the Ground Truth job requestThe API starts a Ground Truth job by submitting a request. The request contains the full configuration of the annotation task, and allows you to modify the fine details of the job that are fixed to default values when you use the AWS Console. The parameters that make up the request are described in more detail in the [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateLabelingJob.html).After you submit the request, you should be able to see the job in your AWS Console, at `Amazon SageMaker > Labeling Jobs`.You can track the progress of the job there. This job will take several hours to complete. If your job is larger (say 100,000 images), the speed and cost benefit of auto-labeling should be larger. Verify your task using a private team [OPTIONAL]If you chose to follow the steps in [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]), then you can first verify that your task runs as expected. To do this:1. Set VERIFY_USING_PRIVATE_WORKFORCE to True in the cell below.2. Run the next two cells. This will define the task and submit it to the private workforce (to you).3. After a few minutes, you should be able to see your task in your private workforce interface [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]).Please verify that the task appears as you want it to appear.4. If everything is in order, change `VERIFY_USING_PRIVATE_WORKFORCE` to `False` and rerun the cell below to start the real annotation task! ###Code VERIFY_USING_PRIVATE_WORKFORCE = False USE_AUTO_LABELING = True task_description = 'What do you see: a {}?'.format(', a '.join(CLASS_LIST)) task_keywords = ['image', 'classification', 'humans'] task_title = task_description job_name = 'ground-truth-demo-' + str(int(time.time())) human_task_config = { "AnnotationConsolidationConfig": { "AnnotationConsolidationLambdaArn": acs_arn, }, "PreHumanTaskLambdaArn": prehuman_arn, "MaxConcurrentTaskCount": 200, # 200 images will be sent at a time to the workteam. "NumberOfHumanWorkersPerDataObject": 3, # 3 separate workers will be required to label each image. "TaskAvailabilityLifetimeInSeconds": 21600, # Your workteam has 6 hours to complete all pending tasks. "TaskDescription": task_description, "TaskKeywords": task_keywords, "TaskTimeLimitInSeconds": 300, # Each image must be labeled within 5 minutes.
"TaskTitle": task_title, "UiConfig": { "UiTemplateS3Uri": 's3://{}/{}/instructions.template'.format(BUCKET, EXP_NAME), } } if not VERIFY_USING_PRIVATE_WORKFORCE: human_task_config["PublicWorkforceTaskPrice"] = { "AmountInUsd": { "Dollars": 0, "Cents": 1, "TenthFractionsOfACent": 2, } } human_task_config["WorkteamArn"] = workteam_arn else: human_task_config["WorkteamArn"] = private_workteam_arn ground_truth_request = { "InputConfig" : { "DataSource": { "S3DataSource": { "ManifestS3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, manifest_name), } }, "DataAttributes": { "ContentClassifiers": [ "FreeOfPersonallyIdentifiableInformation", "FreeOfAdultContent" ] }, }, "OutputConfig" : { "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME), }, "HumanTaskConfig" : human_task_config, "LabelingJobName": job_name, "RoleArn": role, "LabelAttributeName": "category", "LabelCategoryConfigS3Uri": 's3://{}/{}/class_labels.json'.format(BUCKET, EXP_NAME), } if USE_AUTO_LABELING and RUN_FULL_AL_DEMO: ground_truth_request[ "LabelingJobAlgorithmsConfig"] = { "LabelingJobAlgorithmSpecificationArn": labeling_algorithm_specification_arn } sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_labeling_job(**ground_truth_request) ###Output _____no_output_____ ###Markdown Monitor job progressA Ground Truth job can take a few hours to complete (if your dataset is larger than 10000 images, it can take much longer than that!). One way to monitor the job's progress is through AWS Console. In this notebook, we will use Ground Truth output files and Cloud Watch logs in order to monitor the progress. You can re-evaluate the next two cells repeatedly. You can re-evaluate the next cell repeatedly. It sends a `describe_labelging_job` request which should tell you whether the job is completed or not. If it is, then 'LabelingJobStatus' will be 'Completed'. ###Code sagemaker_client.describe_labeling_job(LabelingJobName=job_name) ###Output _____no_output_____ ###Markdown The next cell extract detailed information on how your job is doing to-date. You can re-evaluate it at any time. It should give you:* The number of human and machine-annotated images in each category across the iterations of your labeling job.* The training curves of any neural network training jobs launched by Ground Truth **(only if you are running with `RUN_FULL_AL_DEMO=True`)**.* The cost of the human- and machine-annotatoed labels.To understand the pricing, study [the pricing doc](https://aws.amazon.com/sagemaker/groundtruth/pricing/) carefully. In our case, each human label costs `$0.08 + 3 * $0.012 = $0.116` and each auto-label costs `$0.08`. There is also a small added cost of using SageMaker instances for neural net training and inference during auto-labeling. However, this should be insignificant compared the other costs.If `RUN_FULL_AL_DEMO==True`, then the job will proceed in multiple iterations. * Iteration 1: Ground Truth will send out 10 images as 'probes' for human annotation. If these are succesfully annotated, proceed to Iteration 2.* Iteration 2: Send out a batch of `MaxConcurrentTaskCount - 10` (in our case, 190) images for human annotation to obtain an active learning training batch.* Iteration 3: Send out another batch of 200 images for human annotation to obtain an active learning validation set.* Iteration 4a: Train a neural net to do auto-labeling. Auto-label as many datapoints as possible. 
* Iteration 4b: If there is any data left over, send out at most 200 images for human annotation.* Repeat Iteration 4a and 4b until all data is annotated.If `RUN_FULL_AL_DEMO==False`, only Iterations 1 and 2 will happen. ###Code from datetime import datetime import glob import shutil HUMAN_PRICE = 0.116 AUTO_PRICE = 0.08 try: os.makedirs('ic_output_data/', exist_ok=False) except FileExistsError: shutil.rmtree('ic_output_data/') S3_OUTPUT = boto3.client('sagemaker').describe_labeling_job(LabelingJobName=job_name)[ 'OutputConfig']['S3OutputPath'] + job_name # Download human annotation data. !aws s3 cp {S3_OUTPUT + '/annotations/worker-response'} ic_output_data/worker-response --recursive --quiet worker_times = [] worker_ids = [] # Collect the times and worker ids of all the annotation events to-date. for annot_fname in glob.glob('ic_output_data/worker-response/**', recursive=True): if annot_fname.endswith('json'): with open(annot_fname, 'r') as f: annot_data = json.load(f) for answer in annot_data['answers']: annot_time = datetime.strptime( answer['submissionTime'], '%Y-%m-%dT%H:%M:%SZ') annot_id = answer['workerId'] worker_times.append(annot_time) worker_ids.append(annot_id) sort_ids = np.argsort(worker_times) worker_times = np.array(worker_times)[sort_ids] worker_ids = np.array(worker_ids)[sort_ids] cumulative_n_annots = np.cumsum([1 for _ in worker_times]) # Count the number of annotations per unique worker id. annots_per_worker = np.zeros(worker_ids.size) ids_store = set() for worker_id_id, worker_id in enumerate(worker_ids): ids_store.add(worker_id) annots_per_worker[worker_id_id] = float( cumulative_n_annots[worker_id_id]) / len(ids_store) # Count number of human annotations in each class each iteration. !aws s3 cp {S3_OUTPUT + '/annotations/consolidated-annotation/consolidation-response'} ic_output_data/consolidation-response --recursive --quiet consolidated_classes = defaultdict(list) consolidation_times = {} consolidated_cost_times = [] for consolidated_fname in glob.glob('ic_output_data/consolidation-response/**', recursive=True): if consolidated_fname.endswith('json'): iter_id = int(consolidated_fname.split('/')[-2][-1]) # Store the time of the most recent consolidation event as iteration time. iter_time = datetime.strptime(consolidated_fname.split('/')[-1], '%Y-%m-%d_%H:%M:%S.json') if iter_id in consolidation_times: consolidation_times[iter_id] = max(consolidation_times[iter_id], iter_time) else: consolidation_times[iter_id] = iter_time consolidated_cost_times.append(iter_time) with open(consolidated_fname, 'r') as f: consolidated_data = json.load(f) for consolidation in consolidated_data: consolidation_class = consolidation['consolidatedAnnotation']['content'][ 'category-metadata']['class-name'] consolidated_classes[iter_id].append(consolidation_class) total_human_labels = sum([len(annots) for annots in consolidated_classes.values()]) # Count the number of machine annotations in each class each iteration.
!aws s3 cp {S3_OUTPUT + '/activelearning'} ic_output_data/activelearning --recursive --quiet auto_classes = defaultdict(list) auto_times = {} auto_cost_times = [] for auto_fname in glob.glob('ic_output_data/activelearning/**', recursive=True): if auto_fname.endswith('auto_annotator_output.txt'): iter_id = int(auto_fname.split('/')[-3]) with open(auto_fname, 'r') as f: annots = [' '.join(l.split()[1:]) for l in f.readlines()] for annot in annots: annot = json.loads(annot) time_str = annot['category-metadata']['creation-date'] auto_time = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f') auto_class = annot['category-metadata']['class-name'] auto_classes[iter_id].append(auto_class) if iter_id in auto_times: auto_times[iter_id] = max(auto_times[iter_id], auto_time) else: auto_times[iter_id] = auto_time auto_cost_times.append(auto_time) total_auto_labels = sum([len(annots) for annots in auto_classes.values()]) n_iters = max(len(auto_times), len(consolidation_times)) def get_training_job_data(training_job_name): logclient = boto3.client('logs') log_group_name = '/aws/sagemaker/TrainingJobs' log_stream_name = logclient.describe_log_streams(logGroupName=log_group_name, logStreamNamePrefix=training_job_name)['logStreams'][0]['logStreamName'] train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True ) events = train_log['events'] next_token = train_log['nextForwardToken'] while True: train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True, nextToken=next_token ) if train_log['nextForwardToken'] == next_token: break events = events + train_log['events'] errors = [] for event in events: msg = event['message'] if 'Final configuration' in msg: num_samples = int(msg.split('num_training_samples\': u\'')[1].split('\'')[0]) elif 'Validation-accuracy' in msg: errors.append(float(msg.split('Validation-accuracy=')[1])) errors = 1 - np.array(errors) return num_samples, errors training_data = !aws s3 ls {S3_OUTPUT + '/training/'} --recursive training_sizes = [] training_errors = [] training_iters = [] for line in training_data: if line.split('/')[-1] == 'model.tar.gz': training_job_name = line.split('/')[-3] n_samples, errors = get_training_job_data(training_job_name) training_sizes.append(n_samples) training_errors.append(errors) training_iters.append(int(line.split('/')[-5])) plt.figure(facecolor='white', figsize=(14, 4), dpi=100) ax = plt.subplot(131) plt.title('Label counts ({} human, {} auto)'.format( total_human_labels, total_auto_labels)) cmap = plt.get_cmap('coolwarm') for iter_id in consolidated_classes.keys(): bottom = 0 class_counter = Counter(consolidated_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): if iter_id == 1: plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom, label=cname, color=cmap(cname_id / float(len(CLASS_LIST)-1))) else: plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] for iter_id in auto_classes.keys(): bottom = 0 class_counter = Counter(auto_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): plt.bar(iter_id + .4, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] tick_labels_human = ['Iter {}, human'.format(iter_id + 1) for iter_id in range(n_iters)] tick_labels_auto = ['Iter {}, auto'.format(iter_id + 1) for iter_id in range(n_iters)] 
tick_locations_human = np.arange(n_iters) + 1 tick_locations_auto = tick_locations_human + .4 tick_labels = np.concatenate([[tick_labels_human[idx], tick_labels_auto[idx]] for idx in range(n_iters)]) tick_locations = np.concatenate([[tick_locations_human[idx], tick_locations_auto[idx]] for idx in range(n_iters)]) plt.xticks(tick_locations, tick_labels, rotation=90) plt.legend() plt.ylabel('Count') ax = plt.subplot(132) total_human = 0 total_auto = 0 for iter_id in range(1, n_iters + 1): cost_human = len(consolidated_classes[iter_id]) * HUMAN_PRICE cost_auto = len(auto_classes[iter_id]) * AUTO_PRICE total_human += cost_human total_auto += cost_auto plt.bar(iter_id, cost_human, width=.8, color='gray', hatch='/', edgecolor='k', label='human' if iter_id==1 else None) plt.bar(iter_id, cost_auto, bottom=cost_human, width=.8, color='gray', edgecolor='k', label='auto' if iter_id==1 else None) plt.title('Annotation costs (\${:.2f} human, \${:.2f} auto)'.format( total_human, total_auto)) plt.xlabel('Iter') plt.ylabel('Cost in dollars') plt.legend() if len(training_sizes) > 0: plt.subplot(133) plt.title('Active learning training curves') plt.grid(True) cmap = plt.get_cmap('coolwarm') n_all = len(training_sizes) for iter_id_id, (iter_id, size, errs) in enumerate(zip(training_iters, training_sizes, training_errors)): plt.plot(errs, label='Iter {}, auto'.format(iter_id + 1), color=cmap(iter_id_id / max(1, (n_all-1)))) plt.legend() plt.xscale('log') plt.xlabel('Training epoch') plt.ylabel('Validation error') ###Output _____no_output_____ ###Markdown Analyze Ground Truth labeling job results**This section should take about 20min to complete.**After the job finishes running (**make sure `sagemaker_client.describe_labeling_job` shows the job is complete!**), it is time to analyze the results. The plots in the [Monitor job progress](Monitor-job-progress) section form part of the analysis. In this section, we will gain additional insights into the results, all contained in the `output manifest`. You can find the location of the output manifest under `AWS Console > SageMaker > Labeling Jobs > [name of your job]`. We will obtain it programmatically in the cell below. Postprocess the output manifestNow that the job is complete, we will download the output manifest and postprocess it to form four arrays:* `img_uris` contains the S3 URIs of all the images that Ground Truth annotated.* `groundtruth_labels` contains Ground Truth's labels for each image in `img_uris`.* `confidences` contains the confidence of each label in `groundtruth_labels`.* `human` is a flag array that contains 1 at indices corresponding to images annotated by human annotators, and 0 at indices corresponding to images annotated by Ground Truth's automated data labeling. ###Code # Load the output manifest's annotations. OUTPUT_MANIFEST = 's3://{}/{}/output/{}/manifests/output/output.manifest'.format(BUCKET, EXP_NAME, job_name) !aws s3 cp {OUTPUT_MANIFEST} 'output.manifest' with open('output.manifest', 'r') as f: output = [json.loads(line.strip()) for line in f.readlines()] # Create data arrays. img_uris = [None] * len(output) confidences = np.zeros(len(output)) groundtruth_labels = [None] * len(output) human = np.zeros(len(output)) # Find the job name the manifest corresponds to. keys = list(output[0].keys()) metakey = keys[np.where([('-metadata' in k) for k in keys])[0][0]] jobname = metakey[:-9] # Extract the data.
for datum_id, datum in enumerate(output): img_uris[datum_id] = datum['source-ref'] groundtruth_labels[datum_id] = str(datum[metakey]['class-name']) confidences[datum_id] = datum[metakey]['confidence'] human[datum_id] = int(datum[metakey]['human-annotated'] == 'yes') groundtruth_labels = np.array(groundtruth_labels) ###Output _____no_output_____ ###Markdown Plot class histogramsNow, let's plot the class histograms. The next cell should produce three subplots:* The left subplot shows the number of images annotated as belonging to each visual category. The categories will be sorted from the most to the least numerous. Each bar is divided into a 'human' and 'machine' part which shows how many images were annotated as a given category by human annotators and by the automated data labeling mechanism.* The middle subplot is the same as the left, except the y-axis is in log scale. This helps visualize unbalanced datasets where some categories contain orders of magnitude more images than others.* The right subplot shows the average confidence of images in each category, separately for human and auto-annotated images. ###Code # Compute the number of annotations in each class. n_classes = len(set(groundtruth_labels)) sorted_clnames, class_sizes = zip(*Counter(groundtruth_labels).most_common(n_classes)) # Find ids of human-annotated images. human_sizes = [human[groundtruth_labels == clname].sum() for clname in sorted_clnames] class_sizes = np.array(class_sizes) human_sizes = np.array(human_sizes) # Compute the average annotation confidence per class. human_confidences = np.array([confidences[np.logical_and(groundtruth_labels == clname, human)] for clname in sorted_clnames]) machine_confidences = [confidences[np.logical_and(groundtruth_labels == clname, 1-human)] for clname in sorted_clnames] # If there are no images annotated as a specific class, set the average class confidence to 0. for class_id in range(n_classes): if human_confidences[class_id].size == 0: human_confidences[class_id] = np.array([0]) if machine_confidences[class_id].size == 0: machine_confidences[class_id] = np.array([0]) plt.figure(figsize=(9, 3), facecolor='white', dpi=100) plt.subplot(1, 3, 1) plt.title('Annotation histogram') plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human') plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes, color='gray', edgecolor='k', label='machine') plt.xticks(range(n_classes), sorted_clnames, rotation=90) plt.ylabel('Annotation Count') plt.legend() plt.subplot(1, 3, 2) plt.title('Annotation histogram (logscale)') plt.bar(range(n_classes), human_sizes, color='gray', hatch='/', edgecolor='k', label='human') plt.bar(range(n_classes), class_sizes - human_sizes, bottom=human_sizes, color='gray', edgecolor='k', label='machine') plt.xticks(range(n_classes), sorted_clnames, rotation=90) plt.yscale('log') plt.subplot(1, 3, 3) plt.title('Mean confidences') plt.bar(np.arange(n_classes), [conf.mean() for conf in human_confidences], color='gray', hatch='/', edgecolor='k', width=.4) plt.bar(np.arange(n_classes) + .4, [conf.mean() for conf in machine_confidences], color='gray', edgecolor='k', width=.4) plt.xticks(range(n_classes), sorted_clnames, rotation=90); ###Output _____no_output_____ ###Markdown Plot annotated imagesIn any data science task, it is crucial to plot and inspect the results to check they make sense. In order to do this, we will 1. Download the input images that Ground Truth annotated.2.
Split them by annotated category and whether the annotation was done by a human or the auto-labeling mechanism.3. Plot images in each category and human/auto-annotated class.We will download the input images to `LOCAL_IMG_DIR`, which you can choose in the next cell. Note that if this directory already contains images with the same filenames as your Ground Truth input images, we will not re-download the images.If your dataset is large and you do not wish to download and plot **all** the images, simply set `DATASET_SIZE` to a small number. We will pick a random subset of your data for plotting. ###Code LOCAL_IMG_DIR = '<< choose a local directory name to download the images to >>' # Replace with the name of a local directory to store images. assert LOCAL_IMG_DIR != '<< choose a local directory name to download the images to >>', 'Please provide a local directory name' DATASET_SIZE = len(img_uris) # Change this to a reasonable number if your dataset is much larger than 10K images. subset_ids = np.random.choice(range(len(img_uris)), DATASET_SIZE, replace=False) img_uris = [img_uris[idx] for idx in subset_ids] groundtruth_labels = groundtruth_labels[subset_ids] confidences = confidences[subset_ids] human = human[subset_ids] img_fnames = [None] * len(img_uris) for img_uri_id, img_uri in enumerate(img_uris): target_fname = os.path.join( LOCAL_IMG_DIR, img_uri.split('/')[-1]) if not os.path.isfile(target_fname): !aws s3 cp {img_uri} {target_fname} img_fnames[img_uri_id] = target_fname ###Output _____no_output_____ ###Markdown Plot a small output sampleThe following cell will create two figures. The first plots `N_SHOW` images in each category, as annotated by humans. The second plots `N_SHOW` images in each category, as annotated by the auto-labeling mechanism. If any category contains fewer than `N_SHOW` images, that row will not be displayed. By default, `N_SHOW = 10`, but feel free to change this to any other small number. ###Code N_SHOW = 10 plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=60) for class_name_id, class_name in enumerate(sorted_clnames): class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] try: show_ids = np.random.choice(class_ids, N_SHOW, replace=False) except ValueError: print('Not enough human annotations to show for class: {}'.format(class_name)) continue for show_id_id, show_id in enumerate(show_ids): plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id)) plt.title('Human Label: ' + class_name) plt.imshow(imageio.imread(img_fnames[show_id])) plt.axis('off') plt.tight_layout() plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor='white', dpi=100) for class_name_id, class_name in enumerate(sorted_clnames): class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] try: show_ids = np.random.choice(class_ids, N_SHOW, replace=False) except ValueError: print('Not enough machine annotations to show for class: {}'.format(class_name)) continue for show_id_id, show_id in enumerate(show_ids): plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id)) plt.title('Auto Label: ' + class_name) plt.imshow(imageio.imread(img_fnames[show_id])) plt.axis('off') plt.tight_layout() ###Output _____no_output_____ ###Markdown Plot the full resultsFinally, we plot all the results to a large pdf file. The pdf (called `ground_truth.pdf`) will display 100 images per page. Each page will contain images belonging to the same category, and annotated either by human annotators or by the auto-labeling mechanism.
You can use this pdf to investigate exactly which images were annotated as which class at a glance.This might take a while, and the resulting pdf might be very large. For a dataset of 1K images, the process takes only a minute and creates a roughly 10 MB pdf. You can set `N_SHOW_PER_CLASS` to a small number if you want to limit the max number of examples shown in each category. ###Code N_SHOW_PER_CLASS = np.inf plt.figure(figsize=(10, 10), facecolor='white', dpi=100) with PdfPages('ground_truth.pdf') as pdf: for class_name in sorted_clnames: # Plot images annotated as class_name by humans. plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by humans'.format(class_name), fontsize=20) plt.axis('off') class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] for img_id_id, img_id in enumerate(class_ids): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print('Plotting human annotations of {}, {}/{}...'.format( class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS))) plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') pdf.savefig() # Plot images annotated as class_name by machines. plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by machines'.format(class_name), fontsize=20) plt.axis('off') class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] for img_id_id, img_id in enumerate(class_ids): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print('Plotting machine annotations of {}, {}/{}...'.format( class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS))) plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') pdf.savefig() plt.clf() ###Output _____no_output_____ ###Markdown Compare Ground Truth results to known, pre-labeled data**This section should take about 5 minutes to complete.**Sometimes (for example, when benchmarking the system) we have an alternative set of data labels available. For example, the Open Images data has already been carefully annotated by a professional annotation workforce.This allows us to perform additional analysis that compares Ground Truth labels to the known, pre-labeled data.When doing so, it is important to bear in mind that any image labels created by humans will most likely not be 100% accurate. For this reason, it is better to think of labeling accuracy as "adherence to a particular standard / set of labels" rather than "how good (in absolute terms) the Ground Truth labels are." Compute accuracyIn this cell, we will calculate the accuracy of Ground Truth labels with respect to the standard labels. In [Prepare the data](Prepare-the-data), we created the `ims` dictionary that specifies which image belongs to each category.We will convert it to an array `standard_labels` such that `standard_labels[i]` contains the label of the `i-th` image, and should ideally correspond to `groundtruth_labels[i]`.This will allow us to plot confusion matrices to assess how well the Ground Truth labels adhere to the standard labels. We plot a confusion matrix for the total dataset, and separate matrices for human annotations and auto-annotations.
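As a quick reference for the error numbers reported below: with `standard_labels` and `groundtruth_labels` as equal-length arrays, the error rate is simply the fraction of mismatched entries. A toy sketch of the exact formula used in the next cell (the arrays here are made up purely for illustration):

###Code
# Toy illustration of the error computation used below.
toy_standard = np.array(['Fruit', 'Tiger', 'Cheetah', 'Fruit'])
toy_predicted = np.array(['Fruit', 'Tiger', 'Fruit', 'Fruit'])
# np.mean of a boolean array is the fraction of True entries.
toy_err = 100 - np.mean(toy_standard == toy_predicted) * 100
print('Error: {:.2f}%'.format(toy_err))  # 25.00% -- 1 of 4 labels disagree
###Output
_____no_output_____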
###Code def plot_confusion_matrix(cm, classes, title='Confusion matrix', normalize=False, cmap=plt.cm.Blues): if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=90) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.ylabel('True label') plt.xlabel('Predicted label') plt.tight_layout() # Convert the 'ims' dictionary (which maps class names to images) to a list of image classes. standard_labels = [] for img_uri in img_uris: img_uri = img_uri.split('/')[-1].split('.')[0] standard_label = [cname for cname, imgs_in_cname in ims.items() if img_uri in imgs_in_cname][0] standard_labels.append(standard_label) standard_labels = np.array(standard_labels) # Plot a confusion matrix for the full dataset. plt.figure(facecolor='white', figsize=(12, 4), dpi=100) plt.subplot(131) mean_err = 100 - np.mean(standard_labels == groundtruth_labels) * 100 cnf_matrix = confusion_matrix(standard_labels, groundtruth_labels) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()), title='Full annotation set error {:.2f}%'.format( mean_err), normalize=False) # Plot a confusion matrix for human-annotated Ground Truth labels. plt.subplot(132) mean_err = 100 - np.mean(standard_labels[human==1.] == groundtruth_labels[human==1.]) * 100 cnf_matrix = confusion_matrix(standard_labels[human==1.], groundtruth_labels[human==1.]) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()), title='Human annotation set (size {}) error {:.2f}%'.format( int(sum(human)), mean_err), normalize=False) # Plot a confusion matrix for auto-annotated Ground Truth labels. if sum(human==0.) > 0: plt.subplot(133) mean_err = 100 - np.mean(standard_labels[human==0.] == groundtruth_labels[human==0.]) * 100 cnf_matrix = confusion_matrix(standard_labels[human==0.], groundtruth_labels[human==0.]) np.set_printoptions(precision=2) plot_confusion_matrix(cnf_matrix, classes=sorted(ims.keys()), title='Auto-annotation set (size {}) error {:.2f}%'.format( int(len(human) - sum(human)), mean_err), normalize=False) ###Output _____no_output_____ ###Markdown Plot correct and incorrect annotationsThis cell repeats the plot from Plot the full results. However, it sorts the predictions into correct and incorrect, marking correct predictions with a green border and incorrect ones with a red border; the short sketch below also prints the standard label for each incorrect prediction.
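Before generating the pdf, it can be handy to list the disagreements directly, together with the standard label each image should have received. A minimal sketch, assuming the arrays from the previous cells are still in scope:

###Code
# Print every image where Ground Truth disagrees with the standard label.
mismatch_ids = np.where(standard_labels != groundtruth_labels)[0]
for img_id in mismatch_ids:
    print('{}: standard={}, ground truth={}, human-annotated={}'.format(
        img_uris[img_id], standard_labels[img_id],
        groundtruth_labels[img_id], bool(human[img_id])))
###Output
_____no_output_____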
###Code N_SHOW_PER_CLASS = np.inf plt.figure(figsize=(10, 10), facecolor='white', dpi=100) with PdfPages('ground_truth_benchmark.pdf') as pdf: for class_name in sorted_clnames: human_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] auto_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1-human))[0] for class_ids_id, class_ids in enumerate([human_ids, auto_ids]): plt.clf() plt.text(0.1, 0.5, s='Images annotated as {} by {}'.format(class_name, 'humans' if class_ids_id == 0 else 'machines'), fontsize=20) plt.axis('off') good_ids = class_ids[np.where(standard_labels[class_ids] == groundtruth_labels[class_ids])[0]] bad_ids = class_ids[np.where(standard_labels[class_ids] != groundtruth_labels[class_ids])[0]] for img_id_id, img_id in enumerate(np.concatenate([good_ids, bad_ids])): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print('Plotting annotations of {}, {}/{}...'.format( class_name, img_id_id, min(len(class_ids), N_SHOW_PER_CLASS))) ax = plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect='auto') plt.axis('off') if img_id_id < len(good_ids): # Draw a green border around the image. rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='green', fill=False, transform=ax.transAxes) else: # Draw a red border around the image. rec = matplotlib.patches.Rectangle((0, 0), 1, 1, lw=10, edgecolor='red', fill=False, transform=ax.transAxes) ax.add_patch(rec) pdf.savefig() plt.clf() ###Output _____no_output_____ ###Markdown Train an image classifier using Ground Truth labelsAt this stage, we have fully labeled our dataset and we can train a machine learning model to classify images based on the categories we previously defined. We'll do so using the **augmented manifest** output of our labeling job - no additional file translation or manipulation required! For a more complete description of the augmented manifest, see our other [example notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/ground_truth_labeling_jobs/object_detection_augmented_manifest_training/object_detection_augmented_manifest_training.ipynb).**NOTE:** Training neural networks to high accuracy often requires a careful choice of hyperparameters. In this case, we hand-picked hyperparameters that work reasonably well for this dataset. The neural net should reach an accuracy of about **60% if you're using 100 datapoints, and over 95% if you're using 1000 datapoints**. To train neural networks on novel data, consider using [SageMaker's model tuning / hyperparameter optimization algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html).First, we'll split our augmented manifest into a training set and a validation set using an 80/20 split. ###Code with open('output.manifest', 'r') as f: output = [json.loads(line) for line in f.readlines()] # Shuffle output in place.
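# (Optional) For a reproducible train/validation split, you could seed the RNG
# first, e.g. np.random.seed(0); we keep the split random here, as in the demo.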
np.random.shuffle(output) dataset_size = len(output) train_test_split_index = round(dataset_size*0.8) train_data = output[:train_test_split_index] validation_data = output[train_test_split_index:] num_training_samples = 0 with open('train.manifest', 'w') as f: for line in train_data: f.write(json.dumps(line)) f.write('\n') num_training_samples += 1 with open('validation.manifest', 'w') as f: for line in validation_data: f.write(json.dumps(line)) f.write('\n') ###Output _____no_output_____ ###Markdown Next, we'll upload these manifest files to the previously defined S3 bucket so that they can be used in the training job. ###Code s3.upload_file('train.manifest',BUCKET, EXP_NAME + '/train.manifest') s3.upload_file('validation.manifest',BUCKET, EXP_NAME + '/validation.manifest') # Create unique job name nn_job_name_prefix = 'groundtruth-augmented-manifest-demo' timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) nn_job_name = nn_job_name_prefix + timestamp training_image = sagemaker.amazon.amazon_estimator.get_image_uri(boto3.Session().region_name, 'image-classification', repo_version='latest') training_params = \ { "AlgorithmSpecification": { "TrainingImage": training_image, "TrainingInputMode": "Pipe" }, "RoleArn": role, "OutputDataConfig": { "S3OutputPath": 's3://{}/{}/output/'.format(BUCKET, EXP_NAME) }, "ResourceConfig": { "InstanceCount": 1, "InstanceType": "ml.p3.2xlarge", "VolumeSizeInGB": 50 }, "TrainingJobName": nn_job_name, "HyperParameters": { "epochs": "30", "image_shape": "3,224,224", "learning_rate": "0.01", "lr_scheduler_step": "10,20", "mini_batch_size": "32", "num_classes": str(num_classes), "num_layers": "18", "num_training_samples": str(num_training_samples), "resize": "224", "use_pretrained_model": "1" }, "StoppingCondition": { "MaxRuntimeInSeconds": 86400 }, "InputDataConfig": [ { "ChannelName": "train", "DataSource": { "S3DataSource": { "S3DataType": "AugmentedManifestFile", "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'train.manifest'), "S3DataDistributionType": "FullyReplicated", "AttributeNames": ["source-ref","category"] } }, "ContentType": "application/x-recordio", "RecordWrapperType": "RecordIO", "CompressionType": "None" }, { "ChannelName": "validation", "DataSource": { "S3DataSource": { "S3DataType": "AugmentedManifestFile", "S3Uri": 's3://{}/{}/{}'.format(BUCKET, EXP_NAME, 'validation.manifest'), "S3DataDistributionType": "FullyReplicated", "AttributeNames": ["source-ref","category"] } }, "ContentType": "application/x-recordio", "RecordWrapperType": "RecordIO", "CompressionType": "None" } ] } ###Output _____no_output_____ ###Markdown Now we create the SageMaker training job. ###Code sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_training_job(**training_params) # Confirm that the training job has started print('Training job started') while True: response = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name) status = response['TrainingJobStatus'] if status == 'Completed': print("Training job ended with status: " + status) break if status == 'Failed': message = response['FailureReason'] print('Training failed with the following error: {}'.format(message)) raise Exception('Training job failed') time.sleep(30) ###Output _____no_output_____ ###Markdown Deploy the Model Now that we've fully labeled our dataset and have a trained model, we want to use the model to perform inference.Image classification only supports encoded .jpg and .png image formats as inference input for now.
The output is the probability values for all classes encoded in JSON format, or in JSON Lines format for batch transform.This section involves several steps: Create Model - Create a model from the training output. Batch Transform - Create a transform job to perform batch inference. Host the model for realtime inference - Create an inference endpoint and perform realtime inference. Create Model ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) model_name="groundtruth-demo-ic-model" + timestamp print(model_name) info = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name) model_data = info['ModelArtifacts']['S3ModelArtifacts'] print(model_data) primary_container = { 'Image': training_image, 'ModelDataUrl': model_data, } create_model_response = sagemaker_client.create_model( ModelName = model_name, ExecutionRoleArn = role, PrimaryContainer = primary_container) print(create_model_response['ModelArn']) ###Output _____no_output_____ ###Markdown Batch TransformWe now create a SageMaker Batch Transform job using the model created above to perform batch prediction. Download Test DataFirst, let's copy a few test images (two per class) that were held out from the training and validation data. ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) batch_job_name = "image-classification-model" + timestamp batch_input = 's3://{}/{}/test/'.format(BUCKET, EXP_NAME) batch_output = 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name) # Copy two images from each class, unseen by the neural net, to our S3 bucket. test_images = [] for class_id in ['/m/04szw', '/m/02xwb', '/m/0cd4d', '/m/07dm6', '/m/0152hh']: test_images.extend([label[0] + '.jpg' for label in all_labels if (label[2] == class_id and label[3] == '1')][-2:]) !aws s3 rm $batch_input --recursive for test_img in test_images: !aws s3 cp s3://open-images-dataset/test/{test_img} {batch_input} request = \ { "TransformJobName": batch_job_name, "ModelName": model_name, "MaxConcurrentTransforms": 16, "MaxPayloadInMB": 6, "BatchStrategy": "SingleRecord", "TransformOutput": { "S3OutputPath": batch_output }, "TransformInput": { "DataSource": { "S3DataSource": { "S3DataType": "S3Prefix", "S3Uri": batch_input } }, "ContentType": "application/x-image", "SplitType": "None", "CompressionType": "None" }, "TransformResources": { "InstanceType": "ml.p2.xlarge", "InstanceCount": 1 } } print('Transform job name: {}'.format(batch_job_name)) sagemaker_client = boto3.client('sagemaker') sagemaker_client.create_transform_job(**request) print("Created Transform job with name: ", batch_job_name) while True: response = sagemaker_client.describe_transform_job(TransformJobName=batch_job_name) status = response['TransformJobStatus'] if status == 'Completed': print("Transform job ended with status: " + status) break if status == 'Failed': message = response['FailureReason'] print('Transform failed with the following error: {}'.format(message)) raise Exception('Transform job failed') time.sleep(30) ###Output _____no_output_____ ###Markdown After the job completes, let's inspect the prediction results. ###Code def get_label(out_fname): !aws s3 cp {out_fname} .
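    # Batch transform names each output object '<input filename>.out';
    # the '.out' suffix is stripped below to recover the input filename.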
print(out_fname) with open(out_fname.split('/')[-1]) as f: data = json.load(f) index = np.argmax(data['prediction']) probability = data['prediction'][index] print("Result: label - " + CLASS_LIST[index] + ", probability - " + str(probability)) input_fname = out_fname.split('/')[-1][:-4] return CLASS_LIST[index], probability, input_fname # Show prediction results. !mkdir -p test_inputs !rm -f test_inputs/* plt.figure(facecolor='white', figsize=(7, 15), dpi=100) outputs = !aws s3 ls {batch_output} outputs = [get_label(batch_output + prefix.split()[-1]) for prefix in outputs] outputs.sort(key=lambda pred: pred[1], reverse=True) for fname_id, (pred_cname, pred_conf, pred_fname) in enumerate(outputs): !aws s3 cp {batch_input}{pred_fname} test_inputs/{pred_fname} plt.subplot(5, 2, fname_id+1) img = imageio.imread('test_inputs/{}'.format(pred_fname)) plt.imshow(img) plt.axis('off') plt.title('{}\nconfidence={:.2f}'.format(pred_cname, pred_conf)) if RUN_FULL_AL_DEMO: warning = '' else: warning = ('\nNOTE: In this small demo we only used 80 images to train the neural network.\n' 'The predictions will be far from perfect! Set RUN_FULL_AL_DEMO=True to see properly trained results.') plt.suptitle('Predictions sorted by confidence.{}'.format(warning)) ###Output _____no_output_____ ###Markdown Realtime InferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps: Create endpoint configuration - Create a configuration defining an endpoint. Create endpoint - Use the configuration to create an inference endpoint. Perform inference - Perform inference on some input data using the endpoint. Clean up - Delete the endpoint and model Create Endpoint Configuration ###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) endpoint_config_name = job_name + '-epc' + timestamp endpoint_config_response = sagemaker_client.create_endpoint_config( EndpointConfigName = endpoint_config_name, ProductionVariants=[{ 'InstanceType':'ml.m4.xlarge', 'InitialInstanceCount':1, 'ModelName':model_name, 'VariantName':'AllTraffic'}]) print('Endpoint configuration name: {}'.format(endpoint_config_name)) print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn'])) ###Output _____no_output_____ ###Markdown Create EndpointLastly, we create the endpoint that serves the model by specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes about 10 minutes to complete.
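As a preview of the decoding step below: the endpoint's response body is a JSON list of per-class probabilities, in the same order as `CLASS_LIST` (recall the note on zero-indexed class ordering in the label-category section). A toy sketch of the decoding, with a made-up probability vector:

###Code
# Illustrative only: a fake response body in the format the endpoint returns.
fake_body = '[0.02, 0.91, 0.03, 0.02, 0.02]'
result = json.loads(fake_body)
print('Predicted class: {}'.format(CLASS_LIST[np.argmax(result)]))  # -> 'Fruit' (index 1)
###Output
_____no_output_____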
###Code timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime()) endpoint_name = job_name + '-ep' + timestamp print('Endpoint name: {}'.format(endpoint_name)) endpoint_params = { 'EndpointName': endpoint_name, 'EndpointConfigName': endpoint_config_name, } endpoint_response = sagemaker_client.create_endpoint(**endpoint_params) print('EndpointArn = {}'.format(endpoint_response['EndpointArn'])) # get the status of the endpoint response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = response['EndpointStatus'] print('EndpointStatus = {}'.format(status)) # wait until the status has changed sagemaker_client.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name) # print the status of the endpoint endpoint_response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = endpoint_response['EndpointStatus'] print('Endpoint creation ended with EndpointStatus = {}'.format(status)) if status != 'InService': raise Exception('Endpoint creation failed.') with open('test_inputs/{}'.format(test_images[0]), 'rb') as f: payload = f.read() payload = bytearray(payload) client = boto3.client('sagemaker-runtime') response = client.invoke_endpoint(EndpointName=endpoint_name, ContentType='application/x-image', Body=payload) # `response` comes in a json format, let's unpack it. result = json.loads(response['Body'].read()) # The result outputs the probabilities for all classes. # Find the class with maximum probability and print the class name. print('Model prediction is: {}'.format(CLASS_LIST[np.argmax(result)])) ###Output _____no_output_____ ###Markdown Finally, let's clean up and delete this endpoint. ###Code sagemaker_client.delete_endpoint(EndpointName=endpoint_name) ###Output _____no_output_____
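###Markdown The clean-up step listed above also mentions the model. If you want to remove every resource this demo created, you can delete the endpoint configuration and the model as well; neither incurs compute charges once the endpoint is gone. An optional sketch using the same `sagemaker_client`:

###Code
# Optional clean-up: also remove the endpoint configuration and the model.
sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sagemaker_client.delete_model(ModelName=model_name)
###Output
_____no_output_____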
[Realtime Inference](Realtime-Inference) 1. [Create Endpoint Configuration](Create-Endpoint-Configuration) 2. [Create Endpoint](Create-Endpoint) 3. [Perform Inference](Perform-Inference)7. [Review](Review) IntroductionThis sample notebook takes you through an end-to-end workflow to demonstrate the functionality of SageMaker Ground Truth. We'll start with an unlabeled image data set, acquire labels for all the images using SageMaker Ground Truth, analyze the results of the labeling job, train an image classifier, host the resulting model, and, finally, use it to make predictions. Before you begin, we highly recommend you start a Ground Truth labeling job through the AWS Console first to familiarize yourself with the workflow. The AWS Console offers less flexibility than the API, but is simple to use. Cost and runtimeYou can run this demo in two modes:1. Set `RUN_FULL_AL_DEMO = True` in the next cell to label 1000 images. This should cost about \$100 given current [Ground Truth pricing scheme](https://aws.amazon.com/sagemaker/groundtruth/pricing/). In order to reduce the cost, we will use Ground Truth's auto-labeling feature. Auto-labeling uses computer vision to learn from human responses and automatically create labels for the easiest images at a cheap price. The total end-to-end runtime should be about 4h.1. Set `RUN_FULL_AL_DEMO = False` in the next cell to label only 100 images. This should cost about \$15. **Since Ground Truth's auto-labeling feature only kicks in for datasets of 1000 images or more, this cheaper version of the demo will not use it. Some of the analysis plots might look awkward, but you should still be able to see good results on the human-annotated 100 images.** PrerequisitesTo run this notebook, you can simply execute each cell one-by-one. To understand what's happening, you'll need:* An S3 bucket you can write to -- please provide its name in the following cell. The bucket must be in the same region as this SageMaker Notebook instance. You can also change the `EXP_NAME` to any valid S3 prefix. All the files related to this experiment will be stored in that prefix of your bucket.* The S3 bucket that you use for this demo must have a CORS policy attached. To learn more about this requirement, and how to attach a CORS policy to an S3 bucket, see [CORS Permission Requirement](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-cors-update.html).* Familiarity with Python and [numpy](http://www.numpy.org/).* Basic familiarity with [AWS S3](https://docs.aws.amazon.com/s3/index.html),* Basic understanding of [AWS Sagemaker](https://aws.amazon.com/sagemaker/),* Basic familiarity with [AWS Command Line Interface (CLI)](https://aws.amazon.com/cli/) -- set it up with credentials to access the AWS account you're running this notebook from. This should work out-of-the-box on SageMaker Jupyter Notebook instances.This notebook is only tested on a SageMaker notebook instance. The runtimes given are approximate, we used an `ml.m4.xlarge` instance in our tests. However, you can likely run it on a local instance by first executing the cell below on SageMaker, and then copying the `role` string to your local copy of the notebook.NOTE: This notebook will create/remove subdirectories in its working directory. We recommend to place this notebook in its own directory before running it. 
###Code %matplotlib inline %load_ext autoreload %autoreload 2 import os from collections import namedtuple from collections import defaultdict from collections import Counter import itertools import json import random import time import imageio import numpy as np import matplotlib import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from sklearn.metrics import confusion_matrix import boto3 import sagemaker from urllib.parse import urlparse BUCKET = "<< YOUR S3 BUCKET NAME >>" assert BUCKET != "<< YOUR S3 BUCKET NAME >>", "Please provide a custom S3 bucket name." EXP_NAME = "ground-truth-ic-demo" # Any valid S3 prefix. RUN_FULL_AL_DEMO = True # See 'Cost and Runtime' in the Markdown cell above! # Make sure the bucket is in the same region as this notebook. role = sagemaker.get_execution_role() region = boto3.session.Session().region_name s3 = boto3.client("s3") bucket_region = s3.head_bucket(Bucket=BUCKET)["ResponseMetadata"]["HTTPHeaders"][ "x-amz-bucket-region" ] assert ( bucket_region == region ), "You S3 bucket {} and this notebook need to be in the same region.".format(BUCKET) ###Output _____no_output_____ ###Markdown Run a Ground Truth labeling job**This section should take about 3h to complete.**We will first run a labeling job. This involves several steps: collecting the images we want labeled, specifying the possible label categories, creating instructions, and writing a labeling job specification. In addition, we highly recommend to run a (free) mock job using a private workforce before you submit any job to the public workforce. This notebook will explain how to do that as an optional step. Without using a private workforce, this section until completion of your labeling job should take about 3h. However, this may vary depending on the availability of the public annotation workforce. Prepare the dataWe will first download images and labels of a subset of the [Google Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html). These labels were [carefully verified](https://storage.googleapis.com/openimages/web/factsfigures.html). Later, will compare Ground Truth annotations to these labels. Our dataset will include images in the following categories:* Musical Instrument (500 images)* Fruit (370 images)* Cheetah (50 images)* Tiger (40 images)* Snowman (40 images)If you chose `RUN_FULL_AL_DEMO = False`, then we will choose a subset of 100 images in this dataset. This is a diverse dataset of interesting images, and should be fun for the human annotators to work with. You are free to ask the annotators to annotate any images you wish (as long as the images do not contain adult content; in which case, you must adjust the labeling job request this job produces, please check the Ground Truth documentation).We will copy these images to our local `BUCKET`, and will create the corresponding *input manifest*. The input manifest is a formatted list of the S3 locations of the images we want Ground Truth to annotate. We will upload this manifest to our S3 `BUCKET`. Disclosure regarding the Open Images Dataset V4:Open Images Dataset V4 is created by Google Inc. We have not modified the images or the accompanying annotations. You can obtain the images and the annotations [here](https://storage.googleapis.com/openimages/web/download.html). The annotations are licensed by Google Inc. under [CC BY 4.0](https://creativecommons.org/licenses/by/2.0/) license. 
The images are listed as having a [CC BY 2.0](https://creativecommons.org/licenses/by/2.0/) license. The following paper describes Open Images V4 in depth: from the data collection and annotation to detailed statistics about the data and evaluation of models trained on it.A. Kuznetsova, H. Rom, N. Alldrin, J. Uijlings, I. Krasin, J. Pont-Tuset, S. Kamali, S. Popov, M. Malloci, T. Duerig, and V. Ferrari.*The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale.* arXiv:1811.00982, 2018. ([link to PDF](https://arxiv.org/abs/1811.00982)) ###Code # Download and process the Open Images annotations. !wget https://storage.googleapis.com/openimages/2018_04/test/test-annotations-human-imagelabels-boxable.csv -O openimgs-annotations.csv with open("openimgs-annotations.csv", "r") as f: all_labels = [line.strip().split(",") for line in f.readlines()] # Extract image ids in each of our desired classes. ims = {} ims["Musical Instrument"] = [ label[0] for label in all_labels if (label[2] == "/m/04szw" and label[3] == "1") ][:500] ims["Fruit"] = [label[0] for label in all_labels if (label[2] == "/m/02xwb" and label[3] == "1")][ :371 ] ims["Fruit"].remove( "02a54f6864478101" ) # This image contains personal information, let's remove it from our dataset. ims["Cheetah"] = [label[0] for label in all_labels if (label[2] == "/m/0cd4d" and label[3] == "1")][ :50 ] ims["Tiger"] = [label[0] for label in all_labels if (label[2] == "/m/07dm6" and label[3] == "1")][ :40 ] ims["Snowman"] = [ label[0] for label in all_labels if (label[2] == "/m/0152hh" and label[3] == "1") ][:40] num_classes = len(ims) # If running the short version of the demo, reduce each class count 10 times. for key in ims.keys(): if RUN_FULL_AL_DEMO is False: ims[key] = set(ims[key][: int(len(ims[key]) / 10)]) else: ims[key] = set(ims[key]) # Copy the images to our local bucket. s3 = boto3.client("s3") for img_id, img in enumerate(itertools.chain.from_iterable(ims.values())): if (img_id + 1) % 10 == 0: print("Copying image {} / {}".format((img_id + 1), 1000)) copy_source = {"Bucket": "open-images-dataset", "Key": "test/{}.jpg".format(img)} s3.copy(copy_source, BUCKET, "{}/images/{}.jpg".format(EXP_NAME, img)) # Create and upload the input manifest. manifest_name = "input.manifest" with open(manifest_name, "w") as f: for img in itertools.chain.from_iterable(ims.values()): img_path = "s3://{}/{}/images/{}.jpg".format(BUCKET, EXP_NAME, img) f.write('{"source-ref": "' + img_path + '"}\n') s3.upload_file(manifest_name, BUCKET, EXP_NAME + "/" + manifest_name) ###Output _____no_output_____ ###Markdown After running the cell above, you should be able to go to `s3://BUCKET/EXP_NAME/images` in [S3 console](https://console.aws.amazon.com/s3/) and see a thousand images. We recommend you inspect the contents of these images! You can download them all to a local machine using the AWS CLI. Specify the categoriesTo run an image classification labeling job, you need to decide on a set of classes the annotators can choose from. In our case, this list is `["Musical Instrument", "Fruit", "Cheetah", "Tiger", "Snowman"]`. In your own job you can choose any list of up to 10 classes. We recommend the classes to be as unambiguous and concrete as possible. The categories should be mutually exclusive, with only one correct label per image. 
In addition, be careful to make the task as *objective* as possible, unless of course your intention is to obtain subjective labels.* Example good category lists: `["Human", "No Human"]`, `["Golden Retriever", "Labrador", "English Bulldog", "German Shepherd"]`, `["Car", "Train", "Ship", "Pedestrian"]`.* Example bad category lists: `["Prominent object", "Not prominent"]` (meaning unclear), `["Beautiful", "Ugly"]` (subjective), `["Dog", "Animal", "Car"]` (not mutually exclusive). To work with Ground Truth, this list needs to be converted to a .json file and uploaded to the S3 `BUCKET`.*Note: The ordering of the labels or classes in the template governs the class indices that you will see downstream in the output manifest (this numbering is zero-indexed). In other words, the class that appears second in the template will correspond to class "1" in the output. At the end of this demonstration, we will train a model and make predictions, and this class ordering is instrumental to interpreting the results.* ###Code CLASS_LIST = list(ims.keys()) print("Label space is {}".format(CLASS_LIST)) json_body = {"labels": [{"label": label} for label in CLASS_LIST]} with open("class_labels.json", "w") as f: json.dump(json_body, f) s3.upload_file("class_labels.json", BUCKET, EXP_NAME + "/class_labels.json") ###Output _____no_output_____ ###Markdown You should now see `class_labels.json` in `s3://BUCKET/EXP_NAME/`. Create the instruction templatePart or all of your images will be annotated by human annotators. It is **essential** to provide good instructions that help the annotators give you the annotations you want. Good instructions are:1. Concise. We recommend limiting verbal/textual instruction to two sentences, and focusing on clear visuals.2. Visual. In the case of image classification, we recommend providing one labeled image in each of the classes as part of the instruction.When used through the AWS Console, Ground Truth helps you create the instructions using a visual wizard. When using the API, you need to create an HTML template for your instructions. Below, we prepare a very simple but effective template and upload it to your S3 bucket.NOTE: If you use any images in your template (as we do), they need to be publicly accessible. You can enable public access to files in your S3 bucket through the S3 Console, as described in [S3 Documentation](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/set-object-permissions.html). Testing your instructionsIt is very easy to create broken instructions. This might cause your labeling job to fail. However, it might also cause your job to complete with meaningless results (when the annotators have no idea what to do, or the instructions are plain wrong). We *highly recommend* that you verify that your task is correct in two ways:1. The following cell creates and uploads a file called `instructions.template` to S3. It also creates `instructions.html` that you can open in a local browser window. Please do so and inspect the resulting web page; it should correspond to what you want your annotators to see (except the actual image to annotate will not be visible).2. Run your job in a private workforce, which is a way to run a mock labeling job. We describe how to do it in [Verify your task using a private team [OPTIONAL]](Verify-your-task-using-a-private-team-[OPTIONAL]). 
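Once the next cell has written `instructions.html` to disk, you can also preview it inline in this notebook. Below is a minimal sketch of that (it assumes IPython's display utilities; note that the crowd HTML custom elements may not render fully outside the actual worker console, so treat this as a rough preview only):

###Code
# Optional: render a rough inline preview of the generated instructions page.
# Run this AFTER the next cell has created instructions.html.
from IPython.display import IFrame

IFrame("instructions.html", width=900, height=600)

###Output
_____no_output_____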
###Code
img_examples = [
    "https://s3.amazonaws.com/open-images-dataset/test/{}".format(img_id)
    for img_id in [
        "0634825fc1dcc96b.jpg",
        "0415b6a36f3381ed.jpg",
        "8582cc08068e2d0f.jpg",
        "8728e9fa662a8921.jpg",
        "926d31e8cde9055e.jpg",
    ]
]


def make_template(test_template=False, save_fname="instructions.template"):
    template = r"""<script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
<crowd-form>
  <crowd-image-classifier
    name="crowd-image-classifier"
    src="{{{{ task.input.taskObject | grant_read_access }}}}"
    header="Dear Annotator, please tell me what you can see in the image. Thank you!"
    categories="{categories_str}"
  >
    <full-instructions header="Image classification instructions">
    </full-instructions>
    <short-instructions>
      <p>Dear Annotator, please tell me what you can see in the image. Thank you!</p>
      <p><img src="{}" style="max-width:100%">
      <br>Example "Musical Instrument".</p>
      <p><img src="{}" style="max-width:100%">
      <br>Example "Fruit".</p>
      <p><img src="{}" style="max-width:100%">
      <br>Example "Cheetah".</p>
      <p><img src="{}" style="max-width:100%">
      <br>Example "Tiger".</p>
      <p><img src="{}" style="max-width:100%">
      <br>Example "Snowman".</p>
    </short-instructions>
  </crowd-image-classifier>
</crowd-form>""".format(
        *img_examples,
        categories_str=str(CLASS_LIST)
        if test_template
        else "{{ task.input.labels | to_json | escape }}",
    )

    with open(save_fname, "w") as f:
        f.write(template)
    if test_template is False:
        print(template)


make_template(test_template=True, save_fname="instructions.html")
make_template(test_template=False, save_fname="instructions.template")
s3.upload_file("instructions.template", BUCKET, EXP_NAME + "/instructions.template")

###Output
_____no_output_____
###Markdown
You should now be able to find your template in `s3://BUCKET/EXP_NAME/instructions.template`.

Create a private team to test your task [OPTIONAL]
This step requires you to use the AWS Console. However, we **highly recommend** that you follow it, especially when creating your own task with a custom dataset, label set, and template. We will create a `private workteam` and add only one user (you) to it. Then, we will modify the Ground Truth API job request to send the task to that workforce. You will then be able to see your annotation job exactly as the public annotators would see it. You can even annotate the whole dataset yourself!

To create a private team:
1. Go to `AWS Console > Amazon SageMaker > Labeling workforces`
2. Click "Private" and then "Create private team".
3. Enter the desired name for your private workteam.
4. Enter your own email address in the "Email addresses" section.
5. Enter the name of your organization and a contact email to administrate the private workteam.
6. Click "Create Private Team".
7. The AWS Console should now return to `AWS Console > Amazon SageMaker > Labeling workforces`. Your newly created team should be visible under "Private teams". Next to it you will see an `ARN`, which is a long string that looks like `arn:aws:sagemaker:region-name-123456:workteam/private-crowd/team-name`. Copy this ARN into the cell below.
8. You should get an email from `[email protected]` that contains your workforce username and password.
9. In `AWS Console > Amazon SageMaker > Labeling workforces`, click on the URL in `Labeling portal sign-in URL`. Use the email/password combination from Step 8 to log in (you will be asked to create a new, non-default password).

That's it! This is your private worker's interface.
When we create a verification task in [Verify your task using a private team](Verify-your-task-using-a-private-team-[OPTIONAL]) below, your task should appear in this window. You can invite your colleagues to participate in the labeling job by clicking the "Invite new workers" button. The [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-private.html) has more details on the management of private workteams.

###Code
private_workteam_arn = "<< your private workteam ARN here >>"

###Output
_____no_output_____
###Markdown
Define pre-built lambda functions for use in the labeling job
Before we submit the request, we need to define the ARNs for four key components of the labeling job: 1) the workteam, 2) the annotation consolidation Lambda function, 3) the pre-labeling task Lambda function, and 4) the machine learning algorithm to perform auto-annotation. These functions are defined by strings with region names and AWS service account numbers, so we will define a mapping below that will enable you to run this notebook in any of our supported regions. See the official documentation for the available ARNs:
* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-management-public.html) for a discussion of the workteam ARN definition. There is only one valid selection if you choose to use the public workforce; if you elect to use a private workteam, you should check the corresponding ARN for the workteam.
* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_HumanTaskConfig.html#SageMaker-Type-HumanTaskConfig-PreHumanTaskLambdaArn) for available pre-human ARNs for other workflows.
* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_AnnotationConsolidationConfig.html#SageMaker-Type-AnnotationConsolidationConfig-AnnotationConsolidationLambdaArn) for available annotation consolidation ARNs for other workflows.
* [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_LabelingJobAlgorithmsConfig.html#SageMaker-Type-LabelingJobAlgorithmsConfig-LabelingJobAlgorithmSpecificationArn) for available auto-labeling ARNs for other workflows.

###Code
# Specify ARNs for resources needed to run an image classification job.
ac_arn_map = {
    "us-west-2": "081040173940",
    "us-east-1": "432418664414",
    "us-east-2": "266458841044",
    "eu-west-1": "568282634449",
    "ap-northeast-1": "477331159723",
}

prehuman_arn = "arn:aws:lambda:{}:{}:function:PRE-ImageMultiClass".format(
    region, ac_arn_map[region]
)
acs_arn = "arn:aws:lambda:{}:{}:function:ACS-ImageMultiClass".format(region, ac_arn_map[region])
labeling_algorithm_specification_arn = "arn:aws:sagemaker:{}:027400017018:labeling-job-algorithm-specification/image-classification".format(
    region
)
workteam_arn = "arn:aws:sagemaker:{}:394669845002:workteam/public-crowd/default".format(region)

###Output
_____no_output_____
###Markdown
Submit the Ground Truth job request
The API starts a Ground Truth job by submitting a request. The request contains the full configuration of the annotation task, and allows you to modify the fine details of the job that are fixed to default values when you use the AWS Console. The parameters that make up the request are described in more detail in the [SageMaker Ground Truth documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateLabelingJob.html). After you submit the request, you should be able to see the job in your AWS Console, at `Amazon SageMaker > Labeling Jobs`. You can track the progress of the job there.
This job will take several hours to complete. If your job is larger (say 100,000 images), the speed and cost benefit of auto-labeling should be larger.

Verify your task using a private team [OPTIONAL]
If you chose to follow the steps in [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]), then you can first verify that your task runs as expected. To do this:
1. Set VERIFY_USING_PRIVATE_WORKFORCE to True in the cell below.
2. Run the next two cells. This will define the task and submit it to the private workforce (to you).
3. After a few minutes, you should be able to see your task in your private workforce interface [Create a private team](Create-a-private-team-to-test-your-task-[OPTIONAL]). Please verify that the task appears as you want it to appear.
4. If everything is in order, change `VERIFY_USING_PRIVATE_WORKFORCE` to `False` and rerun the cell below to start the real annotation task!

###Code
VERIFY_USING_PRIVATE_WORKFORCE = False
USE_AUTO_LABELING = True

task_description = "What do you see: a {}?".format(" a ".join(CLASS_LIST))
task_keywords = ["image", "classification", "humans"]
task_title = task_description
job_name = "ground-truth-demo-" + str(int(time.time()))

human_task_config = {
    "AnnotationConsolidationConfig": {
        "AnnotationConsolidationLambdaArn": acs_arn,
    },
    "PreHumanTaskLambdaArn": prehuman_arn,
    "MaxConcurrentTaskCount": 200,  # 200 images will be sent at a time to the workteam.
    "NumberOfHumanWorkersPerDataObject": 3,  # 3 separate workers will be required to label each image.
    "TaskAvailabilityLifetimeInSeconds": 21600,  # Your workteam has 6 hours to complete all pending tasks.
    "TaskDescription": task_description,
    "TaskKeywords": task_keywords,
    "TaskTimeLimitInSeconds": 300,  # Each image must be labeled within 5 minutes.
    "TaskTitle": task_title,
    "UiConfig": {
        "UiTemplateS3Uri": "s3://{}/{}/instructions.template".format(BUCKET, EXP_NAME),
    },
}

if not VERIFY_USING_PRIVATE_WORKFORCE:
    human_task_config["PublicWorkforceTaskPrice"] = {
        "AmountInUsd": {
            "Dollars": 0,
            "Cents": 1,
            "TenthFractionsOfACent": 2,
        }
    }
    human_task_config["WorkteamArn"] = workteam_arn
else:
    human_task_config["WorkteamArn"] = private_workteam_arn

ground_truth_request = {
    "InputConfig": {
        "DataSource": {
            "S3DataSource": {
                "ManifestS3Uri": "s3://{}/{}/{}".format(BUCKET, EXP_NAME, manifest_name),
            }
        },
        "DataAttributes": {
            "ContentClassifiers": ["FreeOfPersonallyIdentifiableInformation", "FreeOfAdultContent"]
        },
    },
    "OutputConfig": {
        "S3OutputPath": "s3://{}/{}/output/".format(BUCKET, EXP_NAME),
    },
    "HumanTaskConfig": human_task_config,
    "LabelingJobName": job_name,
    "RoleArn": role,
    "LabelAttributeName": "category",
    "LabelCategoryConfigS3Uri": "s3://{}/{}/class_labels.json".format(BUCKET, EXP_NAME),
}

if USE_AUTO_LABELING and RUN_FULL_AL_DEMO:
    ground_truth_request["LabelingJobAlgorithmsConfig"] = {
        "LabelingJobAlgorithmSpecificationArn": labeling_algorithm_specification_arn
    }

sagemaker_client = boto3.client("sagemaker")
sagemaker_client.create_labeling_job(**ground_truth_request)

###Output
_____no_output_____
###Markdown
Monitor job progress
A Ground Truth job can take a few hours to complete (if your dataset is larger than 10000 images, it can take much longer than that!). One way to monitor the job's progress is through the AWS Console. In this notebook, we will use the Ground Truth output files and CloudWatch logs in order to monitor the progress. You can re-evaluate the next two cells repeatedly.
The first of them sends a `describe_labeling_job` request, which should tell you whether the job is completed. If it is, 'LabelingJobStatus' will be 'Completed'.

###Code
sagemaker_client.describe_labeling_job(LabelingJobName=job_name)

###Output
_____no_output_____
###Markdown
The next cell extracts detailed information on how your job is doing to date. You can re-evaluate it at any time. It should give you:
* The number of human- and machine-annotated images in each category across the iterations of your labeling job.
* The training curves of any neural network training jobs launched by Ground Truth **(only if you are running with `RUN_FULL_AL_DEMO=True`)**.
* The cost of the human- and machine-annotated labels.
To understand the pricing, study [the pricing doc](https://aws.amazon.com/sagemaker/groundtruth/pricing/) carefully. In our case, each human label costs `$0.08 + 3 * $0.012 = $0.116` and each auto-label costs `$0.08`. There is also a small added cost of using SageMaker instances for neural net training and inference during auto-labeling. However, this should be insignificant compared to the other costs.
If `RUN_FULL_AL_DEMO==True`, then the job will proceed in multiple iterations.
* Iteration 1: Ground Truth will send out 10 images as 'probes' for human annotation. If these are successfully annotated, proceed to Iteration 2.
* Iteration 2: Send out a batch of `MaxConcurrentTaskCount - 10` (in our case, 190) images for human annotation to obtain an active learning training batch.
* Iteration 3: Send out another batch of 200 images for human annotation to obtain an active learning validation set.
* Iteration 4a: Train a neural net to do auto-labeling. Auto-label as many datapoints as possible.
* Iteration 4b: If there is any data left over, send out at most 200 images for human annotation.
* Repeat Iterations 4a and 4b until all data is annotated.
If `RUN_FULL_AL_DEMO==False`, only Iterations 1 and 2 will happen.

###Code
from datetime import datetime
import glob
import shutil

HUMAN_PRICE = 0.116
AUTO_PRICE = 0.08

try:
    os.makedirs('ic_output_data/', exist_ok=False)
except FileExistsError:
    shutil.rmtree('ic_output_data/')

S3_OUTPUT = boto3.client('sagemaker').describe_labeling_job(LabelingJobName=job_name)[
    'OutputConfig']['S3OutputPath'] + job_name

# Download human annotation data.
!aws s3 cp {S3_OUTPUT + '/annotations/worker-response'} ic_output_data/worker-response --recursive --quiet

worker_times = []
worker_ids = []

# Collect the times and worker ids of all the annotation events to date.
for annot_fname in glob.glob('ic_output_data/worker-response/**', recursive=True):
    if annot_fname.endswith('json'):
        with open(annot_fname, 'r') as f:
            annot_data = json.load(f)
        for answer in annot_data['answers']:
            annot_time = datetime.strptime(
                answer['submissionTime'], '%Y-%m-%dT%H:%M:%SZ')
            annot_id = answer['workerId']
            worker_times.append(annot_time)
            worker_ids.append(annot_id)

sort_ids = np.argsort(worker_times)
worker_times = np.array(worker_times)[sort_ids]
worker_ids = np.array(worker_ids)[sort_ids]
cumulative_n_annots = np.cumsum([1 for _ in worker_times])

# Count the number of annotations per unique worker id.
annots_per_worker = np.zeros(worker_ids.size)
ids_store = set()
for worker_id_id, worker_id in enumerate(worker_ids):
    ids_store.add(worker_id)
    annots_per_worker[worker_id_id] = float(
        cumulative_n_annots[worker_id_id]) / len(ids_store)

# Count the number of human annotations in each class for each iteration.
!aws s3 cp {S3_OUTPUT + '/annotations/consolidated-annotation/consolidation-response'} ic_output_data/consolidation-response --recursive --quiet consolidated_classes = defaultdict(list) consolidation_times = {} consolidated_cost_times = [] for consolidated_fname in glob.glob('ic_output_data/consolidation-response/**', recursive=True): if consolidated_fname.endswith('json'): iter_id = int(consolidated_fname.split('/')[-2][-1]) # Store the time of the most recent consolidation event as iteration time. iter_time = datetime.strptime(consolidated_fname.split('/')[-1], '%Y-%m-%d_%H:%M:%S.json') if iter_id in consolidation_times: consolidation_times[iter_id] = max(consolidation_times[iter_id], iter_time) else: consolidation_times[iter_id] = iter_time consolidated_cost_times.append(iter_time) with open(consolidated_fname, 'r') as f: consolidated_data = json.load(f) for consolidation in consolidated_data: consolidation_class = consolidation['consolidatedAnnotation']['content'][ 'category-metadata']['class-name'] consolidated_classes[iter_id].append(consolidation_class) total_human_labels = sum([len(annots) for annots in consolidated_classes.values()]) # Count the number of machine iterations in each class each iteration. !aws s3 cp {S3_OUTPUT + '/activelearning'} ic_output_data/activelearning --recursive --quiet auto_classes = defaultdict(list) auto_times = {} auto_cost_times = [] for auto_fname in glob.glob('ic_output_data/activelearning/**', recursive=True): if auto_fname.endswith('auto_annotator_output.txt'): iter_id = int(auto_fname.split('/')[-3]) with open(auto_fname, 'r') as f: annots = [' '.join(l.split()[1:]) for l in f.readlines()] for annot in annots: annot = json.loads(annot) time_str = annot['category-metadata']['creation-date'] auto_time = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%S.%f') auto_class = annot['category-metadata']['class-name'] auto_classes[iter_id].append(auto_class) if iter_id in auto_times: auto_times[iter_id] = max(auto_times[iter_id], auto_time) else: auto_times[iter_id] = auto_time auto_cost_times.append(auto_time) total_auto_labels = sum([len(annots) for annots in auto_classes.values()]) n_iters = max(len(auto_times), len(consolidation_times)) def get_training_job_data(training_job_name): logclient = boto3.client('logs') log_group_name = '/aws/sagemaker/TrainingJobs' log_stream_name = logclient.describe_log_streams(logGroupName=log_group_name, logStreamNamePrefix=training_job_name)['logStreams'][0]['logStreamName'] train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True ) events = train_log['events'] next_token = train_log['nextForwardToken'] while True: train_log = logclient.get_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, startFromHead=True, nextToken=next_token ) if train_log['nextForwardToken'] == next_token: break events = events + train_log['events'] errors = [] for event in events: msg = event['message'] if 'Final configuration' in msg: num_samples = int(msg.split('num_training_samples\': u\'')[1].split('\'')[0]) elif 'Validation-accuracy' in msg: errors.append(float(msg.split('Validation-accuracy=')[1])) errors = 1 - np.array(errors) return num_samples, errors training_data = !aws s3 ls {S3_OUTPUT + '/training/'} --recursive training_sizes = [] training_errors = [] training_iters = [] for line in training_data: if line.split('/')[-1] == 'model.tar.gz': training_job_name = line.split('/')[-3] n_samples, errors = get_training_job_data(training_job_name) 
training_sizes.append(n_samples) training_errors.append(errors) training_iters.append(int(line.split('/')[-5])) plt.figure(facecolor='white', figsize=(14, 4), dpi=100) ax = plt.subplot(131) plt.title('Label counts ({} human, {} auto)'.format( total_human_labels, total_auto_labels)) cmap = plt.get_cmap('coolwarm') for iter_id in consolidated_classes.keys(): bottom = 0 class_counter = Counter(consolidated_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): if iter_id == 1: plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom, label=cname, color=cmap(cname_id / float(len(CLASS_LIST)-1))) else: plt.bar(iter_id, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] for iter_id in auto_classes.keys(): bottom = 0 class_counter = Counter(auto_classes[iter_id]) for cname_id, cname in enumerate(CLASS_LIST): plt.bar(iter_id + .4, class_counter[cname], width=.4, bottom=bottom, color=cmap(cname_id / float(len(CLASS_LIST)-1))) bottom += class_counter[cname] tick_labels_human = ['Iter {}, human'.format(iter_id + 1) for iter_id in range(n_iters)] tick_labels_auto = ['Iter {}, auto'.format(iter_id + 1) for iter_id in range(n_iters)] tick_locations_human = np.arange(n_iters) + 1 tick_locations_auto = tick_locations_human + .4 tick_labels = np.concatenate([[tick_labels_human[idx], tick_labels_auto[idx]] for idx in range(n_iters)]) tick_locations = np.concatenate([[tick_locations_human[idx], tick_locations_auto[idx]] for idx in range(n_iters)]) plt.xticks(tick_locations, tick_labels, rotation=90) plt.legend() plt.ylabel('Count') ax = plt.subplot(132) total_human = 0 total_auto = 0 for iter_id in range(1, n_iters + 1): cost_human = len(consolidated_classes[iter_id]) * HUMAN_PRICE cost_auto = len(auto_classes[iter_id]) * AUTO_PRICE total_human += cost_human total_auto += cost_auto plt.bar(iter_id, cost_human, width=.8, color='gray', hatch='/', edgecolor='k', label='human' if iter_id==1 else None) plt.bar(iter_id, cost_auto, bottom=cost_human, width=.8, color='gray', edgecolor='k', label='auto' if iter_id==1 else None) plt.title('Annotation costs (\${:.2f} human, \${:.2f} auto)'.format( total_human, total_auto)) plt.xlabel('Iter') plt.ylabel('Cost in dollars') plt.legend() if len(training_sizes) > 0: plt.subplot(133) plt.title('Active learning training curves') plt.grid(True) cmap = plt.get_cmap('coolwarm') n_all = len(training_sizes) for iter_id_id, (iter_id, size, errs) in enumerate(zip(training_iters, training_sizes, training_errors)): plt.plot(errs, label='Iter {}, auto'.format(iter_id + 1), color=cmap(iter_id_id / max(1, (n_all-1)))) plt.legend() plt.xscale('log') plt.xlabel('Training epoch') plt.ylabel('Validation error') ###Output _____no_output_____ ###Markdown Analyze Ground Truth labeling job results**This section should take about 20min to complete.**After the job finishes running (**make sure `sagemaker_client.describe_labeling_job` shows the job is complete!**), it is time to analyze the results. The plots in the [Monitor job progress](Monitor-job-progress) section form part of the analysis. In this section, we will gain additional insights into the results, all contained in the `output manifest`. You can find the location of the output manifest under `AWS Console > SageMaker > Labeling Jobs > [name of your job]`. We will obtain it programmatically in the cell below. 
Postprocess the output manifest
Now that the job is complete, we will download the output manifest and postprocess it to form four arrays:
* `img_uris` contains the S3 URIs of all the images that Ground Truth annotated.
* `groundtruth_labels` contains Ground Truth's labels for each image in `img_uris`.
* `confidences` contains the confidence of each label in `groundtruth_labels`.
* `human` is a flag array that contains 1 at indices corresponding to images annotated by human annotators, and 0 at indices corresponding to images annotated by Ground Truth's automated data labeling.

###Code
# Load the output manifest's annotations.
OUTPUT_MANIFEST = "s3://{}/{}/output/{}/manifests/output/output.manifest".format(
    BUCKET, EXP_NAME, job_name
)
!aws s3 cp {OUTPUT_MANIFEST} 'output.manifest'

with open("output.manifest", "r") as f:
    output = [json.loads(line.strip()) for line in f.readlines()]

# Create data arrays.
img_uris = [None] * len(output)
confidences = np.zeros(len(output))
groundtruth_labels = [None] * len(output)
human = np.zeros(len(output))

# Find the job name the manifest corresponds to.
keys = list(output[0].keys())
metakey = keys[np.where([("-metadata" in k) for k in keys])[0][0]]
jobname = metakey[:-9]

# Extract the data.
for datum_id, datum in enumerate(output):
    img_uris[datum_id] = datum["source-ref"]
    groundtruth_labels[datum_id] = str(datum[metakey]["class-name"])
    confidences[datum_id] = datum[metakey]["confidence"]
    human[datum_id] = int(datum[metakey]["human-annotated"] == "yes")
groundtruth_labels = np.array(groundtruth_labels)

###Output
_____no_output_____
###Markdown
Plot class histograms
Now, let's plot the class histograms. The next cell should produce three subplots:
* The Left subplot shows the number of images annotated as belonging to each visual category. The categories will be sorted from the most to the least numerous. Each bar is divided into a 'human' and a 'machine' part, which shows how many images were annotated as a given category by human annotators and by the automated data labeling mechanism.
* The Middle subplot is the same as the Left, except the y-axis is in log scale. This helps visualize unbalanced datasets, where some categories contain orders of magnitude more images than others.
* The Right subplot shows the average confidence of images in each category, separately for human- and auto-annotated images.

###Code
# Compute the number of annotations in each class.
n_classes = len(set(groundtruth_labels))
sorted_clnames, class_sizes = zip(*Counter(groundtruth_labels).most_common(n_classes))

# Count the human-annotated images in each class.
human_sizes = [human[groundtruth_labels == clname].sum() for clname in sorted_clnames]
class_sizes = np.array(class_sizes)
human_sizes = np.array(human_sizes)

# Compute the average annotation confidence per class.
human_confidences = np.array(
    [confidences[np.logical_and(groundtruth_labels == clname, human)] for clname in sorted_clnames]
)
machine_confidences = [
    confidences[np.logical_and(groundtruth_labels == clname, 1 - human)]
    for clname in sorted_clnames
]

# If there are no images annotated as a specific class, set the average class confidence to 0.
for class_id in range(n_classes):
    if human_confidences[class_id].size == 0:
        human_confidences[class_id] = np.array([0])
    if machine_confidences[class_id].size == 0:
        machine_confidences[class_id] = np.array([0])

plt.figure(figsize=(9, 3), facecolor="white", dpi=100)

plt.subplot(1, 3, 1)
plt.title("Annotation histogram")
plt.bar(range(n_classes), human_sizes, color="gray", hatch="/", edgecolor="k", label="human")
plt.bar(
    range(n_classes),
    class_sizes - human_sizes,
    bottom=human_sizes,
    color="gray",
    edgecolor="k",
    label="machine",
)
plt.xticks(range(n_classes), sorted_clnames, rotation=90)
plt.ylabel("Annotation Count")
plt.legend()

plt.subplot(1, 3, 2)
plt.title("Annotation histogram (logscale)")
plt.bar(range(n_classes), human_sizes, color="gray", hatch="/", edgecolor="k", label="human")
plt.bar(
    range(n_classes),
    class_sizes - human_sizes,
    bottom=human_sizes,
    color="gray",
    edgecolor="k",
    label="machine",
)
plt.xticks(range(n_classes), sorted_clnames, rotation=90)
plt.yscale("log")

plt.subplot(1, 3, 3)
plt.title("Mean confidences")
plt.bar(
    np.arange(n_classes),
    [conf.mean() for conf in human_confidences],
    color="gray",
    hatch="/",
    edgecolor="k",
    width=0.4,
)
plt.bar(
    np.arange(n_classes) + 0.4,
    [conf.mean() for conf in machine_confidences],
    color="gray",
    edgecolor="k",
    width=0.4,
)
plt.xticks(range(n_classes), sorted_clnames, rotation=90);

###Output
_____no_output_____
###Markdown
Plot annotated images
In any data science task, it is crucial to plot and inspect the results to check that they make sense. In order to do this, we will:
1. Download the input images that Ground Truth annotated.
2. Split them by annotated category and by whether the annotation was done by a human or the auto-labeling mechanism.
3. Plot images in each category and human/auto-annotated class.
We will download the input images to the `LOCAL_IMG_DIR` you can choose in the next cell. Note that if this directory already contains images with the same filenames as your Ground Truth input images, we will not re-download the images.
If your dataset is large and you do not wish to download and plot **all** the images, simply set `DATASET_SIZE` to a small number. We will pick a random subset of your data for plotting.

###Code
LOCAL_IMG_DIR = '<< choose a local directory name to download the images to >>'  # Replace with the name of a local directory to store images.
assert LOCAL_IMG_DIR != '<< choose a local directory name to download the images to >>', 'Please provide a local directory name'
DATASET_SIZE = len(img_uris)  # Change this to a reasonable number if your dataset is much larger than 10K images.

subset_ids = np.random.choice(range(len(img_uris)), DATASET_SIZE, replace=False)
img_uris = [img_uris[idx] for idx in subset_ids]
groundtruth_labels = groundtruth_labels[subset_ids]
confidences = confidences[subset_ids]
human = human[subset_ids]

# One local filename per (possibly subsampled) image.
img_fnames = [None] * len(img_uris)
for img_uri_id, img_uri in enumerate(img_uris):
    target_fname = os.path.join(
        LOCAL_IMG_DIR, img_uri.split('/')[-1])
    if not os.path.isfile(target_fname):
        !aws s3 cp {img_uri} {target_fname}
    img_fnames[img_uri_id] = target_fname

###Output
_____no_output_____
###Markdown
Plot a small output sample
The following cell will create two figures. The first plots `N_SHOW` images in each category, as annotated by humans. The second plots `N_SHOW` images in each category, as annotated by the auto-labeling mechanism. If any category contains fewer than `N_SHOW` images, that row will not be displayed. By default, `N_SHOW = 10`, but feel free to change this to any other small number.
###Code
N_SHOW = 10

# Plot the human-annotated images.
plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor="white", dpi=60)
for class_name_id, class_name in enumerate(sorted_clnames):
    class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0]
    try:
        show_ids = np.random.choice(class_ids, N_SHOW, replace=False)
    except ValueError:
        print("Not enough human annotations to show for class: {}".format(class_name))
        continue
    for show_id_id, show_id in enumerate(show_ids):
        plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id))
        plt.title("Human Label: " + class_name)
        plt.imshow(imageio.imread(img_fnames[show_id]))
        plt.axis("off")
plt.tight_layout()

# Plot the machine-annotated images.
plt.figure(figsize=(3 * N_SHOW, 2 + 3 * n_classes), facecolor="white", dpi=100)
for class_name_id, class_name in enumerate(sorted_clnames):
    class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1 - human))[0]
    try:
        show_ids = np.random.choice(class_ids, N_SHOW, replace=False)
    except ValueError:
        print("Not enough machine annotations to show for class: {}".format(class_name))
        continue
    for show_id_id, show_id in enumerate(show_ids):
        plt.subplot2grid((n_classes, N_SHOW), (class_name_id, show_id_id))
        plt.title("Auto Label: " + class_name)
        plt.imshow(imageio.imread(img_fnames[show_id]))
        plt.axis("off")
plt.tight_layout()

###Output
_____no_output_____
###Markdown
Plot the full results
Finally, we plot all the results to a large pdf file. The pdf (called `ground_truth.pdf`) will display 100 images per page. Each page will contain images belonging to the same category, and annotated either by human annotators or by the auto-labeling mechanism. You can use this pdf to investigate exactly which images were annotated as which class at a glance.
This might take a while, and the resulting pdf might be very large. For a dataset of 1K images, the process takes only a minute and creates a 10MB-large pdf. You can set `N_SHOW_PER_CLASS` to a small number if you want to limit the max number of examples shown in each category.

###Code
N_SHOW_PER_CLASS = np.inf

plt.figure(figsize=(10, 10), facecolor="white", dpi=100)
with PdfPages("ground_truth.pdf") as pdf:
    for class_name in sorted_clnames:
        # Plot images annotated as class_name by humans.
        plt.clf()
        plt.text(0.1, 0.5, s="Images annotated as {} by humans".format(class_name), fontsize=20)
        plt.axis("off")
        class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0]
        for img_id_id, img_id in enumerate(class_ids):
            if img_id_id == N_SHOW_PER_CLASS:
                break
            if img_id_id % 100 == 0:
                pdf.savefig()
                plt.clf()
                print(
                    "Plotting human annotations of {}, {}/{}...".format(
                        class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS)
                    )
                )
            plt.subplot(10, 10, (img_id_id % 100) + 1)
            plt.imshow(imageio.imread(img_fnames[img_id]), aspect="auto")
            plt.axis("off")
        pdf.savefig()

        # Plot images annotated as class_name by machines.
        plt.clf()
        plt.text(0.1, 0.5, s="Images annotated as {} by machines".format(class_name), fontsize=20)
        plt.axis("off")
        class_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1 - human))[
            0
        ]
        for img_id_id, img_id in enumerate(class_ids):
            if img_id_id == N_SHOW_PER_CLASS:
                break
            if img_id_id % 100 == 0:
                pdf.savefig()
                plt.clf()
                print(
                    "Plotting machine annotations of {}, {}/{}...".format(
                        class_name, (img_id_id + 1), min(len(class_ids), N_SHOW_PER_CLASS)
                    )
                )
            plt.subplot(10, 10, (img_id_id % 100) + 1)
            plt.imshow(imageio.imread(img_fnames[img_id]), aspect="auto")
            plt.axis("off")
        pdf.savefig()
        plt.clf()

###Output
_____no_output_____
###Markdown
Compare Ground Truth results to known, pre-labeled data
**This section should take about 5 minutes to complete.**
Sometimes (for example, when benchmarking the system) we have an alternative set of data labels available. For example, the Open Images data has already been carefully annotated by a professional annotation workforce. This allows us to perform additional analysis that compares Ground Truth labels to the known, pre-labeled data. When doing so, it is important to bear in mind that any image labels created by humans will most likely not be 100% accurate. For this reason, it is better to think of labeling accuracy as "adherence to a particular standard / set of labels" rather than "how good (in absolute terms) are the Ground Truth labels."

Compute accuracy
In this cell, we will calculate the accuracy of Ground Truth labels with respect to the standard labels. In [Prepare the data](Prepare-the-data), we created the `ims` dictionary that specifies which image belongs to each category. We will convert it to an array `standard_labels` such that `standard_labels[i]` contains the label of the `i-th` image, and should ideally correspond to `groundtruth_labels[i]`. This will allow us to plot confusion matrices to assess how well the Ground Truth labels adhere to the standard labels. We plot a confusion matrix for the total dataset, and separate matrices for human annotations and auto-annotations.

###Code
def plot_confusion_matrix(
    cm, classes, title="Confusion matrix", normalize=False, cmap=plt.cm.Blues
):
    if normalize:
        cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation="nearest", cmap=cmap)
    plt.title(title)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)

    # Use a float format for normalized matrices and an integer format otherwise.
    fmt = ".2f" if normalize else "d"
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(
            j,
            i,
            format(cm[i, j], fmt),
            horizontalalignment="center",
            color="white" if cm[i, j] > thresh else "black",
        )

    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.tight_layout()


# Convert the 'ims' dictionary (which maps class names to images) to a list of image classes.
standard_labels = []
for img_uri in img_uris:
    img_uri = img_uri.split("/")[-1].split(".")[0]
    standard_label = [cname for cname, imgs_in_cname in ims.items() if img_uri in imgs_in_cname][0]
    standard_labels.append(standard_label)
standard_labels = np.array(standard_labels)

# Plot a confusion matrix for the full dataset.
plt.figure(facecolor="white", figsize=(12, 4), dpi=100) plt.subplot(131) mean_err = 100 - np.mean(standard_labels == groundtruth_labels) * 100 cnf_matrix = confusion_matrix(standard_labels, groundtruth_labels) np.set_printoptions(precision=2) plot_confusion_matrix( cnf_matrix, classes=sorted(ims.keys()), title="Full annotation set error {:.2f}%".format(mean_err), normalize=False, ) # Plot a confusion matrix for human-annotated Ground Truth labels. plt.subplot(132) mean_err = 100 - np.mean(standard_labels[human == 1.0] == groundtruth_labels[human == 1.0]) * 100 cnf_matrix = confusion_matrix(standard_labels[human == 1.0], groundtruth_labels[human == 1.0]) np.set_printoptions(precision=2) plot_confusion_matrix( cnf_matrix, classes=sorted(ims.keys()), title="Human annotation set (size {}) error {:.2f}%".format(int(sum(human)), mean_err), normalize=False, ) # Plot a confusion matrix for auto-annotated Ground Truth labels. if sum(human == 0.0) > 0: plt.subplot(133) mean_err = ( 100 - np.mean(standard_labels[human == 0.0] == groundtruth_labels[human == 0.0]) * 100 ) cnf_matrix = confusion_matrix(standard_labels[human == 0.0], groundtruth_labels[human == 0.0]) np.set_printoptions(precision=2) plot_confusion_matrix( cnf_matrix, classes=sorted(ims.keys()), title="Auto-annotation set (size {}) error {:.2f}%".format( int(len(human) - sum(human)), mean_err ), normalize=False, ) ###Output _____no_output_____ ###Markdown Plot correct and incorrect annotationsThis cell repeats the plot from Plot the full results. However, it sorts the predictions into correct and incorrect, and indicates the standard label of all the incorrect predictions. ###Code N_SHOW_PER_CLASS = np.inf plt.figure(figsize=(10, 10), facecolor="white", dpi=100) with PdfPages("ground_truth_benchmark.pdf") as pdf: for class_name in sorted_clnames: human_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, human))[0] auto_ids = np.where(np.logical_and(np.array(groundtruth_labels) == class_name, 1 - human))[ 0 ] for class_ids_id, class_ids in enumerate([human_ids, auto_ids]): plt.clf() plt.text( 0.1, 0.5, s="Images annotated as {} by {}".format( class_name, "humans" if class_ids_id == 0 else "machines" ), fontsize=20, ) plt.axis("off") good_ids = class_ids[ np.where(standard_labels[class_ids] == groundtruth_labels[class_ids])[0] ] bad_ids = class_ids[ np.where(standard_labels[class_ids] != groundtruth_labels[class_ids])[0] ] for img_id_id, img_id in enumerate(np.concatenate([good_ids, bad_ids])): if img_id_id == N_SHOW_PER_CLASS: break if img_id_id % 100 == 0: pdf.savefig() plt.clf() print( "Plotting annotations of {}, {}/{}...".format( class_name, img_id_id, min(len(class_ids), N_SHOW_PER_CLASS) ) ) ax = plt.subplot(10, 10, (img_id_id % 100) + 1) plt.imshow(imageio.imread(img_fnames[img_id]), aspect="auto") plt.axis("off") if img_id_id < len(good_ids): # Draw a green border around the image. rec = matplotlib.patches.Rectangle( (0, 0), 1, 1, lw=10, edgecolor="green", fill=False, transform=ax.transAxes ) else: # Draw a red border around the image. rec = matplotlib.patches.Rectangle( (0, 0), 1, 1, lw=10, edgecolor="red", fill=False, transform=ax.transAxes ) ax.add_patch(rec) pdf.savefig() plt.clf() ###Output _____no_output_____ ###Markdown Train an image classifier using Ground Truth labelsAt this stage, we have fully labeled our dataset and we can train a machine learning model to classify images based on the categories we previously defined. 
We'll do so using the **augmented manifest** output of our labeling job - no additional file translation or manipulation required! For a more complete description of the augmented manifest, see our other [example notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/ground_truth_labeling_jobs/object_detection_augmented_manifest_training/object_detection_augmented_manifest_training.ipynb).
**NOTE:** Training neural networks to high accuracy often requires a careful choice of hyperparameters. In this case, we hand-picked hyperparameters that work reasonably well for this dataset. The neural net should have an accuracy of about **60% if you're using 100 datapoints, and over 95% if you're using 1000 datapoints**. To train neural networks on novel data, consider using [SageMaker's model tuning / hyperparameter optimization algorithms](https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html).
First, we'll split our augmented manifest into a training set and a validation set using an 80/20 split.

###Code
with open("output.manifest", "r") as f:
    output = [json.loads(line) for line in f.readlines()]

# Shuffle output in place.
np.random.shuffle(output)

dataset_size = len(output)
train_test_split_index = round(dataset_size * 0.8)

train_data = output[:train_test_split_index]
validation_data = output[train_test_split_index:]

num_training_samples = 0
with open("train.manifest", "w") as f:
    for line in train_data:
        f.write(json.dumps(line))
        f.write("\n")
        num_training_samples += 1

with open("validation.manifest", "w") as f:
    for line in validation_data:
        f.write(json.dumps(line))
        f.write("\n")

###Output
_____no_output_____
###Markdown
Next, we'll upload these manifest files to the previously defined S3 bucket so that they can be used in the training job.
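Before we do, it may help to see the shape of a single augmented-manifest line. The sketch below is illustrative only: the URI and label values are made up, but the fields mirror what we read back from the real output manifest earlier (`source-ref`, the `category` label attribute, and its `category-metadata`).

###Code
# Illustrative example of one augmented-manifest line (values are invented;
# your manifests contain the real S3 URIs and Ground Truth metadata).
example_line = {
    "source-ref": "s3://your-bucket/ground-truth-ic-demo/images/0123456789abcdef.jpg",
    "category": 1,
    "category-metadata": {
        "class-name": "Fruit",
        "confidence": 0.94,
        "human-annotated": "yes",
    },
}
print(json.dumps(example_line, indent=2))

###Output
_____no_output_____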
###Code
s3.upload_file("train.manifest", BUCKET, EXP_NAME + "/train.manifest")
s3.upload_file("validation.manifest", BUCKET, EXP_NAME + "/validation.manifest")

# Create a unique job name.
nn_job_name_prefix = "groundtruth-augmented-manifest-demo"
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
nn_job_name = nn_job_name_prefix + timestamp

training_image = sagemaker.amazon.amazon_estimator.get_image_uri(
    boto3.Session().region_name, "image-classification", repo_version="latest"
)

training_params = {
    "AlgorithmSpecification": {"TrainingImage": training_image, "TrainingInputMode": "Pipe"},
    "RoleArn": role,
    "OutputDataConfig": {"S3OutputPath": "s3://{}/{}/output/".format(BUCKET, EXP_NAME)},
    "ResourceConfig": {"InstanceCount": 1, "InstanceType": "ml.p3.2xlarge", "VolumeSizeInGB": 50},
    "TrainingJobName": nn_job_name,
    "HyperParameters": {
        "epochs": "30",
        "image_shape": "3,224,224",
        "learning_rate": "0.01",
        "lr_scheduler_step": "10,20",
        "mini_batch_size": "32",
        "num_classes": str(num_classes),
        "num_layers": "18",
        "num_training_samples": str(num_training_samples),
        "resize": "224",
        "use_pretrained_model": "1",
    },
    "StoppingCondition": {"MaxRuntimeInSeconds": 86400},
    "InputDataConfig": [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "AugmentedManifestFile",
                    "S3Uri": "s3://{}/{}/{}".format(BUCKET, EXP_NAME, "train.manifest"),
                    "S3DataDistributionType": "FullyReplicated",
                    "AttributeNames": ["source-ref", "category"],
                }
            },
            "ContentType": "application/x-recordio",
            "RecordWrapperType": "RecordIO",
            "CompressionType": "None",
        },
        {
            "ChannelName": "validation",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "AugmentedManifestFile",
                    "S3Uri": "s3://{}/{}/{}".format(BUCKET, EXP_NAME, "validation.manifest"),
                    "S3DataDistributionType": "FullyReplicated",
                    "AttributeNames": ["source-ref", "category"],
                }
            },
            "ContentType": "application/x-recordio",
            "RecordWrapperType": "RecordIO",
            "CompressionType": "None",
        },
    ],
}

###Output
_____no_output_____
###Markdown
Now we create the SageMaker training job.

###Code
sagemaker_client = boto3.client("sagemaker")
sagemaker_client.create_training_job(**training_params)

# Confirm that the training job has started.
print("Training job started")
while True:
    response = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name)
    status = response["TrainingJobStatus"]
    if status == "Completed":
        print("Training job ended with status: " + status)
        break
    if status == "Failed":
        message = response["FailureReason"]
        print("Training failed with the following error: {}".format(message))
        raise Exception("Training job failed")
    time.sleep(30)

###Output
_____no_output_____
###Markdown
Deploy the Model
Now that we've fully labeled our dataset and have a trained model, we want to use the model to perform inference. Image classification only supports encoded .jpg and .png image formats as inference input for now. The output is the probability values for all classes, encoded in JSON format, or in JSON Lines format for batch transform. This section involves several steps: Create Model - Create a model from the training output. Batch Transform - Create a transform job to perform batch inference. Host the model for realtime inference - Create an inference endpoint and perform realtime inference.
Create Model

###Code
timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime())
model_name = "groundtruth-demo-ic-model" + timestamp
print(model_name)
info = sagemaker_client.describe_training_job(TrainingJobName=nn_job_name)
model_data = info["ModelArtifacts"]["S3ModelArtifacts"]
print(model_data)

primary_container = {
    "Image": training_image,
    "ModelDataUrl": model_data,
}

create_model_response = sagemaker_client.create_model(
    ModelName=model_name, ExecutionRoleArn=role, PrimaryContainer=primary_container
)
print(create_model_response["ModelArn"])

###Output
_____no_output_____
###Markdown
Batch Transform
We now create a SageMaker Batch Transform job using the model created above to perform batch prediction.

Download Test Data
First, let's download a few test images that have been held out from the training and validation data.

###Code
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
batch_job_name = "image-classification-model" + timestamp
batch_input = 's3://{}/{}/test/'.format(BUCKET, EXP_NAME)
batch_output = 's3://{}/{}/{}/output/'.format(BUCKET, EXP_NAME, batch_job_name)

# Copy two images from each class, unseen by the neural net, to our S3 bucket.
test_images = []
for class_id in ['/m/04szw', '/m/02xwb', '/m/0cd4d', '/m/07dm6', '/m/0152hh']:
    test_images.extend([label[0] + '.jpg' for label in all_labels
                        if (label[2] == class_id and label[3] == '1')][-2:])

!aws s3 rm $batch_input --recursive
for test_img in test_images:
    !aws s3 cp s3://open-images-dataset/test/{test_img} {batch_input}

request = {
    "TransformJobName": batch_job_name,
    "ModelName": model_name,
    "MaxConcurrentTransforms": 16,
    "MaxPayloadInMB": 6,
    "BatchStrategy": "SingleRecord",
    "TransformOutput": {
        "S3OutputPath": "s3://{}/{}/{}/output/".format(BUCKET, EXP_NAME, batch_job_name)
    },
    "TransformInput": {
        "DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": batch_input}},
        "ContentType": "application/x-image",
        "SplitType": "None",
        "CompressionType": "None",
    },
    "TransformResources": {"InstanceType": "ml.p2.xlarge", "InstanceCount": 1},
}

print("Transform job name: {}".format(batch_job_name))

sagemaker_client = boto3.client("sagemaker")
sagemaker_client.create_transform_job(**request)

print("Created Transform job with name: ", batch_job_name)

while True:
    response = sagemaker_client.describe_transform_job(TransformJobName=batch_job_name)
    status = response["TransformJobStatus"]
    if status == "Completed":
        print("Transform job ended with status: " + status)
        break
    if status == "Failed":
        message = response["FailureReason"]
        print("Transform failed with the following error: {}".format(message))
        raise Exception("Transform job failed")
    time.sleep(30)

###Output
_____no_output_____
###Markdown
After the job completes, let's inspect the prediction results.

###Code
def get_label(out_fname):
    !aws s3 cp {out_fname} .
    print(out_fname)
    with open(out_fname.split('/')[-1]) as f:
        data = json.load(f)
        index = np.argmax(data['prediction'])
        probability = data['prediction'][index]
    print("Result: label - " + CLASS_LIST[index] + ", probability - " + str(probability))
    input_fname = out_fname.split('/')[-1][:-4]
    return CLASS_LIST[index], probability, input_fname

# Show prediction results.
!rm test_inputs/* plt.figure(facecolor='white', figsize=(7, 15), dpi=100) outputs = !aws s3 ls {batch_output} outputs = [get_label(batch_output + prefix.split()[-1]) for prefix in outputs] outputs.sort(key=lambda pred: pred[1], reverse=True) for fname_id, (pred_cname, pred_conf, pred_fname) in enumerate(outputs): !aws s3 cp {batch_input}{pred_fname} test_inputs/{pred_fname} plt.subplot(5, 2, fname_id+1) img = imageio.imread('test_inputs/{}'.format(pred_fname)) plt.imshow(img) plt.axis('off') plt.title('{}\nconfidence={:.2f}'.format(pred_cname, pred_conf)) if RUN_FULL_AL_DEMO: warning = '' else: warning = ('\nNOTE: In this small demo we only used 80 images to train the neural network.\n' 'The predictions will be far from perfect! Set RUN_FULL_AL_DEMO=True to see properly trained results.') plt.suptitle('Predictions sorted by confidence.{}'.format(warning)) ###Output _____no_output_____ ###Markdown Realtime InferenceWe now host the model with an endpoint and perform realtime inference.This section involves several steps, Create endpoint configuration - Create a configuration defining an endpoint. Create endpoint - Use the configuration to create an inference endpoint. Perform inference - Perform inference on some input data using the endpoint. Clean up - Delete the endpoint and model Create Endpoint Configuration ###Code timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime()) endpoint_config_name = job_name + "-epc" + timestamp endpoint_config_response = sagemaker_client.create_endpoint_config( EndpointConfigName=endpoint_config_name, ProductionVariants=[ { "InstanceType": "ml.m4.xlarge", "InitialInstanceCount": 1, "ModelName": model_name, "VariantName": "AllTraffic", } ], ) print("Endpoint configuration name: {}".format(endpoint_config_name)) print("Endpoint configuration arn: {}".format(endpoint_config_response["EndpointConfigArn"])) ###Output _____no_output_____ ###Markdown Create EndpointLastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes about 10 minutes to complete. ###Code timestamp = time.strftime("-%Y-%m-%d-%H-%M-%S", time.gmtime()) endpoint_name = job_name + "-ep" + timestamp print("Endpoint name: {}".format(endpoint_name)) endpoint_params = { "EndpointName": endpoint_name, "EndpointConfigName": endpoint_config_name, } endpoint_response = sagemaker_client.create_endpoint(**endpoint_params) print("EndpointArn = {}".format(endpoint_response["EndpointArn"])) # get the status of the endpoint response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = response["EndpointStatus"] print("EndpointStatus = {}".format(status)) # wait until the status has changed sagemaker_client.get_waiter("endpoint_in_service").wait(EndpointName=endpoint_name) # print the status of the endpoint endpoint_response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name) status = endpoint_response["EndpointStatus"] print("Endpoint creation ended with EndpointStatus = {}".format(status)) if status != "InService": raise Exception("Endpoint creation failed.") with open("test_inputs/{}".format(test_images[0]), "rb") as f: payload = f.read() payload = bytearray(payload) client = boto3.client("sagemaker-runtime") response = client.invoke_endpoint( EndpointName=endpoint_name, ContentType="application/x-image", Body=payload ) # `response` comes in a json format, let's unpack it. 
result = json.loads(response["Body"].read()) # The result outputs the probabilities for all classes. # Find the class with maximum probability and print the class name. print("Model prediction is: {}".format(CLASS_LIST[np.argmax(result)])) ###Output _____no_output_____ ###Markdown Finally, let's clean up and delete this endpoint. ###Code sagemaker_client.delete_endpoint(EndpointName=endpoint_name) ###Output _____no_output_____
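###Markdown
The section overview above also listed deleting the model as part of cleanup. A minimal sketch for removing the remaining resources created in this notebook (both names were defined in earlier cells):

###Code
# Finish cleaning up: remove the endpoint configuration and the model resource.
sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
sagemaker_client.delete_model(ModelName=model_name)

###Output
_____no_output_____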
ipynb/Togo.ipynb
###Markdown Togo* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Togo.ipynb) ###Code import datetime import time start = datetime.datetime.now() print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}") %config InlineBackend.figure_formats = ['svg'] from oscovida import * overview("Togo", weeks=5); overview("Togo"); compare_plot("Togo", normalise=True); # load the data cases, deaths = get_country_data("Togo") # compose into one table table = compose_dataframe_summary(cases, deaths) # show tables with up to 500 rows pd.set_option("max_rows", 500) # display the table table ###Output _____no_output_____ ###Markdown Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Togo.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))-------------------- ###Code print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and " f"deaths at {fetch_deaths_last_execution()}.") # to force a fresh download of data, run "clear_cache()" print(f"Notebook execution took: {datetime.datetime.now()-start}") ###Output _____no_output_____ ###Markdown Togo* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Togo.ipynb) ###Code import datetime import time start = datetime.datetime.now() print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}") %config InlineBackend.figure_formats = ['svg'] from oscovida import * overview("Togo", weeks=5); overview("Togo"); compare_plot("Togo", normalise=True); # load the data cases, deaths = get_country_data("Togo") # get population of the region for future normalisation: inhabitants = population("Togo") print(f'Population of "Togo": {inhabitants} people') # compose into one table table = compose_dataframe_summary(cases, deaths) # show tables with up to 1000 rows pd.set_option("max_rows", 1000) # display the table table ###Output _____no_output_____ ###Markdown Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Togo.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and 
scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))-------------------- ###Code print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and " f"deaths at {fetch_deaths_last_execution()}.") # to force a fresh download of data, run "clear_cache()" print(f"Notebook execution took: {datetime.datetime.now()-start}") ###Output _____no_output_____ ###Markdown Togo* Homepage of project: https://oscovida.github.io* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Togo.ipynb) ###Code import datetime import time start = datetime.datetime.now() print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}") %config InlineBackend.figure_formats = ['svg'] from oscovida import * overview("Togo"); # load the data cases, deaths, region_label = get_country_data("Togo") # compose into one table table = compose_dataframe_summary(cases, deaths) # show tables with up to 500 rows pd.set_option("max_rows", 500) # display the table table ###Output _____no_output_____ ###Markdown Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Togo.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))-------------------- ###Code print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and " f"deaths at {fetch_deaths_last_execution()}.") # to force a fresh download of data, run "clear_cache()" print(f"Notebook execution took: {datetime.datetime.now()-start}") ###Output _____no_output_____
LAb Data Modeling/Module5 - Lab6.ipynb
###Markdown DAT210x - Programming with Python for DS

Module5 - Lab6

###Code
import random, math
import pandas as pd
import numpy as np
import scipy.io

from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib

matplotlib.style.use('ggplot') # Look Pretty

# Leave this alone until indicated:
Test_PCA = False

###Output _____no_output_____

###Markdown A Convenience Function

This method is for your visualization convenience only. You aren't expected to know how to put this together yourself, although you should be able to follow the code by now:

###Code
def Plot2DBoundary(model, DTrain, LTrain, DTest, LTest):
    # The dots are training samples (img not drawn), and the pics are testing samples (images drawn)
    # Play around with the K values. This is a very controlled dataset, so it should be able to get
    # perfect classification on testing entries.
    # Play with the K for isomap, play with the K for neighbors.

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title('Transformed Boundary, Image Space -> 2D')

    padding = 0.1   # Zoom out
    resolution = 1  # Don't get too detailed; smaller values (finer rez) will take longer to compute
    colors = ['blue', 'green', 'orange', 'red']

    # ------
    # Calculate the boundaries of the mesh grid. The mesh grid is
    # a standard grid (think graph paper), where each point will be
    # sent to the classifier (KNeighbors) to predict what class it
    # belongs to. This is why KNeighbors has to be trained against
    # 2D data, so we can produce this countour. Once we have the
    # label for each point on the grid, we can color it appropriately
    # and plot it.
    x_min, x_max = DTrain[:, 0].min(), DTrain[:, 0].max()
    y_min, y_max = DTrain[:, 1].min(), DTrain[:, 1].max()
    x_range = x_max - x_min
    y_range = y_max - y_min
    x_min -= x_range * padding
    y_min -= y_range * padding
    x_max += x_range * padding
    y_max += y_range * padding

    # Using the boundaries, actually make the 2D Grid Matrix:
    xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
                         np.arange(y_min, y_max, resolution))

    # What class does the classifier say about each spot on the chart?
    # The values stored in the matrix are the predictions of the model
    # at said location:
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Plot the mesh grid as a filled contour plot:
    plt.contourf(xx, yy, Z, cmap=plt.cm.terrain, zorder=-100)

    # ------
    # When plotting the testing images, used to validate if the algorithm
    # is functioning correctly, size them as 5% of the overall chart size
    x_size = x_range * 0.05
    y_size = y_range * 0.05

    # First, plot the images in your TEST dataset
    img_num = 0
    for index in LTest.index:
        # DTest is a regular NDArray, so you'll iterate over that 1 at a time.
        x0, y0 = DTest[img_num, 0] - x_size / 2., DTest[img_num, 1] - y_size / 2.
        x1, y1 = DTest[img_num, 0] + x_size / 2., DTest[img_num, 1] + y_size / 2.

        # DTest = our images isomap-transformed into 2D. But we still want
        # to plot the original image, so we look to the original, untouched
        # dataset (at index) to get the pixels:
        img = df.iloc[index, :].values.reshape(num_pixels, num_pixels)
        ax.imshow(img, aspect='auto', cmap=plt.cm.gray, interpolation='nearest',
                  zorder=100000, extent=(x0, x1, y0, y1), alpha=0.8)
        img_num += 1

    # Plot your TRAINING points as well... as points rather than as images
    for label in range(len(np.unique(LTrain))):
        indices = np.where(LTrain == label)
        ax.scatter(DTrain[indices, 0], DTrain[indices, 1], c=colors[label], alpha=0.8, marker='o')

    # Plot
    plt.show()

###Output _____no_output_____

###Markdown The Assignment

Use the same code from Module4/assignment4.ipynb to load up the `face_data.mat` file into a dataframe called `df`. Be sure to calculate the `num_pixels` value, and to rotate the images to being right-side-up instead of sideways. This was demonstrated in the [Lab Assignment 4](https://github.com/authman/DAT210x/blob/master/Module4/assignment4.ipynb) code.

###Code
mat = scipy.io.loadmat('../Module4/Datasets/face_data.mat')
df = pd.DataFrame(mat['images']).T
num_images, num_pixels = df.shape
num_pixels = int(math.sqrt(num_pixels))
for i in range(num_images):
    df.loc[i, :] = df.loc[i, :].values.reshape(num_pixels, num_pixels).T.reshape(-1)

###Output
C:\Users\sasha\Anaconda3\lib\site-packages\ipykernel\__main__.py:2: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead
  from ipykernel import kernelapp as app

###Markdown Load up your face_labels dataset. It only has a single column, and you're only interested in that single column. You will have to slice the column out so that you have access to it as a "Series" rather than as a "Dataframe". This was discussed in the "Slicin'" lecture of the "Manipulating Data" reading on the course website. Use an appropriate indexer to take care of that. Be sure to print out the labels and compare what you see to the raw `face_labels.csv` so you know you loaded it correctly.

###Code
y = pd.read_csv('Datasets/face_labels.csv', header=None)
y = y.iloc[:, 0]

###Output _____no_output_____

###Markdown Do `train_test_split`. Use the same code as on the EdX platform in the reading material, but set the random_state=7 for reproducibility, and the test_size to 0.15 (15%). Your labels are actually passed in as a series (instead of as an NDArray) so that you can access their underlying indices later on. This is necessary so you can find your samples in the original dataframe. The convenience methods we've written for you that handle drawing expect this, so that they can plot your testing data as images rather than as points:

###Code
from sklearn.model_selection import train_test_split
X_train, X_test, label_train, label_test = train_test_split(df, y, test_size=0.15, random_state=7)

###Output _____no_output_____

###Markdown Dimensionality Reduction

###Code
if Test_PCA:
    # INFO: PCA is used *before* KNeighbors to simplify your high dimensionality
    # image samples down to just 2 principal components! A lot of information
    # (variance) is lost during the process, as I'm sure you can imagine. But
    # you have to drop the dimension down to two, otherwise you wouldn't be able
    # to visualize a 2D decision surface / boundary. In the wild, you'd probably
    # leave in a lot more dimensions, which is better for higher accuracy, but
    # worse for visualizing the decision boundary;
    #
    # Your model should only be trained (fit) against the training data (data_train).
    # Once you've done this, you need to use the model to transform both data_train
    # and data_test from their original high-D image feature space, down to 2D.

    # TODO: Implement PCA here. ONLY train against your training data, but
    # transform both your training + test data, storing the results back into
    # data_train, and data_test.
    from sklearn.decomposition import PCA
    pca = PCA(n_components=2)
    pca.fit(X_train)
    data_train = pca.transform(X_train)
    data_test = pca.transform(X_test)

else:
    # INFO: Isomap is used *before* KNeighbors to simplify your high dimensionality
    # image samples down to just 2 components! A lot of information has been
    # lost during the process, as I'm sure you can imagine. But if you have
    # non-linear data that can be represented on a 2D manifold, you probably will
    # be left with a far superior dataset to use for classification. Plus by
    # having the images in 2D space, you can plot them as well as visualize a 2D
    # decision surface / boundary. In the wild, you'd probably leave in a lot more
    # dimensions, which is better for higher accuracy, but worse for visualizing the
    # decision boundary;
    #
    # Your model should only be trained (fit) against the training data (data_train).
    # Once you've done this, you need to use the model to transform both data_train
    # and data_test from their original high-D image feature space, down to 2D.

    # TODO: Implement Isomap here. ONLY train against your training data, but
    # transform both your training + test data, storing the results back into
    # data_train, and data_test.
    from sklearn.manifold import Isomap
    iso = Isomap(n_neighbors=5, n_components=2)
    iso.fit(X_train)
    data_train = iso.transform(X_train)
    data_test = iso.transform(X_test)

###Output _____no_output_____

###Markdown Implement `KNeighborsClassifier` here. You can use any K value from 1 through 20, so play around with it and attempt to get good accuracy. Fit the classifier against your training data and labels.

###Code
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=10)
knn.fit(data_train, label_train)

###Output _____no_output_____

###Markdown Calculate and display the accuracy of the testing set (data_test and label_test):

###Code
accuracy_score = knn.score(data_test, label_test)
print(accuracy_score)

###Output 0.990476190476

###Markdown Let's chart the combined decision boundary, the training data as 2D plots, and the testing data as small images so we can visually validate performance:

###Code
Plot2DBoundary(knn, data_train, label_train, data_test, label_test)

###Output
C:\Users\sasha\Anaconda3\lib\site-packages\ipykernel\__main__.py:64: FutureWarning: reshape is deprecated and will raise in a subsequent release. Please use .values.reshape(...) instead

###Markdown After submitting your answers, experiment with using PCA instead of ISOMap. Are the results what you expected? Also try tinkering around with the test/train split percentage from 10-20%. Notice anything?

###Code
# .. your code changes above ..

###Output _____no_output_____
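Since the lab allows any K from 1 through 20, a quick sweep makes that choice less of a guess. This sketch is not part of the assignment solution; it simply reuses the `data_train`/`data_test` split built above.

###Code
# Sketch only: score the classifier for each candidate K.
for k in range(1, 21):
    knn_k = KNeighborsClassifier(n_neighbors=k)
    knn_k.fit(data_train, label_train)
    print(k, knn_k.score(data_test, label_test))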
.ipynb_checkpoints/01-DataExploreAndClean-checkpoint.ipynb
###Markdown Data Exploration and Cleaning

Before building the model, I want to understand the nature of the data we have. There are several data sources we're using, all indexed by date.

**Outcome Data**
- SAR response. This data set indicates whether or not there was a SAR call on a specified day.

**Features**
- Date features
- Weather from [NOAA](https://www.ncdc.noaa.gov/cdo-web/)

SAR Response Data

First, let's dig into the SAR response data. How often does a mission happen?

###Code
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline

sar_header = ['DEM', 'date', 'name', 'responders', 'hours', 'miles']
sar_data = pd.read_csv('./raw_data/KCSARA.csv', date_parser=True, names=sar_header, header=None)
sar_data = pd.DataFrame(sar_data)
sar_data.head()
sar_data.tail()
sar_data.shape

###Output _____no_output_____

###Markdown Missions per year

There are NAs in the date column; we need to get rid of those.

###Code
sar_data = sar_data[pd.notnull(sar_data.date)]

# Use a function from fast.ai to extract interesting date info
def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
    fld = df[fldname]
    fld_dtype = fld.dtype
    if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        fld_dtype = np.datetime64
    if not np.issubdtype(fld_dtype, np.datetime64):
        df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)
    targ_pre = re.sub('[Dd]ate$', '', fldname)
    attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
            'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start',
            'Is_year_end', 'Is_year_start']
    if time:
        attr = attr + ['Hour', 'Minute', 'Second']
    for n in attr:
        df[targ_pre + n] = getattr(fld.dt, n.lower())
    df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
    if drop:
        df.drop(fldname, axis=1, inplace=True)

add_datepart(sar_data, 'date', drop=False)
sar_data.head()

missions_per_year = sar_data.Year.value_counts()
missions_per_year
plt.plot(missions_per_year.sort_index())
plt.show()

###Output _____no_output_____

###Markdown Clearly some weirdness is happening here; let's cut the data to remove anything from before 1980 and in the future.

###Code
sar_data = sar_data[(sar_data.Year < 2020) & (sar_data.Year > 1979)]
missions_per_year = sar_data.Year.value_counts()
missions_per_year
plt.plot(missions_per_year.sort_index())
plt.show()

###Output _____no_output_____

###Markdown It seems to me that the data prior to 2001 is incomplete in some instances and likely follows a different model than the years after 2001. Let's focus this analysis on the missions after the spike. For example, in 1991 there are only missions in months 1-4, even though we know the most popular months are the summer months. This is seen in several other years as well.

###Code
sar_data = sar_data[(sar_data.Year < 2020) & (sar_data.Year > 2001)]
missions_per_year = sar_data.Year.value_counts()
missions_per_year
plt.plot(missions_per_year.sort_index())
plt.show()

###Output _____no_output_____

###Markdown Missions per week

As shown below, the summer weeks get the highest mission density.

###Code
missions_per_week = sar_data.Week.value_counts()
missions_per_week
plt.plot(missions_per_week.sort_index())
plt.show()

###Output _____no_output_____

###Markdown Clean data to include all dates and a mission class

Before we can do any ML, we need a table with all dates that indicates whether a mission happened or not. The rest of the variables in this table aren't important for our predictions.

###Code
sar_data.date = sar_data.date.astype('datetime64[ns]')
date_range = pd.date_range(start='1/1/2002', end='4/01/2019')
clean_table = []
for d in date_range:
    if sar_data.date.isin([d]).any():
        clean_table.append([d, 1])
    else:
        clean_table.append([d, 0])
sar_clean = pd.DataFrame(clean_table)
sar_clean.columns = ['date', 'mission']
sar_clean.head()

# Add the date parts to this new tidy table
add_datepart(sar_clean, 'date', drop=False)
sar_clean.head()

###Output _____no_output_____

###Markdown Weather Data

We're pulling data from two weather stations in Washington State. Boeing Field is a little south of Seattle and will represent the 'city' weather, and Mount Gardner is a weather station in the Cascade Mountain Range east of Seattle and will report on the 'mountain' weather in the model.

I'm not sure what to expect: poor weather will mean fewer people in the mountains, but it could also mean more dangerous conditions for the people that are out.

###Code
boeing = pd.read_csv('./raw_data/boeing.csv', parse_dates=[2])
boeing.describe()
gardner = pd.read_csv('./raw_data/gardner.csv', parse_dates=[2])
gardner.describe()
print(boeing.shape, gardner.shape)
print(boeing.head())
print(gardner.head())

###Output
(6309, 21) (6305, 11)
       STATION                         NAME       DATE  AWND    FMTM    PGTM  \
0  USW00024234  SEATTLE BOEING FIELD, WA US 2002-01-01  5.14  1129.0  1125.0
1  USW00024234  SEATTLE BOEING FIELD, WA US 2002-01-02  4.70   354.0   555.0
2  USW00024234  SEATTLE BOEING FIELD, WA US 2002-01-03  6.93  2031.0  1416.0
3  USW00024234  SEATTLE BOEING FIELD, WA US 2002-01-04  1.12    58.0  1113.0
4  USW00024234  SEATTLE BOEING FIELD, WA US 2002-01-05  3.80  1913.0   519.0

   PRCP  TAVG  TMAX  TMIN  ...  WDF2  WDF5  WSF2  WSF5  WT01  WT02  WT03  \
0  0.63  45.0  54.0  36.0  ...   130   140  17.0  21.9   NaN   NaN   NaN
1  0.06  49.0  55.0  43.0  ...   130   130  14.1  17.0   NaN   NaN   NaN
2  0.01  47.0  55.0  39.0  ...   170   190  14.1  21.0   NaN   NaN   NaN
3  0.00  49.0  54.0  44.0  ...   120   240   8.1  13.0   NaN   NaN   NaN
4  0.26  49.0  54.0  43.0  ...   140   210  15.0  17.9   NaN   NaN   NaN

   WT05  WT08  WT10
0   NaN   NaN   NaN
1   NaN   NaN   NaN
2   NaN   NaN   NaN
3   NaN   NaN   NaN
4   NaN   NaN   NaN

[5 rows x 21 columns]
       STATION                  NAME       DATE  AWND  PRCP  SNWD  TAVG  TMAX  \
0  USS0021B21S  MOUNT GARDNER, WA US 2002-01-01   NaN   0.0   NaN  32.0  34.0
1  USS0021B21S  MOUNT GARDNER, WA US 2002-01-02   NaN   0.4   NaN  33.0  35.0
2  USS0021B21S  MOUNT GARDNER, WA US 2002-01-03   NaN   0.2   NaN  33.0  36.0
3  USS0021B21S  MOUNT GARDNER, WA US 2002-01-04   NaN   0.1   NaN  33.0  36.0
4  USS0021B21S  MOUNT GARDNER, WA US 2002-01-05   NaN   0.2   NaN  33.0  35.0

   TMIN  TOBS  WESD
0  30.0  33.0  11.6
1  30.0  30.0  11.0
2  30.0  34.0  11.2
3  31.0  32.0  11.2
4  32.0  33.0  11.5

###Markdown We want tidy data, so let's join on the date.

###Code
weather = pd.merge(boeing, gardner, how='inner', on='DATE')
print(weather.shape)
weather.head()

###Output (6297, 31)

###Markdown The NaNs for the weather codes essentially mean that that weather type didn't happen, so I'll replace them with a 0.

###Code
weather = weather.fillna(0)
weather.head()

###Output _____no_output_____

###Markdown And finally merge the weather with the mission data to get our final dataset to use.

###Code
data = pd.merge(sar_clean, weather, how='inner', left_on='date', right_on='DATE')
print(data.shape)
data.head()

# Save the data table for now.
data.to_csv('sar_weather_df.csv', index=False)
data.columns

###Output _____no_output_____
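As a design note, the per-day membership loop used to build `sar_clean` can be collapsed into a single vectorized construction. This sketch is not part of the original notebook, but under the same inputs it should produce the same date/mission table.

###Code
# Sketch only: vectorized equivalent of the membership loop above.
sar_clean = pd.DataFrame({
    'date': date_range,
    'mission': date_range.isin(sar_data.date).astype(int),
})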
flooding/Basement vs Street Flooding.ipynb
###Markdown Rates of Basement Flooding Calls vs. Street Flooding

Seems like basement and street flooding calls have different patterns across the city. Street flooding calls seem to be more distributed throughout the city, and occur more regularly than basement calls. Here I'm looking at whether specific areas have much higher rates of basement flooding calls relative to street flooding calls than others.

###Code
import pandas as pd
import matplotlib.pyplot as plt

wib_comm_df = pd.read_csv('311_data/wib_calls_311_comm.csv')
wos_comm_df = pd.read_csv('311_data/wos_calls_311_comm.csv')
wib_comm_df.head()

wib_comm_stack = wib_comm_df[wib_comm_df.columns.values[1:]].stack().reset_index()
wos_comm_stack = wos_comm_df[wos_comm_df.columns.values[1:]].stack().reset_index()
wib_comm_stack.head()

wib_comm_grp = pd.DataFrame(wib_comm_stack.groupby(['level_1'])[0].sum()).reset_index()
wib_comm_grp = wib_comm_grp.rename(columns={'level_1': 'Community Area', 0: 'Basement Calls'})
wos_comm_grp = pd.DataFrame(wos_comm_stack.groupby(['level_1'])[0].sum()).reset_index()
wos_comm_grp = wos_comm_grp.rename(columns={'level_1': 'Community Area', 0: 'Street Calls'})

comm_grp_merge = wib_comm_grp.merge(wos_comm_grp, on='Community Area')
comm_grp_merge.head()

## Making basement to street call ratio column
comm_grp_merge['Basement-Street Ratio'] = comm_grp_merge['Basement Calls'] / comm_grp_merge['Street Calls']
comm_grp_merge.head()

comm_more_basement = comm_grp_merge.sort_values(by='Basement-Street Ratio', ascending=False)[:15]
comm_more_street = comm_grp_merge.sort_values(by='Basement-Street Ratio')[:15]
comm_more_basement.head()

###Output _____no_output_____

###Markdown Community Areas by Basement to Street Flood Call Ratio

From looking at the community areas with much more basement flooding calls than street flooding calls and vice versa, it seems like lower-income neighborhoods on the south side generally have more basement flooding calls, while higher-income neighborhoods on the north side have more street flooding calls.

Anecdotally, this makes sense from the area names, and overall it suggests that street flooding is more of a typical 311 call whose reporting rate varies with the neighborhood. Basement flooding matches up with FEMA data though, so it seems like street flooding should be ignored for most analyses in favor of basement flooding to look at the actual distribution of flooding events across the city.

Note for WBEZ Data

This also means that the WBEZ data is more off than previously expected because it combined both basement and street flooding. Only looking at basement flooding actually changes the top zip codes substantially.

###Code
fig, axs = plt.subplots(1, 2)
plt.rcParams["figure.figsize"] = [15, 5]
comm_more_basement.plot(title='More Basement Flooding Areas', ax=axs[0], kind='bar', x='Community Area', y='Basement-Street Ratio')
comm_more_street.plot(title='More Street Flooding Areas', ax=axs[1], kind='bar', x='Community Area', y='Basement-Street Ratio')

###Output _____no_output_____
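To push on the income pattern described above, the ratio table could be joined against a per-area income table. This is only a hedged sketch: 'comm_income.csv' and its 'median_income' column are hypothetical placeholders for whatever income data is actually available, not files in this repo.

###Code
# Sketch only; the income file and column name are hypothetical.
income_df = pd.read_csv('comm_income.csv')  # hypothetical: 'Community Area', 'median_income'
merged = comm_grp_merge.merge(income_df, on='Community Area')
print(merged[['Basement-Street Ratio', 'median_income']].corr())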
src/notebooks/jax-tutorials/JAX_Parallelism.ipynb
###Markdown Parallel Evaluation in JAX

[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google/jax/blob/main/docs/jax-101/06-parallelism.ipynb)

*Authors: Vladimir Mikulik & Roman Ring*

In this section we will discuss the facilities built into JAX for single-program, multiple-data (SPMD) code.

SPMD refers to a parallelism technique where the same computation (e.g., the forward pass of a neural net) is run on different input data (e.g., different inputs in a batch) in parallel on different devices (e.g., several TPUs).

Conceptually, this is not very different from vectorisation, where the same operations occur in parallel in different parts of memory on the same device. We have already seen that vectorisation is supported in JAX as a program transformation, `jax.vmap`. JAX supports device parallelism analogously, using `jax.pmap` to transform a function written for one device into a function that runs in parallel on multiple devices. This colab will teach you all about it.

Colab TPU Setup

If you're running this code in Google Colab, be sure to choose *Runtime*→*Change Runtime Type* and choose **TPU** from the Hardware Accelerator menu.

Once this is done, you can run the following to set up the Colab TPU for use with JAX:

###Code
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()

###Output _____no_output_____

###Markdown Next run the following to see the TPU devices you have available:

###Code
import jax
jax.devices()

###Output _____no_output_____

###Markdown The basics

The most basic use of `jax.pmap` is completely analogous to `jax.vmap`, so let's return to the convolution example from the [Vectorisation notebook](https://colab.research.google.com/github/google/jax/blob/main/docs/jax-101/03-vectorization.ipynb).

###Code
import numpy as np
import jax.numpy as jnp

x = np.arange(5)
w = np.array([2., 3., 4.])

def convolve(x, w):
    output = []
    for i in range(1, len(x)-1):
        output.append(jnp.dot(x[i-1:i+2], w))
    return jnp.array(output)

convolve(x, w)

###Output _____no_output_____

###Markdown Now, let's convert our `convolve` function into one that runs on entire batches of data. In anticipation of spreading the batch across several devices, we'll make the batch size equal to the number of devices:

###Code
n_devices = jax.local_device_count()
xs = np.arange(5 * n_devices).reshape(-1, 5)
ws = np.stack([w] * n_devices)

xs
ws

###Output _____no_output_____

###Markdown As before, we can vectorise using `jax.vmap`:

###Code
jax.vmap(convolve)(xs, ws)

###Output _____no_output_____

###Markdown To spread out the computation across multiple devices, just replace `jax.vmap` with `jax.pmap`:

###Code
jax.pmap(convolve)(xs, ws)

###Output _____no_output_____

###Markdown Note that the parallelized `convolve` returns a `ShardedDeviceArray`. That is because the elements of this array are sharded across all of the devices used in the parallelism. If we were to run another parallel computation, the elements would stay on their respective devices, without incurring cross-device communication costs.

###Code
jax.pmap(convolve)(xs, jax.pmap(convolve)(xs, ws))

###Output _____no_output_____

###Markdown The outputs of the inner `jax.pmap(convolve)` never left their devices when being fed into the outer `jax.pmap(convolve)`.

Specifying `in_axes`

Like with `vmap`, we can use `in_axes` to specify whether an argument to the parallelized function should be broadcast (`None`), or whether it should be split along a given axis. Note, however, that unlike `vmap`, only the leading axis (`0`) is supported by `pmap` at the time of writing this guide.

###Code
jax.pmap(convolve, in_axes=(0, None))(xs, w)

###Output _____no_output_____

###Markdown Notice how we get equivalent output to what we observe above with `jax.pmap(convolve)(xs, ws)`, where we manually replicated `w` when creating `ws`. Here, it is replicated via broadcasting, by specifying it as `None` in `in_axes`.

Keep in mind that when calling the transformed function, the size of the specified axis in arguments must not exceed the number of devices available to the host.

`pmap` and `jit`

`jax.pmap` JIT-compiles the function given to it as part of its operation, so there is no need to additionally `jax.jit` it.

Communication between devices

The above is enough to perform simple parallel operations, e.g. batching a simple MLP forward pass across several devices. However, sometimes we need to pass information between the devices. For example, perhaps we are interested in normalizing the output of each device so they sum to 1. For that, we can use special [collective ops](https://jax.readthedocs.io/en/latest/jax.lax.html#parallel-operators) (such as the `jax.lax.p*` ops `psum`, `pmean`, `pmax`, ...). In order to use the collective ops we must specify the name of the `pmap`-ed axis through the `axis_name` argument, and then refer to it when calling the op. Here's how to do that:

###Code
def normalized_convolution(x, w):
    output = []
    for i in range(1, len(x)-1):
        output.append(jnp.dot(x[i-1:i+2], w))
    output = jnp.array(output)
    return output / jax.lax.psum(output, axis_name='p')

jax.pmap(normalized_convolution, axis_name='p')(xs, ws)

###Output _____no_output_____

###Markdown The `axis_name` is just a string label that allows collective operations like `jax.lax.psum` to refer to the axis bound by `jax.pmap`. It can be named anything you want -- in this case, `p`. This name is essentially invisible to anything but those functions, and those functions use it to know which axis to communicate across.

`jax.vmap` also supports `axis_name`, which allows `jax.lax.p*` operations to be used in the vectorisation context in the same way they would be used in a `jax.pmap`:

###Code
jax.vmap(normalized_convolution, axis_name='p')(xs, ws)

###Output _____no_output_____

###Markdown Note that `normalized_convolution` will no longer work without being transformed by `jax.pmap` or `jax.vmap`, because `jax.lax.psum` expects there to be a named axis (`'p'`, in this case), and those two transformations are the only way to bind one.

Nesting `jax.pmap` and `jax.vmap`

The reason we specify `axis_name` as a string is so we can use collective operations when nesting `jax.pmap` and `jax.vmap`. For example:

```python
jax.vmap(jax.pmap(f, axis_name='i'), axis_name='j')
```

A `jax.lax.psum(..., axis_name='i')` in `f` would refer only to the pmapped axis, since they share the `axis_name`. In general, `jax.pmap` and `jax.vmap` can be nested in any order, and with themselves (so you can have a pmap within another pmap, for instance).
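As a concrete, hedged sketch of the composition named above (not part of the original notebook): the following assumes a single host whose local devices form the `'i'` axis, and uses a small vmapped `'j'` axis of size 2. The `psum` inside the function reduces only across devices, exactly as the text describes.

###Code
# Sketch only: psum over the pmapped axis 'i', under an outer vmap over 'j'.
def row_total(x):
    return jax.lax.psum(x, axis_name='i')

n_devices = jax.local_device_count()
batch = jnp.arange(2 * n_devices, dtype=jnp.float32).reshape(2, n_devices)
# Each output element should hold its own row's device-wide sum:
jax.vmap(jax.pmap(row_total, axis_name='i'), axis_name='j')(batch)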
###Markdown Example

Here's an example of a regression training loop with data parallelism, where each batch is split into sub-batches which are evaluated on separate devices.

There are two places to pay attention to:
* the `update()` function
* the replication of parameters and splitting of data across devices.

If this example is too confusing, you can find the same example, but without parallelism, in the next notebook, [State in JAX](https://colab.research.google.com/github/google/jax/blob/main/docs/jax-101/07-state.ipynb). Once that example makes sense, you can compare the differences to understand how parallelism changes the picture.

###Code
from typing import NamedTuple, Tuple
import functools

class Params(NamedTuple):
  weight: jnp.ndarray
  bias: jnp.ndarray


def init(rng) -> Params:
  """Returns the initial model params."""
  weights_key, bias_key = jax.random.split(rng)
  weight = jax.random.normal(weights_key, ())
  bias = jax.random.normal(bias_key, ())
  return Params(weight, bias)


def loss_fn(params: Params, xs: jnp.ndarray, ys: jnp.ndarray) -> jnp.ndarray:
  """Computes the least squares error of the model's predictions on x against y."""
  pred = params.weight * xs + params.bias
  return jnp.mean((pred - ys) ** 2)

LEARNING_RATE = 0.005

# So far, the code is identical to the single-device case. Here's what's new:

# Remember that the `axis_name` is just an arbitrary string label used
# to later tell `jax.lax.pmean` which axis to reduce over. Here, we call it
# 'num_devices', but could have used anything, so long as `pmean` used the same.
@functools.partial(jax.pmap, axis_name='num_devices')
def update(params: Params, xs: jnp.ndarray, ys: jnp.ndarray) -> Tuple[Params, jnp.ndarray]:
  """Performs one SGD update step on params using the given data."""

  # Compute the gradients on the given minibatch (individually on each device).
  loss, grads = jax.value_and_grad(loss_fn)(params, xs, ys)

  # Combine the gradient across all devices (by taking their mean).
  grads = jax.lax.pmean(grads, axis_name='num_devices')

  # Also combine the loss. Unnecessary for the update, but useful for logging.
  loss = jax.lax.pmean(loss, axis_name='num_devices')

  # Each device performs its own update, but since we start with the same params
  # and synchronise gradients, the params stay in sync.
  new_params = jax.tree_multimap(
      lambda param, g: param - g * LEARNING_RATE, params, grads)

  return new_params, loss

###Output _____no_output_____

###Markdown Here's how `update()` works:

Undecorated and without the `pmean`s, `update()` takes data tensors of shape `[batch, ...]`, computes the loss function on that batch and evaluates its gradients.

We want to spread the `batch` dimension across all available devices. To do that, we add a new axis using `pmap`. The arguments to the decorated `update()` thus need to have shape `[num_devices, batch_per_device, ...]`. So, to call the new `update()`, we'll need to reshape data batches so that what used to be `batch` is reshaped to `[num_devices, batch_per_device]`. That's what `split()` does below. Additionally, we'll need to replicate our model parameters, adding the `num_devices` axis. This reshaping is how a pmapped function knows which devices to send which data.

At some point during the update step, we need to combine the gradients computed by each device -- otherwise, the updates performed by each device would be different. That's why we use `jax.lax.pmean` to compute the mean across the `num_devices` axis, giving us the average gradient of the batch. That average gradient is what we use to compute the update.

Aside on naming: here, we use `num_devices` for the `axis_name` for didactic clarity while introducing `jax.pmap`. However, in some sense that is tautologous: any axis introduced by a pmap will represent a number of devices. Therefore, it's common to see the axis be named something semantically meaningful, like `batch`, `data` (signifying data parallelism) or `model` (signifying model parallelism).

###Code
# Generate true data from y = w*x + b + noise
true_w, true_b = 2, -1
xs = np.random.normal(size=(128, 1))
noise = 0.5 * np.random.normal(size=(128, 1))
ys = xs * true_w + true_b + noise

# Initialise parameters and replicate across devices.
params = init(jax.random.PRNGKey(123))
n_devices = jax.local_device_count()
replicated_params = jax.tree_map(lambda x: jnp.array([x] * n_devices), params)

###Output _____no_output_____

###Markdown So far, we've just constructed arrays with an additional leading dimension. The params are all still on the host (CPU). `pmap` will communicate them to the devices when `update()` is first called, and each copy will stay on its own device subsequently. You can tell because they are a DeviceArray, not a ShardedDeviceArray:

###Code
type(replicated_params.weight)

###Output _____no_output_____

###Markdown The params will become a ShardedDeviceArray when they are returned by our pmapped `update()` (see further down).

We do the same to the data:

###Code
def split(arr):
  """Splits the first axis of `arr` evenly across the number of devices."""
  return arr.reshape(n_devices, arr.shape[0] // n_devices, *arr.shape[1:])

# Reshape xs and ys for the pmapped `update()`.
x_split = split(xs)
y_split = split(ys)

type(x_split)

###Output _____no_output_____

###Markdown The data is just a reshaped vanilla NumPy array. Hence, it cannot be anywhere but on the host, as NumPy runs on CPU only. Since we never modify it, it will get sent to the device at each `update` call, like in a real pipeline where data is typically streamed from CPU to the device at each step.

###Code
def type_after_update(name, obj):
  print(f"after first `update()`, `{name}` is a", type(obj))

# Actual training loop.
for i in range(1000):

  # This is where the params and data get communicated to devices:
  replicated_params, loss = update(replicated_params, x_split, y_split)

  # The returned `replicated_params` and `loss` are now both ShardedDeviceArrays,
  # indicating that they're on the devices.
  # `x_split`, of course, remains a NumPy array on the host.
  if i == 0:
    type_after_update('replicated_params.weight', replicated_params.weight)
    type_after_update('loss', loss)
    type_after_update('x_split', x_split)

  if i % 100 == 0:
    # Note that loss is actually an array of shape [num_devices], with identical
    # entries, because each device returns its copy of the loss.
    # So, we take the first element to print it.
    print(f"Step {i:3d}, loss: {loss[0]:.3f}")


# Plot results.

# Like the loss, the leaves of params have an extra leading dimension,
# so we take the params from the first device.
params = jax.device_get(jax.tree_map(lambda x: x[0], replicated_params))

import matplotlib.pyplot as plt
plt.scatter(xs, ys)
plt.plot(xs, params.weight * xs + params.bias, c='red', label='Model Prediction')
plt.legend()
plt.show()

###Output _____no_output_____

###Markdown Aside: hosts and devices in JAX

When running on TPU, the idea of a 'host' becomes important. A host is the CPU that manages several devices. A single host can only manage so many devices (usually 8), so when running very large parallel programs, multiple hosts are needed, and some finesse is required to manage them.

###Code
jax.devices()

###Output _____no_output_____
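One hedged footnote-style sketch (not in the tutorial) that makes the host/device distinction visible in code: JAX exposes both a per-host and a global device count, and on a single-host Colab TPU the two typically agree.

###Code
# Sketch only: per-host vs. global device counts.
print(jax.local_device_count())  # devices managed by this host
print(jax.device_count())        # devices across all hosts in the job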
nsfw-training_built_in_aws.ipynb
###Markdown This notebook contains code for tuning the hyperparameters and then training the model.

###Code
import sagemaker
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri

role = get_execution_role()
sess = sagemaker.Session()
training_image = get_image_uri(sess.boto_region_name, 'image-classification', repo_version="latest")

###Output _____no_output_____

###Markdown We need training and validation data in RecordIO format. Notice that s3train_path and s3validation_path contain the locations of the training and validation data in RecordIO format.

###Code
s3train_path = 's3://project-completion-udacity/nsfw_dataset/training'
s3validation_path = 's3://project-completion-udacity/nsfw_dataset/validation'

train_data = sagemaker.session.s3_input(
    s3train_path,
    distribution='FullyReplicated',
    content_type='application/x-recordio',
    s3_data_type='S3Prefix'
)

validation_data = sagemaker.session.s3_input(
    s3validation_path,
    distribution='FullyReplicated',
    content_type='application/x-recordio',
    s3_data_type='S3Prefix'
)

data_channels = {'train': train_data, 'validation': validation_data}

###Output _____no_output_____

###Markdown Specifying the instance for training.

###Code
bucket = 'project-completion-udacity'
dataset_name = 'nsfw_dataset'
s3_output_location = 's3://{}/{}/output'.format(bucket, dataset_name)

image_classifier = sagemaker.estimator.Estimator(
    training_image,
    role,
    train_instance_count=1,
    train_instance_type='ml.p2.xlarge',
    output_path=s3_output_location,
    sagemaker_session=sess
)

num_classes = 5
num_training_samples = ! cat nsfw_dataset_train.lst | wc -l
num_training_samples = int(num_training_samples[0])
print(num_training_samples, num_classes)

###Output 126254 5

###Markdown Declaring base hyperparameters that we don't wish to tune.

###Code
# Learn more about the Sagemaker built-in Image Classifier hyperparameters here:
# https://docs.aws.amazon.com/sagemaker/latest/dg/IC-Hyperparameter.html

# These hyperparameters we won't want to change, as they define things like
# the size of the images we'll be sending for input, the number of training classes we have, etc.
base_hyperparameters = dict(
    use_pretrained_model=1,
    image_shape='3,224,224',
    num_classes=num_classes,
    num_training_samples=num_training_samples,
    augmentation_type='crop_color_transform',
    epochs=1
)

# These are hyperparameters we may want to tune, as they can affect the model training success:
hyperparameters = {
    **base_hyperparameters,
    **dict(
        learning_rate=0.001,
        mini_batch_size=5,
    )
}

image_classifier.set_hyperparameters(**hyperparameters)
# hyperparameters

###Output _____no_output_____

###Markdown Declaring the hyperparameters we wish to tune, and then creating the hyperparameter tuning job.

###Code
from sagemaker.tuner import HyperparameterTuner, IntegerParameter, CategoricalParameter, ContinuousParameter

hyperparameter_ranges = {
    'optimizer': CategoricalParameter(['nag', 'adam']),
    'learning_rate': ContinuousParameter(0.0001, 0.01),
    'mini_batch_size': IntegerParameter(15, 32),
}

objective_metric_name = 'validation:accuracy'

tuner = HyperparameterTuner(image_classifier,
                            objective_metric_name,
                            hyperparameter_ranges,
                            max_jobs=5,
                            max_parallel_jobs=1)

tuner.fit(inputs=data_channels, logs=True, include_cls_metadata=False)

# best_image_classifier = sagemaker.estimator.Estimator.attach(tuner.best_training_job())

# %%time
# import time
# now = str(int(time.time()))
# training_job_name = 'IC-' + dataset_name.replace('_', '-') + '-' + now
# image_classifier.fit(inputs=data_channels, job_name=training_job_name, logs=True)
# job = image_classifier.latest_training_job
# model_path = f"{base_dir}/{job.name}"
# print(f"\n\n Finished training! The model is available for download at: {image_classifier.output_path}/{job.name}/output/model.tar.gz")

###Output _____no_output_____
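Once the tuning job finishes, a natural next step is to attach the best training job and serve it. This is a hedged sketch, not something run in this notebook; in particular, the endpoint instance type `ml.m4.xlarge` is an assumption rather than a value taken from the project.

###Code
# Sketch only (assumes tuner.fit above has completed):
best_job = tuner.best_training_job()
best_estimator = sagemaker.estimator.Estimator.attach(best_job)
predictor = best_estimator.deploy(initial_instance_count=1,
                                  instance_type='ml.m4.xlarge')  # assumed instance type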
data_processing/make_read_counts/get_rsem.ipynb
###Markdown This code reads genes.results files from RSEM and extracts information (TPM, counts and effective length). The directory it expects, which is not included in this repository, has the following format:

ChenRSEM/CELL_ID/rsem_output/rsem_output.genes.results

Where CELL_ID corresponds to the id of an individual cell.

###Code
import os
import pandas as pd

rsem_dir = 'ChenRSEM/'  # location of RSEM output files
samples = os.listdir(rsem_dir)

tpm_tab = pd.DataFrame()
rcount_tab = pd.DataFrame()

# Collect per-cell TPM and expected counts from each genes.results file:
for sample in samples:
    rsem_tab = pd.read_csv(rsem_dir + sample + '/rsem_output/rsem_output.genes.results', index_col=0, sep='\t')
    tpm_tab[sample] = rsem_tab.TPM
    tpm_tab.index = rsem_tab.index
    rcount_tab[sample] = rsem_tab.expected_count
    rcount_tab.index = rsem_tab.index

#tpm_tab.index = [x.split('.')[0] for x in tpm_tab.index]
#rcount_tab.index = [x.split('.')[0] for x in rcount_tab.index]

tpm_tab = tpm_tab[sorted(tpm_tab.columns)]
rcount_tab = rcount_tab[sorted(rcount_tab.columns)]
tpm_tab = tpm_tab.sort_index()
rcount_tab = rcount_tab.sort_index()
# Strip Ensembl version suffixes from the gene ids:
tpm_tab.index = [x.split('.')[0] for x in tpm_tab.index]
rcount_tab.index = [x.split('.')[0] for x in rcount_tab.index]

tpm_tab.to_csv('chen_rsem_tpm.tab', sep='\t', header=True, index=True)
rcount_tab.to_csv('chen_rsem_count.tab', sep='\t', header=True, index=True)

# Map Ensembl gene ids to MGI gene symbols for the count table:
table = rcount_tab
mm10_mart = pd.read_csv('mm10.ensembl_gene_symbols.tab', sep='\t', index_col=0)
mm10_clean = mm10_mart.drop_duplicates()
mm10_clean = mm10_clean.groupby(mm10_clean.index).first()
good_genes = [x for x in table.index if x in mm10_clean.index]
table_filtered = table.loc[good_genes]
table_filtered.index = mm10_clean.loc[good_genes].mgi_symbol
table_filtered.to_csv('chen.rsemCounts.gene_symbols.tab', sep='\t', header=True, index=True)

# Extract effective lengths per gene and cell:
reads_dir = rsem_dir  # assumed: same RSEM output directory
table = pd.DataFrame()
for sample in samples:
    tabla = pd.read_csv(os.path.join(reads_dir, sample, 'rsem_output/rsem_output.genes.results'), sep='\t', index_col=0)
    table[sample] = tabla['effective_length']
table.index = [x.split('.')[0] for x in table.index]

mm10_mart = pd.read_csv('Gencode/mm10.ensembl_gene_symbols.tab', sep='\t', index_col=0)
mm10_clean = mm10_mart.drop_duplicates()
mm10_clean = mm10_clean.groupby(mm10_clean.index).first()
good_genes = [x for x in table.index if x in mm10_clean.index]
table_filtered = table.loc[good_genes]
table_filtered.index = mm10_clean.loc[good_genes].mgi_symbol
table_filtered.to_csv('chen.effective_length.gene_symbols.tab', sep='\t', header=True, index=True)

# Repeat the symbol mapping for the TPM table:
table = pd.read_csv('chen_rsem_tpm.tab', sep='\t', index_col=0)
mm10_mart = pd.read_csv('mm10.ensembl_gene_symbols.tab', sep='\t', index_col=0)
mm10_clean = mm10_mart.drop_duplicates()
mm10_clean = mm10_clean.groupby(mm10_clean.index).first()
good_genes = [x for x in table.index if x in mm10_clean.index]
table_filtered = table.loc[good_genes]
table_filtered.index = mm10_clean.loc[good_genes].mgi_symbol
table_filtered.to_csv('chen.tpm.gene_symbols.tab', sep='\t', header=True, index=True)

###Output _____no_output_____
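For orientation, the three extracted quantities are related: RSEM's TPM for a gene is its expected count per effective base, rescaled so each cell sums to one million. The following is only a hedged consistency-check sketch; `counts` and `eff_len` are assumed stand-ins for the expected-count and effective-length tables built above, aligned on the same genes and cells.

###Code
# Sketch only: recompute TPM from counts and effective lengths.
rate = counts / eff_len.where(eff_len > 0)   # reads per effective base; zero lengths become NaN
tpm_check = rate / rate.sum(axis=0) * 1e6    # rescale each cell (column) to sum to 1e6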
src/main/python/analysis_open_data_price.ipynb
###Markdown **For the target, restrict to the 'retail price' (소비자가격) class**

###Code
retail_df = origin_p_df[origin_p_df['class']=='소비자가격'].drop('class', axis=1)
retail_df.info()

###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 81737 entries, 0 to 97408
Data columns (total 6 columns):
 #   Column              Non-Null Count  Dtype
---  ------              --------------  -----
 0   date                81737 non-null  datetime64[ns]
 1   region              81737 non-null  object
 2   unit_name           81737 non-null  object
 3   grade               81737 non-null  object
 4   standard_item_name  81737 non-null  object
 5   price               81737 non-null  int32
dtypes: datetime64[ns](1), int32(1), object(4)
memory usage: 4.1+ MB

###Markdown Null value count

###Code
retail_df.isnull().sum()
retail_df.head()

def get_unit(unit_name):
    # TODO: handle unsupported unit_name values
    return {
        '20KG': 200,
        '1.2KG': 12,
        '8KG': 80,
        '1KG': 10,
        '1KG(단)': 10,
        '1KG(1단)': 10,
        '500G': 5,
        '200G': 2,
        '100G': 1,
        '30개': 30,
        '600G': 6,
        '5마리': 5,
        '10마리': 10,
        '2마리': 2,
        '1마리': 1,
        '5KG': 50,
        '2KG': 20,
        '10개': 10,
        '1개': 1,
        '1L': 10,
        '1속': 1,
        '1포기': 1,
    }.get(unit_name, 1)

def by_unit(df: pd.DataFrame):
    """
    transform unit
    :return: transformed pd DataFrame
    """
    return df.assign(
        조사단위명=lambda r: r.조사단위명.map(lambda x: get_unit(x))
    ).assign(
        당일조사가격=lambda x: x.당일조사가격 / x.조사단위명
    ).drop("조사단위명", axis=1)

retail_converted_df = by_unit(filtered_df)
retail_converted_df.info()

###Output _____no_output_____

###Markdown Distribution

Load the cleaned data.

###Code
from util.build_dataset import build_master, build_origin_price, build_process_price

clean_p, c_key = build_origin_price(bucket_name="production-bobsim", date="201908", prefix="clean")
process_p, p_key = build_process_price(bucket_name="production-bobsim", date="201908")

from util.visualize import draw_hist
from util.s3_manager.manage import S3Manager
import numpy as np

draw_hist(np.log1p(clean_p["price"].rename("log transformed price")))
S3Manager("production-bobsim").save_plt_to_png(key="food_material_price_predict_model/image/log_price_hist.png")

price = clean_p["price"]
stdized_price = process_p["price"].rename("stdized_price")
log_transformed_price = np.log1p(clean_p["price"].rename("log_transformed_price"))

import pandas as pd
from scipy.stats import skew

price_df = pd.concat([price, stdized_price, log_transformed_price], axis=1)
print(price_df.columns)
pd.Series(skew(price_df), index=price_df.columns).rename("skew").to_frame().T

###Output _____no_output_____

###Markdown Categorical

###Code
from util.geography import compose_geography

composed = compose_geography(clean_p["region"])
composed

(retail_df["region"].unique()).size
(retail_df["region"].value_counts()/retail_df["region"].value_counts().sum()*100).rename("%").to_frame().sort_values(by="%")

import pandas as pd
pd.set_option('display.max_rows', 30)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

print(440/retail_df["standard_item_name"].value_counts().sum())
df = (retail_df["standard_item_name"].value_counts()/retail_df["standard_item_name"].value_counts().sum()*100).rename("%").to_frame().sort_values(by="%")
high_list = df.sort_values(by='%', ascending=False).head(49).index
high_list

filter_df = retail_df[~retail_df["standard_item_name"].isin(high_list)]
filter_df

filter_series = retail_df[~retail_df["standard_item_name"].isin(high_list)]['standard_item_name']
drop_dup = filter_series.drop_duplicates()
drop_list = drop_dup.to_list()
drop_list

replace_df = retail_df.replace(drop_list, '그외')  # '그외' means "others"
replace_df

a = list(retail_df["standard_item_name"].unique())

def filter_sparse(df: pd.DataFrame, column: str, sparse_list: list, to_word: str = "others"):
    filter_list = list(set(list(df[column].unique())) - set(sparse_list))
    return df.replace(filter_list, to_word)

a = filter_sparse(retail_df, column="standard_item_name", sparse_list=high_list)
a

sim = clean_p["standard_item_name"].value_counts()
sparse_index = sim[sim < 440].index
filtered = clean_p["standard_item_name"].apply(lambda x: "그외" if x in sparse_index else x)
filtered
(filtered.value_counts()/filtered.value_counts().sum()*100).rename("%").to_frame().sort_values(by="%")

from util.visualize import draw_hist
import seaborn as sns
import matplotlib.pyplot as plt

sns.set(style='darkgrid')
sns.countplot(x='region', data=clean_p)
plt.show()

draw_hist(composed.value_counts())

###Output _____no_output_____

###Markdown Compare: numeric values (normal vs. log1p), based on skew values

###Code
origin_df = pd.read_csv('https://production-bobsim.s3.ap-northeast-2.amazonaws.com/public_data/public_price/origin/csv/201908.csv', encoding='euc-kr')
columns = [
    "조사일자", "조사구분명", "표준품목명", "조사가격품목명", "표준품종명",
    "조사가격품종명", "조사등급명", "조사단위명", "당일조사가격", "조사지역명"
]
filtered_df = origin_df[columns]

###Output _____no_output_____

###Markdown Check numeric type

###Code
filtered_df.info()

###Output _____no_output_____

###Markdown Histogram using distplot

###Code
# see the distribution of the numeric column
plt.title('price Histogram')
sns.distplot(filtered_df['당일조사가격'])

# check skew
from scipy.stats import skew

features_index = filtered_df.dtypes[filtered_df.dtypes != 'object'].index
# Indexing the DataFrame with a list of column names returns those columns; apply skew() via a lambda.
skew_features = filtered_df[features_index].apply(lambda x: skew(x))
# Keep only the columns whose skew is greater than 1.
skew_features_top = skew_features[skew_features > 1]
print(skew_features_top.sort_values(ascending=False))

###Output _____no_output_____

###Markdown Log transformation

###Code
filtered_df[skew_features_top.index] = np.log1p(filtered_df[skew_features_top.index])

###Output _____no_output_____

###Markdown Check the distribution (the reason for this decision)

###Code
# see the distribution of the numeric column
plt.title('price Histogram')
sns.distplot(filtered_df['당일조사가격'])
skew(filtered_df['당일조사가격'])

###Output _____no_output_____

###Markdown Extract year, month, day, and hour from the datetime type

###Code
filtered_df['조사일자'] = filtered_df.조사일자.apply(pd.to_datetime)
filtered_df['year'] = filtered_df.조사일자.apply(lambda x: x.year)
filtered_df['month'] = filtered_df.조사일자.apply(lambda x: x.month)
filtered_df['day'] = filtered_df.조사일자.apply(lambda x: x.day)
filtered_df.info()

temp1 = pd.read_csv('https://production-bobsim.s3.ap-northeast-2.amazonaws.com/public_data/public_price/origin/csv/201908.csv', encoding='euc-kr')
columns = [
    "조사일자", "조사구분명", "표준품목명", "조사가격품목명", "표준품종명",
    "조사가격품종명", "조사등급명", "조사단위명", "당일조사가격", "조사지역명"
]

def unit_fit(unit_name):
    unit = 0
    if unit_name == '20KG':
        unit = 200
    elif unit_name == '1.2KG':
        unit = 12
    elif unit_name == '8KG':
        unit = 80
    elif unit_name == '1KG':
        unit = 10
    elif unit_name == '1KG(단)':
        unit = 10
    elif unit_name == '1KG(1단)':
        unit = 10
    elif unit_name == '10개':
        unit = 10
    elif unit_name == '10마리':
        unit = 10
    elif unit_name == '1L':
        unit = 10
    elif unit_name == '100G':
        unit = 1
    elif unit_name == '1개':
        unit = 1
    elif unit_name == '1마리':
        unit = 1
    elif unit_name == '1속':
        unit = 1
    elif unit_name == '1포기':
        unit = 1
    elif unit_name == '200G':
        unit = 2
    elif unit_name == '2마리':
        unit = 2
    elif unit_name == '500G':
        unit = 5
    else:
        unit = 6
    return unit

temp2 = temp1[temp1['조사구분명'] == '소비자가격']
temp2 = temp2[['조사일자', '조사가격품목명', '표준품종명', '조사가격품종명', '표준품목명', '조사지역명', '조사단위명', '당일조사가격']]
temp2['price_unit'] = temp2['조사단위명'].apply(lambda x: unit_fit(x))
temp2['average_price'] = temp2['당일조사가격'] / temp2['price_unit']

# see the distribution of the numeric column
plt.title('price Histogram')
sns.distplot(temp2['average_price'])
skew(temp2['average_price'])  # retail-price skew

skew(temp1['당일조사가격'])  # all-prices skew

temp2['average_price'] = np.log1p(temp2['average_price'])
print('skew value after log transformation')
skew(temp2['average_price'])

plt.title('price Histogram with log1p')
sns.distplot(temp2['average_price'])

###Output _____no_output_____
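For reference on the metric driving these decisions, `scipy.stats.skew` computes the Fisher-Pearson coefficient of skewness (with its default biased normalization):

$$ g_1 = \frac{m_3}{m_2^{3/2}}, \qquad m_k = \frac{1}{n}\sum_{i=1}^{n}\left(x_i - \bar{x}\right)^k $$

A long right tail inflates the third central moment $m_3$, which is why applying `np.log1p` pulls the statistic back toward 0.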
site/en/r1/tutorials/eager/custom_training.ipynb
###Markdown Copyright 2018 The TensorFlow Authors.

###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

###Output _____no_output_____

###Markdown Custom training: basics

Run in Google Colab View source on GitHub

> Note: This is an archived TF1 notebook. These are configured to run in TF2's [compatibility mode](https://www.tensorflow.org/guide/migrate) but will run in TF1 as well. To use TF1 in Colab, use the [%tensorflow_version 1.x](https://colab.research.google.com/notebooks/tensorflow_version.ipynb) magic.

In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning. In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.

TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation.

Setup

###Code
import tensorflow.compat.v1 as tf

###Output _____no_output_____

###Markdown Variables

Tensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language:

###Code
# Using python state
x = tf.zeros([10, 10])
x += 2  # This is equivalent to x = x + 2, which does not mutate the original
        # value of x
print(x)

###Output _____no_output_____

###Markdown TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.

A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable.

###Code
v = tf.Variable(1.0)
assert v.numpy() == 1.0

# Re-assign the value
v.assign(3.0)
assert v.numpy() == 3.0

# Use `v` in a TensorFlow operation like tf.square() and reassign
v.assign(tf.square(v))
assert v.numpy() == 9.0

###Output _____no_output_____

###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient. Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable.
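As a hedged aside (not from the tutorial), the same family of in-place update ops named above also has an additive counterpart, which behaves like this minimal sketch:

###Code
# Sketch only: assign_add mutates the variable in place.
w = tf.Variable([1.0, 2.0])
w.assign_add([0.5, 0.5])
assert list(w.numpy()) == [1.5, 2.5]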
###Markdown Example: Fitting a linear model

Let's now put the few concepts we have so far --- `Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:

1. Define the model.
2. Define a loss function.
3. Obtain training data.
4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.

In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`.

Define the model

Let's define a simple class to encapsulate the variables and the computation.

###Code
class Model(object):
  def __init__(self):
    # Initialize variable to (5.0, 0.0)
    # In practice, these should be initialized to random values.
    self.W = tf.Variable(5.0)
    self.b = tf.Variable(0.0)

  def __call__(self, x):
    return self.W * x + self.b

model = Model()

assert model(3.0).numpy() == 15.0

###Output _____no_output_____

###Markdown Define a loss function

A loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss.

###Code
def loss(predicted_y, desired_y):
  return tf.reduce_mean(tf.square(predicted_y - desired_y))

###Output _____no_output_____

###Markdown Obtain training data

Let's synthesize the training data with some noise.

###Code
TRUE_W = 3.0
TRUE_b = 2.0
NUM_EXAMPLES = 1000

inputs = tf.random_normal(shape=[NUM_EXAMPLES])
noise = tf.random_normal(shape=[NUM_EXAMPLES])
outputs = inputs * TRUE_W + TRUE_b + noise

###Output _____no_output_____

###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue.

###Code
import matplotlib.pyplot as plt

plt.scatter(inputs, outputs, c='b')
plt.scatter(inputs, model(inputs), c='r')
plt.show()

print('Current loss: '),
print(loss(model(inputs), outputs).numpy())

###Output _____no_output_____

###Markdown Define a training loop

We now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves.

###Code
def train(model, inputs, outputs, learning_rate):
  with tf.GradientTape() as t:
    current_loss = loss(model(inputs), outputs)
  dW, db = t.gradient(current_loss, [model.W, model.b])
  model.W.assign_sub(learning_rate * dW)
  model.b.assign_sub(learning_rate * db)

###Output _____no_output_____

###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve.
###Code
model = Model()

# Collect the history of W-values and b-values to plot later
Ws, bs = [], []
epochs = range(10)
for epoch in epochs:
  Ws.append(model.W.numpy())
  bs.append(model.b.numpy())
  current_loss = loss(model(inputs), outputs)

  train(model, inputs, outputs, learning_rate=0.1)
  print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' %
        (epoch, Ws[-1], bs[-1], current_loss))

# Let's plot it all
plt.plot(epochs, Ws, 'r', epochs, bs, 'b')
plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--')
plt.legend(['W', 'b', 'true W', 'true_b'])
plt.show()

###Output _____no_output_____
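The training-loop text recommends the `tf.train.Optimizer` implementations over hand-written math. The following is a hedged sketch of the same step with a built-in optimizer: it reuses the tutorial's `loss` and `Model`, keeps the 0.1 learning rate, and everything else is an assumption rather than part of the tutorial.

###Code
# Sketch only: one SGD step using a built-in optimizer instead of assign_sub.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

def train_with_optimizer(model, inputs, outputs):
  with tf.GradientTape() as t:
    current_loss = loss(model(inputs), outputs)
  grads = t.gradient(current_loss, [model.W, model.b])
  optimizer.apply_gradients(zip(grads, [model.W, model.b]))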
###Code v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0 ###Output _____no_output_____ ###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation. ###Code class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0 ###Output _____no_output_____ ###Markdown Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss. ###Code def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y)) ###Output _____no_output_____ ###Markdown Obtain training dataLet's synthesize the training data with some noise. ###Code TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise ###Output _____no_output_____ ###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue. ###Code import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy()) ###Output _____no_output_____ ###Markdown Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves. 
###Code def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db) ###Output _____no_output_____ ###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve. ###Code model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show() ###Output _____no_output_____ ###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Custom training: basics Run in Google Colab View source on GitHub In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation. Setup ###Code from __future__ import absolute_import, division, print_function, unicode_literals try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow.compat.v1 as tf ###Output _____no_output_____ ###Markdown VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language: ###Code # Using python state x = tf.zeros([10, 10]) x += 2 # This is equivalent to x = x + 2, which does not mutate the original # value of x print(x) ###Output _____no_output_____ ###Markdown TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. 
To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable. ###Code v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0 ###Output _____no_output_____ ###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation. ###Code class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0 ###Output _____no_output_____ ###Markdown Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss. ###Code def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y)) ###Output _____no_output_____ ###Markdown Obtain training dataLet's synthesize the training data with some noise. ###Code TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise ###Output _____no_output_____ ###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue. ###Code import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy()) ###Output _____no_output_____ ###Markdown Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves. 
###Code def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db) ###Output _____no_output_____ ###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve. ###Code model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show() ###Output _____no_output_____ ###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Custom training: basics Run in Google Colab View source on GitHub In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation. Setup ###Code from __future__ import absolute_import, division, print_function, unicode_literals try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow.compat.v1 as tf ###Output _____no_output_____ ###Markdown VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language: ###Code # Using python state x = tf.zeros([10, 10]) x += 2 # This is equivalent to x = x + 2, which does not mutate the original # value of x print(x) ###Output _____no_output_____ ###Markdown TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. 
To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable. ###Code v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0 ###Output _____no_output_____ ###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation. ###Code class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0 ###Output _____no_output_____ ###Markdown Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss. ###Code def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y)) ###Output _____no_output_____ ###Markdown Obtain training dataLet's synthesize the training data with some noise. ###Code TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise ###Output _____no_output_____ ###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue. ###Code import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy()) ###Output _____no_output_____ ###Markdown Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves. 
###Code def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db) ###Output _____no_output_____ ###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve. ###Code model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show() ###Output _____no_output_____ ###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Custom training: basics Run in Google Colab View source on GitHub > Note: This is an archived TF1 notebook. These are configuredto run in TF2's [compatbility mode](https://www.tensorflow.org/guide/migrate)but will run in TF1 as well. To use TF1 in Colab, use the[%tensorflow_version 1.x](https://colab.research.google.com/notebooks/tensorflow_version.ipynb)magic. In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation. Setup ###Code import tensorflow.compat.v1 as tf ###Output _____no_output_____ ###Markdown VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language: ###Code # Using python state x = tf.zeros([10, 10]) x += 2 # This is equivalent to x = x + 2, which does not mutate the original # value of x print(x) ###Output _____no_output_____ ###Markdown TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. 
To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable. ###Code v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0 ###Output _____no_output_____ ###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation. ###Code class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0 ###Output _____no_output_____ ###Markdown Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss. ###Code def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y)) ###Output _____no_output_____ ###Markdown Obtain training dataLet's synthesize the training data with some noise. ###Code TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise ###Output _____no_output_____ ###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue. ###Code import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy()) ###Output _____no_output_____ ###Markdown Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves. 
###Code def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db) ###Output _____no_output_____ ###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve. ###Code model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show() ###Output _____no_output_____ ###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Custom training: basics Run in Google Colab View source on GitHub > Note: This is an archived TF1 notebook. These are configuredto run in TF2's [compatbility mode](https://www.tensorflow.org/guide/migrate)but will run in TF1 as well. To use TF1 in Colab, use the[%tensorflow_version 1.x](https://colab.research.google.com/notebooks/tensorflow_version.ipynb)magic. In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation. Setup ###Code import tensorflow.compat.v1 as tf ###Output _____no_output_____ ###Markdown VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language: ###Code # Using python state x = tf.zeros([10, 10]) x += 2 # This is equivalent to x = x + 2, which does not mutate the original # value of x print(x) ###Output _____no_output_____ ###Markdown TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. 
To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable. ###Code v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0 ###Output _____no_output_____ ###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation. ###Code class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0 ###Output _____no_output_____ ###Markdown Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss. ###Code def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y)) ###Output _____no_output_____ ###Markdown Obtain training dataLet's synthesize the training data with some noise. ###Code TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise ###Output _____no_output_____ ###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue. ###Code import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy()) ###Output _____no_output_____ ###Markdown Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves. 
###Code def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db) ###Output _____no_output_____ ###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve. ###Code model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show() ###Output _____no_output_____ ###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Custom training: basics Run in Google Colab View source on GitHub In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation. Setup ###Code from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf tf.enable_eager_execution() ###Output _____no_output_____ ###Markdown VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language: ###Code # Using python state x = tf.zeros([10, 10]) x += 2 # This is equivalent to x = x + 2, which does not mutate the original # value of x print(x) ###Output _____no_output_____ ###Markdown TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. 
There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable. ###Code v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0 ###Output _____no_output_____ ###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation. ###Code class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0 ###Output _____no_output_____ ###Markdown Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss. ###Code def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y)) ###Output _____no_output_____ ###Markdown Obtain training dataLet's synthesize the training data with some noise. ###Code TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise ###Output _____no_output_____ ###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue. ###Code import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy()) ###Output _____no_output_____ ###Markdown Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves. 
###Code def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db) ###Output _____no_output_____ ###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve. ###Code model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show() ###Output _____no_output_____ ###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Custom training: basics Run in Google Colab View source on GitHub In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation. Setup ###Code from __future__ import absolute_import, division, print_function, unicode_literals try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow.compat.v1 as tf ###Output _____no_output_____ ###Markdown VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language: ###Code # Using python state x = tf.zeros([10, 10]) x += 2 # This is equivalent to x = x + 2, which does not mutate the original # value of x print(x) ###Output _____no_output_____ ###Markdown TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. 
To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable. ###Code v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0 ###Output _____no_output_____ ###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation. ###Code class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0 ###Output _____no_output_____ ###Markdown Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss. ###Code def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y)) ###Output _____no_output_____ ###Markdown Obtain training dataLet's synthesize the training data with some noise. ###Code TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise ###Output _____no_output_____ ###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue. ###Code import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy()) ###Output _____no_output_____ ###Markdown Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves. 
###Code def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db) ###Output _____no_output_____ ###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve. ###Code model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show() ###Output _____no_output_____ ###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Custom training: basics Run in Google Colab View source on GitHub > Note: This is an archived TF1 notebook. These are configuredto run in TF2's [compatibility mode](https://www.tensorflow.org/guide/migrate)but will run in TF1 as well. To use TF1 in Colab, use the[%tensorflow_version 1.x](https://colab.research.google.com/notebooks/tensorflow_version.ipynb)magic. In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation. Setup ###Code import tensorflow.compat.v1 as tf ###Output _____no_output_____ ###Markdown VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language: ###Code # Using python state x = tf.zeros([10, 10]) x += 2 # This is equivalent to x = x + 2, which does not mutate the original # value of x print(x) ###Output _____no_output_____ ###Markdown TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. 
To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable. ###Code v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0 ###Output _____no_output_____ ###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation. ###Code class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0 ###Output _____no_output_____ ###Markdown Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss. ###Code def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y)) ###Output _____no_output_____ ###Markdown Obtain training dataLet's synthesize the training data with some noise. ###Code TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise ###Output _____no_output_____ ###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue. ###Code import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy()) ###Output _____no_output_____ ###Markdown Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves. 
###Code def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db) ###Output _____no_output_____ ###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve. ###Code model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show() ###Output _____no_output_____ ###Markdown Copyright 2018 The TensorFlow Authors. ###Code #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Custom training: basics Run in Google Colab View source on GitHub In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation. Setup ###Code import tensorflow.compat.v1 as tf ###Output _____no_output_____ ###Markdown VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language: ###Code # Using python state x = tf.zeros([10, 10]) x += 2 # This is equivalent to x = x + 2, which does not mutate the original # value of x print(x) ###Output _____no_output_____ ###Markdown TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable. 
###Code v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0 ###Output _____no_output_____ ###Markdown Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation. ###Code class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0 ###Output _____no_output_____ ###Markdown Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss. ###Code def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y)) ###Output _____no_output_____ ###Markdown Obtain training dataLet's synthesize the training data with some noise. ###Code TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise ###Output _____no_output_____ ###Markdown Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue. ###Code import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy()) ###Output _____no_output_____ ###Markdown Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves. 
###Code def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db) ###Output _____no_output_____ ###Markdown Finally, let's repeatedly run through the training data and see how `W` and `b` evolve. ###Code model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show() ###Output _____no_output_____
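###Markdown As the training-loop section above notes, `tf.train.Optimizer` implementations capture these update rules for you. For comparison, here is a minimal sketch of the same update step written with a built-in optimizer; it assumes the `model`, `loss`, `inputs`, and `outputs` defined above, and uses the v1-style optimizer that matches this notebook's `tensorflow.compat.v1` import. ###Code
# A sketch of the same gradient-descent step via a built-in optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

def train_step(model, inputs, outputs):
    with tf.GradientTape() as t:
        current_loss = loss(model(inputs), outputs)
    grads = t.gradient(current_loss, [model.W, model.b])
    # apply_gradients performs W -= learning_rate * dW and b -= learning_rate * db for us
    optimizer.apply_gradients(zip(grads, [model.W, model.b]))
###Output _____no_output_____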
GaussianElimination.ipynb
###Markdown Gaussian Elimination ###Code import numpy as np import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Assume we have a relationship between three variables x, y, and z that can be related using the following three equations:$$2x - 4y + z = -3$$$$-x + 2y -2z = -3$$$$x + y + z = 6$$We can then express these relationships in the matrix form:$$\begin{bmatrix}2 && -4 && 1 \\-1 && 2 && -2 \\1 && 1 && 1\end{bmatrix} \begin{bmatrix} x \\ y \\ z \end{bmatrix} =\begin{bmatrix} -3 \\ -3 \\ 6\end{bmatrix}$$Often, this relation is written in its more condensed short-hand form (known as an augmented matrix):$$\left[\begin{matrix}2 && -4 && 1 \\-1 && 2 && -2 \\1 && 1 && 1\end{matrix}\middle|\begin{matrix}-3 \\ -3 \\ 6\end{matrix}\right]$$ The idea of these problems is to solve for the x, y, and z values that satisfy the above conditions. When writing this process in a computer program, we need a very structured way of iterating and processing data. We'll want to organize our matrix in a way such that our program can solve every matrix in the same way. If we were computing this solution by hand, we'd know the following two matrices are identical statements (notice, just two rows are swapped):$$\left[\begin{matrix}1 && 0 && 0 \\0 && 0 && 1 \\0 && 1 && 0 \\\end{matrix}\middle|\begin{matrix}1 \\ 3 \\ 2\end{matrix}\right]=\left[\begin{matrix}1 && 0 && 0 \\0 && 1 && 0 \\0 && 0 && 1 \\\end{matrix}\middle|\begin{matrix}1 \\ 2 \\ 3\end{matrix}\right]$$ But, in order to create a strategic and methodical process for the computer, we'll have to organize the matrix such that our solution matrix will have 1's in the diagonal (also called the Identity Matrix). Here's how we can implement Gaussian elimination with backsubstitution on the computer: Part 1: Iterate through your matrix. For each row, do the following steps: Step 1: Partial Pivot Essentially, this just means we're going to include a process that will swap rows so that our final matrix will be the identity matrix. While you're iterating through the matrix, you'll need to compare the ith term of the ith row to the ith term in all the following rows. Swap the ith row with the row that has the largest ith element. ("Largest" in this case means farthest from 0, so you'll want to compare the absolute values). Step 2: Divide by the diagonal term Divide the whole row by its diagonal term (such that the diagonal term is a one). Step 3: Add multiples The goal in this step is to make the matrix upper triangular (every element below the diagonal is 0). Our first row currently has a 1 in the leftmost position. We want to zero out the leftmost position for all of the following rows. So for every row after the 1st row, you'll subtract some multiple of the first row. When you repeat all of these steps on the ith row, you'll do the same thing for all of the following rows. After you repeat all of the above steps on the entirety of the matrix, your matrix should be upper triangular with ones along the diagonal. The last step is to use backsubstitution to get the identity matrix. Part 2: Backsubstitution Now that the matrix is upper triangular, we'll want to add multiples of rows to manipulate it into the identity matrix. Our last row contains only one value; add multiples of this row to each of the preceding rows to zero out the position in this matrix for all but the last row. Move up to the second to last row, and repeat this process.
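For instance, on a small two-variable system that Part 1 has already reduced to upper-triangular form with ones on the diagonal, Part 2 amounts to a single row operation:$$\left[\begin{matrix}1 && 2 \\0 && 1\end{matrix}\middle|\begin{matrix}7 \\ 2\end{matrix}\right] \xrightarrow{\text{Row 1} - 2 \cdot \text{Row 2}} \left[\begin{matrix}1 && 0 \\0 && 1\end{matrix}\middle|\begin{matrix}3 \\ 2\end{matrix}\right]$$so the solution is $y = 2$ and $x = 7 - 2 \cdot 2 = 3$.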
I recommend implementing the basics without the partial pivot, then incorporate the partial pivot after you have a solution that works for some matrices (I've included a collection of assert statements below that will pass without the partial pivot implemented). ###Code def solve_system(matrix): """ Solves a system of linear equations Parameters: matrix(N x N+1 numpy array): the augmented matrix to be solved Returns: (N x 1 numpy array): array of the solutions to the linear equations """ N = len(matrix) matrix = matrix.astype("float64") for i in range(N): # Partial pivot: find the row at or below row i with the largest absolute entry in column i maxarray = np.array(matrix[i]) j = i + 1 k = i while j < N: if abs(maxarray[i]) < abs(matrix[j][i]): maxarray = matrix[j] k = j j = j + 1 # Swap row i with the chosen pivot row savearray = np.array(matrix[i]) matrix[i] = maxarray matrix[k] = savearray # Divide by the diagonal term so the pivot becomes 1 matrix[i] = matrix[i]/matrix[i][i] # Subtract multiples of row i to zero out column i below the diagonal j = i + 1 while j < N: matrix[j] = matrix[j] - (matrix[i] * matrix[j][i]) j = j + 1 # Backsubstitution on the upper-triangular matrix x = N - 1 answer = np.zeros(N) while x > -1: y = N ans = matrix[x][y] while y > 0: ans = ans - (matrix[x][y-1] * answer[y-1]) y = y - 1 answer[x] = ans x = x - 1 return answer ###Output _____no_output_____ ###Markdown I've included a detailed list of tests below. Once your code is running correctly, all of the following tests will pass. ###Code test_a = np.array([[1, 0, 3], [0, 1, 2]]) test_b = np.array([[3, 1, 5], [2, 2, 6]]) test_c = np.array([[2,1,4,1,-4], [3,4,-1,-1,3], [1,-4,1,5,9], [2,-2,1,3,7]]) test_d = np.array([[0, 1, 2], [1, 0, 3]]) test_e = np.array([[2, -4, 1, -3], [-1, 2, -2, -3], [1, 1, 1, 6]]) test_f = np.array([[1,1,1,1,12], [2, -1, -1, 1, 4], [1, -2, 1, -2, -15], [3, 3, 2, -1, 15]]) test_g = np.array([[0, 0, 1, 0, 10], [0, 1, 0, 0, 3], [1, 0, 0, 0, 4], [0, 0, 0, 1, 2]]) # These will pass without the pivot functionality assert(np.all(np.abs(solve_system(test_a) - np.array([3, 2])) < 0.0001)) assert(np.all(np.abs(solve_system(test_b) - np.array([1, 2])) < 0.0001)) assert(np.all(np.abs(solve_system(test_c) - np.array([2, -1, 2, 1])) < 0.0001)) assert(np.all(np.abs(solve_system(test_f) - np.array([2, 4, 1, 5])) < 0.0001)) # These will NOT pass unless you have the partial pivot implemented correctly assert(np.all(np.abs(solve_system(test_d) - np.array([3, 2])) < 0.0001)) assert(np.all(np.abs(solve_system(test_e) - np.array([1, 2, 3])) < 0.0001)) assert(np.all(np.abs(solve_system(test_g) - np.array([4, 3, 10, 2])) < 0.0001)) ###Output _____no_output_____ ###Markdown Gaussian Elimination with Substeps in LaTeX This notebook is for demonstration purposes, in order to explain the Gauss algorithm.
We will solve a linear system of equations of the form: $$\mathbf{A} \cdot \vec{x} = \vec{b} \Longleftrightarrow \begin{bmatrix}a_{11} & \dots & a_{1n} \\a_{21} & \dots & a_{2n} \\\vdots & \ddots & \vdots \\a_{n1} & \dots & a_{nn}\end{bmatrix} \cdot\begin{bmatrix}x_{1}\\x_{2}\\\vdots\\x_{n}\end{bmatrix} =\begin{bmatrix}b_{1}\\b_{2}\\\vdots\\b_{n}\end{bmatrix}$$ Define $\mathbf{A}$ and $\vec{b}$: ###Code A = [[3, 2, -4, 5], [2, 3, 3, 2], [1, 6, 1, 5], [1, -3, 1, 7]] # Change this to fit your needs b = [3, 15, 14, 6] # The advantage is that this needs no packages and can be executed in a plain Python shell def linearsolver(A, b): n = len(A) M = A # note: M is just another name for A, so this function mutates the caller's lists (which is why A and b are redefined before later calls) i = 0 for x in M: x.append(b[i]) i += 1 for k in range(n): print('Iteration ', k, ':') for i in range(k, n): if abs(M[i][k]) > abs(M[k][k]): (M[k], M[i]) = (M[i], M[k]) else: pass # Show the matrix after swapping rows for row in M: print(row) print('') for j in range(k + 1, n): q = float(M[j][k]) / M[k][k] print('Row ', j + 1, '- (', q, ') * Row', 1 + k) for m in range(k, n + 1): M[j][m] -= q * M[k][m] # Show matrix after multiplying rows for row in M: print(row) print('') x = [0 for i in range(n)] x[n - 1] = float(M[n - 1][n]) / M[n - 1][n - 1] for i in range(n - 1, -1, -1): z = 0 for j in range(i + 1, n): z = z + float(M[i][j]) * x[j] x[i] = float(M[i][n] - z) / M[i][i] print(x) ###Output _____no_output_____
this is a Notebook and we can use latex, so we can do much better (with an increased effort): $\underline{\mathbf{Note:}}$ Somehow A and b need to be redefined, even so I thought I left them untouched from Cell 1 ###Code from IPython.display import Latex # in order to output as latex def convertToRoman(n): result = '' for (arabic, roman) in zip((1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1), 'M CM D CD C XC L XL X IX V IV I'.split()): result += n // arabic * roman n %= arabic return result def linearsolverWithLaTex(A, b): precision = 4 n = len(A) M = A latex = '$$\\mathbf{A} \\vec{x} = \\vec{b} \\Longleftrightarrow' # this string will contain all the formatted output in the end latex += '\\begin{bmatrix}' for row in M: for l in range(0, n): latex += ('{0:.' + str(precision) + 'f}').format(row[l]) \ + ' &' latex = ''.join(latex.split())[:-1] latex += '\\\\' latex += '\\end{bmatrix} \\cdot \\begin{bmatrix}' for g in range(1, n + 1): latex += 'x_{' + str(g) + '} \\\\' latex += '\\end{bmatrix} = \\begin{bmatrix}' for h in range(0, n): latex += ('{0:.' + str(precision) + 'f}').format(b[h]) + '\\\\' latex += \ '\\end{bmatrix}$$ $$\\rightarrow \\mathit{Swap\\; rows\\; if\\; needed\\; or\\; convenient\\;} \\mathbf{(Pivoting)}$$' conversion = '' i = 0 for x in M: x.append(b[i]) i += 1 for k in range(n): latex += '$\\underline{\\text{Iteration }\\;' + str(k) + ':}$' for g in range(0, k + 1): conversion += '\\\\' for i in range(k, n): if abs(M[i][k]) > abs(M[k][k]): (M[k], M[i]) = (M[i], M[k]) else: pass # Show the matrix after swapping rows latex += '$$\\left( \\left. \\begin{matrix}' for row in M: for l in range(0, n): latex += ('{0:.' + str(precision) + 'f}' ).format(row[l]) + ' &' latex = ''.join(latex.split())[:-1] latex += '\\\\' latex += '\\end{matrix} \\right| \\begin{matrix}' for row in M: latex += ('{0:.' + str(precision) + 'f}').format(row[n]) \ + ' &' latex = ''.join(latex.split())[:-1] latex += '\\\\' latex += '\\end{matrix} \\right)' for j in range(k + 1, n): cache = '' q = float(M[j][k]) / M[k][k] conversion += '\\mathrm{' + convertToRoman(j + 1) + '}- (' \ + ('{0:.' + str(precision) + 'f}').format(q) \ + ") \\cdot \mathrm{" + convertToRoman(1 + k) + '}\\\\' for m in range(k, n + 1): M[j][m] -= q * M[k][m] # Show matrix after multiplying rows cache += '$$\\left( \\left. \\begin{matrix}' for row in M: for l in range(0, n): cache += ('{0:.' + str(precision) + 'f}' ).format(row[l]) + ' &' cache = ''.join(cache.split())[:-1] cache += '\\\\' cache += '\\end{matrix} \\right| \\begin{matrix}' for row in M: cache += ('{0:.' + str(precision) + 'f}').format(row[n]) \ + ' &' cache = ''.join(cache.split())[:-1] cache += '\\\\' latex += '\\begin{matrix}' + conversion \ + '\\end{matrix}$$ $$ \\mathrm{} $$' + cache \ + '\\end{matrix} \\right) \\mathit{Swap\\; rows\\; if\\; needed\\; or\\; convenient\\;} \\mathbf{(Pivoting)}$$ $$ \\mathrm{} $$' conversion = '' cache = '' x = [0 for i in range(n)] x[n - 1] = float(M[n - 1][n]) / M[n - 1][n - 1] for i in range(n - 1, -1, -1): z = 0 for j in range(i + 1, n): z = z + float(M[i][j]) * x[j] x[i] = float(M[i][n] - z) / M[i][i] latex += '$$ \\Longrightarrow \\vec{x} \\approx \\begin{bmatrix}' for h in range(0, n): latex += ('{0:.' 
+ str(precision) + 'f}').format(x[h]) + '\\\\' latex += '\\end{bmatrix}$$' return latex A = [[3, 2, -4, 5], [2, 3, 3, 2], [1, 6, 1, 5], [1, -3, 1, 7]] # Change this, to fit your needs b = [3, 15, 14, 6] Latex(linearsolverWithLaTex(A,b)) #get the string from the executed function and parse it ###Output _____no_output_____ ###Markdown The nice thing is, the unformatted code can easily be copy-pasted into latex. In order to get it as a String, simply execute the following code: ###Code A = [[3, 2, -4, 5], [2, 3, 3, 2], [1, 6, 1, 5], [1, -3, 1, 7]] # Change this, to fit your needs b = [3, 15, 14, 6] print(linearsolverWithLaTex(A,b)) #get an unformatted String, to paste into latex ###Output $$\mathbf{A}\vec{x}=\vec{b}\Longleftrightarrow\begin{bmatrix}3.0000&2.0000&-4.0000&5.0000\\2.0000&3.0000&3.0000&2.0000\\1.0000&6.0000&1.0000&5.0000\\1.0000&-3.0000&1.0000&7.0000\\\end{bmatrix}\cdot\begin{bmatrix}x_{1}\\x_{2}\\x_{3}\\x_{4}\\\end{bmatrix}=\begin{bmatrix}3.0000\\15.0000\\14.0000\\6.0000\\\end{bmatrix}$$$$\rightarrow\mathit{Swap\;rows\;if\;needed\;or\;convenient\;}\mathbf{(Pivoting)}$$$\underline{\text{Iteration}\;0:}$$$\left(\left.\begin{matrix}3.0000&2.0000&-4.0000&5.0000\\2.0000&3.0000&3.0000&2.0000\\1.0000&6.0000&1.0000&5.0000\\1.0000&-3.0000&1.0000&7.0000\\\end{matrix}\right|\begin{matrix}3.0000\\15.0000\\14.0000\\6.0000\\\end{matrix}\right)\begin{matrix}\\\mathrm{II}-(0.6667)\cdot\mathrm{I}\\\mathrm{III}-(0.3333)\cdot\mathrm{I}\\\mathrm{IV}-(0.3333)\cdot\mathrm{I}\\\end{matrix}$$$$\mathrm{}$$$$\left(\left.\begin{matrix}3.0000&2.0000&-4.0000&5.0000\\0.0000&1.6667&5.6667&-1.3333\\0.0000&5.3333&2.3333&3.3333\\0.0000&-3.6667&2.3333&5.3333\\\end{matrix}\right|\begin{matrix}3.0000\\13.0000\\13.0000\\5.0000\\\end{matrix}\right)\mathit{Swap\;rows\;if\;needed\;or\;convenient\;}\mathbf{(Pivoting)}$$$$\mathrm{}$$$\underline{\text{Iteration}\;1:}$$$\left(\left.\begin{matrix}3.0000&2.0000&-4.0000&5.0000\\0.0000&5.3333&2.3333&3.3333\\0.0000&1.6667&5.6667&-1.3333\\0.0000&-3.6667&2.3333&5.3333\\\end{matrix}\right|\begin{matrix}3.0000\\13.0000\\13.0000\\5.0000\\\end{matrix}\right)\begin{matrix}\\\\\mathrm{III}-(0.3125)\cdot\mathrm{II}\\\mathrm{IV}-(-0.6875)\cdot\mathrm{II}\\\end{matrix}$$$$\mathrm{}$$$$\left(\left.\begin{matrix}3.0000&2.0000&-4.0000&5.0000\\0.0000&5.3333&2.3333&3.3333\\0.0000&-0.0000&4.9375&-2.3750\\0.0000&0.0000&3.9375&7.6250\\\end{matrix}\right|\begin{matrix}3.0000\\13.0000\\8.9375\\13.9375\\\end{matrix}\right)\mathit{Swap\;rows\;if\;needed\;or\;convenient\;}\mathbf{(Pivoting)}$$$$\mathrm{}$$$\underline{\text{Iteration}\;2:}$$$\left(\left.\begin{matrix}3.0000&2.0000&-4.0000&5.0000\\0.0000&5.3333&2.3333&3.3333\\0.0000&-0.0000&4.9375&-2.3750\\0.0000&0.0000&3.9375&7.6250\\\end{matrix}\right|\begin{matrix}3.0000\\13.0000\\8.9375\\13.9375\\\end{matrix}\right)\begin{matrix}\\\\\\\mathrm{IV}-(0.7975)\cdot\mathrm{III}\\\end{matrix}$$$$\mathrm{}$$$$\left(\left.\begin{matrix}3.0000&2.0000&-4.0000&5.0000\\0.0000&5.3333&2.3333&3.3333\\0.0000&-0.0000&4.9375&-2.3750\\0.0000&0.0000&0.0000&9.5190\\\end{matrix}\right|\begin{matrix}3.0000\\13.0000\\8.9375\\6.8101\\\end{matrix}\right)\mathit{Swap\;rows\;if\;needed\;or\;convenient\;}\mathbf{(Pivoting)}$$$$\mathrm{}$$$\underline{\text{Iteration}\;3:}$$$\left(\left.\begin{matrix}3.0000&2.0000&-4.0000&5.0000\\0.0000&5.3333&2.3333&3.3333\\0.0000&-0.0000&4.9375&-2.3750\\0.0000&0.0000&0.0000&9.5190\\\end{matrix}\right|\begin{matrix}3.0000\\13.0000\\8.9375\\6.8101\\\end{matrix} \right)\begin{matrix}\\\\\\\\\end{matrix}$$ $$ \mathrm{} 
$$$$\left(\left.\begin{matrix}3.0000&2.0000&-4.0000&5.0000\\0.0000&5.3333&2.3333&3.3333\\0.0000&-0.0000&4.9375&-2.3750\\0.0000&0.0000&0.0000&9.5190\\\end{matrix}\right|\begin{matrix}3.0000\\13.0000\\8.9375\\6.8101\\\end{matrix} \right) \mathit{Swap\; rows\; if\; needed\; or\; convenient\;} \mathbf{(Pivoting)}$$ $$ \mathrm{} $$$$ \Longrightarrow \vec{x} \approx \begin{bmatrix}1.9814\\1.0479\\2.1543\\0.7154\\\end{bmatrix}$$ ###Markdown Gaussian Elimination ###Code import numpy as np import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Assume we have a relationship betwen three variables x, y, and z, that can be related using the following three equations:$$2x - 4y + z = -3$$$$-x + 2y -2z = -3$$$$x + y + z = 6$$We can then express these relationships in the matrix form:$$\begin{bmatrix}2 && -4 && 1 \\-1 && 2 && -2 \\1 && 1 && 1\end{bmatrix} \begin{bmatrix} x \\ y \\ z \end{bmatrix} =\begin{bmatrix} -3 \\ -3 \\ 6\end{bmatrix}$$Often, this relation is written in it's more condensed short-hand form (known as an augmented matrix):$$\left[\begin{matrix}2 && -4 && 1 \\-1 && 2 && -2 \\1 && 1 && 1\end{matrix}\middle|\begin{matrix}-3 \\ -3 \\ 6\end{matrix}\right]$$ The idea of these problems, is to solve for the x, y and z values that satisfy the above conditions. When writing this process in a computer program, we need a very structured way of iterating and processing data. We'll want to organize our matrix in a way such that our program can solve every matrix in the same way. If we were computing this solution by hand, we'd know the following two matrices are identical statements (notice, just two rows are swapped):$$\left[\begin{matrix}1 && 0 && 0 \\0 && 0 && 1 \\0 && 1 && 0 \\\end{matrix}\middle|\begin{matrix}1 \\ 3 \\ 2\end{matrix}\right]=\left[\begin{matrix}1 && 0 && 0 \\0 && 1 && 0 \\0 && 0 && 1 \\\end{matrix}\middle|\begin{matrix}1 \\ 2 \\ 3\end{matrix}\right]$$ But, in order to create a strategic and methodically process for the computer, we'll have to organize the matrix such that our solution matrix will have 1's in the diagonal (also called the Identity Matrix). Here's how we can implement gaussian elimination with backsubstitution on the computer: Part 1: Iterate through your matrix. For each row, do the following steps: Step 1: Partial PivotEssentially, this just means we're going to include a process that will swap rows so that our final matrix will be the identity matrix.While you're iterating though the matrix, you'll need to compare the ith term of the ith row to the ith term in all the following rows. Swap the ith row with the row that has the largest ith element. ("Largest" in this case means farthest from 0, so you'll want to compare the absolute values). Step 2: Divide by the diagonal termDivide the whole row by it's diagonal term (such that the diagonal term is a one). Step 3: Add multiplesThe goal in this step is to make the matrix upper triangular (every element below the diagonal is 0).Our first row currently has a 1 in the leftmost position. We want to zero out the leftmost position for all of the following rows. So for every row after the 1st row, you'll subtract some multiple times the first row.When you repeat all of these steps on the ith row, you'll do the same thing for all of the following rows. After you repeat all of the above steps on the entirety of the matrix, your matrix should be upper triangular with ones along the diagonal. The last step is to use backsubstitution to get the identity matrix. 
Part 2: BacksubstitutionNow that the matrix is upper triangular, we'll want to add multiples of rows to manipulate it into the identity matrix. Our last row contains only one value, add multiples of this row to each of the preceding rows to zero out the position in this matrix for all but the last row. Move up to the second to last row, and repeat this process. I recommend implementing the basics without the partial pivot, then incorporate the partial pivot after you have a solution that works for some matrices (I've included a collection of assert statements below that will pass without the partial pivot implemented). ###Code def solve_system(matrix): """ Solves a system of linear equations Parameters: matrix(N x N+1 numpy array): the augmented matrix to be solved Returns: (N x 1 numpy array): array of the solutions to the linear equations """ N = len(matrix) matrix = matrix.astype("float64") for i in range(N): for j in range(i+1, N): if abs(matrix[i][i]) < abs(matrix[j][i]): for k in range(N+1): temp = matrix[i][k] matrix[i][k] = matrix[j][k] matrix[j][k] = temp matrix[i] = matrix[i]/matrix[i][i] for j in range(i+1, N): matrix[j] = matrix[j] - matrix[j][i]*matrix[i] # upper triangular code for i in range(N-1,-1,-1): for j in range(i-1, -1,-1): matrix[j] = matrix[j] - matrix[j][i]*matrix[i] solution = np.array([]) for i in range(N): solution = np.append(solution, matrix[i][N]) # print(matrix) return solution ###Output _____no_output_____ ###Markdown I've included a detailed list of tests below. Once your code is running correctly, all of the following tests will pass. ###Code test_a = np.array([[1, 0, 3], [0, 1, 2]]) test_b = np.array([[3, 1, 5], [2, 2, 6]]) test_c = np.array([[2,1,4,1,-4], [3,4,-1,-1,3], [1,-4,1,5,9], [2,-2,1,3,7]]) test_d = np.array([[0, 1, 2], [1, 0, 3]]) test_e = np.array([[2, -4, 1, -3], [-1, 2, -2, -3], [1, 1, 1, 6]]) test_f = np.array([[1,1,1,1,12], [2, -1, -1, 1, 4], [1, -2, 1, -2, -15], [3, 3, 2, -1, 15]]) test_g = np.array([[0, 0, 1, 0, 10], [0, 1, 0, 0, 3], [1, 0, 0, 0, 4], [0, 0, 0, 1, 2]]) # solve_system(test_c) # These will pass without the pivot functionality assert(np.all((solve_system(test_a) - np.array([3, 2]) < 0.0001))) assert(np.all((solve_system(test_b) - np.array([1, 2]) < 0.0001))) assert(np.all((solve_system(test_c) - np.array([2, -1, 2, 1]) < 0.0001))) assert(np.all((solve_system(test_f) - np.array([2, 4, 1, 5]) < 0.0001))) # These will NOT pass unless you have the partial pivot implemented correct assert(np.all((solve_system(test_d) - np.array([3, 2]) < 0.0001))) assert(np.all((solve_system(test_e) - np.array([1, 2, 3]) < 0.0001))) assert(np.all((solve_system(test_g) - np.array([4, 3, 10, 2]) < 0.0001))) ###Output _____no_output_____ ###Markdown Gaussian Elimination ###Code import numpy as np import matplotlib.pyplot as plt import math ###Output _____no_output_____ ###Markdown Assume we have a relationship betwen three variables x, y, and z, that can be related using the following three equations:$$2x - 4y + z = -3$$$$-x + 2y -2z = -3$$$$x + y + z = 6$$We can then express these relationships in the matrix form:$$\begin{bmatrix}2 && -4 && 1 \\-1 && 2 && -2 \\1 && 1 && 1\end{bmatrix} \begin{bmatrix} x \\ y \\ z \end{bmatrix} =\begin{bmatrix} -3 \\ -3 \\ 6\end{bmatrix}$$Often, this relation is written in it's more condensed short-hand form (known as an augmented matrix):$$\left[\begin{matrix}2 && -4 && 1 \\-1 && 2 && -2 \\1 && 1 && 1\end{matrix}\middle|\begin{matrix}-3 \\ -3 \\ 6\end{matrix}\right]$$ The idea of these problems, is to solve for the 
x, y, and z values that satisfy the above conditions. When writing this process in a computer program, we need a very structured way of iterating and processing data. We'll want to organize our matrix in a way such that our program can solve every matrix in the same way. If we were computing this solution by hand, we'd know the following two matrices are identical statements (notice, just two rows are swapped):$$\left[\begin{matrix}1 && 0 && 0 \\0 && 0 && 1 \\0 && 1 && 0 \\\end{matrix}\middle|\begin{matrix}1 \\ 3 \\ 2\end{matrix}\right]=\left[\begin{matrix}1 && 0 && 0 \\0 && 1 && 0 \\0 && 0 && 1 \\\end{matrix}\middle|\begin{matrix}1 \\ 2 \\ 3\end{matrix}\right]$$ But, in order to create a strategic and methodical process for the computer, we'll have to organize the matrix such that our solution matrix will have 1's in the diagonal (also called the Identity Matrix). Here's how we can implement Gaussian elimination with backsubstitution on the computer: Part 1: Iterate through your matrix. For each row, do the following steps: Step 1: Partial Pivot Essentially, this just means we're going to include a process that will swap rows so that our final matrix will be the identity matrix. While you're iterating through the matrix, you'll need to compare the ith term of the ith row to the ith term in all the following rows. Swap the ith row with the row that has the largest ith element. ("Largest" in this case means farthest from 0, so you'll want to compare the absolute values). Step 2: Divide by the diagonal term Divide the whole row by its diagonal term (such that the diagonal term is a one). Step 3: Add multiples The goal in this step is to make the matrix upper triangular (every element below the diagonal is 0). Our first row currently has a 1 in the leftmost position. We want to zero out the leftmost position for all of the following rows. So for every row after the 1st row, you'll subtract some multiple of the first row. When you repeat all of these steps on the ith row, you'll do the same thing for all of the following rows. After you repeat all of the above steps on the entirety of the matrix, your matrix should be upper triangular with ones along the diagonal. The last step is to use backsubstitution to get the identity matrix. Part 2: Backsubstitution Now that the matrix is upper triangular, we'll want to add multiples of rows to manipulate it into the identity matrix. Our last row contains only one value; add multiples of this row to each of the preceding rows to zero out the position in this matrix for all but the last row. Move up to the second to last row, and repeat this process. I recommend implementing the basics without the partial pivot, then incorporate the partial pivot after you have a solution that works for some matrices (I've included a collection of assert statements below that will pass without the partial pivot implemented).
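For reference, the row swap at the heart of the partial pivot can also be written compactly with NumPy. This is a minimal sketch, assuming `matrix` is the augmented array and `i` is the current diagonal index inside the elimination loop: ###Code
# Sketch of only the partial-pivot step for column i of the augmented array `matrix`
pivot_row = i + np.argmax(np.abs(matrix[i:, i]))  # row with the largest |entry| at or below the diagonal
if pivot_row != i:
    matrix[[i, pivot_row]] = matrix[[pivot_row, i]]  # fancy indexing copies, so this swap is safe
###Output _____no_output_____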
###Code def solve_system(matrix): """ Solves a system of linear equations Parameters: matrix(N x N+1 numpy array): the augmented matrix to be solved Returns: (N x 1 numpy array): array of the solutions to the linear equations """ N = len(matrix) matrix = matrix.astype("float64") for i in range(N): # # Implement Partial Pivot maximumCol = -math.inf index = i for j in range(i, N): if(abs(matrix[j][i])>maximumCol): maximumCol = abs(matrix[j][i]) index = j if i!=index: tempRow = np.array(matrix[i].tolist()) matrix[i] = matrix[index] matrix[index] = tempRow # Divide by diagonal terms matrix[i] = matrix[i]/(matrix[i][i]) # Setting ith position of next rows to 0 for j in range(i+1,N): matrix[j] = matrix[j] - matrix[j][i]*matrix[i] # Implement backsubstitution for l in range(N-1,0,-1): for k in range(l): matrix[k] = matrix[k] - matrix[k][l]*matrix[l] #TO DO: Return solutions return matrix[:, -1] # return matrix ###Output _____no_output_____ ###Markdown I've included a detailed list of tests below. Once your code is running correctly, all of the following tests will pass. ###Code test_a = np.array([[1, 0, 3], [0, 1, 2]]) test_b = np.array([[3, 1, 5], [2, 2, 6]]) test_c = np.array([[2,1,4,1,-4], [3,4,-1,-1,3], [1,-4,1,5,9], [2,-2,1,3,7]]) test_d = np.array([[0, 1, 2], [1, 0, 3]]) test_e = np.array([[2, -4, 1, -3], [-1, 2, -2, -3], [1, 1, 1, 6]]) test_f = np.array([[1,1,1,1,12], [2, -1, -1, 1, 4], [1, -2, 1, -2, -15], [3, 3, 2, -1, 15]]) test_g = np.array([[0, 0, 1, 0, 10], [0, 1, 0, 0, 3], [1, 0, 0, 0, 4], [0, 0, 0, 1, 2]]) print(solve_system(test_g)) # These will pass without the pivot functionality assert(np.all((solve_system(test_a) - np.array([3, 2]) < 0.0001))) assert(np.all((solve_system(test_b) - np.array([1, 2]) < 0.0001))) assert(np.all((solve_system(test_c) - np.array([2, -1, 2, 1]) < 0.0001))) assert(np.all((solve_system(test_f) - np.array([2, 4, 1, 5]) < 0.0001))) # These will NOT pass unless you have the partial pivot implemented correct assert(np.all((solve_system(test_d) - np.array([3, 2]) < 0.0001))) assert(np.all((solve_system(test_e) - np.array([1, 2, 3]) < 0.0001))) assert(np.all((solve_system(test_g) - np.array([4, 3, 10, 2]) < 0.0001))) ###Output _____no_output_____ ###Markdown Gaussian Elimination ###Code import numpy as np import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Assume we have a relationship betwen three variables x, y, and z, that can be related using the following three equations:$$2x - 4y + z = -3$$$$-x + 2y -2z = -3$$$$x + y + z = 6$$We can then express these relationships in the matrix form:$$\begin{bmatrix}2 && -4 && 1 \\-1 && 2 && -2 \\1 && 1 && 1\end{bmatrix} \begin{bmatrix} x \\ y \\ z \end{bmatrix} =\begin{bmatrix} -3 \\ -3 \\ 6\end{bmatrix}$$Often, this relation is written in it's more condensed short-hand form (known as an augmented matrix):$$\left[\begin{matrix}2 && -4 && 1 \\-1 && 2 && -2 \\1 && 1 && 1\end{matrix}\middle|\begin{matrix}-3 \\ -3 \\ 6\end{matrix}\right]$$ The idea of these problems, is to solve for the x, y and z values that satisfy the above conditions. When writing this process in a computer program, we need a very structured way of iterating and processing data. We'll want to organize our matrix in a way such that our program can solve every matrix in the same way. 
If we were computing this solution by hand, we'd know the following two matrices are identical statements (notice, just two rows are swapped):$$\left[\begin{matrix}1 && 0 && 0 \\0 && 0 && 1 \\0 && 1 && 0 \\\end{matrix}\middle|\begin{matrix}1 \\ 3 \\ 2\end{matrix}\right]=\left[\begin{matrix}1 && 0 && 0 \\0 && 1 && 0 \\0 && 0 && 1 \\\end{matrix}\middle|\begin{matrix}1 \\ 2 \\ 3\end{matrix}\right]$$ But, in order to create a strategic and methodical process for the computer, we'll have to organize the matrix such that our solution matrix will have 1's in the diagonal (also called the Identity Matrix). Here's how we can implement Gaussian elimination with backsubstitution on the computer: Part 1: Iterate through your matrix. For each row, do the following steps: Step 1: Partial Pivot Essentially, this just means we're going to include a process that will swap rows so that our final matrix will be the identity matrix. While you're iterating through the matrix, you'll need to compare the ith term of the ith row to the ith term in all the following rows. Swap the ith row with the row that has the largest ith element. ("Largest" in this case means farthest from 0, so you'll want to compare the absolute values). Step 2: Divide by the diagonal term Divide the whole row by its diagonal term (such that the diagonal term is a one). Step 3: Add multiples The goal in this step is to make the matrix upper triangular (every element below the diagonal is 0). Our first row currently has a 1 in the leftmost position. We want to zero out the leftmost position for all of the following rows. So for every row after the 1st row, you'll subtract some multiple of the first row. When you repeat all of these steps on the ith row, you'll do the same thing for all of the following rows. After you repeat all of the above steps on the entirety of the matrix, your matrix should be upper triangular with ones along the diagonal. The last step is to use backsubstitution to get the identity matrix. Part 2: Backsubstitution Now that the matrix is upper triangular, we'll want to add multiples of rows to manipulate it into the identity matrix. Our last row contains only one value; add multiples of this row to each of the preceding rows to zero out the position in this matrix for all but the last row. Move up to the second to last row, and repeat this process. I recommend implementing the basics without the partial pivot, then incorporate the partial pivot after you have a solution that works for some matrices (I've included a collection of assert statements below that will pass without the partial pivot implemented).
###Code import math def solve_system(matrix): """ Solves a system of linear equations Parameters: matrix(N x N+1 numpy array): the augmented matrix to be solved Returns: (N x 1 numpy array): array of the solutions to the linear equations """ N = len(matrix) matrix = matrix.astype("float64") for i in range(N): # TO DO: Implement Partial Pivot maxIndex = 0 maximum = 0 for a in range(i, N): if abs(matrix[a,i]) > maximum: maxIndex = a maximum = abs(matrix[a,i]) for c in range(N + 1): temp = matrix[i, c] matrix[i, c] = matrix[maxIndex, c] matrix[maxIndex, c] = temp # temp = np.array(matrix[maxIndex].tolist()) # matrix[maxIndex] = matrix[i] # matrix[i] = temp #Add multiples for j in range(i): temp = matrix[i,j] if matrix[i,j] != 0: for k in range(j, N + 1): matrix[i,k] = matrix[i,k] - matrix[j, k] * temp #matrix[j] = matrix[j] / matrix[j][j] if matrix[i][i] == 0: maxIndex = 0 maximum = 0 for a in range(i, N): if abs(matrix[a,i]) > maximum: maxIndex = a maximum = abs(matrix[a,i]) for c in range(N + 1): temp = matrix[i, c] matrix[i, c] = matrix[maxIndex, c] matrix[maxIndex, c] = temp # temp = np.array(matrix[maxIndex].tolist()) # matrix[maxIndex] = matrix[i] # matrix[i] = temp #Add multiples for j in range(i): temp = matrix[i,j] if matrix[i,j] != 0: for k in range(j, N + 1): matrix[i,k] = matrix[i,k] - matrix[j, k] * temp #Divide by diagonal term for j in range(N, i-1, -1): matrix[i,j] /= matrix[i,i] # TO DO: Implement backsubstitution ans = np.zeros(N) for r in range(N - 1, -1, -1): sub = 0 for c in range(r, N): sub += matrix[r, c] * ans[c] ans[r] = matrix[r, N] - sub #print(matrix) return ans ###Output _____no_output_____ ###Markdown I've included a detailed list of tests below. Once your code is running correctly, all of the following tests will pass. ###Code test_a = np.array([[1, 0, 3], [0, 1, 2]]) test_b = np.array([[3, 1, 5], [2, 2, 6]]) test_c = np.array([[2,1,4,1,-4], [3,4,-1,-1,3], [1,-4,1,5,9], [2,-2,1,3,7]]) test_d = np.array([[0, 1, 2], [1, 0, 3]]) test_e = np.array([[2, -4, 1, -3], [-1, 2, -2, -3], [1, 1, 1, 6]]) test_f = np.array([[1,1,1,1,12], [2, -1, -1, 1, 4], [1, -2, 1, -2, -15], [3, 3, 2, -1, 15]]) test_g = np.array([[0, 0, 1, 0, 10], [0, 1, 0, 0, 3], [1, 0, 0, 0, 4], [0, 0, 0, 1, 2]]) print(solve_system(test_b)) # These will pass without the pivot functionality assert(np.all((solve_system(test_a) - np.array([3, 2]) < 0.0001))) assert(np.all((solve_system(test_b) - np.array([1, 2]) < 0.0001))) assert(np.all((solve_system(test_c) - np.array([2, -1, 2, 1]) < 0.0001))) assert(np.all((solve_system(test_f) - np.array([2, 4, 1, 5]) < 0.0001))) # These will NOT pass unless you have the partial pivot implemented correct assert(np.all((solve_system(test_d) - np.array([3, 2]) < 0.0001))) assert(np.all((solve_system(test_e) - np.array([1, 2, 3]) < 0.0001))) assert(np.all((solve_system(test_g) - np.array([4, 3, 10, 2]) < 0.0001))) ###Output _____no_output_____
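###Markdown As a final sanity check (a sketch assuming the `solve_system` defined above), the three-variable system from the introduction can be compared against NumPy's reference solver; both should return the solution [1, 2, 3]: ###Code
# The system 2x - 4y + z = -3, -x + 2y - 2z = -3, x + y + z = 6
A = np.array([[2., -4., 1.], [-1., 2., -2.], [1., 1., 1.]])
b = np.array([-3., -3., 6.])
print(solve_system(np.column_stack([A, b])))  # our implementation
print(np.linalg.solve(A, b))                  # NumPy's reference answer
###Output _____no_output_____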
module4-decision-trees/decision-trees-assignment.ipynb
###Markdown _Lambda School Data Science — Tree Ensembles_ Decision Trees Assignment Part 1: House Price Regression Apply decision trees to the Ames housing dataset you've worked with this week! - Try multiple features - Try features you've engineered - Try different `max_depth` parameters - What's the best Test Root Mean Squared Error you can get? *Share with your cohort on Slack!* - What's a cool visualization you can make? *Share with your cohort on Slack!* ###Code import graphviz import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score from sklearn.model_selection import train_test_split import statsmodels.api as sm from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_graphviz from sklearn.impute import SimpleImputer pd.set_option('display.max_columns', 500) pd.set_option('display.max_rows', 500) df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv') df['ExterQual'] = df['ExterQual'].replace({'Ex': 4, 'Gd': 3, 'TA':2, 'Fa':1, 'Po':0}) df['ExterCond'] = df['ExterCond'].replace({'Ex': 4, 'Gd': 3, 'TA':2, 'Fa':1, 'Po':0}) df['BsmtQual'] = df['BsmtQual'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['BsmtCond'] = df['BsmtCond'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['BsmtFinType1'] = df['BsmtFinType1'].replace({'GLQ': 6, 'ALQ': 5, 'BLQ': 4, 'Rec':3, 'LwQ':2, 'Unf':1, np.nan:0}) df['BsmtFinType2'] = df['BsmtFinType2'].replace({'GLQ': 6, 'ALQ': 5, 'BLQ': 4, 'Rec':3, 'LwQ':2, 'Unf':1, np.nan:0}) df['HeatingQC'] = df['HeatingQC'].replace({'Ex': 4, 'Gd': 3, 'TA':2, 'Fa':1, 'Po':0}) df['CentralAir'] = df['CentralAir'].replace({'Y':1, 'N':0}) # CentralAir is coded 'Y'/'N' in the raw data df['KitchenQual'] = df['KitchenQual'].replace({'Ex': 4, 'Gd': 3, 'TA':2, 'Fa':1, 'Po':0}) df['FireplaceQu'] = df['FireplaceQu'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['GarageQual'] = df['GarageQual'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['GarageCond'] = df['GarageCond'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, 'Po':1, np.nan:0}) df['PoolQC'] = df['PoolQC'].replace({'Ex': 5, 'Gd': 4, 'TA':3, 'Fa':2, np.nan:0}) # get rid of nulls in Garage year built feature df['GarageYrBlt'] = df['GarageYrBlt'].replace(np.nan,df['YearBuilt']) ## log linear of price df['ln_price'] = np.log(df['SalePrice']) df_test = df ## features engineered df_test['GarageCars_Squared'] = df_test['GarageCars']**2 df_test['BsmtFinSF1_Squared'] = df_test['BsmtFinSF1']**2 df_test['BsmtQual_Squared'] = df_test['BsmtQual']**2 df_test['GarageYrBlt_Squared'] = df_test['GarageYrBlt']**2 df_test['age_at_time_sold'] = df_test['YrSold'] - df_test['YearBuilt'] df_test['total_area'] = ( df_test['GarageArea'] + df_test['GrLivArea'] + df_test['TotalBsmtSF'] ) df_test['size_yard'] = df_test['LotArea'] - df_test['GrLivArea'] df_test['yard_int_liv'] = df_test['size_yard'] * df_test['GrLivArea'] df_test['qual_garage_int_bsmt'] = df_test['GarageQual'] * df_test['BsmtQual'] df_test['qual_exter_int_kitchen'] = df_test['ExterQual'] * df_test['KitchenQual'] df_test['total_rooms_bathrooms'] = df_test['TotRmsAbvGrd'] + df_test['FullBath'] df_test['Remodeled'] = (df_test['YearRemodAdd'] != df_test['YearBuilt']) * 1 df_test['RecentRemodel'] = (df_test['YearRemodAdd'] == df_test['YrSold']) * 1 ## function to visualize decision tree def viztree(decision_tree,
feature_names): """Visualize a decision tree""" dot_data = export_graphviz(decision_tree, out_file=None, feature_names=feature_names, filled=True, rounded=True) return graphviz.Source(dot_data) def run_decision_tree_model(X, y, features, max_depth=1): X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.80, test_size=.20, random_state=42) tree = DecisionTreeRegressor(max_depth=max_depth) tree.fit(X_train, y_train) print('Train R^2 Score: ', tree.score(X_train, y_train)) print('Test R^2 Score: ', tree.score(X_test, y_test)) display(viztree(tree, feature_names=features)) target = 'ln_price' features = ['total_area', 'OverallQual'] y = df_test[target] X = df_test[features] run_decision_tree_model(X, y, features, 4) ###Output Train R^2 Score: 0.8044993479007602 Test R^2 Score: 0.782080670300658 ###Markdown Part 2 / Stretch: "Play Tennis" Classification We'll reproduce the "Play Tennis" example from Ross Quinlan's 1986 paper, [Induction of Decison Trees](https://link.springer.com/content/pdf/10.1007%2FBF00116251.pdf).[According to Wikipedia](https://en.wikipedia.org/wiki/Ross_Quinlan), "John Ross Quinlan is a computer science researcher in data mining and decision theory. He has contributed extensively to the development of decision tree algorithms, including inventing the canonical C4.5 and ID3 algorithms." "Table 1 shows a small training set" ###Code import pandas as pd columns = 'No. Outlook Temperature Humidity Windy PlayTennis'.split() raw = """1 sunny hot high false N 2 sunny hot high true N 3 overcast hot high false P 4 rain mild high false P 5 rain cool normal false P 6 rain cool normal true N 7 overcast cool normal true P 8 sunny mild high false N 9 sunny cool normal false P 10 rain mild normal false P 11 sunny mild normal true P 12 overcast mild high true P 13 overcast hot normal false P 14 rain mild high true N""" data = [row.split() for row in raw.split('\n')] tennis = pd.DataFrame(data=data, columns=columns).set_index('No.') tennis['PlayTennis'] = (tennis['PlayTennis'] == 'P').astype(int) tennis ###Output _____no_output_____ ###Markdown "A decision tree that correctly classifies each object in the training set is given in Figure 2." In this dataset, the tennis player decided to play on 64% of the days, and decided not to on 36% of the days. ###Code tennis['PlayTennis'].value_counts(normalize=True) * 100 ###Output _____no_output_____ ###Markdown The tennis player played on 100% of the overcast days, 40% of the sunny days, and 60% of the rainy days ###Code tennis.groupby('Outlook')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On sunny days, the tennis player's decision depends on the humidity. (The Outlook and Humidity features interact.) ###Code sunny = tennis[tennis['Outlook']=='sunny'] sunny.groupby('Humidity')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On rainy days, the tennis player's decision depends on the wind. (The Outlook and Windy features interact.) 
###Code rainy = tennis[tennis['Outlook']=='rain'] rainy.groupby('Windy')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown Before modeling, we will ["encode" categorical variables, using pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html?highlight=get_dummiescomputing-indicator-dummy-variables) ###Code y = tennis['PlayTennis'] X = pd.get_dummies(tennis.drop(columns='PlayTennis')) X ###Output _____no_output_____ ###Markdown Train a Decision Tree ClassifierGet a score of 100% (accuracy)https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html ###Code def classifier_decision_tree_model(X, y, features, max_depth=1): X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.80, test_size=.20, random_state=42) tree = DecisionTreeClassifier(max_depth=max_depth) tree.fit(X_train, y_train) print('Train Accuracy Score: ', tree.score(X_train, y_train)) print('Test Accuracy Score: ', tree.score(X_test, y_test)) display(viztree(tree, feature_names=features)) features = X.columns classifier_decision_tree_model(X, y, features, 4) ###Output Train Accuracy Score: 1.0 Test Accuracy Score: 1.0 ###Markdown Compare to Logistic Regressionhttps://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html ###Code def logistic_regression(X, y): X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.80, test_size=.20, random_state=42) model = LogisticRegression() model.fit(X_train, y_train) print('Train Accuracy Score: ', model.score(X_train, y_train)) print('Test Accuracy Score: ', model.score(X_test, y_test)) logistic_regression(X,y) ###Output Train Accuracy Score: 0.8181818181818182 Test Accuracy Score: 0.3333333333333333 ###Markdown Visualize the treehttps://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html ###Code ## ^^ ###Output _____no_output_____ ###Markdown _Lambda School Data Science — Tree Ensembles_ Decision Trees Assignment Part 1: House Price RegressionApply decision trees to the Ames housing dataset you've worked with this week!- Try multiple features- Try features you've engineered- Try different `max_depth` paramaters- What's the best Test Root Mean Squared Error you can get? *Share with your cohort on Slack!*- What's a cool visualization you can make? *Share with your cohort on Slack!* Part 2 / Stretch: "Play Tennis" Classification We'll reproduce the "Play Tennis" example from Ross Quinlan's 1986 paper, [Induction of Decison Trees](https://link.springer.com/content/pdf/10.1007%2FBF00116251.pdf).[According to Wikipedia](https://en.wikipedia.org/wiki/Ross_Quinlan), "John Ross Quinlan is a computer science researcher in data mining and decision theory. He has contributed extensively to the development of decision tree algorithms, including inventing the canonical C4.5 and ID3 algorithms." "Table 1 shows a small training set" ###Code import pandas as pd columns = 'No. 
Outlook Temperature Humidity Windy PlayTennis'.split() raw = """1 sunny hot high false N 2 sunny hot high true N 3 overcast hot high false P 4 rain mild high false P 5 rain cool normal false P 6 rain cool normal true N 7 overcast cool normal true P 8 sunny mild high false N 9 sunny cool normal false P 10 rain mild normal false P 11 sunny mild normal true P 12 overcast mild high true P 13 overcast hot normal false P 14 rain mild high true N""" data = [row.split() for row in raw.split('\n')] tennis = pd.DataFrame(data=data, columns=columns).set_index('No.') tennis['PlayTennis'] = (tennis['PlayTennis'] == 'P').astype(int) tennis ###Output _____no_output_____ ###Markdown "A decision tree that correctly classifies each object in the training set is given in Figure 2." In this dataset, the tennis player decided to play on 64% of the days, and decided not to on 36% of the days. ###Code tennis['PlayTennis'].value_counts(normalize=True) * 100 ###Output _____no_output_____ ###Markdown The tennis player played on 100% of the overcast days, 40% of the sunny days, and 60% of the rainy days ###Code tennis.groupby('Outlook')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On sunny days, the tennis player's decision depends on the humidity. (The Outlook and Humidity features interact.) ###Code sunny = tennis[tennis['Outlook']=='sunny'] sunny.groupby('Humidity')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On rainy days, the tennis player's decision depends on the wind. (The Outlook and Windy features interact.) ###Code rainy = tennis[tennis['Outlook']=='rain'] rainy.groupby('Windy')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown Before modeling, we will ["encode" categorical variables, using pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html?highlight=get_dummiescomputing-indicator-dummy-variables) ###Code y = tennis['PlayTennis'] X = pd.get_dummies(tennis.drop(columns='PlayTennis')) X ###Output (14, 10) (14,) ###Markdown _Lambda School Data Science — Tree Ensembles_ Decision Trees Assignment Part 1: House Price RegressionApply decision trees to the Ames housing dataset you've worked with this week!- Try multiple features- Try features you've engineered- Try different `max_depth` paramaters- What's the best Test Root Mean Squared Error you can get? *Share with your cohort on Slack!*- What's a cool visualization you can make? 
*Share with your cohort on Slack!* ###Code import matplotlib.pyplot as plt import graphviz from IPython.display import display from ipywidgets import interact from IPython.display import SVG from graphviz import Source import matplotlib.pyplot as plt from mpl_toolkits import mplot3d import numpy as np import pandas as pd import seaborn as sns from sklearn.impute import SimpleImputer from sklearn.linear_model import LinearRegression from sklearn import tree from sklearn.metrics import r2_score from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_graphviz url = 'https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv' df = pd.read_csv(url) df.head() df['LotFrontage'] = df['LotFrontage'].fillna(df['LotFrontage'].mean()) df = df.dropna(subset=['GarageYrBlt','MasVnrArea']) cat_vals = {'Gd':4, 'TA':2, 'Ex':5, 'Fa':3, np.NaN:0,'Po':1} df['BsmtQual'] = df['BsmtQual'].replace(cat_vals) df['ExterQual'] = df['ExterQual'].replace(cat_vals) df['PoolQC'] = df['PoolQC'].replace(cat_vals) df['HeatingQC'] = df['HeatingQC'].replace(cat_vals) df['KitchenQual'] = df['KitchenQual'].replace(cat_vals) df['FireplaceQu'] = df['FireplaceQu'].replace(cat_vals) df['GarageQual'] = df['GarageQual'].replace(cat_vals) df['ExterCond'] = df['ExterCond'].replace(cat_vals) df['BsmtCond'] = df['BsmtCond'].replace(cat_vals) df['GarageCond'] = df['GarageCond'].replace(cat_vals) df['GarageFinish'] = df['GarageFinish'].replace({'Fin':2,'RFn':1,np.NaN:0,'Unf':-1}) df['PavedDrive'] = df['PavedDrive'].replace({'Y':1,'P':0,'N':-1}) df['LandSlope'] = df['LandSlope'].replace({'Gtl':3,'Mod':2,'Sev':1}) df['CentralAir'] = df['CentralAir'].replace({'Y':1,'N':0}) df['Electrical'] = df['Electrical'].replace({'SBrkr':3,'FuseA':2,'FuseF':1,'FuseP':0,'Mix':2.5,np.NaN:2.5}) df['Foundation'] = df['Foundation'].replace({'Slab':5,'PConc':4,'CBlock':3,'Stone':2,'BrkTil':1,'Wood':0}) df['Street'] = df['Street'].replace({'Grvl':0,'Pave':1}) df['LandContour'] = df['LandContour'].replace({'Lvl':3,'Bnk':2,'HLS':1,'Low':0}) df['LotShape'] = df['LotShape'].replace({'Reg':3,'IR1':2,'IR2':1,'IR3':0}) df['ln_SalePrice'] = np.log(df['SalePrice']) df = df[df.describe().columns] def viztree(decision_tree, feature_names): """Visualize a decision tree""" dot_data = export_graphviz(decision_tree, out_file=None, feature_names=feature_names, filled=True, rounded=True) return graphviz.Source(dot_data) def viz3D(fitted_model, df, feature1, feature2, target='', num=100): """ Visualize model predictions in 3D, for regression or binary classification Parameters ---------- fitted_model : scikit-learn model, already fitted df : pandas dataframe, which was used to fit model feature1 : string, name of feature 1 feature2 : string, name of feature 2 target : string, name of target num : int, number of grid points for each feature References ---------- https://jakevdp.github.io/PythonDataScienceHandbook/04.12-three-dimensional-plotting.html https://scikit-learn.org/stable/auto_examples/tree/plot_iris.html """ x1 = np.linspace(df[feature1].min(), df[feature1].max(), num) x2 = np.linspace(df[feature2].min(), df[feature2].max(), num) X1, X2 = np.meshgrid(x1, x2) X = np.c_[X1.flatten(), X2.flatten()] if hasattr(fitted_model, 'predict_proba'): predicted = fitted_model.predict_proba(X)[:,1] else: predicted = fitted_model.predict(X) Z = predicted.reshape(num, num) fig = plt.figure() ax = plt.axes(projection='3d') ax.plot_surface(X1, X2, Z, cmap='viridis') 
ax.set_xlabel(feature1) ax.set_ylabel(feature2) ax.set_zlabel(target) return fig X = df[['LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'GrLivArea', 'TotRmsAbvGrd', 'YrSold', 'FullBath']] y = df['SalePrice'] estimator = DecisionTreeRegressor(max_depth=10) estimator.fit(X, y) print('R^2', estimator.score(X,y)) graph = Source(tree.export_graphviz(estimator, out_file=None , feature_names=X.columns, class_names=['0', '1', '2'] , filled = True)) display(SVG(graph.pipe(format='svg'))) ###Output _____no_output_____ ###Markdown Part 2 / Stretch: "Play Tennis" Classification We'll reproduce the "Play Tennis" example from Ross Quinlan's 1986 paper, [Induction of Decison Trees](https://link.springer.com/content/pdf/10.1007%2FBF00116251.pdf).[According to Wikipedia](https://en.wikipedia.org/wiki/Ross_Quinlan), "John Ross Quinlan is a computer science researcher in data mining and decision theory. He has contributed extensively to the development of decision tree algorithms, including inventing the canonical C4.5 and ID3 algorithms." "Table 1 shows a small training set" ###Code import pandas as pd columns = 'No. Outlook Temperature Humidity Windy PlayTennis'.split() raw = """1 sunny hot high false N 2 sunny hot high true N 3 overcast hot high false P 4 rain mild high false P 5 rain cool normal false P 6 rain cool normal true N 7 overcast cool normal true P 8 sunny mild high false N 9 sunny cool normal false P 10 rain mild normal false P 11 sunny mild normal true P 12 overcast mild high true P 13 overcast hot normal false P 14 rain mild high true N""" data = [row.split() for row in raw.split('\n')] tennis = pd.DataFrame(data=data, columns=columns).set_index('No.') tennis['PlayTennis'] = (tennis['PlayTennis'] == 'P').astype(int) tennis ###Output _____no_output_____ ###Markdown "A decision tree that correctly classifies each object in the training set is given in Figure 2." In this dataset, the tennis player decided to play on 64% of the days, and decided not to on 36% of the days. ###Code tennis['PlayTennis'].value_counts(normalize=True) * 100 ###Output _____no_output_____ ###Markdown The tennis player played on 100% of the overcast days, 40% of the sunny days, and 60% of the rainy days ###Code tennis.groupby('Outlook')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On sunny days, the tennis player's decision depends on the humidity. (The Outlook and Humidity features interact.) ###Code sunny = tennis[tennis['Outlook']=='sunny'] sunny.groupby('Humidity')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On rainy days, the tennis player's decision depends on the wind. (The Outlook and Windy features interact.) 
###Code rainy = tennis[tennis['Outlook']=='rain'] rainy.groupby('Windy')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown Before modeling, we will ["encode" categorical variables, using pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html?highlight=get_dummiescomputing-indicator-dummy-variables) ###Code y = tennis['PlayTennis'] X = pd.get_dummies(tennis.drop(columns='PlayTennis')) X ###Output (14, 10) (14,) ###Markdown Train a Decision Tree ClassifierGet a score of 100% (accuracy)https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html ###Code ###Output _____no_output_____ ###Markdown Compare to Logistic Regressionhttps://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html ###Code ###Output _____no_output_____ ###Markdown Visualize the treehttps://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html ###Code ###Output _____no_output_____ ###Markdown _Lambda School Data Science — Tree Ensembles_ Decision Trees Assignment Part 1: House Price RegressionApply decision trees to the Ames housing dataset you've worked with this week!- Try multiple features- Try features you've engineered- Try different `max_depth` paramaters- What's the best Test Root Mean Squared Error you can get? *Share with your cohort on Slack!*- What's a cool visualization you can make? *Share with your cohort on Slack!* ###Code # Import and read csv import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.metrics import r2_score from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv') df.columns.tolist() # Decision Tree with two features ames_X = df[['GrLivArea','1stFlrSF']] ames_y = df['SalePrice'] tree = DecisionTreeRegressor() tree.fit(ames_X, ames_y) print('R^2: ', tree.score(ames_X, ames_y)) # Same tree max_depth=3 tree = DecisionTreeRegressor(max_depth=3) tree.fit(ames_X,ames_y) print('R^2: ', tree.score(ames_X, ames_y)) tree = DecisionTreeRegressor(max_depth=5) tree.fit(ames_X,ames_y) print('R^2: ', tree.score(ames_X, ames_y)) ###Output R^2: 0.6984588965306299 ###Markdown Part 2 / Stretch: "Play Tennis" Classification We'll reproduce the "Play Tennis" example from Ross Quinlan's 1986 paper, [Induction of Decison Trees](https://link.springer.com/content/pdf/10.1007%2FBF00116251.pdf).[According to Wikipedia](https://en.wikipedia.org/wiki/Ross_Quinlan), "John Ross Quinlan is a computer science researcher in data mining and decision theory. He has contributed extensively to the development of decision tree algorithms, including inventing the canonical C4.5 and ID3 algorithms." "Table 1 shows a small training set" ###Code import pandas as pd columns = 'No. 
Outlook Temperature Humidity Windy PlayTennis'.split() raw = """1 sunny hot high false N 2 sunny hot high true N 3 overcast hot high false P 4 rain mild high false P 5 rain cool normal false P 6 rain cool normal true N 7 overcast cool normal true P 8 sunny mild high false N 9 sunny cool normal false P 10 rain mild normal false P 11 sunny mild normal true P 12 overcast mild high true P 13 overcast hot normal false P 14 rain mild high true N""" data = [row.split() for row in raw.split('\n')] tennis = pd.DataFrame(data=data, columns=columns).set_index('No.') tennis['PlayTennis'] = (tennis['PlayTennis'] == 'P').astype(int) tennis ###Output _____no_output_____ ###Markdown "A decision tree that correctly classifies each object in the training set is given in Figure 2." In this dataset, the tennis player decided to play on 64% of the days, and decided not to on 36% of the days. ###Code tennis['PlayTennis'].value_counts(normalize=True) * 100 ###Output _____no_output_____ ###Markdown The tennis player played on 100% of the overcast days, 40% of the sunny days, and 60% of the rainy days ###Code tennis.groupby('Outlook')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On sunny days, the tennis player's decision depends on the humidity. (The Outlook and Humidity features interact.) ###Code sunny = tennis[tennis['Outlook']=='sunny'] sunny.groupby('Humidity')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On rainy days, the tennis player's decision depends on the wind. (The Outlook and Windy features interact.) ###Code rainy = tennis[tennis['Outlook']=='rain'] rainy.groupby('Windy')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown Before modeling, we will ["encode" categorical variables, using pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html?highlight=get_dummiescomputing-indicator-dummy-variables) ###Code y = tennis['PlayTennis'] X = pd.get_dummies(tennis.drop(columns='PlayTennis')) X ###Output _____no_output_____ ###Markdown Train a Decision Tree ClassifierGet a score of 100% (accuracy)https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html ###Code dtc = DecisionTreeClassifier() dtc.fit(X,y) print('R^2: ', dtc.score(X,y)) ###Output R^2: 1.0 ###Markdown Compare to Logistic Regressionhttps://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html ###Code from sklearn.linear_model import LogisticRegression lrc = LogisticRegression(solver='lbfgs').fit(X,y) lrc.score(X,y) ###Output _____no_output_____ ###Markdown Visualize the treehttps://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html ###Code import graphviz from sklearn.tree import export_graphviz X.columns graphviz.Source(export_graphviz(dtc, out_file=None, feature_names=['Outlook_overcast', 'Outlook_rain', 'Outlook_sunny', 'Temperature_cool', 'Temperature_hot', 'Temperature_mild', 'Humidity_high', 'Humidity_normal', 'Windy_false', 'Windy_true'], filled=True, rounded=True)) ###Output _____no_output_____ ###Markdown _Lambda School Data Science — Tree Ensembles_ Decision Trees Assignment Part 1: House Price RegressionApply decision trees to the Ames housing dataset you've worked with this week!- Try multiple features- Try features you've engineered- Try different `max_depth` paramaters- What's the best Test Root Mean Squared Error you can get? *Share with your cohort on Slack!*- What's a cool visualization you can make? 
*Share with your cohort on Slack!* **Importing** ###Code %matplotlib inline import graphviz from IPython.display import display from ipywidgets import interact import matplotlib.pyplot as plt from mpl_toolkits import mplot3d import numpy as np import pandas as pd import seaborn as sns from scipy import stats from sklearn.metrics import mean_squared_error from sklearn.impute import SimpleImputer from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_graphviz def viz3D(fitted_model, df, feature1, feature2, target='', num=100): """ Visualize model predictions in 3D, for regression or binary classification Parameters ---------- fitted_model : scikit-learn model, already fitted df : pandas dataframe, which was used to fit model feature1 : string, name of feature 1 feature2 : string, name of feature 2 target : string, name of target num : int, number of grid points for each feature References ---------- https://jakevdp.github.io/PythonDataScienceHandbook/04.12-three-dimensional-plotting.html https://scikit-learn.org/stable/auto_examples/tree/plot_iris.html """ x1 = np.linspace(df[feature1].min(), df[feature1].max(), num) x2 = np.linspace(df[feature2].min(), df[feature2].max(), num) X1, X2 = np.meshgrid(x1, x2) X = np.c_[X1.flatten(), X2.flatten()] if hasattr(fitted_model, 'predict_proba'): predicted = fitted_model.predict_proba(X)[:,1] else: predicted = fitted_model.predict(X) Z = predicted.reshape(num, num) fig = plt.figure() ax = plt.axes(projection='3d') ax.plot_surface(X1, X2, Z, cmap='viridis') ax.set_xlabel(feature1) ax.set_ylabel(feature2) ax.set_zlabel(target) return fig def viztree(decision_tree, feature_names): """Visualize a decision tree""" dot_data = export_graphviz(decision_tree, out_file=None, feature_names=feature_names, filled=True, rounded=True) return graphviz.Source(dot_data) url = 'https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv' ames_df = pd.read_csv(url) ###Output _____no_output_____ ###Markdown **Cleaned and Revised ** ###Code encoded = {'Gd':4, 'TA':2, 'Ex':5, 'Fa':3, np.NaN:0,'Po':1} # Basement ames_df['BsmtQual'] = ames_df['BsmtQual'].replace(encoded) ames_df['BsmtCond'] = ames_df['BsmtCond'].replace(encoded) # External Quality and Condition ames_df['ExterQual'] = ames_df['ExterQual'].replace(encoded) ames_df['ExterCond'] = ames_df['ExterCond'].replace(encoded) # Lot Info ames_df['LotFrontage'] = ames_df['LotFrontage'].fillna(ames_df['LotFrontage'].mean()) ames_df['LandSlope'] = ames_df['LandSlope'].replace({'Gtl':3,'Mod':2,'Sev':1}) ames_df['Foundation'] = ames_df['Foundation'].replace({'Slab':5,'PConc':4,'CBlock':3,'Stone':2,'BrkTil':1,'Wood':0}) ames_df['Street'] = ames_df['Street'].replace({'Grvl':0,'Pave':1}) ames_df['LandContour'] = ames_df['LandContour'].replace({'Lvl':3,'Bnk':2,'HLS':1,'Low':0}) ames_df['LotShape'] = ames_df['LotShape'].replace({'Reg':3,'IR1':2,'IR2':1,'IR3':0}) ames_df['ln_SalePrice'] = np.log(ames_df['SalePrice']) # Dropping Nans ames_df = ames_df.dropna(subset=['GarageYrBlt','MasVnrArea']) # Fireplace ames_df['FireplaceQu'] = ames_df['FireplaceQu'].replace(encoded) # Garage ames_df['GarageQual'] = ames_df['GarageQual'].replace(encoded) ames_df['GarageCond'] = ames_df['GarageCond'].replace(encoded) ames_df['GarageFinish'] = ames_df['GarageFinish'].replace({'Fin':2,'RFn':1,np.NaN:0,'Unf':-1}) ames_df['PavedDrive'] =
ames_df['PavedDrive'].replace({'Y':1,'P':0,'N':-1}) # Pool ames_df['PoolQC'] = ames_df['PoolQC'].replace(encoded) # Heating / Cooling / Electrical ames_df['HeatingQC'] = ames_df['HeatingQC'].replace(encoded) ames_df['CentralAir'] = ames_df['CentralAir'].replace({'Y':1,'N':0}) ames_df['Electrical'] = ames_df['Electrical'].replace({'SBrkr':3,'FuseA':2,'FuseF':1,'FuseP':0,'Mix':2.5,np.NaN:2.5}) # Kitchen ames_df['KitchenQual'] = ames_df['KitchenQual'].replace(encoded) ames_df = ames_df[ames_df.describe().columns] ###Output _____no_output_____ ###Markdown **Decision Tree With 1 Feature** ###Code target = 'SalePrice' features = ['OverallQual'] x = ames_df[features] y = ames_df[target] tree = DecisionTreeRegressor() tree.fit(x, y) print("R-Squared:", tree.score(x, y)) # Splitting into Train and Test x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.80, test_size=0.20, random_state=42) for depth in range(1,16): tree = DecisionTreeRegressor(max_depth=depth) # Fitting Model tree.fit(x_train, y_train) # Calculating RMSE RMSE = (np.sqrt(mean_squared_error(y_test, tree.predict(x_test)))) # Displaying Results print('max_depth: {} \t model score: {} \t Test RMSE: {}' .format(depth, tree.score(x, y), RMSE)) # Decision Tree display(viztree(tree, features)) ###Output _____no_output_____ ###Markdown **Try Multiple Features** ###Code ames_df.columns target = ['SalePrice'] features = ['YearBuilt', 'OverallQual', 'GarageCars', 'OverallCond', 'BedroomAbvGr'] x2 = ames_df[features] y2 = ames_df['SalePrice'] tree = DecisionTreeRegressor() tree.fit(x2, y2) print("R-Squared:", tree.score(x2, y2)) x_train, x_test, y_train, y_test = train_test_split(x2, y2, train_size=0.80, test_size=0.20, random_state=42) for depth in range(1,20): tree = DecisionTreeRegressor(max_depth=depth) # Fitting tree.fit(x_train, y_train) # Calculating RMSE RMSE = (np.sqrt(mean_squared_error(y_test, tree.predict(x_test)))) # Displaying Results print('max_depth: {} \t model score: {} \t Test RMSE: {}' .format(depth, tree.score(x2, y2), RMSE)) # Run the cell to see tree...sorry display(viztree(tree, features)) ###Output _____no_output_____ ###Markdown --- ###Code # Best Score So Far... target = ['SalePrice'] features = ['YearBuilt', 'YearRemodAdd', 'LotArea', 'TotRmsAbvGrd'] x3 = ames_df[features] y3 = ames_df[target] tree = DecisionTreeRegressor() tree.fit(x3, y3) print("R-Squared:", tree.score(x3, y3)) # Splitting into Train and Test x_train, x_test, y_train, y_test = train_test_split(x3, y3, train_size=0.80, test_size=0.20, random_state=42) for depth in range(1,20): tree = DecisionTreeRegressor(max_depth=depth) # Fitting the Model tree.fit(x_train, y_train) # Calculating RMSE RMSE = (np.sqrt(mean_squared_error(y_test, tree.predict(x_test)))) # Displaying Results print('max_depth: {} \t model score: {} \t Test RMSE: {}' .format(depth, tree.score(x3, y3), RMSE)) # Run to see tree..
display(viztree(tree, features)) ###Output _____no_output_____ ###Markdown --- ###Code # Nevermind...this score is great target = ['SalePrice'] features = ['LotArea','OverallQual', 'OverallCond', 'YearBuilt','TotRmsAbvGrd'] x4 = ames_df[features] y4 = ames_df[target] tree = DecisionTreeRegressor() tree.fit(x4, y4) print("R-Squared:", tree.score(x4, y4)) # Splitting into Train and Test x_train, x_test, y_train, y_test = train_test_split(x4, y4, train_size=0.80, test_size=0.20, random_state=42) for depth in range(1,20): tree = DecisionTreeRegressor(max_depth=depth) # Fitting the model tree.fit(x_train, y_train) # Calculating RMSE RMSE = (np.sqrt(mean_squared_error(y_test, tree.predict(x_test)))) # Displaying Results print('max_depth: {} \t model score: {} \t Test RMSE: {}' .format(depth, tree.score(x4, y4), RMSE)) # Run to see tree display(viztree(tree, features)) ###Output _____no_output_____ ###Markdown Part 2 / Stretch: "Play Tennis" Classification We'll reproduce the "Play Tennis" example from Ross Quinlan's 1986 paper, [Induction of Decison Trees](https://link.springer.com/content/pdf/10.1007%2FBF00116251.pdf).[According to Wikipedia](https://en.wikipedia.org/wiki/Ross_Quinlan), "John Ross Quinlan is a computer science researcher in data mining and decision theory. He has contributed extensively to the development of decision tree algorithms, including inventing the canonical C4.5 and ID3 algorithms." "Table 1 shows a small training set" ###Code import pandas as pd columns = 'No. Outlook Temperature Humidity Windy PlayTennis'.split() raw = """1 sunny hot high false N 2 sunny hot high true N 3 overcast hot high false P 4 rain mild high false P 5 rain cool normal false P 6 rain cool normal true N 7 overcast cool normal true P 8 sunny mild high false N 9 sunny cool normal false P 10 rain mild normal false P 11 sunny mild normal true P 12 overcast mild high true P 13 overcast hot normal false P 14 rain mild high true N""" data = [row.split() for row in raw.split('\n')] tennis = pd.DataFrame(data=data, columns=columns).set_index('No.') tennis['PlayTennis'] = (tennis['PlayTennis'] == 'P').astype(int) tennis ###Output _____no_output_____ ###Markdown "A decision tree that correctly classifies each object in the training set is given in Figure 2." In this dataset, the tennis player decided to play on 64% of the days, and decided not to on 36% of the days. ###Code tennis['PlayTennis'].value_counts(normalize=True) * 100 ###Output _____no_output_____ ###Markdown The tennis player played on 100% of the overcast days, 40% of the sunny days, and 60% of the rainy days ###Code tennis.groupby('Outlook')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On sunny days, the tennis player's decision depends on the humidity. (The Outlook and Humidity features interact.) ###Code sunny = tennis[tennis['Outlook']=='sunny'] sunny.groupby('Humidity')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On rainy days, the tennis player's decision depends on the wind. (The Outlook and Windy features interact.) 
###Code rainy = tennis[tennis['Outlook']=='rain'] rainy.groupby('Windy')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown Before modeling, we will ["encode" categorical variables, using pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html?highlight=get_dummiescomputing-indicator-dummy-variables) ###Code y = tennis['PlayTennis'] X = pd.get_dummies(tennis.drop(columns='PlayTennis')) X ###Output _____no_output_____ ###Markdown Train a Decision Tree ClassifierGet a score of 100% (accuracy)https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.80, test_size=0.20, random_state=42) tree = DecisionTreeClassifier() tree.fit(X_train, y_train) print('Decision Tree Classifier:', tree.score(X_test, y_test)) ###Output Decision Tree Classifier: 1.0 ###Markdown Compare to Logistic Regressionhttps://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html ###Code from sklearn.linear_model import LogisticRegression # Type of Model logistic = LogisticRegression(solver='lbfgs') # Fitting logistic.fit(X,y) # Displaying Results (score the logistic model, not the tree) print('Accuracy:', logistic.score(X,y)) ###Output Accuracy: 1.0 ###Markdown Visualize the treehttps://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html ###Code # Function def class_tree(X,y,features,depth=3): # Type of Model tree = DecisionTreeClassifier(max_depth=depth) # Fitting the Model tree.fit(X,y) # Displaying Results print('Accuracy:', tree.score(X, y)) display(viztree(tree, feature_names=features)) # Showing Tree features=X.columns display(viztree(tree, features)) ###Output _____no_output_____ ###Markdown _Lambda School Data Science — Tree Ensembles_ Decision Trees Assignment Part 1: House Price RegressionApply decision trees to the Ames housing dataset you've worked with this week!- Try multiple features- Try features you've engineered- Try different `max_depth` parameters- What's the best Test Root Mean Squared Error you can get? *Share with your cohort on Slack!*- What's a cool visualization you can make? *Share with your cohort on Slack!* ###Code import pandas as pd import numpy as np import seaborn as sns df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv') pd.set_option('display.max_rows',100) df.shape def check_missings(): return df.apply(pd.isnull).sum().sort_values(ascending=False).loc[lambda series: series>0] ###Output _____no_output_____ ###Markdown Cleaning ###Code df['PoolQC'] = df['PoolQC'].replace({np.nan:'No pool'}) df['MiscFeature'] = df['MiscFeature'].replace({np.nan: 'No misc.
features'}) df['Alley'] = df['Alley'].replace({np.nan: 'No alley'}) df['Fence'] = df['Fence'].replace({np.nan: 'No fence'}) df['FireplaceQu'] = df['FireplaceQu'].replace({np.nan: 'No fireplace'}) df['GarageFinish'] = df['GarageFinish'].replace({np.nan: 'No garage'}) df['GarageCond'] = df['GarageCond'].replace({np.nan: 'No garage'}) df['GarageQual'] = df['GarageQual'].replace({np.nan: 'No garage'}) df['GarageType'] = df['GarageType'].replace({np.nan: 'No garage'}) df['BsmtCond'] = df['BsmtCond'].replace({np.nan: 'No basement'}) df['BsmtExposure'] = df['BsmtExposure'].replace({np.nan: 'No basement'}) df['BsmtQual'] = df['BsmtQual'].replace({np.nan: 'No basement'}) df['BsmtFinType2'] = df['BsmtFinType2'].replace({np.nan: 'No basement'}) df['BsmtFinType1'] = df['BsmtFinType1'].replace({np.nan: 'No basement'}) df['MasVnrType'] = df['MasVnrType'].replace({np.nan: 'No masonry'}) df['GarageYrBlt'] = df['GarageYrBlt'].fillna(0) df['MasVnrArea'] = df['MasVnrArea'].fillna(0) df['LotFrontage'] = df['LotFrontage'].fillna(0) df['MSZoning'] = df['MSZoning'].fillna('RL') df['Utilities'] = df['Utilities'].fillna('AllPub') df['BsmtHalfBath'] = df['BsmtHalfBath'].fillna(0) df['BsmtFullBath'] = df['BsmtFullBath'].fillna(0) df['Functional'] = df['Functional'].fillna('Typ') df['GarageArea'] = df['GarageArea'].fillna(df['GarageArea'].mean()) df['GarageCars'] = df['GarageCars'].fillna(2) df['BsmtFinSF2'] = df['BsmtFinSF2'].fillna(df['BsmtFinSF2'].mean()) df['BsmtFinSF1'] = df['BsmtFinSF1'].fillna(df['BsmtFinSF1'].mean()) df['Electrical'] = df['Electrical'].fillna('SBrkr') df['Exterior2nd'] = df['Exterior2nd'].fillna('VinylSd') df['KitchenQual'] = df['KitchenQual'].fillna('TA') df['Exterior1st'] = df['Exterior1st'].fillna(df['Exterior1st'].value_counts().index[0]) df['BsmtUnfSF'] = df['BsmtUnfSF'].fillna(df['BsmtUnfSF'].mean()) df['TotalBsmtSF'] = df['TotalBsmtSF'].fillna(df['TotalBsmtSF'].mean()) df['SaleType'] = df['SaleType'].fillna(df['SaleType'].value_counts().index[0]) ###Output _____no_output_____ ###Markdown Exploring ###Code df.head() sns.boxplot(df['SaleCondition'], df['SalePrice']) total_area = df['TotalBsmtSF']+df['1stFlrSF']+df['2ndFlrSF']+df['GarageArea'] +df['WoodDeckSF']+df['OpenPorchSF']+df['EnclosedPorch']+df['3SsnPorch'] +df['ScreenPorch']+df['PoolArea'] lot_usage_rate = df['LotArea']/total_area lot_usage_rate.describe() lot_usage_rate.plot() df['lot_usage_rate']=lot_usage_rate df['totalSF']=df['TotalBsmtSF']+df['1stFlrSF']+df['2ndFlrSF'] sns.distplot(df['GarageYrBlt']) df.GarageYrBlt.value_counts().head(5) df['GarageYrBlt'] = df['GarageYrBlt'].replace({0:df['GarageYrBlt'][df['GarageYrBlt'] !=0].mean()}) df.GarageYrBlt.value_counts().head(5) df['has_garage'] = df['GarageArea'] > 0 df['has_woodDeck'] = df['WoodDeckSF'] > 0 df['has_openPorch'] = df['OpenPorchSF'] > 0 df['has_enclosedPorch'] = df['EnclosedPorch'] > 0 df['has_3ssnPorch'] = df['3SsnPorch'] > 0 df['has_screenPorch'] = df['ScreenPorch'] > 0 df['has_pool'] = df['PoolArea'] > 0 df['has_basement'] = df['TotalBsmtSF'] > 0 df['has_lot_frontage'] = df['LotFrontage'] > 0 df[['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'CentralAir', 'Electrical', 'Functional', 'GarageType', 'GarageFinish', 'PavedDrive', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition', 'has_garage', 
'has_woodDeck', 'has_openPorch', 'has_enclosedPorch', 'has_3ssnPorch', 'has_screenPorch', 'has_pool', 'has_basement', 'has_lot_frontage']] = df[['MSSubClass', 'MSZoning', 'Street', 'Alley', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'Foundation', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'CentralAir', 'Electrical', 'Functional', 'GarageType', 'GarageFinish', 'PavedDrive', 'Fence', 'MiscFeature', 'SaleType', 'SaleCondition', 'has_garage', 'has_woodDeck', 'has_openPorch', 'has_enclosedPorch', 'has_3ssnPorch', 'has_screenPorch', 'has_pool', 'has_basement', 'has_lot_frontage']].apply(pd.Categorical) df['ExterQual'] = pd.Categorical(df['ExterQual'], ordered=True, categories=['Fa','TA','Gd','Ex']) df['ExterCond'] = pd.Categorical(df['ExterCond'], ordered=True, categories=['Po','Fa','TA','Gd','Ex']) df['BsmtQual'] = pd.Categorical(df['BsmtQual'], ordered=True, categories=['No basement','Fa','TA','Gd','Ex']) df['BsmtCond'] = pd.Categorical(df['BsmtCond'], ordered=True, categories=['No basement','Po','Fa','TA','Gd']) df['HeatingQC'] = pd.Categorical(df['HeatingQC'], ordered=True, categories=['Po','Fa','TA','Gd','Ex']) df['KitchenQual'] = pd.Categorical(df['KitchenQual'], ordered=True, categories=['Fa','TA','Gd','Ex']) df['FireplaceQu'] = pd.Categorical(df['FireplaceQu'], ordered=True, categories=['No fireplace','Po','Fa','TA','Gd','Ex']) df['GarageQual'] = pd.Categorical(df['GarageQual'], ordered=True, categories=['No garage','Po','Fa','TA','Gd','Ex']) df['GarageCond'] = pd.Categorical(df['GarageCond'], ordered=True, categories=['No garage','Po','Fa','TA','Gd','Ex']) df['PoolQC'] = pd.Categorical(df['PoolQC'], ordered=True, categories=['No pool','Fa','Gd','Ex']) df.columns corr = df.corr() sns.set(rc={'figure.figsize':(11.7,8.27)}) sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values) sorted_corrs = df.corr()['SalePrice'].apply(np.abs).sort_values(ascending=False) sns.stripplot(sorted_corrs, sorted_corrs.index, orient='h') ###Output _____no_output_____ ###Markdown OverallQual, totalSF, GrLivArea Decision tree ###Code from sklearn.tree import DecisionTreeRegressor ames_X=df[['OverallQual', 'totalSF', 'GrLivArea']] ames_y=df['SalePrice'] tree = DecisionTreeRegressor() tree.fit(ames_X,ames_y) print('R^2',tree.score(ames_X,ames_y)) ###Output R^2 0.9965792591794982 ###Markdown Part 2 / Stretch: "Play Tennis" Classification We'll reproduce the "Play Tennis" example from Ross Quinlan's 1986 paper, [Induction of Decison Trees](https://link.springer.com/content/pdf/10.1007%2FBF00116251.pdf).[According to Wikipedia](https://en.wikipedia.org/wiki/Ross_Quinlan), "John Ross Quinlan is a computer science researcher in data mining and decision theory. He has contributed extensively to the development of decision tree algorithms, including inventing the canonical C4.5 and ID3 algorithms." "Table 1 shows a small training set" ###Code import pandas as pd columns = 'No. 
Outlook Temperature Humidity Windy PlayTennis'.split() raw = """1 sunny hot high false N 2 sunny hot high true N 3 overcast hot high false P 4 rain mild high false P 5 rain cool normal false P 6 rain cool normal true N 7 overcast cool normal true P 8 sunny mild high false N 9 sunny cool normal false P 10 rain mild normal false P 11 sunny mild normal true P 12 overcast mild high true P 13 overcast hot normal false P 14 rain mild high true N""" data = [row.split() for row in raw.split('\n')] tennis = pd.DataFrame(data=data, columns=columns).set_index('No.') tennis['PlayTennis'] = (tennis['PlayTennis'] == 'P').astype(int) tennis ###Output _____no_output_____ ###Markdown "A decision tree that correctly classifies each object in the training set is given in Figure 2." In this dataset, the tennis player decided to play on 64% of the days, and decided not to on 36% of the days. ###Code tennis['PlayTennis'].value_counts(normalize=True) * 100 ###Output _____no_output_____ ###Markdown The tennis player played on 100% of the overcast days, 40% of the sunny days, and 60% of the rainy days ###Code tennis.groupby('Outlook')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On sunny days, the tennis player's decision depends on the humidity. (The Outlook and Humidity features interact.) ###Code sunny = tennis[tennis['Outlook']=='sunny'] sunny.groupby('Humidity')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown On rainy days, the tennis player's decision depends on the wind. (The Outlook and Windy features interact.) ###Code rainy = tennis[tennis['Outlook']=='rain'] rainy.groupby('Windy')['PlayTennis'].mean() * 100 ###Output _____no_output_____ ###Markdown Before modeling, we will ["encode" categorical variables, using pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html?highlight=get_dummiescomputing-indicator-dummy-variables) ###Code y = tennis['PlayTennis'] X = pd.get_dummies(tennis.drop(columns='PlayTennis')) X ###Output _____no_output_____ ###Markdown Train a Decision Tree ClassifierGet a score of 100% (accuracy)https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html ###Code from sklearn.tree import DecisionTreeClassifier tree = DecisionTreeClassifier() tree.fit(X,y) print('accuracy',tree.score(X,y)) ###Output accuracy 1.0 ###Markdown Compare to Logistic Regressionhttps://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html ###Code from sklearn.linear_model import LogisticRegression import numpy as np model=LogisticRegression() model.fit(X,y) print('r^2',model.score(X,y)) ###Output r^2 0.8571428571428571 ###Markdown Visualize the treehttps://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html ###Code !pip install dtreeviz from dtreeviz.trees import dtreeviz dtreeviz(tree, X, y, target_name='Play Tennis', feature_names=X.columns, class_names=['No', 'Yes']) ###Output _____no_output_____
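###Markdown The tree in Quinlan's Figure 2 splits on Outlook first, and ID3's choice can be checked by hand: it picks the attribute with the highest information gain. A minimal sketch of that calculation, assuming the `tennis` DataFrame from the cells above is in scope (the helper names here are illustrative, not from the original notebook): ###Code
import numpy as np

def entropy(series):
    # Shannon entropy (in bits) of a label distribution
    p = series.value_counts(normalize=True)
    return float(-(p * np.log2(p)).sum())

def information_gain(df, feature, target='PlayTennis'):
    # expected reduction in label entropy from splitting on `feature`
    weights = df[feature].value_counts(normalize=True)
    remainder = sum(weights[v] * entropy(df.loc[df[feature] == v, target])
                    for v in weights.index)
    return entropy(df[target]) - remainder

for feature in ['Outlook', 'Temperature', 'Humidity', 'Windy']:
    print(feature, round(information_gain(tennis, feature), 3))
# Outlook wins with ~0.247 bits (Humidity ~0.152, Windy ~0.048,
# Temperature ~0.029), so it becomes the root split.
###Output _____no_output_____ ###Markdown These interactions also explain the model comparison above: the fully grown tree scores 100% while the main-effects logistic regression stops near 86%, because no single linear score over the dummy columns can let humidity decide on sunny days and wind decide on rainy days at the same time.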
Camino mas corto.ipynb
###Markdown Problem: find the shortest route Functions to use ###Code def ordenar_lista(listado): ordenado = sorted(listado, key=lambda s: (len(s), s)) return ordenado def inserta(x, lst, i): """Returns a new list that results from inserting x into lst at position i. """ return lst[:i] + [x] + lst[i:] def inserta_multiple(x, lst): """Returns a list with the result of inserting x at every position of lst. """ return [inserta(x, lst, i) for i in range(len(lst) + 1)] def permuta(c): """Computes and returns a list with all the possible permutations of the elements contained in c. """ if len(c) == 0: return [[]] return sum([inserta_multiple(c[0], s) for s in permuta(c[1:])], []) ###Output _____no_output_____ ###Markdown Cities ###Code ciudades = ["A", "B", "C", "D"] distancia_ciudades = { "AB" : 9, "AC" : 7, "AD" : 8, "BC" : 10, "BD" : 15, "CD" : 4, } # mirror the distances nueva_distancia_ciudades = dict() for i in distancia_ciudades: start, end = i[0], i[1] nueva_distancia_ciudades[end + start] = distancia_ciudades[ start + end ] nueva_distancia_ciudades[start + end] = distancia_ciudades[ start + end ] distancia_ciudades = nueva_distancia_ciudades # end of mirroring the distances ###Output _____no_output_____ ###Markdown Create every possible order in which to visit the cities ###Code todas_opciones = ordenar_lista(permuta(ciudades)) todas_opciones_visitar_ciudades = list() for opc in todas_opciones: orden_visitar_ciudades = list() for i in range(len(opc) - 1): orden_visitar_ciudades.append( opc[i] + opc[i+1] ) orden_visitar_ciudades.append( opc[len(opc)-1] + opc[0] ) todas_opciones_visitar_ciudades.append(orden_visitar_ciudades) ###Output _____no_output_____ ###Markdown Out of all the options, find the shortest distance ###Code mejor_respuesta = (0, 1e100) for i in todas_opciones_visitar_ciudades: maxima_distancia = 0 for comp in i: maxima_distancia = maxima_distancia + distancia_ciudades[comp] if maxima_distancia < mejor_respuesta[1]: mejor_respuesta = i, maxima_distancia print("route: {} - distance: {} Km".format(mejor_respuesta[0], mejor_respuesta[1])) ###Output route: ['AB', 'BC', 'CD', 'DA'] - distance: 31 Km
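###Markdown The hand-rolled `permuta` above enumerates all n! orderings, so this search is brute force: fine for 4 cities, but it grows factorially with n. As a quick cross-check, a sketch of the same search using the standard library, assuming `ciudades` and `distancia_ciudades` from the cells above are in scope: ###Code
from itertools import permutations

best_route, best_distance = None, float('inf')
for order in permutations(ciudades):
    # build the legs of the tour, closing it back to the starting city
    legs = [order[i] + order[(i + 1) % len(order)] for i in range(len(order))]
    total = sum(distancia_ciudades[leg] for leg in legs)
    if total < best_distance:
        best_route, best_distance = legs, total
print("route: {} - distance: {} Km".format(best_route, best_distance))
###Output _____no_output_____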
notebooks/tune_gan.ipynb
###Markdown check the original implementation in audio_util.py ###Code def read_STOI(clean_file, noise_file, enhanced_file): clean_wav = sp.load_wav(clean_file) noise_wav = sp.load_wav(noise_file) enhanced_wav = sp.load_wav(enhanced_file) minL = min(len(clean_wav),len(enhanced_wav)) clean_wav = clean_wav[:minL] noise_wav = noise_wav[:minL] enhanced_wav = enhanced_wav[:minL] stoi_score = stoi(clean_wav, enhanced_wav + noise_wav, 44100, extended=True) * 2 return stoi_score def read_SIIB(clean_file, noise_file, enhanced_file): clean_wav = sp.load_wav(clean_file) noise_wav = sp.load_wav(noise_file) enhanced_wav = sp.load_wav(enhanced_file) minL = min(len(clean_wav),len(enhanced_wav)) clean_wav = clean_wav[:minL] noise_wav = noise_wav[:minL] enhanced_wav = enhanced_wav[:minL] siib_score = au.SIIB_Wrapper_ger(clean_wav, enhanced_wav + noise_wav, 44100) return siib_score train_clean_paths = glob.glob(os.path.join(train_clean_dir, '*.wav')) train_clean_paths.sort() stoi_list = [] siib_list = [] for train_clean_path in train_clean_paths[0:3]: train_noise_path = os.path.join(train_noise_dir, os.path.basename(train_clean_path)) train_enhanced_path = os.path.join(train_enhanced_dir, os.path.basename(train_clean_path)) stoi_score = read_STOI(train_clean_path, train_noise_path, train_enhanced_path) siib_score = read_SIIB(train_clean_path, train_noise_path, train_enhanced_path) print('STOI = ' + str(stoi_score) + ', SIIB = ' + str(siib_score)) stoi_list.append(stoi_score) siib_list.append(siib_score) print('average STOI = ' + str(sum(stoi_list) / len(stoi_list)) + ', max STOI = ' + str(max(stoi_list)) + ', min STOI = ' + str(min(stoi_list))) print('average SIIB = ' + str(sum(siib_list) / len(siib_list)) + ', max SIIB = ' + str(max(siib_list)) + ', min SIIB = ' + str(min(siib_list))) ###Output average STOI = 1.0134639106480265, max STOI = 1.2377594475749287, min STOI = 0.7265122878895199 average SIIB = 0.9323891914798373, max SIIB = 0.9999852633470133, min SIIB = 0.7979607519752139 ###Markdown implement functions for JR projects. ###Code def logistic_function(x, a, b): y = 1/(1+np.exp(-a*(x-b))) return y def mapping_func_jr(x): # german #y = 1/(1+np.exp(-14.83*(x+0.66))) # first try.
#y = 1/(1+np.exp(-0.066*(x-54))) y = 1/(1+np.exp(-0.00924*(x-170))) return y def normalize_stoi(x): y = 1/(1+np.exp(-4.62*(x-0.5))) return y # def SIIB_Wrapper_jr(x,y,fs): # minL = min(len(x),len(y)) # x = x[:minL] # y = y[:minL] # M = len(x)/fs # if(M<20): # x = np.hstack([x]*round(50/M)) # y = np.hstack([y]*round(50/M)) # #return mapping_func_jr(SIIB(x,y,fs,gauss=True)) # return SIIB(x,y,fs,gauss=True) def read_wavs(clean_file, noise_file, enhanced_file): clean_wav = sp.load_wav(clean_file) noise_wav = sp.load_wav(noise_file) enhanced_wav = sp.load_wav(enhanced_file) minL = min(len(clean_wav), len(noise_wav), len(enhanced_wav)) clean_wav = clean_wav[:minL] noise_wav = noise_wav[:minL] enhanced_wav = enhanced_wav[:minL] return clean_wav, noise_wav, enhanced_wav def read_siib(clean_file, noise_file, enhanced_file, remove_noise=False, mapping=False, sampling_frequency=44100): wav_clean, wav_noise, wav_enhanced = read_wavs(clean_file, noise_file, enhanced_file) if remove_noise: wav_mixed = wav_enhanced else: wav_mixed = wav_noise + wav_enhanced M = len(wav_clean)/sampling_frequency if(M < 20): wav_clean = np.hstack([wav_clean]*round(50/M)) wav_mixed = np.hstack([wav_mixed]*round(50/M)) siib_score = SIIB(wav_clean, wav_mixed, sampling_frequency, gauss=True) if mapping: siib_score = mapping_func_jr(siib_score) return siib_score def read_stoi(clean_file, noise_file, enhanced_file, remove_noise=False, mapping=False, sampling_frequency=44100): wav_clean, wav_noise, wav_enhanced = read_wavs(clean_file, noise_file, enhanced_file) if remove_noise: stoi_score = stoi(wav_clean, wav_enhanced, sampling_frequency, extended=True) else: stoi_score = stoi(wav_clean, wav_enhanced + wav_noise, sampling_frequency, extended=True) if mapping: stoi_score = normalize_stoi(stoi_score) return stoi_score wav_clean_paths = glob.glob(os.path.join(train_clean_dir, '*.wav')) wav_clean_path = wav_clean_paths[10] wav_noise_path = wav_clean_path.replace(train_clean_dir, train_noise_dir) wav_enhanced_path = wav_clean_path.replace(train_clean_dir, train_enhanced_dir) # x = calc_siib_file(wav_clean_path, wav_noise_path, wav_enhanced_path, sampling_frequency=44100) # _, y = calc_normalized_siib_file(wav_clean_path, wav_noise_path, wav_enhanced_path, sampling_frequency=44100) siib_score = read_siib(wav_clean_path, wav_noise_path, wav_enhanced_path, mapping=False, sampling_frequency=44100) siib_score_n = read_siib(wav_clean_path, wav_noise_path, wav_enhanced_path, mapping=True, sampling_frequency=44100) stoi_score = read_stoi(wav_clean_path, wav_noise_path, wav_enhanced_path, mapping=False, sampling_frequency=44100) stoi_score_n = read_stoi(wav_clean_path, wav_noise_path, wav_enhanced_path, mapping=True, sampling_frequency=44100) print('SIIB: {0} ({1:.2})'.format(siib_score, siib_score_n)) print('STOI: {0:3.2} ({1:.2})'.format(stoi_score, stoi_score_n)) wav_noise_path # check the distribution of SIIB. 
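# A note on the loop below: it computes four variants of each metric per file:
#   *_x  lists hold the raw score with the noise signal added back in,
#   *_y  lists hold that score pushed through the logistic mapping (mapping=True),
#   *_x0 / *_y0 hold the noise-free counterparts (remove_noise=True).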
wav_clean_paths = glob.glob(os.path.join(train_clean_dir, '*.wav')) wav_clean_paths.sort() siib_y_list = [] siib_x_list = [] stoi_x_list = [] stoi_y_list = [] # no noise siib_x0_list = [] siib_y0_list = [] stoi_x0_list = [] stoi_y0_list = [] for i, wav_clean_path in enumerate(wav_clean_paths[:], start=1): wav_basename = os.path.basename(wav_clean_path) wav_noise_path = os.path.join(train_noise_dir, wav_basename) wav_enhanced_path = os.path.join(train_enhanced_dir, wav_basename) print('\r' + '{}/{}'.format(i, len(wav_clean_paths)), end="") #stoi_score = read_STOI(train_clean_path, train_noise_path, train_enhanced_path) #siib_x = calc_siib_file(wav_clean_path, wav_noise_path, wav_enhanced_path) #_, siib_y = calc_normalized_siib_file(wav_clean_path, wav_noise_path, wav_enhanced_path) siib_x = read_siib(wav_clean_path, wav_noise_path, wav_enhanced_path) siib_y = read_siib(wav_clean_path, wav_noise_path, wav_enhanced_path, mapping=True) stoi_x = read_stoi(wav_clean_path, wav_noise_path, wav_enhanced_path) stoi_y = read_stoi(wav_clean_path, wav_noise_path, wav_enhanced_path, mapping=True) siib_x0 = read_siib(wav_clean_path, wav_noise_path, wav_enhanced_path, remove_noise=True) siib_y0 = read_siib(wav_clean_path, wav_noise_path, wav_enhanced_path, remove_noise=True, mapping=True) stoi_x0 = read_stoi(wav_clean_path, wav_noise_path, wav_enhanced_path, remove_noise=True) stoi_y0 = read_stoi(wav_clean_path, wav_noise_path, wav_enhanced_path, remove_noise=True, mapping=True) siib_x_list.append(siib_x) siib_y_list.append(siib_y) stoi_x_list.append(stoi_x) stoi_y_list.append(stoi_y) siib_x0_list.append(siib_x0) siib_y0_list.append(siib_y0) stoi_x0_list.append(stoi_x0) stoi_y0_list.append(stoi_y0) # np.save('siib_x_list.npy', siib_x_list) # np.save('siib_y_list.npy', siib_y_list) plt.plot(siib_y_list, stoi_x_list, 'b.') #plt.plot(siib_y0_list, stoi_y0_list, 'r.') #plt.xlabel('SIIB', fontsize=14) #plt.ylabel('normalized SIIB', fontsize=14) plt.plot(siib_x_list, siib_y_list, '.') plt.xlabel('SIIB', fontsize=14) plt.ylabel('normalized SIIB', fontsize=14) np.min(siib_x_list) ###Output _____no_output_____ ###Markdown calculate the appropriate a and b ###Code siib_y_np = np.array(siib_y_list) siib_x_np = np.array(siib_x_list) popt, pcov = curve_fit(logistic_function, siib_x_np, siib_y_np) print(popt) # why not just calculate a and b analytically? #np.max(siib_x_list) # 318.6 #np.min(siib_x_list) # 27.4 # so find a and b to map siib 75 --> 0.8, siib 33 --> 0.2. def calc_a_b(x_1, y_1, x_2, y_2): a = np.log((1/y_1 - 1) / (1/y_2 - 1))/(x_2 - x_1) b = x_1 + np.log(1/y_1 - 1)/a return a, b a, b = calc_a_b(320, 0.8, 20, 0.2) a, b = calc_a_b(0.8, 0.8, 0.2, 0.2) print(a) print(b) ###Output 4.620981203732968 0.5
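###Markdown `calc_a_b` above inverts the logistic analytically: from y = 1/(1 + exp(-a(x - b))) it follows that a = ln((1/y1 - 1)/(1/y2 - 1))/(x2 - x1) and b = x1 + ln(1/y1 - 1)/a, so the curve passes through both calibration points exactly. A quick sanity check, assuming `logistic_function` and `calc_a_b` from the cells above are in scope: ###Code
a, b = calc_a_b(320, 0.8, 20, 0.2)
for x, y in [(320, 0.8), (20, 0.2)]:
    # the fitted curve must reproduce both calibration targets
    assert abs(logistic_function(x, a, b) - y) < 1e-9, (x, y)
print('a = {:.5f}, b = {:.1f}'.format(a, b))
# prints a = 0.00924, b = 170.0 -- the same constants
# hard-coded in mapping_func_jr above
###Output _____no_output_____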
08 Network/Network.ipynb
###Markdown The below environment is required for this notebook to work ###Code print(__import__('sys').version) !conda list -n NLP37 ###Output _____no_output_____ ###Markdown https://www.lfd.uci.edu/~gohlke/pythonlibs/python-igraph ###Code %pylab qt from tqdm import tqdm from toolz import curry, compose from itertools import repeat from types import FunctionType from itertools import combinations from dateutil.parser import parse from datetime import datetime as dt from datetime import timedelta from dateutil.tz import tz from matplotlib.ticker import StrMethodFormatter from collections import Counter import os import re import pytz import community import networkx as nx import igraph as ig import pandas as pd import math try: import cPickle as pickle except: import pickle def save_pickle(filename, data): print('Pickling data...') with open(os.path.normpath(filename), 'wb') as open_file: pickle.dump(data, open_file) def load_pickle(filename): print('Loading pickled data...') with open(os.path.normpath(filename), 'rb') as open_file: return pickle.load(open_file) def unpack_input(data): output = {} for doc in data: date = doc['datetime'] if date not in output: output[date] = {'docs': []} output[date]['docs'].append(doc) return output def calculate_node_degree(data): for date in data: for doc in data[date]['docs']: for edge in doc['edges']: for node in edge['nodes']: data[date]['nodes'][node] += edge['weight'] return data def sentence_significance(omissions, data): for date in data: for doc in data[date]['docs']: nodes = {} edges = [] doc_ents = [] for sentence in doc['sentences']: doc_ents.extend(sentence) doc_ents = {ent['word'] for ent in doc_ents if ent['tag'] not in omissions} for ent in doc_ents: if ent not in nodes: nodes[ent] = {'weight': 0, 'neighbours': {}} for sentence in doc['sentences']: counts = entity_counter(sentence, omissions) sig, n_ent = compute_significance(counts) ents = combinations(list(sig.keys()), 2) for ent1, ent2 in ents: if ent1 != ent2: if ent2 in nodes[ent1]['neighbours']: nodes[ent1]['neighbours'][ent2]['weight'] += sig[ent1] + sig[ent2] else: edge = {'weight': sig[ent1] + sig[ent2], 'nodes': [ent1, ent2]} nodes[ent1]['neighbours'][ent2] = edge nodes[ent2]['neighbours'][ent1] = edge edges.append(edge) doc['nodes'] = nodes doc['edges'] = edges return data def document_significance(omissions, data): for date in data: for doc in data[date]['docs']: doc_ents = [] for sentence in doc['sentences']: doc_ents.extend(sentence) if 'nodes' not in doc: doc['nodes'] = {} doc['edges'] = [] counts = entity_counter(doc_ents, omissions) sig, n_ent = compute_significance(counts) doc_ents = {ent['word'] for ent in doc_ents if ent['tag'] not in omissions} for ent in doc_ents: if ent not in doc['nodes']: doc['nodes'][ent] = {'weight': 0, 'neighbours': {}} ents = combinations(doc_ents, 2) for ent1, ent2 in ents: if ent1 in sig: n1 = sig[ent1] else: n1 = 0 if ent2 in sig: n2 = sig[ent2] else: n2 = 0 if ent2 in doc['nodes'][ent1]['neighbours']: doc['nodes'][ent1]['neighbours'][ent2]['weight'] += n1 + n2 else: edge = {'weight': n1 + n2, 'nodes': [ent1, ent2]} doc['nodes'][ent1]['neighbours'][ent2] = edge doc['nodes'][ent2]['neighbours'][ent1] = edge doc['edges'].append(edge) return data def sentence_co_occurrence(omissions, data): for date in data: for doc in data[date]['docs']: doc['nodes'] = {} doc['edges'] = [] for sentence in doc['sentences']: sentence = [ent['word'] for ent in sentence if ent['tag'] not in omissions] for ent in set(sentence): if ent not in doc['nodes']: 
doc['nodes'][ent] = {'weight': 0, 'neighbours': {}} ents = combinations(sentence, 2) for ent1, ent2 in ents: if ent1 != ent2: if ent2 in doc['nodes'][ent1]['neighbours']: doc['nodes'][ent1]['neighbours'][ent2]['weight'] += 1 else: edge = {'weight': 1, 'nodes': [ent1, ent2]} doc['nodes'][ent1]['neighbours'][ent2] = edge doc['nodes'][ent2]['neighbours'][ent1] = edge doc['edges'].append(edge) return data def document_co_occurrence(omissions, data): for date in data: for doc in data[date]['docs']: doc_ents = [] if 'nodes' not in doc: doc['nodes'] = {} doc['edges'] = [] for sentence in doc['sentences']: doc_ents.extend(sentence) doc_ents = [ent['word'] for ent in doc_ents if ent['tag'] not in omissions] for ent in set(doc_ents): if ent not in doc['nodes']: doc['nodes'][ent] = {'weight': 0, 'neighbours': {}} ents = combinations(doc_ents, 2) for ent1, ent2 in ents: if ent1 != ent2: if ent2 in doc['nodes'][ent1]['neighbours']: doc['nodes'][ent1]['neighbours'][ent2]['weight'] += 1 else: edge = {'weight': 1, 'nodes': [ent1, ent2]} doc['nodes'][ent1]['neighbours'][ent2] = edge doc['nodes'][ent2]['neighbours'][ent1] = edge doc['edges'].append(edge) return data def compute_significance(counter): sig = {} n_ent = sum(list(counter.values())) for entity in counter: sig[entity] = counter[entity] / n_ent return sig, n_ent def entity_counter(ents, omissions): counter = {} for ent in ents: if ent['tag'] not in omissions: if ent['word'] in counter: counter[ent['word']] += 1 else: counter[ent['word']] = 1 return counter def document_entities(data): for date in data: for doc in data[date]['docs']: doc['ents'] = [] for sentence in doc['sentences']: for ent in sentence: doc['ents'].append(ent) return data def aggregate_dictionary_nodes_and_edges(data): for date in data: nodes, edges = {}, [] for doc in data[date]['docs']: for node in doc['nodes']: nodes[node] = 0 edges.extend(doc['edges']) data[date]['nodes'] = nodes data[date]['edges'] = edges return data def list_data_structure(data): output = [] for date in data: for doc in data[date]['docs']: output.append(doc) return output def strip_content(data): for date in data: docs = [] for doc in data[date]['docs']: del doc['sentences'] docs.append(doc) data[date]['docs'] = docs return data def filter_dates(data, start='31 Dec 2016', end='01 Jan 2018'): start = dt.strptime(start, '%d %b %Y') end = dt.strptime(end, '%d %b %Y') output = {} for date in data: if start < date < end: output[date] = data[date] return output folder = r'demo\output\person' fnames = [ r'\01 sent sig', r'\02 doc sig', r'\03 doc sent sig', r'\04 sent count', r'\05 doc count', r'\06 doc sent count', ] omitted_tags = set(['location', 'organization']) funcs = [ compose(calculate_node_degree, aggregate_dictionary_nodes_and_edges, #curry(document_significance)(omitted_tags), curry(sentence_significance)(omitted_tags), #curry(document_co_occurrence)(omitted_tags), #curry(sentence_co_occurrence)(omitted_tags), filter_dates, unpack_input, ), compose(calculate_node_degree, aggregate_dictionary_nodes_and_edges, curry(document_significance)(omitted_tags), #curry(sentence_significance)(omitted_tags), #curry(document_co_occurrence)(omitted_tags), #curry(sentence_co_occurrence)(omitted_tags), filter_dates, unpack_input, ), compose(calculate_node_degree, aggregate_dictionary_nodes_and_edges, curry(document_significance)(omitted_tags), curry(sentence_significance)(omitted_tags), #curry(document_co_occurrence)(omitted_tags), #curry(sentence_co_occurrence)(omitted_tags), filter_dates, unpack_input, ), 
compose(calculate_node_degree, aggregate_dictionary_nodes_and_edges, #curry(document_significance)(omitted_tags), #curry(sentence_significance)(omitted_tags), #curry(document_co_occurrence)(omitted_tags), curry(sentence_co_occurrence)(omitted_tags), filter_dates, unpack_input, ), compose(calculate_node_degree, aggregate_dictionary_nodes_and_edges, #curry(document_significance)(omitted_tags), #curry(sentence_significance)(omitted_tags), curry(document_co_occurrence)(omitted_tags), #curry(sentence_co_occurrence)(omitted_tags), filter_dates, unpack_input, ), compose(calculate_node_degree, aggregate_dictionary_nodes_and_edges, #curry(document_significance)(omitted_tags), #curry(sentence_significance)(omitted_tags), curry(document_co_occurrence)(omitted_tags), curry(sentence_co_occurrence)(omitted_tags), filter_dates, unpack_input, ), ] ###Output _____no_output_____ ###Markdown The below cell will generate graphs using Python data structures ###Code for i, f in enumerate(fnames): data = load_pickle(r'data\wiki.pkl') data = funcs[i](data) save_pickle(folder + r'\graphs' + f + r' dic.pkl', data) lis = list_data_structure(data) del data save_pickle(folder + r'\graphs' + f + r' list.pkl', lis) del lis data = load_pickle(r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\08 Network\demo\output\person\graphs\02 doc sig dic.pkl') ###Output _____no_output_____ ###Markdown Networkx graph stuff ###Code def build_graph(data, use_node_weight=True): print('Building graph...') graph = nx.Graph() for node in data['nodes']: if use_node_weight: if node not in graph: graph.add_node(node, weight=data['nodes'][node]['weight']) graph.node[node]['viz'] = {'size': data['nodes'][node]['weight']} else: graph.node[node]['weight'] += data['nodes'][node]['weight'] graph.node[node]['viz']['size'] += data['nodes'][node]['weight'] else: if node not in graph: graph.add_node(node) for edge in tqdm(data['edges']): if graph.has_edge(*edge['nodes']): graph[edge['nodes'][0]][edge['nodes'][1]]['weight'] += edge['weight'] else: graph.add_edge(*edge['nodes'], weight=edge['weight']) return graph def aggregate_list_nodes_and_edges(lis): output = {'nodes': {}, 'edges': []} for doc in lis: for node in doc['nodes']: if node in output['nodes']: output['nodes'][node]['weight'] += doc['nodes'][node]['weight'] else: output['nodes'][node] = {'weight': doc['nodes'][node]['weight']} output['edges'].extend(doc['edges']) return output def save_as_gexf(filename, graph): nx.write_gexf(graph, os.path.normpath(filename)) def save_as_graphml(filename, graph): nx.write_graphml(graph, os.path.normpath(filename)) def datetime_filter(data, start, end): def datetime_interpretor(*args, default_tzinfo=tz.gettz('UTC'), **kwargs): dt = parse(*args, **kwargs)[0] return dt.replace(tzinfo=dt.tzinfo or default_tzinfo) start = parse(start, fuzzy_with_tokens=True)[0] end = parse(end, fuzzy_with_tokens=True)[0] return [doc for doc in data if start <= doc['datetime'] < end] def plot_hists(input_, colour='r', n_bins=21, use_logx=True, **kwargs): # Now takes a title argument to enable story titles to stay with stories ''' Takes a list of 1D numpy arrays and plots histograms. Args: list_1D_arrays - a list of 1D numpy arrays. Returns: histograms - will output a figure of all 1D arrays as histograms. ''' style.use('seaborn') if n_bins > len(input_): n_bins = len(input_) - 2 if use_logx: bins = logspace(log10(min(input_)), log10(max(input_)), n_bins) # argument list.
else: bins = linspace(min(input_), max(input_), n_bins) fig = figure() ax = gca() n, bins, patches = hist(input_, bins, #color=colour, #alpha=1, #density=True, ec='k', rwidth=0.9, #histtype='bar', #facecolor='blue', #log=True, #label='' ) bin_centers = 0.5 * log10(bins[:-1] + bins[1:]) col = bin_centers - min(bin_centers) col /= max(col) cm = plt.cm.get_cmap('twilight_shifted') for c, p in zip(col, patches): plt.setp(p, 'facecolor', cm(c)) # fig.text(0.05, 0.5, 'Frequency', # Y-axis label # horizontalalignment='center', # verticalalignment='center', rotation=90) # fig.text(0.5, 0.01, 'Degree', # X-axis label # horizontalalignment='center', # verticalalignment='center') ax.set_xscale("log") ax.set_yscale("log") xticks(fontsize=14) yticks(fontsize=14) legend(title=kwargs.get('title', ''), title_fontsize=16) fname = kwargs.get('fname', None) if fname is not None: savefig(fname) #fig.legend(loc=(0.16, 0.72), fontsize=9, frameon=False) if kwargs.get('show', False): show() def assign_partition(graph, partition): for node, cluster in partition.items(): graph.node[node]['Cluster'] = cluster return graph def counter_cosine_similarity(c1, c2): terms = set(c1).union(c2) dotprod = sum(c1.get(k, 0) * c2.get(k, 0) for k in terms) mag1 = math.sqrt(sum(c1.get(k, 0)**2 for k in terms)) mag2 = math.sqrt(sum(c2.get(k, 0)**2 for k in terms)) return dotprod / (mag1 * mag2) labels = ['sentence significance', 'document significance', 'sentence and document significance', 'sentence count', 'document count', 'sentence and document count' ] ###Output _____no_output_____ ###Markdown Build graphs and save as .graphml files ###Code for fname in fnames: data = load_pickle(folder + r'\graphs' + fname + ' list.pkl') graph = build_graph(aggregate_list_nodes_and_edges(data), use_node_weight=False) del data save_as_graphml(folder + r'\graphml' + fname + r'.graphml', graph) del graph ###Output Loading pickled data... Building graph... ###Markdown This cell will run the 'effect of co-occurrence rule on degree' experiment ###Code for fname, label in zip(fnames, labels): graph = ig.read(folder + r'\graphml' + fname + r'.graphml') plot_hists( array(graph.strength(weights=graph.es['weight'])), use_logx=True, fname=r'demo\figs' \ + fname \ + ' ' + ' '.join({'person', 'location', 'organization'} - omitted_tags) + ' (weighted).png', title=label) ###Output No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend. No handles with labels found to put in legend. 
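###Markdown `counter_cosine_similarity` above treats two Counters as sparse vectors over the union of their keys and returns the cosine of the angle between them (1.0 for identical frequency profiles, 0.0 for disjoint ones). It is not exercised in the surrounding cells, so here is a tiny illustrative example with made-up entity counts: ###Code
from collections import Counter

c1 = Counter({'Trump': 3, 'Putin': 1})
c2 = Counter({'Trump': 3, 'Merkel': 1})
# dot product = 3*3 = 9; both magnitudes = sqrt(3**2 + 1**2) = sqrt(10)
print(counter_cosine_similarity(c1, c1))  # identical counters -> 1.0
print(counter_cosine_similarity(c1, c2))  # 9 / 10 = 0.9
###Output _____no_output_____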
###Markdown The below cell will calculate the nodes with high degree centrality for each co-occurrence rule and save as a excel spreadsheet in the Networkx degree folder ###Code t50_savenames = [folder + r'\degree%s top 50.csv' % f for f in fnames] pkl_savenames = [folder + r'\degree%s degree.pkl' % f for f in fnames] for fname, t50, pkl in zip(fnames, t50_savenames, pkl_savenames): graph = ig.read(folder + r'\graphml' + fname + r'.graphml') unweighted_degree = array(graph.degree()) idx = argsort(-unweighted_degree) unweighted_degree = unweighted_degree[idx] unweighted_ents = array([i['id'] for i in list(graph.vs)])[idx] weighted_degree = array(graph.strength(weights=graph.es['weight'])) idx = argsort(-weighted_degree) weighted_degree = weighted_degree[idx] weighted_ents = array([i['id'] for i in list(graph.vs)])[idx] df = pd.DataFrame([unweighted_ents[: 50], unweighted_degree[: 50], weighted_ents[: 50], weighted_degree[: 50]]) df.T.to_csv(t50) save_pickle(pkl, pd.DataFrame([unweighted_ents, unweighted_degree, weighted_ents, weighted_degree])) del graph del idx del weighted_degree del unweighted_degree del weighted_ents del unweighted_ents del df ###Output Pickling data... Pickling data... Pickling data... Pickling data... Pickling data... Pickling data... ###Markdown The below cell will calculate the nodes with high pagerank centrality for each co-occurrence rule and save as a excel spreadsheet in the Networkx page rank folder ###Code t50_savenames = [folder + r'\page rank%s top 50.csv' % f for f in fnames] pkl_savenames = [folder + r'\page rank%s page rank.pkl' % f for f in fnames] for fname, t50, pkl in zip(fnames, t50_savenames, pkl_savenames): graph = ig.read(folder + r'\graphml' + fname + r'.graphml') unweighted_pagerank = array(graph.pagerank()) idx = argsort(-unweighted_pagerank) unweighted_pagerank = unweighted_pagerank[idx] unweighted_ents = array([i['id'] for i in list(graph.vs)])[idx] weighted_pagerank = array(graph.pagerank(weights=graph.es['weight'])) idx = argsort(-weighted_pagerank) weighted_pagerank = weighted_pagerank[idx] weighted_ents = array([i['id'] for i in list(graph.vs)])[idx] df = pd.DataFrame([unweighted_ents[: 50], unweighted_pagerank[: 50], weighted_ents[: 50], weighted_pagerank[: 50]]) df.T.to_csv(t50) save_pickle(pkl, pd.DataFrame([unweighted_ents, unweighted_pagerank, weighted_ents, weighted_pagerank])) del graph del idx del weighted_pagerank del unweighted_pagerank del weighted_ents del unweighted_ents del df t50_savenames = [folder + r'\eigen%s top 50.csv' % f for f in fnames] pkl_savenames = [folder + r'\eigen%s eigen.pkl' % f for f in fnames] for fname, t50, pkl in zip(fnames, t50_savenames, pkl_savenames): graph = ig.read(folder + r'\graphml' + fname + r'.graphml') unweighted_eigen = array(graph.evcent()) idx = argsort(-unweighted_eigen) unweighted_eigen = unweighted_eigen[idx] unweighted_ents = array([i['id'] for i in list(graph.vs)])[idx] weighted_eigen = array(graph.evcent(weights=graph.es['weight'])) idx = argsort(-weighted_eigen) weighted_eigen = weighted_eigen[idx] weighted_ents = array([i['id'] for i in list(graph.vs)])[idx] df = pd.DataFrame([unweighted_ents[: 50], unweighted_eigen[: 50], weighted_ents[: 50], weighted_eigen[: 50]]) df.T.to_csv(t50) save_pickle(pkl, pd.DataFrame([unweighted_ents, unweighted_eigen, weighted_ents, weighted_eigen])) del graph del idx del weighted_eigen del unweighted_eigen del weighted_ents del unweighted_ents del df fnames = [ r'\01 sent sig', r'\03 doc sent sig', r'\04 sent count', r'\06 doc sent count', ] folder 
fnames = [ r'\01 sent sig', r'\03 doc sent sig', r'\04 sent count', r'\06 doc sent count', ] folder = r'demo\output\person' labels = ['sentence significance', 'sentence and document significance', 'sentence count', 'sentence and document count' ] data = [] fig, axs = subplots(4, 2, figsize=(9, 16), sharey=True) for i, fname, lab in zip(range(4), fnames, labels): style.use('seaborn') graph = ig.read(folder + r'\graphml' + fname + r'.graphml') degrees = array(graph.strength()) eigen = array(graph.evcent()) axs[i, 0].scatter(degrees, eigen, s=3) axs[i, 0].legend(title=lab, title_fontsize=11) weighted_degrees = array(graph.strength(weights=graph.es['weight'])) weighted_eigen = array(graph.evcent(weights=graph.es['weight'])) #page_ranks = array(graph.pagerank()) axs[i, 1].scatter(weighted_degrees, weighted_eigen, s=3) axs[i, 1].legend(title=lab + ' (weighted)', title_fontsize=12) data.append([[degrees, eigen], [weighted_degrees, weighted_eigen]]) # populate data for the second figure below subplots_adjust(wspace=0.1, hspace=0.2) ylabel('eigenvector centrality', fontsize=14) xlabel('degree', fontsize=14) gca().yaxis.set_label_coords(-1.23, 2.2) gca().xaxis.set_label_coords(-0.03, -0.3) fig, axs = subplots(4, 2, figsize=(9, 16), sharey=True) for i, fname, lab in zip(range(4), fnames, labels): style.use('seaborn') graph = ig.read(folder + r'\graphml' + fname + r'.graphml') weighted_degrees = array(graph.strength(weights=graph.es['weight'])) weighted_eigen = array(graph.evcent(weights=graph.es['weight'])) #page_ranks = array(graph.pagerank()) axs[i, 0].scatter(data[i][0][0], data[i][0][1], s=3) axs[i, 0].legend(title=lab, title_fontsize=12) degrees = array(graph.strength()) eigen = array(graph.evcent()) axs[i, 1].scatter(data[i][1][0], data[i][1][1], s=3) axs[i, 1].legend(title=lab + ' (weighted)', title_fontsize=11) #data.append([[weighted_degrees, weighted_eigen], [degrees, eigen]]) subplots_adjust(wspace=0.1, hspace=0.2) ylabel('eigenvector centrality', fontsize=14) xlabel('degree', fontsize=14) gca().yaxis.set_label_coords(-1.23, 2.2) gca().xaxis.set_label_coords(-0.03, -0.3) fnames = [ r'\01 sent sig list per org', r'\02 doc sig list per org', r'\03 doc sent sig list per org', r'\04 sent count list per org', r'\05 doc count list per org', r'\06 doc sent count list per org', ] labels = ['sentence significance', 'document significance', 'sentence and document significance', 'sentence count', 'document count', 'sentence and document count' ] for fname in fnames: data = load_pickle(r'data' + fname + '.pkl') graph = build_graph(aggregate_list_nodes_and_edges(data), use_node_weight=False) del data save_as_graphml(r'graphml' + fname + r'.graphml', graph) del graph for fname, label in zip(fnames, labels): graph = ig.read(r'graphml' + fname + r'.graphml') graph.es.select(weight=0).delete() plot_hists( array(graph.degree()), use_logx=True, fname=r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\__Thesis__\Figures\Degree co ocurrence rule' \ + fname \ + '.png', title=label) graph = ig.read(r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\11 Timeseries\output\person\graphml\01 sent sig.graphml') stop_ents = {'Donald Trump', 'Spot Development', 'United States', 'Ramadan'} eigen = array(graph.evcent()) ents = array([i['id'] for i in list(graph.vs)]) key = {k: v for k, v in zip(ents, eigen)} data = load_pickle( r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\11 Timeseries\output\person\graphs\01 sent sig dic.pkl') del data del graph ###Output _____no_output_____
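###Markdown A quick illustration (an added sketch, not from the original run) of how the `key` mapping built above can be consumed downstream: rank the entities by eigenvector centrality once the stop entities are filtered out. It assumes `key` and `stop_ents` from the previous cell. ###Code
top = sorted(
    ((ent, score) for ent, score in key.items() if ent not in stop_ents),
    key=lambda kv: kv[1],
    reverse=True,
)
top[:10]
###Output _____no_output_____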
###Markdown The below code is for thresholding / removing trends ###Code def load_graph_data(o): with open(os.path.normpath(o['graph_data_path']), 'rb') as open_file: o['input_data'] = pickle.load(open_file) return o def harvest_unique_entities(o): print('Harvesting nodes...') o['entity_set'] = set() progress = tqdm(range(len(o['input_data']))) for date in o['input_data']: for node in o['input_data'][date]['nodes']: o['entity_set'].add(node) progress.update(1) o['entity_set'] = list(o['entity_set']) return o def create_entity_key(o): o['ent_key'] = {ent: i for i, ent in enumerate(o['entity_set'])} return o def create_timeseries(o): print('Building 2d array...') o['arr'] = zeros((len(o['entity_set']), 365)) o['dates'] = sorted(list(o['input_data'].keys())) for i in tqdm(range(365)): for ent in o['input_data'][o['dates'][i]]['nodes']: o['arr'][o['ent_key'][ent], i] = o['input_data'][o['dates'][i]]['nodes'][ent] del o['input_data'] return o def change(o): print('Calculating absolute change over time...') o['arr'] = abs(o['arr'][:, o['change_width']: ] - o['arr'][:, : -o['change_width']]) return o def rolling_window(o): print('Applying rolling functions...') shape = o['arr'].shape[:-1] + (o['arr'].shape[-1] - o['window_size'] + 1, o['window_size']) strides = o['arr'].strides + (o['arr'].strides[-1],) o['rolling_window'] = np.lib.stride_tricks.as_strided(o['arr'], shape=shape, strides=strides) return o def map_functions(o): o['xbar'] = mean(o['rolling_window'], -1) o['sigma'] = std(o['rolling_window'], -1) return o def apply_threshold(o): print('Applying threshold...') o['output'] = abs(o['arr'] - o['xbar']) o['output'][o['output'] <= o['sigma'] * o['threshold']] = 0.0 return o def offset_dates(o): o['dates'] = o['dates'][o['window_size'] - 1: ] return o def offset_timeseries(o): o['arr'] = o['arr'][:, o['window_size'] - 1:] return o def invert_ent_key(o): for ent in list(o['ent_key'].keys()): o['ent_key'][o['ent_key'][ent]] = ent return o def harvest_peaking_entities(o): temp = o['output'].copy() o['peak_depth'] = o.get('peak_depth', 1) o['peaking_entities'] = {date: [] for date in o['dates']} print('Harvesting peaking entities...') for depth in range(1, o['peak_depth'] + 1): print('Peak depth: %d' % depth) peaking_idx = argmax(temp, axis=0) for j in tqdm(range(len(o['dates']))): if temp[peaking_idx[j], j] > 0.0: o['peaking_entities'][o['dates'][j]].append(o['ent_key'][peaking_idx[j]]) temp[peaking_idx] = 0.0 return o def prune_documents(o): with open(os.path.normpath(o['document_data_path']), 'rb') as open_file: data = pickle.load(open_file) progress = tqdm(range(len(data))) print('Pruning documents...') pruned_data = [] for doc in data: date = doc['datetime'] if date in o['peaking_entities']: for ent in o['peaking_entities'][date]: if ent in doc['nodes']: pruned_data.append(doc) break progress.update(1) print('Saving...') with open(os.path.normpath(o['where_to_save']), 'wb') as open_file: pickle.dump(pruned_data, open_file) print('Done.') return o ###Output _____no_output_____ ###Markdown Apply threshold to graph ###Code graph_data_path = r'output\all\graphs\03 doc sent sig dic.pkl' document_data_path = r'output\all\graphs\03 doc sent sig list.pkl' where_to_save
= r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\08 Network\data\wiki doc sent sig 1.9.pkl' # If you want to see the effect of applying different thresholds mini_pipeline = compose( apply_threshold, offset_dates, offset_timeseries, map_functions, rolling_window, create_timeseries, create_entity_key, harvest_unique_entities, load_graph_data, ) # Will apply threshold and prune documents full_pipeline = compose( prune_documents, harvest_peaking_entities, invert_ent_key, apply_threshold, offset_dates, offset_timeseries, map_functions, rolling_window, create_timeseries, create_entity_key, harvest_unique_entities, load_graph_data, ) o = full_pipeline( dict( graph_data_path=graph_data_path, document_data_path=document_data_path, where_to_save=where_to_save, window_size=7, # Size of the window we will consider threshold=1.9, # Standard deviations. x > 2 = < 5% of normal distribution peak_depth=1, ), ) # del o def plot_entity_timeseries(o, entity, *args, **kwargs): xformat = kwargs.get('xformat', '%b') style.use(kwargs.get('style', 'seaborn')) figure(figsize=(10, 6)) [plot(o['dates'], o[arg][o['ent_key'][entity]] / max(o[arg][o['ent_key'][entity]])) for arg in args] ax = gca() setp(ax.xaxis.get_majorticklabels(), rotation=45) ax.xaxis.set_major_formatter(DateFormatter(xformat)) ax.tick_params(axis='both', which='major', labelsize=15) ax.tick_params(axis='both', which='minor', labelsize=15) ax.set_xlabel(kwargs.get('xlabel', 'Date (2017)'), fontsize=19) ax.set_ylabel(kwargs.get('ylabel', 'Weighted Degree'), fontsize=19) show() def create_date_range(start, end): start = dt.strptime(start, '%d %b %Y').date() end = dt.strptime(end, '%d %b %Y').date() step = timedelta(days=1) output = [] while start < end: output.append(start) start += step return sorted(output) def create_gtd_timeseries(data, key, val, start='01 Jan 2017', end='01 Jan 2018'): dt_range = create_date_range(start, end) dt_key = {date: i for i, date in enumerate(dt_range)} output = zeros(len(dt_key)) for date in data: for e in data[date]: if e[key] == val: output[dt_key[date]] += 1 return output def gtd_comparison_entity_timeseries(o, tag, gtd_ent, ent, *args, **kwargs): xformat = kwargs.get('xformat', '%b') style.use(kwargs.get('style', 'default')) figure(figsize=(10, 6)) plot(o['dates'], o['output'][o['ent_key'][ent]], c='k', label='weighted degree') ax1 = gca() setp(ax1.xaxis.get_majorticklabels(), rotation=45) ax1.tick_params(axis='both', which='major', labelsize=15) ax1.tick_params(axis='both', which='minor', labelsize=15) ax1.set_xlabel(kwargs.get('xlabel', 'Date (2017)'), fontsize=19) ax1.set_ylabel(kwargs.get('y1label', 'Weighted degree'), fontsize=19) ax1.yaxis.label.set_color('k') data = \ load_pickle(r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\__Data__\GTD\CritIII all regions GTD 1.pkl') start = kwargs.get('start', '01 Jan 2017') end = kwargs.get('end', '01 Jan 2018') gtd_timeseries = create_gtd_timeseries(data, tag, gtd_ent, start, end) dt_range = create_date_range(start, end) dt_key = {date: i for i, date in enumerate(dt_range)} ax2 = gca().twinx() ax2.plot(dt_range, gtd_timeseries, c='r', alpha=0.5) ax2.set_ylabel(kwargs.get('y2label', 'No. 
of terror events'), fontsize=19) ax2.yaxis.set_label_coords(1.05, 0.5) ax2.tick_params(axis='y', which='major', labelsize=15) ax2.locator_params(nbins=2) ax2.yaxis.label.set_color('r') ax2.xaxis.set_major_formatter(DateFormatter(xformat)) gcf().subplots_adjust(bottom=0.15) show() #ax.tick_params(axis='both', which='major', labelsize=15) #ax.tick_params(axis='both', which='minor', labelsize=15) # gca().set_xlabel('Date (2017)', fontsize=12) # gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}')) #gca().locator_params(nbins=4) # show() gtd_comparison_entity_timeseries(o, 'city', 'London', 'London') gtd_comparison_entity_timeseries(o, 'city', 'Kabul', 'Kabul') f = r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\__Thesis__\Figures\Networks\london bridge.graphml' save_as_graphml( f, build_graph( aggregate_list_nodes_and_edges( datetime_filter( load_pickle(where_to_save), '03 Jun 2017', '04 Jun 2017')), use_node_weight=False)) def create_key(partition): key = {} for ent, p in partition.items(): if p not in key: key[p] = [ent] else: key[p].append(ent) return key def remove_stop_ents(igraph, stop_ents): keys = [(v['id'], v.index) for v in igraph.vs] keys = list(sorted(keys, key=lambda x: x[1], reverse=True)) for ent, idx in keys: if ent in stop_ents: igraph.delete_vertices(idx) return igraph def eigen_partition(f, use_weights=False, stop_ents=None): igraph = ig.read(f) igraph = remove_stop_ents(igraph, stop_ents) if use_weights: weights = igraph.es['weight'] else: weights = None temp = igraph.community_leading_eigenvector(weights=weights) partition = {} for cluster, lis in enumerate(temp): for idx in lis: partition[igraph.vs['id'][idx]] = cluster return igraph, partition def get_tops(f, n, n_best, stop_ents=None, use_part_weights=False, use_cent_weights=False, ): igraph, partition = eigen_partition(f, use_weights=use_part_weights, stop_ents=stop_ents) counts = Counter(partition.values()) sum_ = sum(list(counts.values())) counts = {k: v / sum_ * 100 for k, v in counts.items()}.items() key = create_key(partition) if use_cent_weights: weights = igraph.es['weight'] else: weights = None lookup = array(igraph.evcent(weights=weights)) lookup = {k['id']: e for k, e in zip(igraph.vs, igraph.evcent(weights=weights))} maxi = {} for _ in range(n): for p in key: if key[p]: m = key[p][0] idx = {ent['id']: i for ent, i in zip(igraph.vs, range(len(list(igraph.vs))))} for ent in key[p][1:]: if ent in lookup: if lookup[ent]> lookup[m]: m = ent if p in maxi: maxi[p].append(m) else: maxi[p] = [m] igraph.delete_vertices(idx[m]) key[p].remove(m) best = list(sorted(counts, key=lambda x: x[1], reverse=True)) return [(i[1], maxi[i[0]]) for i in best[:n_best]] tops = get_tops(f, 10, 10, use_part_weights=True, use_cent_weights=True, stop_ents=stop_ents) for i in tops: print(i[0], *(j for j in i[1]), sep=', ') def netx_get_tops(f, n, n_best, start, end, stop_ents=None, use_cent_weights=False, ): netx = build_graph( aggregate_list_nodes_and_edges( datetime_filter( load_pickle(where_to_save), start, end)), use_node_weight=False) netx.remove_nodes_from(stop_ents) save_as_graphml(r'temp\temp.graphml', netx) partition = community.best_partition(netx) del netx igraph = ig.read(r'temp\temp.graphml') counts = Counter(partition.values()) sum_ = sum(list(counts.values())) counts = {k: v / sum_ * 100 for k, v in counts.items()}.items() key = create_key(partition) if use_cent_weights: weights = igraph.es['weight'] else: weights = None lookup = array(igraph.strength(weights=weights)) lookup = {k['id']: e for k, e 
in zip(igraph.vs, lookup)} maxi = {} for _ in range(n): for p in key: if key[p]: m = key[p][0] idx = {ent['id']: i for ent, i in zip(igraph.vs, range(len(list(igraph.vs))))} for ent in key[p][1:]: if ent in lookup: if lookup[ent]> lookup[m]: m = ent if p in maxi: maxi[p].append(m) else: maxi[p] = [m] igraph.delete_vertices(idx[m]) key[p].remove(m) best = list(sorted(counts, key=lambda x: x[1], reverse=True)) return [(i[1], maxi[i[0]]) for i in best[:n_best]] start, end = '03 Jun 2017', '04 Jun 2017' tops = netx_get_tops(where_to_save, 10, 10, start, end, stop_ents=stop_ents, use_cent_weights=True) for i in tops: print(i[0], *(j for j in i[1]), sep=', ') def eigen_cluster(fname, start='31 dec 2016', end='01 Jan 2018', use_weights=False): netx = build_graph( aggregate_list_nodes_and_edges( datetime_filter( load_pickle(fname), start, end)), use_node_weight=False) save_as_graphml(r'temp\temp.graphml', netx) del netx igraph = ig.read(r'temp\temp.graphml') if use_weights: weights = igraph.es['weight'] else: weights = None partition = igraph.community_leading_eigenvector(weights=weights) key = {} for cluster, lis in enumerate(partition): for ent in lis: key[igraph.vs['id'][ent]] = cluster del igraph netx = nx.read_graphml(r'temp\temp.graphml') for ent, cluster in key.items(): netx.node[ent]['partition'] = cluster return netx del o def gtd_doc_count_comparison(data, use_rolling_mean=False, window_size=5, start='01 Jan 2017', end='01 Jan 2018', **kwargs): def rolling_mean(arr, window_size=window_size): shape = arr.shape[:-1] + (arr.shape[-1] - window_size + 1, window_size) strides = arr.strides + (arr.strides[-1],) return mean(np.lib.stride_tricks.as_strided(arr, shape=shape, strides=strides), axis=1) xformat = kwargs.get('xformat', '%b') style.use(kwargs.get('style', 'default')) figure(figsize=(10, 6)) dt_range = create_date_range(start, end) dt_key = {date: i for i, date in enumerate(dt_range)} data_timeseries = zeros(len(dt_range)) for doc in data: if doc['datetime'].date() in dt_key: data_timeseries[dt_key[doc['datetime'].date()]] += 1 if use_rolling_mean: data_timeseries = rolling_mean(data_timeseries) offset = window_size - 1 else: offset = 0 plot(dt_range[offset:], data_timeseries, c='k', label='No. of documents') ax1 = gca() setp(ax1.xaxis.get_majorticklabels(), rotation=45) ax1.tick_params(axis='both', which='major', labelsize=15) ax1.tick_params(axis='both', which='minor', labelsize=15) ax1.set_xlabel(kwargs.get('xlabel', 'Date (2017)'), fontsize=19) ax1.set_ylabel(kwargs.get('y1label', 'No. of documents'), fontsize=19) ax1.yaxis.label.set_color('k') gtd_data = \ load_pickle(r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\__Data__\GTD\CritIII all regions GTD 1.pkl') start = kwargs.get('start', '01 Jan 2017') end = kwargs.get('end', '01 Jan 2018') gtd_timeseries = zeros(len(dt_range)) for date in dt_range: for e in gtd_data[date]: gtd_timeseries[dt_key[date]] += 1 if use_rolling_mean: gtd_timeseries = rolling_mean(gtd_timeseries) ax2 = gca().twinx() ax2.plot(dt_range[offset:], gtd_timeseries, c='r') ax2.set_ylabel(kwargs.get('y2label', 'No. 
of terror events'), fontsize=19) ax2.yaxis.set_label_coords(1.05, 0.5) ax2.tick_params(axis='y', which='major', labelsize=15) ax2.locator_params(nbins=2) ax2.yaxis.label.set_color('r') ax2.xaxis.set_major_formatter(DateFormatter(xformat)) ax2.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}')) gcf().subplots_adjust(bottom=0.15) #legend() show() data = load_pickle(r'data\wiki.pkl') gtd_doc_count_comparison(data) gtd_doc_count_comparison(data, use_rolling_mean=True, window_size=14) gtd_doc_count_comparison(load_pickle(where_to_save), use_rolling_mean=True, window_size=15) gtd_doc_count_comparison(load_pickle(where_to_save)) graph = ig.read(r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\11 Timeseries\output\person\graphml\01 sent sig.graphml') unweighted_eigen = array(graph.evcent()) idx = argsort(-unweighted_eigen) unweighted_eigen = unweighted_eigen[idx] unweighted_ents = array([i['id'] for i in list(graph.vs)])[idx] weighted_eigen = array(graph.evcent(weights=graph.es['weight'])) idx = argsort(-weighted_eigen) weighted_eigen = weighted_eigen[idx] weighted_ents = array([i['id'] for i in list(graph.vs)])[idx] df1 = pd.DataFrame([unweighted_ents[: 50], unweighted_eigen[: 50]]) df3 = pd.DataFrame([unweighted_ents[: 50], unweighted_eigen[: 50], weighted_ents[: 50], weighted_eigen[: 50]]) df1.T.to_csv(r'temp\t501.csv') df3.T.to_csv(r'temp\t503.csv') data = load_pickle(r'C:\Users\Simon\OneDrive - University of Exeter\__Project__\__Data__\07 Network\Resolved 4.pkl') stop_ents = {'Donald Trump', 'Ramadan', 'United States'} ###Output _____no_output_____
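###Markdown The rolling statistics above are built on a numpy stride-tricks window; the cell below is a small self-contained check of that construction (an added sketch, it does not depend on any notebook state). ###Code
import numpy as np

x = np.arange(6, dtype=float)  # [0, 1, 2, 3, 4, 5]
w = 3
# Same construction as rolling_window() above: a view with one extra axis
# of length w, stepping one element at a time along the original array.
shape = x.shape[:-1] + (x.shape[-1] - w + 1, w)
strides = x.strides + (x.strides[-1],)
windows = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
assert windows.shape == (4, 3)
assert np.allclose(windows.mean(-1), [1.0, 2.0, 3.0, 4.0])
###Output _____no_output_____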
pyspark-advanced/jupyter-caching/Checkpointing - Full.ipynb
###Markdown Checkpointing DataFramesSometimes execution plans can get pretty long and Spark might run into trouble. Common scenarios are iterative algorithms like ML or graph algorithms, which contain a big outer loop and iteratively transform a DataFrame over and over again. This would result in a really huge execution plan.In these cases you could use `cache()` or `persist()` in order to improve performance (otherwise all steps of the loop would be executed again from the very beginning leading to a runtime of O(n^2)). But this will not cut off the lineage.Checkpointing is the right solution for these cases. It will persist the data of a DataFrame in a reliable distributed storage (most commonly HDFS) and cut off the lineage. Create or Reuse Spark Session ###Code from pyspark.sql import SparkSession if not 'spark' in locals(): spark = SparkSession.builder \ .master("local[*]") \ .config("spark.driver.memory","24G") \ .getOrCreate() spark spark.conf.set("spark.sql.adaptive.enabled", False) spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1) ###Output _____no_output_____ ###Markdown 1 Load DataWe will load the weather data again for this example. ###Code storageLocation = "s3://dimajix-training/data/weather" ###Output _____no_output_____ ###Markdown 1.1 Load Measurements ###Code from pyspark.sql.functions import * raw_weather = spark.read.text(storageLocation + "/2003").withColumn("year", lit(2003)) ###Output _____no_output_____ ###Markdown Extract MeasurementsMeasurements were stored in a proprietary text based format, with some values at fixed positions. We need to extract these values with a simple `SELECT` statement. ###Code weather = raw_weather.select( col("year"), substring(col("value"),5,6).alias("usaf"), substring(col("value"),11,5).alias("wban"), substring(col("value"),16,8).alias("date"), substring(col("value"),24,4).alias("time"), substring(col("value"),42,5).alias("report_type"), substring(col("value"),61,3).alias("wind_direction"), substring(col("value"),64,1).alias("wind_direction_qual"), substring(col("value"),65,1).alias("wind_observation"), (substring(col("value"),66,4).cast("float") / lit(10.0)).alias("wind_speed"), substring(col("value"),70,1).alias("wind_speed_qual"), (substring(col("value"),88,5).cast("float") / lit(10.0)).alias("air_temperature"), substring(col("value"),93,1).alias("air_temperature_qual") ) weather.limit(10).toPandas() ###Output _____no_output_____ ###Markdown 1.2 Load Station Metadata ###Code stations = spark.read \ .option("header", True) \ .csv(storageLocation + "/isd-history") # Display first 10 records stations.limit(10).toPandas() ###Output _____no_output_____ ###Markdown 2 Join DataNow we perform the join between the station master data and the measurements, as we did before. ###Code joined_weather = weather.join(stations, (weather["usaf"] == stations["usaf"]) & (weather["wban"] == stations["wban"])) ###Output _____no_output_____ ###Markdown 3 Truncating Execution PlansNow we want to understand the effect of checkpointing. First we will use the traditional aggregation and print the execution plan. 
3.1 Traditional Aggregation ###Code result = joined_weather.groupBy(joined_weather["ctry"], joined_weather["year"]).agg( min(when(joined_weather.air_temperature_qual == lit(1), joined_weather.air_temperature)).alias('min_temp'), max(when(joined_weather.air_temperature_qual == lit(1), joined_weather.air_temperature)).alias('max_temp') ) result.explain(True) ###Output == Parsed Logical Plan == 'Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (air_temperature_qual#16 = 1) THEN air_temperature#15 END) AS min_temp#173, max(CASE WHEN (air_temperature_qual#16 = 1) THEN air_temperature#15 END) AS max_temp#175] +- AnalysisBarrier +- Join Inner, ((usaf#5 = usaf#42) && (wban#6 = wban#43)) :- Project [year#2, substring(value#0, 5, 6) AS usaf#5, substring(value#0, 11, 5) AS wban#6, substring(value#0, 16, 8) AS date#7, substring(value#0, 24, 4) AS time#8, substring(value#0, 42, 5) AS report_type#9, substring(value#0, 61, 3) AS wind_direction#10, substring(value#0, 64, 1) AS wind_direction_qual#11, substring(value#0, 65, 1) AS wind_observation#12, (cast(cast(substring(value#0, 66, 4) as float) as double) / cast(10.0 as double)) AS wind_speed#13, substring(value#0, 70, 1) AS wind_speed_qual#14, (cast(cast(substring(value#0, 88, 5) as float) as double) / cast(10.0 as double)) AS air_temperature#15, substring(value#0, 93, 1) AS air_temperature_qual#16] : +- Project [value#0, 2003 AS year#2] : +- Relation[value#0] text +- Relation[USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] csv == Analyzed Logical Plan == ctry: string, year: int, min_temp: double, max_temp: double Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS min_temp#173, max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS max_temp#175] +- Join Inner, ((usaf#5 = usaf#42) && (wban#6 = wban#43)) :- Project [year#2, substring(value#0, 5, 6) AS usaf#5, substring(value#0, 11, 5) AS wban#6, substring(value#0, 16, 8) AS date#7, substring(value#0, 24, 4) AS time#8, substring(value#0, 42, 5) AS report_type#9, substring(value#0, 61, 3) AS wind_direction#10, substring(value#0, 64, 1) AS wind_direction_qual#11, substring(value#0, 65, 1) AS wind_observation#12, (cast(cast(substring(value#0, 66, 4) as float) as double) / cast(10.0 as double)) AS wind_speed#13, substring(value#0, 70, 1) AS wind_speed_qual#14, (cast(cast(substring(value#0, 88, 5) as float) as double) / cast(10.0 as double)) AS air_temperature#15, substring(value#0, 93, 1) AS air_temperature_qual#16] : +- Project [value#0, 2003 AS year#2] : +- Relation[value#0] text +- Relation[USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] csv == Optimized Logical Plan == Aggregate [ctry#45, 2003], [ctry#45, 2003 AS year#2, min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS min_temp#173, max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS max_temp#175] +- Project [air_temperature#15, air_temperature_qual#16, CTRY#45] +- Join Inner, ((usaf#5 = usaf#42) && (wban#6 = wban#43)) :- Project [substring(value#0, 5, 6) AS usaf#5, substring(value#0, 11, 5) AS wban#6, (cast(cast(substring(value#0, 88, 5) as float) as double) / 10.0) AS air_temperature#15, substring(value#0, 93, 1) AS air_temperature_qual#16] : +- Filter (isnotnull(substring(value#0, 11, 5)) && isnotnull(substring(value#0, 5, 6))) : +- 
Relation[value#0] text +- Project [USAF#42, WBAN#43, CTRY#45] +- Filter (isnotnull(usaf#42) && isnotnull(wban#43)) +- Relation[USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] csv == Physical Plan == *(3) HashAggregate(keys=[ctry#45, 2003#180], functions=[min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END), max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END)], output=[ctry#45, year#2, min_temp#173, max_temp#175]) +- Exchange hashpartitioning(ctry#45, 2003#180, 200) +- *(2) HashAggregate(keys=[ctry#45, 2003 AS 2003#180], functions=[partial_min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END), partial_max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END)], output=[ctry#45, 2003#180, min#183, max#184]) +- *(2) Project [air_temperature#15, air_temperature_qual#16, CTRY#45] +- *(2) BroadcastHashJoin [usaf#5, wban#6], [usaf#42, wban#43], Inner, BuildRight :- *(2) Project [substring(value#0, 5, 6) AS usaf#5, substring(value#0, 11, 5) AS wban#6, (cast(cast(substring(value#0, 88, 5) as float) as double) / 10.0) AS air_temperature#15, substring(value#0, 93, 1) AS air_temperature_qual#16] : +- *(2) Filter (isnotnull(substring(value#0, 11, 5)) && isnotnull(substring(value#0, 5, 6))) : +- *(2) FileScan text [value#0] Batched: false, Format: Text, Location: InMemoryFileIndex[s3://dimajix-training/data/weather/2003], PartitionFilters: [], PushedFilters: [], ReadSchema: struct<value:string> +- BroadcastExchange HashedRelationBroadcastMode(List(input[0, string, true], input[1, string, true])) +- *(1) Project [USAF#42, WBAN#43, CTRY#45] +- *(1) Filter (isnotnull(usaf#42) && isnotnull(wban#43)) +- *(1) FileScan csv [USAF#42,WBAN#43,CTRY#45] Batched: false, Format: CSV, Location: InMemoryFileIndex[s3://dimajix-training/data/weather/isd-history], PartitionFilters: [], PushedFilters: [IsNotNull(USAF), IsNotNull(WBAN)], ReadSchema: struct<USAF:string,WBAN:string,CTRY:string> ###Markdown 3.2 Reliable CheckpointingNow we first checkpoint the joined weather data set and then perform the aggregation on the checkpointed DataFrame. Set Checkpoint directoryFirst we need to specify a checkpoint directory on a reliable shared file system. ###Code spark.sparkContext.setCheckpointDir("/tmp/checkpoints") ###Output _____no_output_____ ###Markdown Create checkpointNow we can create a checkpoint for the joined weather. Note that this takes some time, as checkpointing is not a lazy operation, it will be executed immediately. This is also conceptionally neccessary, because one aspect of checkpointing is that the whole lineage gets cut off. 
So there is no way around executing the computation for materializing the DataFrame inside the checkpoint directory ###Code cp_weather = joined_weather.checkpoint(eager=True) ###Output _____no_output_____ ###Markdown Inspect Checkpoint directory ###Code %%bash hdfs dfs -ls /tmp/checkpoints ###Output Found 1 items drwxr-xr-x - hadoop hadoop 0 2018-10-28 07:37 /tmp/checkpoints/1e08381c-ddda-4d02-876b-07ba3427c9f8 ###Markdown Inspect execution planLet us have a look at the execution plan of the checkpointed DataFrame ###Code cp_weather.explain(True) ###Output == Parsed Logical Plan == AnalysisBarrier +- LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Analyzed Logical Plan == year: int, usaf: string, wban: string, date: string, time: string, report_type: string, wind_direction: string, wind_direction_qual: string, wind_observation: string, wind_speed: double, wind_speed_qual: string, air_temperature: double, air_temperature_qual: string, USAF: string, WBAN: string, STATION NAME: string, CTRY: string, STATE: string, ICAO: string, LAT: string, LON: string, ELEV(M): string, BEGIN: string, END: string LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Optimized Logical Plan == LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Physical Plan == Scan ExistingRDD[year#2,usaf#5,wban#6,date#7,time#8,report_type#9,wind_direction#10,wind_direction_qual#11,wind_observation#12,wind_speed#13,wind_speed_qual#14,air_temperature#15,air_temperature_qual#16,USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] ###Markdown As you can see, the lineage got lost and is replaced by a `Scan ExistingRDD` which refers to the data in the checkpoint directory. Perform aggregationNow we can perform the aggregation with the checkpointed variant of the joined weather DataFrame. 
###Code result = cp_weather.groupBy(cp_weather["ctry"], cp_weather["year"]).agg( min(when(cp_weather.air_temperature_qual == lit(1), cp_weather.air_temperature)).alias('min_temp'), max(when(cp_weather.air_temperature_qual == lit(1), cp_weather.air_temperature)).alias('max_temp') ) result.explain(True) ###Output == Parsed Logical Plan == 'Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (air_temperature_qual#16 = 1) THEN air_temperature#15 END) AS min_temp#247, max(CASE WHEN (air_temperature_qual#16 = 1) THEN air_temperature#15 END) AS max_temp#249] +- AnalysisBarrier +- LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Analyzed Logical Plan == ctry: string, year: int, min_temp: double, max_temp: double Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS min_temp#247, max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS max_temp#249] +- LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Optimized Logical Plan == Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS min_temp#247, max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS max_temp#249] +- Project [year#2, air_temperature#15, air_temperature_qual#16, CTRY#45] +- LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Physical Plan == *(2) HashAggregate(keys=[ctry#45, year#2], functions=[min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END), max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END)], output=[ctry#45, year#2, min_temp#247, max_temp#249]) +- Exchange hashpartitioning(ctry#45, year#2, 200) +- *(1) HashAggregate(keys=[ctry#45, year#2], functions=[partial_min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END), partial_max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END)], output=[ctry#45, year#2, min#256, max#257]) +- *(1) Project [year#2, air_temperature#15, air_temperature_qual#16, CTRY#45] +- Scan ExistingRDD[year#2,usaf#5,wban#6,date#7,time#8,report_type#9,wind_direction#10,wind_direction_qual#11,wind_observation#12,wind_speed#13,wind_speed_qual#14,air_temperature#15,air_temperature_qual#16,USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] ###Markdown As expected, the execution plan now essentially only contains the aggregation in three steps (partial aggregation, shuffle, final aggregation). The lineage of the join is not present any more. 
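Spark does not clean up reliable checkpoint files automatically (by default the `spark.cleaner.referenceTracking.cleanCheckpoints` option is disabled), so the checkpoint directory keeps growing. Below is a sketch of a manual cleanup (an addition, assuming no other job still reads from the directory set above): ###Code
import subprocess

# Recursively remove the checkpoint data written to HDFS above.
subprocess.run(
    ["hdfs", "dfs", "-rm", "-r", "-skipTrash", "/tmp/checkpoints"],
    check=False,
)
###Output _____no_output_____ ###Markdown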
3.3 Unreliable Checkpointing In addition to *reliable* checkpointing, Spark also supports *unreliable* checkpointing, where the data is not stored in HDFS but on the local worker nodes instead, using the caching backend. Note that it is strongly discouraged to use unreliable checkpointing with dynamic allocation, where executors can be freed up again. ###Code cpu_weather = joined_weather.localCheckpoint(eager=True) ###Output _____no_output_____ ###Markdown Inspect Checkpoint data Now you can see the checkpointed data in the "Storage" section of the web interface. Perform aggregation Now we can perform the aggregation with the checkpointed variant of the joined weather DataFrame. ###Code result = cpu_weather.groupBy(cpu_weather["ctry"], cpu_weather["year"]).agg( min(when(cpu_weather.air_temperature_qual == lit(1), cpu_weather.air_temperature)).alias('min_temp'), max(when(cpu_weather.air_temperature_qual == lit(1), cpu_weather.air_temperature)).alias('max_temp') ) result.explain(True) result.limit(5).toPandas() ###Output _____no_output_____ ###Markdown Checkpointing DataFrames Sometimes execution plans can get pretty long and Spark might run into trouble. Common scenarios are iterative algorithms like ML or graph algorithms, which contain a big outer loop and iteratively transform a DataFrame over and over again. This would result in a really huge execution plan. In these cases you could use `cache()` or `persist()` in order to improve performance (otherwise all steps of the loop would be executed again from the very beginning, leading to a runtime of O(n^2)). But this will not cut off the lineage. Checkpointing is the right solution for these cases. It will persist the data of a DataFrame in a reliable distributed storage (most commonly HDFS) and cut off the lineage. Create or Reuse Spark Session ###Code from pyspark.sql import SparkSession if not 'spark' in locals(): spark = ( SparkSession.builder.master("local[*]") .config("spark.driver.memory", "24G") .getOrCreate() ) spark spark.conf.set("spark.sql.adaptive.enabled", False) spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1) ###Output _____no_output_____ ###Markdown 1 Load Data We will load the weather data again for this example. ###Code storageLocation = "s3://dimajix-training/data/weather" ###Output _____no_output_____ ###Markdown 1.1 Load Measurements ###Code from pyspark.sql.functions import * raw_weather = spark.read.text(storageLocation + "/2003").withColumn("year", lit(2003)) ###Output _____no_output_____ ###Markdown Extract Measurements Measurements were stored in a proprietary text based format, with some values at fixed positions. We need to extract these values with a simple `SELECT` statement.
###Code weather = raw_weather.select( col("year"), substring(col("value"), 5, 6).alias("usaf"), substring(col("value"), 11, 5).alias("wban"), substring(col("value"), 16, 8).alias("date"), substring(col("value"), 24, 4).alias("time"), substring(col("value"), 42, 5).alias("report_type"), substring(col("value"), 61, 3).alias("wind_direction"), substring(col("value"), 64, 1).alias("wind_direction_qual"), substring(col("value"), 65, 1).alias("wind_observation"), (substring(col("value"), 66, 4).cast("float") / lit(10.0)).alias("wind_speed"), substring(col("value"), 70, 1).alias("wind_speed_qual"), (substring(col("value"), 88, 5).cast("float") / lit(10.0)).alias("air_temperature"), substring(col("value"), 93, 1).alias("air_temperature_qual"), ) weather.limit(10).toPandas() ###Output _____no_output_____ ###Markdown 1.2 Load Station Metadata ###Code stations = spark.read.option("header", True).csv(storageLocation + "/isd-history") # Display first 10 records stations.limit(10).toPandas() ###Output _____no_output_____ ###Markdown 2 Join DataNow we perform the join between the station master data and the measurements, as we did before. ###Code joined_weather = weather.join( stations, (weather["usaf"] == stations["usaf"]) & (weather["wban"] == stations["wban"]), ) ###Output _____no_output_____ ###Markdown 3 Truncating Execution PlansNow we want to understand the effect of checkpointing. First we will use the traditional aggregation and print the execution plan. 3.1 Traditional Aggregation ###Code result = joined_weather.groupBy(joined_weather["ctry"], joined_weather["year"]).agg( min( when( joined_weather.air_temperature_qual == lit(1), joined_weather.air_temperature ) ).alias('min_temp'), max( when( joined_weather.air_temperature_qual == lit(1), joined_weather.air_temperature ) ).alias('max_temp'), ) result.explain(True) ###Output == Parsed Logical Plan == 'Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (air_temperature_qual#16 = 1) THEN air_temperature#15 END) AS min_temp#173, max(CASE WHEN (air_temperature_qual#16 = 1) THEN air_temperature#15 END) AS max_temp#175] +- AnalysisBarrier +- Join Inner, ((usaf#5 = usaf#42) && (wban#6 = wban#43)) :- Project [year#2, substring(value#0, 5, 6) AS usaf#5, substring(value#0, 11, 5) AS wban#6, substring(value#0, 16, 8) AS date#7, substring(value#0, 24, 4) AS time#8, substring(value#0, 42, 5) AS report_type#9, substring(value#0, 61, 3) AS wind_direction#10, substring(value#0, 64, 1) AS wind_direction_qual#11, substring(value#0, 65, 1) AS wind_observation#12, (cast(cast(substring(value#0, 66, 4) as float) as double) / cast(10.0 as double)) AS wind_speed#13, substring(value#0, 70, 1) AS wind_speed_qual#14, (cast(cast(substring(value#0, 88, 5) as float) as double) / cast(10.0 as double)) AS air_temperature#15, substring(value#0, 93, 1) AS air_temperature_qual#16] : +- Project [value#0, 2003 AS year#2] : +- Relation[value#0] text +- Relation[USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] csv == Analyzed Logical Plan == ctry: string, year: int, min_temp: double, max_temp: double Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS min_temp#173, max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS max_temp#175] +- Join Inner, ((usaf#5 = usaf#42) && (wban#6 = wban#43)) :- Project [year#2, substring(value#0, 5, 6) AS usaf#5, substring(value#0, 11, 5) AS wban#6, substring(value#0, 16, 8) AS 
date#7, substring(value#0, 24, 4) AS time#8, substring(value#0, 42, 5) AS report_type#9, substring(value#0, 61, 3) AS wind_direction#10, substring(value#0, 64, 1) AS wind_direction_qual#11, substring(value#0, 65, 1) AS wind_observation#12, (cast(cast(substring(value#0, 66, 4) as float) as double) / cast(10.0 as double)) AS wind_speed#13, substring(value#0, 70, 1) AS wind_speed_qual#14, (cast(cast(substring(value#0, 88, 5) as float) as double) / cast(10.0 as double)) AS air_temperature#15, substring(value#0, 93, 1) AS air_temperature_qual#16] : +- Project [value#0, 2003 AS year#2] : +- Relation[value#0] text +- Relation[USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] csv == Optimized Logical Plan == Aggregate [ctry#45, 2003], [ctry#45, 2003 AS year#2, min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS min_temp#173, max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS max_temp#175] +- Project [air_temperature#15, air_temperature_qual#16, CTRY#45] +- Join Inner, ((usaf#5 = usaf#42) && (wban#6 = wban#43)) :- Project [substring(value#0, 5, 6) AS usaf#5, substring(value#0, 11, 5) AS wban#6, (cast(cast(substring(value#0, 88, 5) as float) as double) / 10.0) AS air_temperature#15, substring(value#0, 93, 1) AS air_temperature_qual#16] : +- Filter (isnotnull(substring(value#0, 11, 5)) && isnotnull(substring(value#0, 5, 6))) : +- Relation[value#0] text +- Project [USAF#42, WBAN#43, CTRY#45] +- Filter (isnotnull(usaf#42) && isnotnull(wban#43)) +- Relation[USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] csv == Physical Plan == *(3) HashAggregate(keys=[ctry#45, 2003#180], functions=[min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END), max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END)], output=[ctry#45, year#2, min_temp#173, max_temp#175]) +- Exchange hashpartitioning(ctry#45, 2003#180, 200) +- *(2) HashAggregate(keys=[ctry#45, 2003 AS 2003#180], functions=[partial_min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END), partial_max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END)], output=[ctry#45, 2003#180, min#183, max#184]) +- *(2) Project [air_temperature#15, air_temperature_qual#16, CTRY#45] +- *(2) BroadcastHashJoin [usaf#5, wban#6], [usaf#42, wban#43], Inner, BuildRight :- *(2) Project [substring(value#0, 5, 6) AS usaf#5, substring(value#0, 11, 5) AS wban#6, (cast(cast(substring(value#0, 88, 5) as float) as double) / 10.0) AS air_temperature#15, substring(value#0, 93, 1) AS air_temperature_qual#16] : +- *(2) Filter (isnotnull(substring(value#0, 11, 5)) && isnotnull(substring(value#0, 5, 6))) : +- *(2) FileScan text [value#0] Batched: false, Format: Text, Location: InMemoryFileIndex[s3://dimajix-training/data/weather/2003], PartitionFilters: [], PushedFilters: [], ReadSchema: struct<value:string> +- BroadcastExchange HashedRelationBroadcastMode(List(input[0, string, true], input[1, string, true])) +- *(1) Project [USAF#42, WBAN#43, CTRY#45] +- *(1) Filter (isnotnull(usaf#42) && isnotnull(wban#43)) +- *(1) FileScan csv [USAF#42,WBAN#43,CTRY#45] Batched: false, Format: CSV, Location: InMemoryFileIndex[s3://dimajix-training/data/weather/isd-history], PartitionFilters: [], PushedFilters: [IsNotNull(USAF), IsNotNull(WBAN)], ReadSchema: struct<USAF:string,WBAN:string,CTRY:string> ###Markdown 3.2 
Reliable CheckpointingNow we first checkpoint the joined weather data set and then perform the aggregation on the checkpointed DataFrame. Set Checkpoint directoryFirst we need to specify a checkpoint directory on a reliable shared file system. ###Code spark.sparkContext.setCheckpointDir("/tmp/checkpoints") ###Output _____no_output_____ ###Markdown Create checkpointNow we can create a checkpoint for the joined weather. Note that this takes some time, as checkpointing is not a lazy operation, it will be executed immediately. This is also conceptionally neccessary, because one aspect of checkpointing is that the whole lineage gets cut off. So there is no way around executing the computation for materializing the DataFrame inside the checkpoint directory ###Code cp_weather = joined_weather.checkpoint(eager=True) ###Output _____no_output_____ ###Markdown Inspect Checkpoint directory ###Code %%bash hdfs dfs -ls /tmp/checkpoints ###Output Found 1 items drwxr-xr-x - hadoop hadoop 0 2018-10-28 07:37 /tmp/checkpoints/1e08381c-ddda-4d02-876b-07ba3427c9f8 ###Markdown Inspect execution planLet us have a look at the execution plan of the checkpointed DataFrame ###Code cp_weather.explain(True) ###Output == Parsed Logical Plan == AnalysisBarrier +- LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Analyzed Logical Plan == year: int, usaf: string, wban: string, date: string, time: string, report_type: string, wind_direction: string, wind_direction_qual: string, wind_observation: string, wind_speed: double, wind_speed_qual: string, air_temperature: double, air_temperature_qual: string, USAF: string, WBAN: string, STATION NAME: string, CTRY: string, STATE: string, ICAO: string, LAT: string, LON: string, ELEV(M): string, BEGIN: string, END: string LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Optimized Logical Plan == LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Physical Plan == Scan ExistingRDD[year#2,usaf#5,wban#6,date#7,time#8,report_type#9,wind_direction#10,wind_direction_qual#11,wind_observation#12,wind_speed#13,wind_speed_qual#14,air_temperature#15,air_temperature_qual#16,USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] ###Markdown As you can see, the lineage got lost and is replaced by a `Scan ExistingRDD` which refers to the data in the checkpoint directory. Perform aggregationNow we can perform the aggregation with the checkpointed variant of the joined weather DataFrame. 
###Code result = cp_weather.groupBy(cp_weather["ctry"], cp_weather["year"]).agg( min( when(cp_weather.air_temperature_qual == lit(1), cp_weather.air_temperature) ).alias('min_temp'), max( when(cp_weather.air_temperature_qual == lit(1), cp_weather.air_temperature) ).alias('max_temp'), ) result.explain(True) ###Output == Parsed Logical Plan == 'Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (air_temperature_qual#16 = 1) THEN air_temperature#15 END) AS min_temp#247, max(CASE WHEN (air_temperature_qual#16 = 1) THEN air_temperature#15 END) AS max_temp#249] +- AnalysisBarrier +- LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Analyzed Logical Plan == ctry: string, year: int, min_temp: double, max_temp: double Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS min_temp#247, max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS max_temp#249] +- LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Optimized Logical Plan == Aggregate [ctry#45, year#2], [ctry#45, year#2, min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS min_temp#247, max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END) AS max_temp#249] +- Project [year#2, air_temperature#15, air_temperature_qual#16, CTRY#45] +- LogicalRDD [year#2, usaf#5, wban#6, date#7, time#8, report_type#9, wind_direction#10, wind_direction_qual#11, wind_observation#12, wind_speed#13, wind_speed_qual#14, air_temperature#15, air_temperature_qual#16, USAF#42, WBAN#43, STATION NAME#44, CTRY#45, STATE#46, ICAO#47, LAT#48, LON#49, ELEV(M)#50, BEGIN#51, END#52], false == Physical Plan == *(2) HashAggregate(keys=[ctry#45, year#2], functions=[min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END), max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END)], output=[ctry#45, year#2, min_temp#247, max_temp#249]) +- Exchange hashpartitioning(ctry#45, year#2, 200) +- *(1) HashAggregate(keys=[ctry#45, year#2], functions=[partial_min(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END), partial_max(CASE WHEN (cast(air_temperature_qual#16 as int) = 1) THEN air_temperature#15 END)], output=[ctry#45, year#2, min#256, max#257]) +- *(1) Project [year#2, air_temperature#15, air_temperature_qual#16, CTRY#45] +- Scan ExistingRDD[year#2,usaf#5,wban#6,date#7,time#8,report_type#9,wind_direction#10,wind_direction_qual#11,wind_observation#12,wind_speed#13,wind_speed_qual#14,air_temperature#15,air_temperature_qual#16,USAF#42,WBAN#43,STATION NAME#44,CTRY#45,STATE#46,ICAO#47,LAT#48,LON#49,ELEV(M)#50,BEGIN#51,END#52] ###Markdown As expected, the execution plan now essentially only contains the aggregation in three steps (partial aggregation, shuffle, final aggregation). The lineage of the join is not present any more. 
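If checkpoint files should instead be removed automatically once the corresponding data goes out of scope, Spark offers an opt-in cleaner setting; a sketch is below (an addition: the option has to be supplied when the session is built, so setting it on an already running session may have no effect): ###Code
from pyspark.sql import SparkSession

cleaning_spark = (
    SparkSession.builder.master("local[*]")
    # Opt-in cleaner for reliable checkpoint files (default is false).
    .config("spark.cleaner.referenceTracking.cleanCheckpoints", "true")
    .getOrCreate()
)
###Output _____no_output_____ ###Markdown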
3.3 Unreliable Checkpointing In addition to *reliable* checkpointing, Spark also supports *unreliable* checkpointing, where the data is not stored in HDFS but on the local worker nodes instead, using the caching backend. Note that it is strongly discouraged to use unreliable checkpointing with dynamic allocation, where executors can be freed up again. ###Code cpu_weather = joined_weather.localCheckpoint(eager=True) ###Output _____no_output_____ ###Markdown Inspect Checkpoint data Now you can see the checkpointed data in the "Storage" section of the web interface. Perform aggregation Now we can perform the aggregation with the checkpointed variant of the joined weather DataFrame. ###Code result = cpu_weather.groupBy(cpu_weather["ctry"], cpu_weather["year"]).agg( min( when(cpu_weather.air_temperature_qual == lit(1), cpu_weather.air_temperature) ).alias('min_temp'), max( when(cpu_weather.air_temperature_qual == lit(1), cpu_weather.air_temperature) ).alias('max_temp'), ) result.explain(True) result.limit(5).toPandas() ###Output _____no_output_____
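###Markdown As a final contrast with `cache()` (an added sketch reusing `joined_weather` from above): caching materialises the data but keeps the full lineage in the plan, while `checkpoint()` replaces it with a scan of the persisted data. ###Code
cached = joined_weather.cache()
cached.explain()  # join lineage still visible in the plan

checkpointed = joined_weather.checkpoint(eager=True)
checkpointed.explain()  # plan collapses to Scan ExistingRDD
###Output _____no_output_____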
work/4_ref_extraction/2_convert_to_table.ipynb
###Markdown Convert amplicon sequences from reference genomes to table format ###Code import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import re # input REF_FA = '../../../data/genome_vobs/phylo_ampl_dada2/forSeekDeep/refSeqs/{}.fasta' AMPLS = [str(i) for i in range(62)] # output REF_TABLE = 'data/refs.csv' REF_META = 'data/samples_ref.csv' ###Output _____no_output_____ ###Markdown Sequence parser ###Code def parse_ref_fa(ampl): i = 0 ref_meta = [] with open(REF_FA.format(ampl)) as f: for line in f: if line.startswith('>'): uid = '{}.r{}'.format(ampl, i) samples = re.findall('genome=(.*?);', line) else: seq = line.strip() for sample in samples: ref_meta.append({ 's_Sample': sample, 'target':ampl, #'s_Species': '_'.join(sample.capitalize().split('-')[:2]), #'h_popUID': uid, 'consensus':seq }) return ref_meta parse_ref_fa('0')[:2] ###Output _____no_output_____ ###Markdown Convert to table ###Code # parse all sequencing data ref_data = [] for ampl in AMPLS: ref_data.extend(parse_ref_fa(ampl)) ref_data = pd.DataFrame(ref_data) ref_data.sample(3) # write ! mkdir -p data ref_data.to_csv(REF_TABLE, index=False) ###Output _____no_output_____ ###Markdown Sample metadata ###Code # extract species data def get_sp(sample): return '_'.join(sample.capitalize().split('-')[:2]) meta = pd.DataFrame(ref_data.s_Sample.sort_values().unique(), columns=['s_Sample']) meta['Species'] = meta.s_Sample.apply(get_sp) meta['Source'] = 'ref' meta['Sample Set'] = 'ref' meta.sample() # write metadata meta.to_csv(REF_META, index=False) ###Output _____no_output_____ ###Markdown Explore reference amplicon sequences ###Code # marker copy number per genome, red - duplications, blue - missing data # note high duplication level in cracens fig, ax = plt.subplots(figsize=(15,7)) ref_data['amplicon'] = ref_data.target.astype(int) sns.heatmap(ref_data.groupby(['s_Sample','amplicon'])['consensus'].count().unstack().fillna(0), ax=ax, cmap='coolwarm', center=1); # missing target count # affected by distance to An.gambiae and assembly quality (62 - ref_data.groupby('s_Sample')['target'].nunique()).sort_values() # missing genotyping data in in Nyssorhynchus x = ref_data.groupby(['s_Sample','target'])['consensus'].count().unstack().fillna(0).reset_index() x = x[x.s_Sample.str.contains('albimanus') | x.s_Sample.str.contains('darlingi') | x.s_Sample.str.contains('aquasalis')].set_index('s_Sample') print('Amplicons not found per species:\n{}'.format(x.sum(axis=1))) print('Amplicons not found in all species: {}'.format((x.sum()==0).sum())) ###Output Amplicons not found per species: s_Sample anopheles-albimanus-steclachromosomesaalbs2 38.0 anopheles-aquasalis-A_aquasalis_v1 34.0 anopheles-darlingi-coariscaffoldsadarc3 34.0 dtype: float64 Amplicons not found in all species: 21
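###Markdown A complementary per-target view (an added sketch building on `ref_data` from above): for each amplicon, count the reference genomes in which it was recovered at least once. ###Code
presence = ref_data.groupby('target')['s_Sample'].nunique()
presence.sort_values().head(10)
###Output _____no_output_____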
examples/lm_scorer.ipynb
###Markdown Setup Machine ###Code
# Install python.
!env DEBIAN_FRONTEND=noninteractive apt-get install -y -qq python3 python3-dev python3-venv python3-pip > /dev/null
# Upgrade pip
!python -m pip install -qq --upgrade pip
# Disable TF info logs
%env TF_CPP_MIN_LOG_LEVEL=2
!python --version
!pip --version
###Output
_____no_output_____
###Markdown Install lm-scorer ###Code
!pip install -qq lm-scorer
!pip show lm-scorer
###Output
Name: lm-scorer
Version: 0.4.0
Summary: Language Model based sentences scoring library
Home-page: https://github.com/simonepri/lm-scorer#readme
Author: Simone Primarosa
Author-email: [email protected]
License: MIT
Location: /usr/local/lib/python3.6/dist-packages
Requires: pip, transformers, torch
Required-by:
###Markdown Run the CLI ###Code
!lm-scorer -h

%%writefile sentences.txt
I am going to run a marathon.
I am go to run a marathon.

# @markdown The LM model to use.
MODEL = 'gpt2' #@param ["gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl", "distilgpt2"]
# @markdown Reduction strategy used to compute the sentence score out of tokens' probabilities.
REDUCE = 'prod' #@param ["prod", "mean", "gmean", "hmean"]
# @markdown CUDA device id to use (e.g. 0), a negative value disables CUDA acceleration.
CUDA = -1 #@param {type: "number"}
# @markdown Number of sentences to simultaneously feed through the model.
BATCH_SIZE = 1 #@param {type: "number"}

!lm-scorer -m $MODEL -r $REDUCE -b $BATCH_SIZE --cuda $CUDA -lp -t sentences.txt
###Output
I am going to run a marathon.  -32.551
I  -3.9997
Ġam  -3.0501
Ġgoing  -3.4329
Ġto  -0.075371
Ġrun  -5.6097
Ġa  -1.6184
Ġmarathon  -5.9767
.  -2.3148
<|endoftext|>  -6.4729
I am go to run a marathon.  -42.834
I  -3.9997
Ġam  -3.0501
Ġgo  -10.555
Ġto  -4.5773
Ġrun  -6.7562
Ġa  -1.872
Ġmarathon  -3.8127
.  -1.9487
<|endoftext|>  -6.2627
###Markdown Setup Machine ###Code
# Install python.
!env DEBIAN_FRONTEND=noninteractive apt-get install -y -qq python3 python3-dev python3-venv python3-pip > /dev/null
# Upgrade pip
!python -m pip install -qq --upgrade pip
# Disable TF info logs
%env TF_CPP_MIN_LOG_LEVEL=2
!python --version
!pip --version
###Output
_____no_output_____
###Markdown Install lm-scorer ###Code
!pip install -qq lm-scorer
!pip show lm-scorer
###Output
Name: lm-scorer
Version: 0.5.0
Summary: Language Model based sentences scoring library
Home-page: https://github.com/simonepri/lm-scorer#readme
Author: Simone Primarosa
Author-email: [email protected]
License: MIT
Location: /Users/gregory/TMP/lm-scorer/.venv/lib/python3.9/site-packages
Requires: pip, torch, transformers
Required-by:
###Markdown Run the CLI ###Code
!lm-scorer -h

%%writefile sentences.txt
I am going to run a marathon.
I am go to run a marathon.

# @markdown The LM model to use.
MODEL = 'gpt2' #@param ["gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl", "distilgpt2"]
# @markdown Reduction strategy used to compute the sentence score out of tokens' probabilities.
REDUCE = 'prod' #@param ["prod", "mean", "gmean", "hmean"]
# @markdown CUDA device id to use (e.g. 0), a negative value disables CUDA acceleration.
CUDA = -1 #@param {type: "number"}
# @markdown Number of sentences to simultaneously feed through the model.
BATCH_SIZE = 1 #@param {type: "number"}

!lm-scorer -m $MODEL -r $REDUCE -b $BATCH_SIZE --cuda $CUDA -lp -t sentences.txt
###Output
I am going to run a marathon.  -32.551
I  -3.9997
Ġam  -3.0501
Ġgoing  -3.4329
Ġto  -0.075378
Ġrun  -5.6097
Ġa  -1.6184
Ġmarathon  -5.9767
.  -2.3148
<|endoftext|>  -6.4729
I am go to run a marathon.  -42.834
I  -3.9997
Ġam  -3.0501
Ġgo  -10.555
Ġto  -4.5773
Ġrun  -6.7562
Ġa  -1.8721
Ġmarathon  -3.8127
.  -1.9487
<|endoftext|>  -6.2627
###Markdown Setup Machine ###Code
# Install python.
!env DEBIAN_FRONTEND=noninteractive apt-get install -y -qq python3 python3-dev python3-venv python3-pip > /dev/null
# Upgrade pip
!python -m pip install -qq --upgrade pip
# Disable TF info logs
%env TF_CPP_MIN_LOG_LEVEL=2
!python --version
!pip --version
###Output
Python 3.6.9
pip 20.0.2 from /usr/local/lib/python3.6/dist-packages/pip (python 3.6)
###Markdown Install lm-scorer ###Code
!pip install -qq lm-scorer
!pip show lm-scorer
###Output
Name: lm-scorer
Version: 0.3.0
Summary: Language Model based sentences scoring library
Home-page: https://github.com/simonepri/lm-scorer#readme
Author: Simone Primarosa
Author-email: [email protected]
License: MIT
Location: /usr/local/lib/python3.6/dist-packages
Requires: transformers, torch, pip
Required-by:
###Markdown Run the CLI ###Code
!lm-scorer -h

%%writefile sentences.txt
I am going to run a marathon.
I am go to run a marathon.

# @markdown The LM model to use.
MODEL = 'gpt2' #@param ["gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl", "distilgpt2"]
# @markdown Reduction strategy used to compute the sentence score out of tokens' probabilities.
REDUCE = 'prod' #@param ["prod", "mean", "gmean", "hmean"]
# @markdown CUDA device id to use (e.g. 0), a negative value disables CUDA acceleration.
CUDA = -1 #@param {type: "number"}

!lm-scorer -m $MODEL -r $REDUCE --cuda $CUDA -lp -t sentences.txt
###Output
I am going to run a marathon.  -32.551
I  -3.9997
Ġam  -3.0501
Ġgoing  -3.4329
Ġto  -0.075371
Ġrun  -5.6097
Ġa  -1.6184
Ġmarathon  -5.9767
.  -2.3148
<|endoftext|>  -6.4729
I am go to run a marathon.  -42.834
I  -3.9997
Ġam  -3.0501
Ġgo  -10.555
Ġto  -4.5773
Ġrun  -6.7563
Ġa  -1.872
Ġmarathon  -3.8127
.  -1.9487
<|endoftext|>  -6.2627
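The cells above only exercise the lm-scorer CLI. The same scores are also available programmatically; a minimal sketch, assuming the Python API advertised in the project README (`AutoLMScorer.from_pretrained` and `sentence_score` do not appear in this notebook, so treat the names and signatures as assumptions):
###Code
import torch
from lm_scorer.models.auto import AutoLMScorer as LMScorer  # assumption: README API

device = "cuda:0" if torch.cuda.is_available() else "cpu"
scorer = LMScorer.from_pretrained("gpt2", device=device, batch_size=1)

# log-probability of a whole sentence; "prod" reduction is the CLI default
scorer.sentence_score("I am going to run a marathon.", log=True)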
tutorials/Certification_Trainings/Healthcare/10.Clinical_Relation_Extraction.ipynb
###Markdown
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/10.Clinical_Relation_Extraction.ipynb)

Clinical Relation Extraction Model

Colab Setup
###Code
import json

with open('workshop_license_keys_365.json') as f:
    license_keys = json.load(f)

license_keys.keys()

import os

# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version

secret = license_keys['SECRET']
os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID'] = license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']
version = license_keys['PUBLIC_VERSION']
jsl_version = license_keys['JSL_VERSION']

! pip install --ignore-installed -q pyspark==2.4.4
! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret
! pip install --ignore-installed -q spark-nlp==$version

import sparknlp
print(sparknlp.version())

import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl

spark = sparknlp_jsl.start(secret)
###Output
_____no_output_____
###Markdown 1. Posology Relation Extraction
This is a demonstration of using SparkNLP for extracting posology relations. The following relations are supported:

DRUG-DOSAGE
DRUG-FREQUENCY
DRUG-ADE (Adverse Drug Events)
DRUG-FORM
DRUG-ROUTE
DRUG-DURATION
DRUG-REASON
DRUG-STRENGTH

The model has been validated against the posology dataset described in (Magge, Scotch, & Gonzalez-Hernandez, 2018).

| Relation | Recall | Precision | F1 | F1 (Magge, Scotch, & Gonzalez-Hernandez, 2018) |
| --- | --- | --- | --- | --- |
| DRUG-ADE | 0.66 | 1.00 | **0.80** | 0.76 |
| DRUG-DOSAGE | 0.89 | 1.00 | **0.94** | 0.91 |
| DRUG-DURATION | 0.75 | 1.00 | **0.85** | 0.92 |
| DRUG-FORM | 0.88 | 1.00 | **0.94** | 0.95* |
| DRUG-FREQUENCY | 0.79 | 1.00 | **0.88** | 0.90 |
| DRUG-REASON | 0.60 | 1.00 | **0.75** | 0.70 |
| DRUG-ROUTE | 0.79 | 1.00 | **0.88** | 0.95* |
| DRUG-STRENGTH | 0.95 | 1.00 | **0.98** | 0.97 |

*Magge, Scotch, Gonzalez-Hernandez (2018) collapsed DRUG-FORM and DRUG-ROUTE into a single relation.
###Code
import os
import re
import pyspark
import sparknlp
import sparknlp_jsl
import functools
import json
import numpy as np
from scipy import spatial
import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from sparknlp_jsl.annotator import *
from sparknlp.annotator import *
from sparknlp.base import *
###Output
_____no_output_____
###Markdown
**Build a pipeline using Spark NLP pretrained models and the relation extraction model optimized for posology.** The precision of the RE model is controlled by `setMaxSyntacticDistance(4)`, which sets the maximum syntactic distance between named entities to 4. A larger value will improve recall at the expense of lower precision. A value of 4 leads to perfect precision on this benchmark (i.e. the model doesn't produce any false positives) and reasonably good recall.
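To illustrate that knob: recall can be traded for precision by loosening the distance cutoff when the model is built. A minimal sketch using the same `posology_re` model that the pipeline cell below loads — the value 10 is illustrative, not taken from this notebook:
###Code
# looser syntactic-distance cutoff => more candidate entity pairs => higher recall, lower precision
reModel_loose = RelationExtractionModel()\
    .pretrained("posology_re")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(10)  # vs. 4 in the pipeline below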
###Code
documenter = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

sentencer = SentenceDetector()\
    .setInputCols(["document"])\
    .setOutputCol("sentences")

tokenizer = sparknlp.annotators.Tokenizer()\
    .setInputCols(["sentences"])\
    .setOutputCol("tokens")

words_embedder = WordEmbeddingsModel()\
    .pretrained("embeddings_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("embeddings")

pos_tagger = PerceptronModel()\
    .pretrained("pos_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("pos_tags")

ner_tagger = NerDLModel()\
    .pretrained("ner_posology", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

ner_event = NerDLModel()\
    .pretrained("ner_events_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

ner_chunker = NerConverter()\
    .setInputCols(["sentences", "tokens", "ner_tags"])\
    .setOutputCol("ner_chunks")

dependency_parser = DependencyParserModel()\
    .pretrained("dependency_conllu", "en")\
    .setInputCols(["sentences", "pos_tags", "tokens"])\
    .setOutputCol("dependencies")

reModel = RelationExtractionModel()\
    .pretrained("posology_re")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    ner_tagger,
    ner_chunker,
    dependency_parser,
    reModel
])

empty_data = spark.createDataFrame([[""]]).toDF("text")
model = pipeline.fit(empty_data)
###Output
embeddings_clinical download started this may take some time.
Approximate size to download 1.6 GB
[OK!]
pos_clinical download started this may take some time.
Approximate size to download 1.7 MB
[OK!]
ner_posology download started this may take some time.
Approximate size to download 13.7 MB
[OK!]
ner_events_clinical download started this may take some time.
Approximate size to download 13.7 MB
[OK!]
dependency_conllu download started this may take some time.
Approximate size to download 16.6 MB
[OK!]
###Markdown **Create empty dataframe** ###Code
empty_data = spark.createDataFrame([[""]]).toDF("text")
###Output
_____no_output_____
###Markdown **Create a light pipeline for annotating free text** ###Code
model = pipeline.fit(empty_data)
lmodel = sparknlp.base.LightPipeline(model)

text = """
The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily.
He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day.
"""
results = lmodel.fullAnnotate(text)
results[0]['ner_chunks']
###Output
_____no_output_____
###Markdown **Sample free text** ###Code
text = """
The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily.
He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day.
"""
results = lmodel.fullAnnotate(text)
###Output
_____no_output_____
###Markdown **Show extracted relations** ###Code
for rel in results[0]["relations"]:
    print("{}({}={} - {}={})".format(
        rel.result,
        rel.metadata['entity1'],
        rel.metadata['chunk1'],
        rel.metadata['entity2'],
        rel.metadata['chunk2']
    ))

import pandas as pd

def get_relations_df(results, col='relations'):
    rel_pairs = []
    for rel in results[0][col]:
        rel_pairs.append((
            rel.result,
            rel.metadata['entity1'],
            rel.metadata['entity1_begin'],
            rel.metadata['entity1_end'],
            rel.metadata['chunk1'],
            rel.metadata['entity2'],
            rel.metadata['entity2_begin'],
            rel.metadata['entity2_end'],
            rel.metadata['chunk2'],
            rel.metadata['confidence']
        ))
    rel_df = pd.DataFrame(rel_pairs,
                          columns=['relation', 'entity1', 'entity1_begin', 'entity1_end', 'chunk1',
                                   'entity2', 'entity2_begin', 'entity2_end', 'chunk2', 'confidence'])
    return rel_df

rel_df = get_relations_df(results)
rel_df
""" annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown 2. Clinical RE **The set of relations defined in the 2010 i2b2 relation challenge**TrIP: A certain treatment has improved or cured a medical problem (eg, ‘infection resolved with antibiotic course’)TrWP: A patient's medical problem has deteriorated or worsened because of or in spite of a treatment being administered (eg, ‘the tumor was growing despite the drain’)TrCP: A treatment caused a medical problem (eg, ‘penicillin causes a rash’)TrAP: A treatment administered for a medical problem (eg, ‘Dexamphetamine for narcolepsy’)TrNAP: The administration of a treatment was avoided because of a medical problem (eg, ‘Ralafen which is contra-indicated because of ulcers’)TeRP: A test has revealed some medical problem (eg, ‘an echocardiogram revealed a pericardial effusion’)TeCP: A test was performed to investigate a medical problem (eg, ‘chest x-ray done to rule out pneumonia’)PIP: Two problems are related to each other (eg, ‘Azotemia presumed secondary to sepsis’) ###Code clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_clinical", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . 
###Code
clinical_ner_tagger = sparknlp.annotators.NerDLModel()\
    .pretrained("ner_clinical", "en", "clinical/models")\
    .setInputCols("sentence", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

clinical_re_Model = RelationExtractionModel()\
    .pretrained("re_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)\
    .setRelationPairs(["problem-test", "problem-treatment"])  # we can set the possible relation pairs (if not set, all the relations will be calculated)

loaded_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    clinical_ner_tagger,
    ner_chunker,
    dependency_parser,
    clinical_re_Model
])

loaded_model = loaded_pipeline.fit(empty_data)
loaded_lmodel = LightPipeline(loaded_model)

text = """A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge .
"""
annotations = loaded_lmodel.fullAnnotate(text)
rel_df = get_relations_df(annotations)
rel_df[rel_df.relation != "O"]
###Output
_____no_output_____
###Markdown 3. Clinical Temporal Events RE
Temporal relations, or temporal links (denoted by the TLINK tag), indicate whether and how two EVENTs, two TIMEs, or an EVENT and a TIME relate to each other in the clinical timeline. There are 3 types of relations; below are some examples, with square brackets indicating the EVENT and TIME connected by a temporal link:

**`BEFORE`**
The patient was given stress dose steroids prior to his surgery. ([stress dose steroids] `BEFORE` [his surgery])
The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `BEFORE` [admission])
His nasogastric tube was discontinued on 05-26-98. ([His nasogastric] `BEFORE` [05-26-98])

**`AFTER`**
Before admission, he had another serious concussion. ([admission] `AFTER` [another serious concussion])
On postoperative day No 1, he was started on Percocet. ([Percocet] `AFTER` [postoperative day No 1])

**`OVERLAP`**
She denies any fevers or chills. ([fevers] `OVERLAP` [chills])
The patient's serum creatinine on discharge date, 2012-05-06, was 1.9. ([discharge date] `OVERLAP` [2012-05-06])
His preoperative workup was completed and included a normal white count ([a normal white count] `OVERLAP` [His preoperative workup])
The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `OVERLAP` [admission])

| Relation | Recall | Precision | F1 |
| --- | --- | --- | --- |
| OVERLAP | 0.81 | 0.73 | **0.77** |
| BEFORE | 0.85 | 0.88 | **0.86** |
| AFTER | 0.38 | 0.46 | **0.43** |

This RE model works with the `ner_events_clinical` NER model and expects the following entities as inputs: [`OCCURRENCE`, `DATE`, `DURATION`, `EVIDENTIAL`, `TEST`, `PROBLEM`, `TREATMENT`, `CLINICAL_DEPT`, `FREQUENCY`, `TIME`]
###Code
events_ner_tagger = sparknlp.annotators.NerDLModel()\
    .pretrained("ner_events_clinical", "en", "clinical/models")\
    .setInputCols("sentence", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

clinical_re_Model = RelationExtractionModel()\
    .pretrained("re_temporal_events_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)\
    .setPredictionThreshold(0.9)

loaded_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    events_ner_tagger,
    ner_chunker,
    dependency_parser,
    clinical_re_Model
])

events_ner_tagger.getClasses()

loaded_model = loaded_pipeline.fit(empty_data)
loaded_lmodel = LightPipeline(loaded_model)

text = "She is diagnosed as cancer in 1991. Then she was admitted to Mayo Clinic in May 2000 and discharged in October 2001"
annotations = loaded_lmodel.fullAnnotate(text)
rel_df = get_relations_df(annotations)
rel_df = rel_df[(rel_df.relation != "O")]
rel_df

text = "On 9–28-92, the patient will return for chemotherapy and she will follow up with her primary doctor, for PT and Coumadin dosing on Monday."
annotations = loaded_lmodel.fullAnnotate(text)
rel_df = get_relations_df(annotations)
rel_df.confidence = rel_df.confidence.astype(float)
rel_df = rel_df[(rel_df.relation != "O")]
rel_df[(rel_df.relation != "O") & (rel_df.entity1 != rel_df.entity2)]

text = """She is admitted to The John Hopkins Hospital 2 days ago with a history of gestational diabetes mellitus diagnosed. She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine, 12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge.
"""
annotations = loaded_lmodel.fullAnnotate(text)
rel_df = get_relations_df(annotations)
rel_df.confidence = rel_df.confidence.astype(float)
rel_df[(rel_df.relation != "O")]
###Output
_____no_output_____
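The `setPredictionThreshold(0.9)` call in the cell above keeps only relations the classifier scores at or above 0.9; presumably, lowering it would surface more (and noisier) temporal links. A hedged sketch — the same pretrained model with only the threshold changed; the value 0.5 is illustrative:
###Code
recall_oriented_re = RelationExtractionModel()\
    .pretrained("re_temporal_events_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)\
    .setPredictionThreshold(0.5)  # looser than the 0.9 used above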
###Markdown 4. Human Phenotype - Gene RE
https://github.com/lasigeBioTM/PGR

Human phenotype-gene relations are fundamental to fully understand the origin of some phenotypic abnormalities and their associated diseases. Biomedical literature is the most comprehensive source of these relations; however, we need Relation Extraction tools to automatically recognize them. We present the Phenotype-Gene Relations (PGR) model, trained on a silver standard corpus of human phenotype and gene annotations and their relations. It extracts two labels: `True` or `False`.
###Code
pgr_ner_tagger = sparknlp.annotators.NerDLModel()\
    .pretrained("ner_human_phenotype_gene_clinical", "en", "clinical/models")\
    .setInputCols("sentence", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

pgr_re_Model = RelationExtractionModel()\
    .pretrained("re_human_phenotype_gene_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setRelationPairs(["hp-gene", "gene-hp"])\
    .setMaxSyntacticDistance(4)

loaded_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    pgr_ner_tagger,
    ner_chunker,
    dependency_parser,
    pgr_re_Model
])

loaded_model = loaded_pipeline.fit(empty_data)
loaded_lmodel = LightPipeline(loaded_model)

text = "She has a retinal degeneration, hearing loss and renal failure, short stature, \
Mutations in the SH3PXD2B gene coding for the Tks4 protein are responsible for the autosomal recessive."
annotations = loaded_lmodel.fullAnnotate(text)
rel_df = get_relations_df(annotations)
rel_df = rel_df[(rel_df.relation != 0)]
rel_df
###Output
_____no_output_____
###Markdown 5. Drug-Drug Interaction RE
In clinical application, two or more drugs are often used in combination to achieve beneficial results, such as a synergistic effect, an increased therapeutic effect, or a reduced or delayed onset of drug resistance. However, there is a potential for harmful drug-drug interactions (DDIs) to occur when two or more drugs are taken at the same time or at certain intervals, which can reduce or invalidate the efficacy of the drugs, increase toxicity, or even cause death. Therefore, in order to prevent harmful DDIs, medical staff often spend much time reviewing the relevant drug-alert literature and drug knowledge bases.
**ref**: *Drug-drug interaction extraction via hybrid neural networks on biomedical literature*, https://www-sciencedirect-com.ezproxy.leidenuniv.nl:2443/science/article/pii/S1532046420300605

*(figure: `image.png` — an illustration embedded as an inline base64 data URI; no caption was attached, binary payload omitted)*
xr5ytMN/HQ16PiVY+8Qh/Y78tCMgdOj+6tKFQz1OFG6I5bNQWt1y237n0Eti+naMHI2Zw4bwK7LAbUt+1/VzMHX7AHKLj25Evui8DjzwwL5miBkXyCQGu77CG27EyJJh8y6kx/KSrvZEtttQzxi+1Ph0XcfKevIN4Us9jnbRFJNvLB9CS4xM4r1Y7NhNio8PtGzhuu6epOFDEfSetnnTe97znp5do4epf4JHaBsMQubEygPpkzGyk3d950kh+m9s/8o5XxZc2uIYfiW/FNnGR6LCG9jAjj322NJBtk3XBE9CbBvH1jNFP+vqL7l5KAXXtj6in+UY93V+bdex8uDkk0/u+yAEOwF6vPvx4Nprr13aMqQ/Suw6AmKTk2cSNzkCDrIdfOaCuWSIr1yUNoyRCTF8GSsDhM5Q24QvDj5tIzTEjg+5ZYbQY7EhYAgYAoaAIWAIGAKGwOAQMEdAhS1Of7Kgy04pEriWiRYxW4O3hVNPPbVKz64r7ODiBhw9JE934YqdbXDKk+d1C7XkhzGUNCyWYTQjoMxzXJ67k2HdwoWenLFAytEgEnAOkfKJmVBIYHcbefbZz3622nWG5+zOJc+Y4HZhJXlabAjEIGA868ezHDUgO4TiUMMxZRJef/310uFK+LbOgU/SNsUchYfc0XLNzQcZIrujIBu0wyGLGbJLYYgjIF8iUi5/7FYnddCOgDqNdprSjoAxtKVgSn0xOAu97L7nOgrw5bE8J45xBJT20otCbbsTadnOIpHeeYOvijfffPOKJsYovjCPCYwbUjccyHW45JJLqmek0e102WWXlU6JOOTL+8Q4KR588MHlTrR8dY0DlH7OIpaEHHlIXjnw8mkbjmfW9bn99tuFhAL+1R8PNBmDfcpJ6dMVQR4XPrSQjXaswJlm0003LXeD5Ovr/fbbr+c5/I9R0g26jQahr/Bxh7QNH1Lo8PWvf7185joCxtB05ZVX1vZ9nGfRIc8777zi+OOP79uRDFnL7mk8YycYdg9kIVhoZhcMN4wlfwot3/zmNysam44G9hl3yO+5557r2TGWL+p10HiwWBYS9thjj7JdZPwSXIlxutbOzeLAkMpnbY6Aw5AVIfMM3ddDxhTkGotd/OFQJbiKHGBexUIafV87CXMdcpyb29Y+/Y53XEdAPtg66KCDyjHolFNOKXA0FZqJ4Ts3xPKZ5ONLq6TvinPoJbF9O0aOxsxhQ/gV3cN3Pg22erz6zne+U+1OybjEMc7IA90nLr/88p4m0bwSMlbFYNdTcMOPWFkyFrybyksT3W4Ty5cNXaP2tu6/IbKezEL4UsbRWiKcmzH56nqE8CFFx8gk3tNlhmCH42HTOMlHdt/61rdK3VDP3UQGzTnnnAUfUiKbkFfMQ+UZ8Xe/+90KzWHqny4ePm2QW+akyoNY2VkBXhQ9zpx183Zf/dfFM6R/5Zwv67o1Xcfwa4psO/fcc6s+j+Of/mjQbUPsW8zp0Luwh7jPQ+blMfVM1c+6+ktOHkrBtalv1N1PHffr8qy7lyIP0CGR0Tj5afkqpzBxihI8yTPSPPzww6XeiT1B0ruOgJJGzwXrbD+DbgefuWBuGeJjP4qVCaF8mSID6GcptokuHHzaRvp6rP6RU2YILRYbAoaAIWAIGAKGgCFgCAwWAXMEVPhyjIZMulDuJeAMIfeJ999/f3nUF7/yyiuVQwtpOQazLmiHGdcRkPTsDChlsshfF9gRiDQcB+IGJgDyPrHrCEj99OLnzTff3JOFdgRkYU0W2Jl8ynt8OUV93cCXblI2k1QLhsCgEDCe/S+yTTxLii222KLiyWOOOea/L/3nShuLcBx5+umn+9L43MDBQnjfdQRk0VOeseDgBo4n53mII6DOQy/CakdAnUacISlHO5jF0JaCKY5pggWOOHXHmg7bERBHcJHt0OYuUoMji04coSm0s8ttTNBf2ruOgJSh2+mss87qK0I7YXBUrhtwmtRl0FZuSM0jF15dxjzoxulFMCfWCw48/9rXvlY9n2eeedyqlr99yknp07WFNtz0oYVXNU/feuutfblxjInGhd0BdBiGvsJRj0KDu5ucfJChHQFTaerqt8hPoYe47kMSnGZ1GhaYdNC8M1b8GeLk1DbuUC99RC87NbpBy726nbrc9HW/P/zhD/dgygK74MpCCHo+xxQRUvlML/6wwKTDsGQFZXbNM3LJyC45gLOD7s8ckxobfPtdFx+yUPyRj3ykogtnXDek8pkvrW65Tb9z6CWxfTtUjsbOYaXuIfza1c8lT91P63RQaNZOrXq3/ZRxIRQ7obcrziFLNCZ1Y3gu3k3hpYlut6GdY/myq4/I81yyPoQvpWyf2DffFD6MlUm5sOviNb0DPY4n7sdl6Cs42stYinOAG7rGvRz6Z0obQG8XDl0yJ4c8yCE7fedJXfpvjv7V1e4+c263L7X99uVX8kiRbehl0t/rbPF6bHU/EsrRxiH1zKGfgVdXfyFNKg+l4NrWL9xnKeO+m1fT7xzygLz1Rgn0OTZgEKd2HMKYlzNXlKB5znUElDTM76X/1jkCDqsdfHRkXZ9Yux319pGLKTKBMnz5MlUGpNgmfHCgLl1tk2N8oJxUmUEeFgwBQ8AQMAQMAUPAEDAEBo+AOQIqjDl+lwnVEkssoe7++1JPCpi8uQY0eQFDgkzKcJhoStflCHjXXXdV+ZCfu/MSOwCKs8T9998vxVdxl+Kvt4rnWGE33HbbbeUuYWxBztfCErbaaquKrr322ktu98TaiVEvdPQksh+GQAYEjGf/C2ITz7IoLkeTEYtT73/fLIpnn322SoO8wcEyJrQZGE877bRKdnAssOu0wwICRrDnn38+pugeI0TdIiyZisykjtoRMJS2FEz54lzToXdd0BUftiOg3jGCY6HYSbEuaGMei9ruToZ177j3rrrqqtLoyQ5KOKG6QRsvKc8NmoYmAynHs8pYXLejWGoeufDyMebBn0ceeWS54xSY6R16wQZekrpijKsLXeWk9Om68truddEi72rDog9P0690GIa+Isetgz96ndb54HVkmnaASKWpq9+yG5rGl0VPN7iLtU899VRPklHgzxAnp7Zxh7oJbxC7fYSK77TTTqWDM/oEOwnEBD1HqFtAlzxz8FmbI+AwZIXUpWuekUtGdsmBJ554oqeN63bPEZq7Yt9+18WHlKMX6ZhLuSGVz3xpdcut+51DL0np26FyNHYOK3X35VfSd/VzybOrn5JOy3/k0WOPPVa+ru+Hzq1DsRN6u+IcsqQLk1y8m8JLE91uk8KXXX1EnueS9SF8KWX7xL75pvBhrEzKhV0Xr+nxiB3d64J2FqQ+buga93LonyltAL1dOHTJnBzyIIfs1Hp8m07Tpv+CR47+1dXulNM153b7UttvX35NlW2zzjprpTuef/75fSRpXZsPO3TI0ca+9cyhnwntXf2FdKk8lIKr0OkTp4z7PvmTJoc8IB/tCAhvd31w7cNzWqbXOQIOqx18dGSf+vjIkC65mCoTaCtfvkyRAam2iS4cpH93tU2O8YGyUmWG0GuxIWAIGAKGgCFgCBg
ChsBgETBHwP/gyxdfosSuttpqxdVXX93zpx33MN7fcMMNtS2jj+g77LDDatNwU+fHJLMusF28LGCy+K8Dznk8W2655fTt6rpL8deTnLojq6qMnIvFF1+8ooldvfhC0v1jwUvoxqHEgiEwCASMZ/1QveKKKyp+5CgKl1/lt/As8cknn+yXuZOqzcCIcUaXwc4DODdxtECOIPKbMnychrQjYChtKZjqXczYiarpGMNhOwJytKG0D8a4psDXo5KOmGNacwUc3s8444xiwQUXrMpgtzs3+BgU9Ze6HBXshtQ8cuHla8xz6ef3a6+9Vuoiu+66a4UXfFUXuspJ6dN15bXd66JF3vXh6QUWWKCqO8eX6TAMfUV/eQ4/LL/88gUfcjSFVJp8+u3KK69cYVLnCMgOpJqH9THtTXRzf5j8GeLk1Dbu6MUXHOHRjQcRtE5d59wsZebgM7046e4IKOW4cU5ZIXl3zTNyyUgfOaB3BrnpppuExODYt9/58CELy8JndR+YNRHny2e+tDaVo+/n0EtS+naoHNX8FjKHlTrr99v4lfRd/Vzy9OmnHBMnfYL4uuuuK19PGRdCsRN6Y+MQWeKDSS7erauPDy9NdLtNCl/WYVp3L5esD+HLOjqa7vnmm8KHuowQmZQLuy5eu/HGGyvZ0+QICN0in+rmgT7jXqr+mdIGtH8XDqRpkzk55YHbH0Nkp+88qU3/pfwc/cun3bvm3C4Wbb81L7WNz6mybfbZZ6/6OzuyuUE7ydTZEdz0/A5pY9965tDPhNau/kK6VB4aBK5Cv0/sM+775EOaXPJAz0U5Oakr+PBclyPgsNrBR0f2qY+PDOmSi6kygXbx5cumNvSRAbo/xNgmunAQ2rraJsf4QFmpMkPotdgQMAQMAUPAEDAEDAFDYLAImCPgf/DF+UCMXz7xZpttVtsy008/fZXPhRdeWJuGmz6OgPqoz/e97309ea244oplOXXHJpKwTfFnxxp9/Me3v/3tnrybfrjv+eA0zTTTNGVn9w2BJASMZ/3g03LEh2dJc/TRR/tl7qTqMjBqhyWhBYM8xqy643Gd7Ft/+hgh9E582hGQjENoS8H02GOPrcYInL2bwjAdAdn9Tx+P6Tqeaxo5PkrajhgsUgK7pe2www7lMSk6X7mOdQTEmVXyqDPg+xglm/LIiZevMU8w5nhRHEA4WkW3mdQ11hEwpU8Lbb6xb519eJqjtaXu2hFwWPrKAw88ULz3ve+taBBaPvOZzxQcFadDDpp8+i39XeiocwSEDnlO3OYIOFb8GeLk1Dbu6H7N7giDCr4LB5oe3QZt1+547OsIOChZIRi2zTNyykgfOcBHR4LhqDgCstuv0NTlCEusG6AAACAASURBVBjDZyE8Im3WFOfQS1L6dooc9Z3D6rr78ivvtPVznadPP2WBWvoEMR/1pY4LIdhpekOuY2WJDya5eFfqE8pLE9luAyYpfCmYtsU5ZX0IX7bR5D7zyTeFD913fWVSTuy6eI1xUWRPkyOg3v0q1hEwRf90cRR622LXztiFA32jTebklAeUFSs7fedJbfpvrv7lM+9omi+7vOjz24dfySdVtm266aYVT7g7/v3rX//q6SdNH+2ntLFvPXPoZ4J7W3+RNKk8lAtXoccnDh33ffIkTS55oB2/huUIOKx28NGRc8mQLrmYKhNoc1++1H0oVM5rOmNsE104CG1tbZNrfKCsVJkh9FpsCBgChoAhYAgYAoaAITBYBMwR8D/4yhes7GbHV4F1f/qrMBRe94jNP/7xj5VBAaMVX982BR9HQLYNf9vb3lblKY4r4iAy44wzFhwRXBfaFH/3WIxzzjmnLou+eywWa2McX10988wzrX+xx6z1FW43DAEHAeNZB5CGn/qoBY7q7uJZnvM1Y0zwMTAef/zxPYY1kSnzzjtv33HBITT4GCHaHAEpy5e2FEy33377So7Sh5uCyHnBhwWS2NBlMHr88ccrmijvG9/4RmtRk002WZV+7733bk3b9JCv/BdZZJEqHxZmdt9993LcXGeddar7o+gImBOvrrYR/OBLjswSJ36+IMYZ8NRTTy13FZJ+EusImNKnhUbf2LfOPjzd5Ag4TH2F/rDSSitVfVbaAv1tl112qY4LzkGTjzE9ZSFW2nCs+TPEyalt3NHHzM0333xSveyx78JBDj7rcgQctKwQ8NrmGTllpI8caFvYF3p9Yt9+58OHPo6AKXzmS6tPvXPoJal921eOxs5hNQ6+/Mo7bf1c5+nTTzniT8YHYvpIjnHBFztNr891qizxwSQX78bw0kS329DGqXzZ1U9yyvoQvuyiSz/3yTeFD2NlUk7sunhtPDgCprSBtHcXDqRrkjk55UGq7PSdJ7Xpv7n6l4++MxaOgKmyDecs5tKMxdg1+C1B14e2eOyxx+RRFae2sY9corAc+pkQ3dZfJE0KD5FHKq5Ch08cM+775EuanPJgLBwBh9UOPjpyLhnSJRdTZQLt7suXpI2VAam2iS4cpI+3tU2u8YGyUmWG0GuxIWAIGAKGgCFgCBgChsBgETBHwKIoUITFEHDJJZc0In7NNdf0GPC/853v9KTF6U0b+DkWqin4OALyLgv9kicLygQcL7i38847N2XfunDBkaqSJ3HbEca6gJdeeqnnve9973v6sV0bAkNDwHi2+dhxtxFwrhJ+/9CHPuQ+zvrbx8BIgc8991zxxS9+sZh22mkr2qARw8Zvf/vbKJp8jBBdjoC+tKVgyu530h4ca9oUhukIiOOn0ETctiOge6woxr2QwFf27Kgr5fG19VFHHVWw06CEDTfcsHo+io6AOfHyMebdfvvtxcwzz1xhgqPX3XffLXAVDz74YPUs1hEwpU9XhHhe+NSZrHx4uskRcCz0FY5vYlcJ6dsSgy0hB00+xvQUR8BR4c8QJ6e2cWfPPfes2oMdNFMcqtu6t+/CQQ4+a3MEHIasEBzaFhhyykgfOdC0sC+0+sa+/c6HD9scAXPwmS+tPnXPoZfk6NvQ2iVHY+ewGgdffuWdtn6u8/Tpp+wSK+MCMeN4jnFB6OjCTtL5xDlkiQ8mqbybwksT3W5DO+fiy6Y+k1PWh/BlEz11933yTeHDWJmUE7suXhsPjoApbSDt3oUD6ZpkTi55kEN2+s6T2vTfXP3LR9/RjnPMQVKCD7+Sfw7ZdtJJJ1VjMh9vLbXUUsViiy1W3WPufcstt/RVJ0cb+9Yzh34mFWjrL5ImhYckj1hc5f2uOGXc78pbnueSB+Q3Fo6AlDvodqAMHx05lwzpkos5ZIIvX6bIgFTbRBcO0ofb2ibX+EBZOWSG0GyxIWAIGAKGgCFgCBgChsDgEDBHwKIoHeEwyM8000zlZKYJ7n/84x89R8Atv/zyfUm1IvzlL3+577nc8HUE5NggWTSYZZZZitdff73gKE3u3X///ZJdX9ym+JOYY0Ek3/XWW6/v/aYbeuLBJMaCITAWCOC8ajzrh7xeLMYRAie8QQUfA6Mu+89//nPxuc99rpJFtOmRRx6pk3hfa9mLcaYu+DgCyntttKVgimObyF7a49VXX5Uie+
JhOgJSsIwr0Lb11lv30KJ/4Kgp9BPjIB8SLrzwwup9vsC/+eab+14fdUdACM6Flx5T6xYbKIsjpAXz1VZbrcAArkMOR8CUPq1p8bn2qTP5aJ6+8847a7NucgQksS5nmPrKZZddVqCvSZvh8PzGG2+U9KfS5GNMT3EEHBX+1P1x1VVXrW17udk27nA0srQD8b333iuvZY19Fw50vWLH4zZHwGHICgGua56RS0b6yIGmhX2h1TfW7dPW73z4sM0RMAef+dLqU/cceommJ7Zva1rb5GjsHFby9+VX0nf1c8nTp5/yAZ3IIz5AZE5PSB0XhAaJ27CTNF1xDlnig0kq76bykqZxItptcvNlXb/JJetD+LKOjqZ7vvmm8GGsTMqFne7HdfryeHAEpP1S2oD3u3AgTZvM0e/HyoMcslPj0DQ3pC5t+i/Pc/QvH31nLBwBc8m2n//85wUfyjI2TzHFFMVcc81VfoiPXtRko8nRxr5yKYd+Rl8gdPUX0mgeqJMlpGnjobKgoihicJV3u+LUcb8rf3musYiVB+SV4gj4qU99SsjpiY877rhKn1x99dV7nukfg2wHyvHRkXPJkC65mEMm+PJligxItU104SDt39U2OcYHytJ8kiIzhG6LDQFDwBAwBAwBQ8AQMAQGg4A5AhZFwa5MTP754q4r7LPPPtWki3ceeeSRnlfEkMAzFoKbju71dQTka51pppmmKlO2POeIz7bQpfgzYYRG/vgC8je/+U1bdtUzvcDMV5KUY8EQGDYCxrP+PKuN//D717/+9YE1V5uB8dBDDy1wnr7tttt6ysepaa211qrkETIuJmgnv6bdSnUaOWqdskJpS8FULwTTHsccc0xtdQflCNjkuIdDuIwJs846a4Hje13g2GBJx0L273//+7pkjfd23HHH6n2MbXVhPDgC5sJLG/Pq2ubZZ5+t8AJ3jMluCHUErCsnpU+79HT97qqzvO9jWGxzBBy0vvKHP/yhlGnbbLONkFzFv/jFL3rajSPPCKk0+RjTdRkYm93ArnjCw8RCG+lGhT+1MX+ZZZZxq9Dzu23cQc7rurIb6SCC78JBDj5rcgQclqwQ/LrmGblkpI8c8FmUFLrbYt9+58OHbY6AOfjMl9a2+sqzHHpJbN+OkaOxc1ipry+/kr6rn0uePv104403ruQRTv0StMwOmVvHYCdltsW5ZIkPJqm8m8pLE91uE8uXbf3DfZZL1ofwpUtD22/ffGP5kLJjZVIu7Lp4TfcDnBbrwgEHHFDJp7q5uM+4pzEM1T+hSb8fIgulPl04kK5N5qTKg1yy03ee1Kb/Utcc/cun3cfCEVD3afT8WPvW7373u2LGGWcs+/4DDzwgXakxztXGvnIph34mlenqL6RL5SEpKxRXec8nTh33fcogTao8kHJCHQG1kx801AWdps0RcJDtAF0+OnIuGdIlF3PIBB++TJUBqbaJLhykv3S1TY7xgbJyyQz5aFbot9gQMAQMAUPAEDAEDAFDIC8Ck7wjIDtHySJh0y5SGnK94M57X/nKV/TjQk/KeM5xvu7uPbywwQYbVOVieGsLW2yxRZVWaD3zzDPbXumclHF8kORF/IlPfKLxq8eXX365qgO7DOj3cJ6sqx/E4SDSVbfWSthDQ6AGAePZfzvw+vIsEC666KIV3/L1X9OuSPDyeeedV9xwww01yHffajMwbrrppiUNLF67AUc8kSsHH3yw+9jr9yKLLFLl0bSjXZMjYAxtsZhilNG7hXHNuOIGjNGCCXHKkZbaYFS3MEPZN954Y095V155pUtSScPiiy9epdtyyy370nTdYEyUemF456hhN3zyk5+s0mA8dEMOg2JqHrnw6mqb559/vsDhUjCrw+OOO+6onmOMqwtd5fBObJ+uK6/tng8tvO9jWGxzBBy0vvLoo4+WuFMfN2DMnGqqqcrn7I4l/TyVJp9+qxdS6/i9zRFwVPhTOzkhI9vkX9u4Q7vgSCj8wy6kV199tdtc1W+OposJPgsHkm8qnzU5Ag5TVlCXrgWGXDLSRw60LewL7j6xb7/z4cM2R8AcfOZLq0+9c+klMX07Ro7GzmEFixB+7ernkmdXP/3xj39cyaEpp5yyoN4SYseFGOykzLY4lyzpwgQaUnk3lZcmut0GjGP4sq1/uM9yyfoQvnRpaPvtm28sH1J2rEzKhV0Xr2kHiVF2BExpA9qhCwfStMmcVHmQS3b6zpO69N8c/ctH3xkLR0DaMods22uvvaqx+YILLiiwM+Lo0xRytbGvXMqln1Gfrv5CmlQeEtxCcZX3fOLUcd+nDNKkygMpJ9QR8NJLL636JPNWnPncoGlrcwQcZDtAk4+OnEuG+MjFVJngw5c5ZECKbcIHB5+2yTE+UE6qzHjooYeKlVZaqZh88smLz3zmMwVrjxYMAUPAEDAEDAFDwBAwBPIjMMk7Am6//fblRGvuuef2RneppZaqJmdzzDFHdeQbGaC4TjfddNVzFh/XWWedcuGRXfdwrlh//fV7FvaZoLUFnHJkEZMY54mmnQYlnxdffLHnHb3gQBocflC4db5MnK699triL3/5S7kbFLsd4pSD88xPf/rTMmsWY5deeume9zjaE0cEFrr5w4CCoYFJAQvhFgyBnAgYz/7bERDe9eFZsEfuaGcijkE/99xzi6effrpsGnYVwfi57LLLlrx91FFHRTXZrrvuWskGnOt0EGe7L3zhC/p26eAhOzxSJxZLYwJyVeQZRjNkF4YFjlCnrhytIc+Jb7311qqYGNpSMD3wwAN7aEGms6j/q1/9qjxK5eijjy5mm222njRNx9NUlWi5mG+++aq8Fl544VLGS3LkvQTGKsGInRjcHW932mmn6jnjwp/+9Cd51Tt2v2zXX/CzUKV31YCWI444oi9v2lboXHnllfuecwMHKEmzwgor9KXJkUcOvHzaRn+RDq8I7hgi4VVtEMTBoM4536eclD7dB3DLDR9aMCxTF2lD+kZdmHfeeas0GM51GLS+Ik4Y0CiyVMpH5gjt6FoSUmny6bfauMyRUm5AlghtxBz3LWFU+POcc87pofHUU08VEguc9XQfbxt3eOn6668vGBOkzuyCfdBBBxU4W9MeyMDrrruuQJbgYB8TNI+ecsoprVmk8hljqNSFXTF00HQMUlZQZtc8gzSpMtKVAyxa1AV9LGPshwzk69vvNB/ieFsXzj777Kqd+FBBhxx85kurLrftOodeEtO3Y+Ro7BxW6q/5pItfffo5+TYthPH+fvvtVzmGw7ssiuoQOy7EYKfLbbvWGMXIkmHxbiovTXS7DW0cw5dtfaPuWaqsJ0/d57r4so6Gpnu++cbyIeWmyKRU7Hx4TTsqTj/99D22S8FNO4xsvvnmcruK9bjXNPdK0T8pKKUNfHCgjDZ9IYc80P0tRnZCo888iXRd+i9pUvuXT7t3zbmhwzdo/LrkQA7Zht4vOrWOsYNgW+KjDveUBE1jbBvrPLrqmUM/A/+u/pKDh6SdY3CVd7vi1HG/K395nkMekJeml7Uj5FxbwHap+yL2XuY2jz/+eGknRafUDs0f//jHG
7MbZDtQqI+OnEuG+MjFVJngy5c6XYwMSLFN+ODg2zap40MOmeFuevLVr361sT/bA0PAEDAEDAFDwBAwBAyBeAQmWUfAs846q8fIwiLhcsstV2y11VaNaHIkIgs++qheJmnsVsJRvxJQ7GUnGD2Jk2t2h5FrYn6zsHfzzTdLFj0xk8U555yzemfnnXfuea5/4BjAV3JLLrlklZ4yPvKRjxQ4cegjD1j81F9NCU04DE0xxRQ97+vdEp977rniAx/4QM9z3qXObr3XXHNNTZ5dGwLRCBjPFqXDQgzPAvoJJ5zQx7PwrRyHIvyP4ROH4JBw/vnnF+zCp+UGcpKjheToW3G2o5wVV1yx3C0UZwztWLzddtuFFNuTVu8UKXXRsd4xkPvgiEx86qmniljaYjHFaLLGGmvUtofQPO200/Y85wvVfffdt6fOvj/WXnvtnrzYFZI2mH322XscX3AwZxwUGnAwow3Z2ZUxRO7jpPiTn/zEt/iedDjx6EUY8sQoKveI9fjCMcX0Cwx7GFK5nnrqqStaeJ8ddi+88MKyHMY4dszSYyZfmNLGOBPlyEMqlAMvn7Y57LDDeurLODv//PNXzk0syEnbENO3dtttt/IrbaHVpxzSxvZpKccnbqNl1VVXLWnX/ZA6YfRE9xHHVRadtPMvaTDE0vb33XdfRcYg9RXthIEcZYdkjN0stIguxIchfASiQwxNPv0W58N11123py8gR6BLMNl///17ZC64ffSjHy2xRRaONX8KTnfffXdPPaCTj3DQbXEQ5eMUn3FH8jvttNP68iNPVx/HYTwk0N58DINzIfnxh9xETrV95BPDZ4cffng5R9FzEPoXY6847w9DVoTMM2JlJO/BR64cmGeeeXrkAE7zehED/BnrZWwPaUvSdvU7HGvrxqCNNtqoGoMee+yxUg5pJ2XmVaQRx9wUPpM6ddEqH3BJ+q44l14S2rdj5WjMHDaEX0P6OdhqR8APfvCDpaxCnxa5QLzgggs26k4x40Isdl19geexsoR2GSbv5uCliW63oT1D+dKnj+g0sbKePEL4UpfZdR2TbwwfCh0xMol3Y7HzGSfZ2QzdWc+JkEV8oCLjEfNn7JBaH0J3ZP7nO2/KoX8KjqFt4IMDefvqC6nyIFZ2MgZLaJsn4dgTov/G9i+feUfXnFvq4xPH8Cv5psg2xnnsIXqcrrtm93m9K1tKG8fUM1U/6+ovuXkoFleffkKaHOO+b1kp8oCNEpC/Cy20UE8fg4e53/bh0mqrrdbzju6X2LWwecg97K+bbbZZKa91vQbZDj46cm4Z0iUXpe4xMiGUL1NkgNAZa5vowsGnbYSG2PEhp8zADiV9mfjII48U8iw2BAwBQ8AQMAQMAUPAEMiIwCTrCKi3xteKJ04HTUHvwqHf4ZrFKR2Y2C2xxBI9u3AxSWORkWMgOVLRzePEE0/UWfRcf/nLX67Sy4JuT4L//OA4Xjdf/ZtdT3RgZxiO5eTrYJ2Oa5wtWOD8+c9/rl8pr3kPJx591Ka8j/GQxei77rqr7z27YQjEImA8+2/kYnmWt1kYZnc0vTsgfMtvDErszvbaa68FN9E222zTJz9EHuBARsAAL85e8kxivmo9/vjjq+Mzgwn4zwvIUL0YS/7kjUGBozrr5BXHJKfQFospRt1tt922zxETJzt2i2E3PsFHYhz4YsI999xTMLZJPhK/733v6zMaghMGNH18saRnnNhkk00KDEwpgbppAyb5k/c+++xTHsdz5pln9tG6xx57lM+FFjeW46BxYnWfye/11lsvSx667ql4+bYNBkqph8TLL798wXEzBHY1lvsS8yW7BN9ySB/bp6WsrriNFr2rhNRDxzirEdwdjXWaSy65pIeEQekrf/7zn0vnNO0EpunAUNukr4XSBG/ovPW19H0+ZNH39bXslljH15KOdiGMJX/qhsOBUWiTGCdA+JgdGH3GHZ3fbbfdVu76J3npGAcddmWlXUICztQ6H33NB0ZtIZTP9A4AuhyuWWSSMGhZETrPiJGR7g4Tbn1FDsguxu5zfjO2x4S2fqePZXbLFD7ESd59Jr/ZkVdCLJ/J+8RttLq7lOr3mq5z6SUhfTtVjobMYUP4NbSf6515aW/0apxvPvnJT5Yf6H3nO9/pcc6va4PQcSEFu7ry3XsxsuSJJ55o7P/gMgjezcFLE91uQ9uG8KXbF3x+x8h68g3hSx86JE1svqF8KOUR826ITJJ3Y7DzGSfb+FF2qj3ppJMaeRZ73jD1T8EjpA18cCDfEH0hVR7EyM6QOVuo/hvTv3zavWvOLe3pE8fyK3nHyDZ2mpdxmznAeeedVzpYYt/meErXNk4aHWLbOLaeKfpZV3/JyUOpuGqM265zjPtt+etnsfKgy6aBM1lTePLJJ/ucVNErWUtifqOPBpY5BvNjCYNuBx8dObcMabMf6ZMDwCBUJsTwZawMkDYijrFNdOHg0zaahpjxIafMOOOMMyodhA9r6fsWDAFDwBAwBAwBQ8AQMATyIzDJOgLmh7I+RxZi2KmI46z4cmZUw9/+9rfyCM3LLrus3HmJHS26tq2nLhyFwrFyV111VbnTEu/pI9tGtb5GlyHQhMBE51nq/corr5THeXPcCQYIfg8jIFPYHYvjf/k6maOKHn744WQHQE37Cy+8UMpb6saXuBg3fEIqbbGYvvnmm+VusIwT+ohOH5pD0kAfuGPoBveuspDjHEFCWhyrMLjmlO0YtO+8887yyB2czRmDxnNIwcu3bTDssTPi1VdfXXAdGnzLkXxj+7S83xaH0tKWl+8z2mgQ+grGUD58uOKKK8oj1pE77MLiEwZFk0/ZbWlGhT/5cAYnxosvvrgcq3Lo0c8880w1RrATjl58bcNkUM8GwWfDlhU+2KTISJ/8c6YZRL+roy8Hnw2C1lx6SUjfTpGjsXPYujYZhXuh40IKdl31TZUlXfnnep6Dl6DF5oDpLTKeZH1bbUP5UOcVK5MmCnYai5TrlDZIKVfeTZEHqbJzEPOkid6/fHUObE+yAyYfjNZ94Mi46n784e7wntrG0s9C4lz6WUiZvmlz4epbXq5x37e8FHngW4ZOh22SXTeZB7PWQn/zCcNuBx+acqUJlYu+MiGWvlwyINQ2EYqDT/3Gcnygz15++eXF66+/7kOqpTEEDAFDwBAwBAwBQ8AQiEDAHAEjQLNXDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUNgtBHgFArZRY3TGZrC3//+92K66aar0v76179uSmr3i6I83cNwHfuuYP177NvAKDAEDAFDwBAwBAwBQ8AQMARGDQFzBBy1FjF6DAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgGYGtt966cu7bbrvtGvNjh6w55pijTDvPPPM0prMH/0bAcB2NnmDtMBrtYFQYAoaAIWAIGAKGgCFgCBgCo4SAOQKOUmsYLYaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhkAWBk08+uXIEnGaaaYrHH3+8Nt9v
fetbVTp2WbPQjoDh2o7PsJ5aOwwLaSvHEDAEDAFDwBAwBAwBQ8AQGD8ImCPg+Gkro9QQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBDwRwPHvHe94R+XkN9dccxXXX3998eabb5Y5PPvss8UXv/jFYrLJJivT7Lnnnp45T9rJDNfRaH9rh9FoB6PCEDAEDAFDwBAwBAwBQ8AQGCUEzBFwlFrDaDEEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMgWwI3HLLLcV0001XOQO+5S1vKaaaaqpilllmqe7NPffcxemnn15wRLAFPwQMVz+cBp3K2mHQCFv+hoAhYAgYAoaAIWAIGAKGwPhCwBwBx1d7GbWGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCAwFNPPVUcdNBBxUorrVTMN998xWyzzVasvPLKxQ477FCcccYZxd///veA3CypIGC4ChJjG1s7jC3+VrohYAgYAoaAIWAIGAKGgCEwSgiYI+AotYbRYggYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgEImCOgIGAWXJDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDYJQQMEfAUWoNo8UQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQCETAHAEDAbPkhoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChsAoIWCOgKPUGkaLIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCIgDkCBgJmyQ0BQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAyBUULAHAFHqTWMFkPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgEAFzBAwEzJIbAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAqOEgDkCjlJrGC2GgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGQCAC5ggYCJglNwQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMARGCQFzBBxQa7z++uvFj370o2L//fcvdtppp+KSSy4puOeGv/zlL8VVV11V7LXXXsUee+xRXH755cWbb77pJrPfhoAhMAQEHn30i8dgkQAAIABJREFU0eK0004rtt122+JrX/tacf/99/eV+q9//au49957i29+85vFNttsUxx33HHFI4880pfObhgCEw0B+v4vfvGL4qCDDio233zz4tJLLx3JKv7tb38rfvCDHxS77LJLSedvf/vbkaTTiDIEDAFDYCIgYDJ3IrRiWB3Giz4QVitLPREReOyxx8q52hZbbFEcc8wxE7GKVqchIvCrX/2qOOSQQ8r5xbnnnjvEknuLevHFF4vvfve7pc2CORky2cLYIjAqfWNsUWgv/aWXXiouvPDCqt/W2cfbc2h++tprr5V2vF133bXYcccdS/549dVXm18YgyfGt2MAuhVpCBgChoAhYAhkQsDG8UxAWjZjioDZR8YUfivcEBgzBMwRcADQX3TRRcXb3/724i1veUvP3+67795T2lFHHVW87W1v60nDOzgFWjAEDIHhIfDyyy8XK620Uh8vTjPNNMXTTz9dEfL73/++WHDBBfvSTTfddMUzzzxTpRvPF9///veLtdZaq9h+++2LF154YTxXJTvtkzI2OLTPNttsPX3/gAMOyI5xSobw57rrrlvAt3r8vfPOO1OytXcNAUPAEGhF4Lnnniu+9KUvFauvvnr54U9r4gn00GTuBGrMgKqMB30goDojkXRS1i8H1QAnnnhi8YEPfKBHH1xjjTUGVdxQ8p0U+smo1nGfffYp5pxzzp7+hMPRsMMtt9xSrLzyysXkk0/eQ8s///nPPlImVd2kD4gB3wjpG8Nqk2GV4wvtXXfdVay66qrFFFNM0dNvWVDPEa688spipplm6skbW8Caa65ZZn/22WcXq622WrHvvvsWfLwy7GB8mxfxUR0n8tbScjMEDIHxjsCojcXjHc+xpD9kHB9LOgdR9qD7sY3pg2i1+jwnon2kvqaT7t1B8+uki+zEqPkk7QjIV+Gbbrpp1N/Pfvaz2h5wzjnnFJNNNlnx1re+tdh7772L2WefvTJIzDrrrNU7Bx98cHl/6qmnLhcPZ5hhhirdJz/5ySqdXRgChsB/ERgEz7Ir5zLLLFPy31JLLVV+4a+diL797W+XBDz++OPFvPPOW6bDkPjpT3+64lnS33bbbf8ldJxePfvss8WUU05Z1YudES38G4FJHZuFFlqobwFs1BwB77777pJGd6HOHAGNiw0BQ2CQCOy5557VuMkHPjjITQrBZO6k0Mr9dRwP+kA/1aN7Z1LXLwfVMuyS5uqD49kRcFLoJ6Ncx6WXXrqvP+22226D6r6N+Z566qklHXyEqO0VdY6Ak6pu0gjegB6E9I1htcmwyvGFlJ36kcfvete7evotdrjU8L3vfa/6uJ56zzHHHFUZs8wyS3mSh+aVQw89NLXI4PeNb4Mha3xhlMeJRqLtgSFgCEySCIzaWDxJNkKmSoeM45mKHJlsBtmPbUwfbjNPNPvIcNEbH6UNkl/HBwJGZRsCk7QjIF8IaqNAyDVHGriBLxpx7COfz3/+8+Vj7m2wwQalYyDHBBPuu+++qtwjjjiivPfkk0+WO5Lxde/pp59e3rN/hoAh0ItAbp4ld3bxgWdx4JUjRDEoYqjEkRfeJHz2s58t080888wFOwgSTjrppJLnF1tssYLjSMZ7+MlPflLJJjDZcMMNx3uVstFv2Pwbyv/5n/+p+sioOQLqxn7HO95R0WmOgBoZuzYEDIHcCLg7Cj/44IO5ixj5/EzmjnwTZSdwvOgD2SueOUPTLzMD6mT3v//7v5U+OJ4dASeFfjIe6vj//t//q/rTWDgCSvf+9a9/XdHBnL3OEdB0E0FrOLFP3xhWmwyrnFBk//znP/f021RHQHa9wDYHD2yyySYlOdjf0U/4MH+//fYrzjzzzJ4yWSAbq2B8m478eBgn0mtpORgChsBEQGBUx+KJgO1Y1cFnHB8r2gZV7iD7sY3
pg2q19nwnin2kvZaT5tNB8uukiejEqvUk7Qj497//vTz2E0c9jAf8sRvWww8/XPvH14OSrs4R8Nhjj62eM5jp8Mc//rH6yZGbks9TTz1V3ceAh3FkLMPrr79e7LjjjqXBZCzpsLINgToEcvPs//3f/xXvfve7S35ksNQBw6QcHcLuPnIEz9Zbb62TFXzB8o9//KPn3jB/5ORZ8pJjtDje/Oqrrx5mVUa6LMPm380zXhb+zSllpNlpaMRdfPHFpU4z1rrV0CpsBfUhMIw+cMYZZ1R6/XLLLddHw6Rww2TupNDKvXUcL/pAL9Wj98v0y8G2yUQxdE8K/WQ81NHH2WuwPfrfufssRJpuMoyW+G8ZPn1jWG0yrHL+W3u/q9yOgFq+X3PNNT1EiP39mWeeKd7znveUevq0005b/PKXv+xJN8wfxrfpaI+HcSK9lpbDRERgGDaJiYjbeK7TqI7F4xnTsabdZxwPoXE8yIVB9mPfMZ105qvg17N8sNL683j+UNIPkYmTykdeDJJfJw6Sk25NJmlHQGn2I488slrAm2qqqeR2X/zoo49W6eocAdkVTBz8+DqxLrz55pvFNNNMU6abfvrp65KM6b1TTjmlpG3VVVcdUzqscEOgDYFcPHv55ZdXPLvTTjs1FqnLw+F3lEJunsXZEkfmP/3pT6NUzZGgxbApyi/sZZyzHQFHolsaEQ0I4KAtxwLecsstDans9kRGYJh94PHHHy/oZ//6178mMqSNdTNHwEZoJuwDcwTM17SmX+bD0s1pIhm6J4V+Mup19HH2cvvgIH77LkRO6rrJILBvytO3bwyrTYZVThMedfdzOwLKB6zYJvQH9m7Zr776avHDH/6wOtXDfT6s38a3eZAe9XEiTy0tl4mEwDBtEhMJt4lQl1EciycCrmNVB99x3Ie+8SQXBtmPfcb03OuePu0zXtP4YDWR7CPjtZ1C6Q6RF4Pk11C6Lf1oIWCOgEVRaCefNkdAFvf4ihBDQ50jIO/y7G1ve1vt8Rw0PceMihPF/PPPP1q9oSiKRRddtKTPHAFHrmmMIIVALp7V+Rx00EGqhN5LjvoWvj333HN7H47xL+PZMW6ASaz48bLwb04pk1jHrKnuZZddVsltcwSsAWgSuGV9YHiNbDJ3eFiPSknjRR8YFbyMjrFBwAzdY4P7RC3V19lr0PXPuRA5aFonlfxHpW+MMt45HQHfeOON8vhfsdFx0seoB+PbUW8ho88QGAwCZpMYDK6WqyEwbARyjuMmF/xbz9Y982Jl9hF/PEclpcmLUWmJ8U2HOQIGOALS1Pfee29x5513Fi+88EJPy3M8qBghpphiip5n+sett95apVtooYX0o9ZrDBs///nPix/84AcFx5SGBHb2+vGPf1zuVIJXcFM4++yzK9rMEbAJJbs/CghoB742511obeJZnrG1tPDtEUcc0Vi1j3/841W6OifguhdTeJYvYqD7uuuuK+65557ilVdeqSuiGATPYlS9+eabi+9///vFX//619py5SZHrtx2223F9ddfX9x9993F888/L4+SYpyun3jiieJnP/tZce211xb33XdfJy1SIMc5P/TQQyV2yNtHHnmkYCfWphCSPgQbynvxxRfL8QJ8HnjggaQjpJHjfMn+m9/8ptHRvKmOf/jDH8o2oq2oQ0hgbLvhhhuKm266qWBX3A022KDihfG4IyBjN05hYEndQkIKT+tyUtpS51N3ncI7dfnV3Yvl+5R+KHTAy3fddVfZhhzf3hRYaFp22WWrvhriCDjW7ewr/92656LbzVf/jqWNdkOOw3ePPfZYkiwcdB94+eWXS3ktco9+67PLHx/6MG4yZocE3/roPGN5UOfhcw0Wt99+ezmHePrpp1tfaXIEjJW5se1QRyRj14033ui9EwzHyDFuN8mYl156qfjpT39a6j/oEL4hR51853QuTexUz1wQvaqpXu477u9R0gdixtEc+Asm6FJg+Ytf/EJu9cTIDPQ1+hH6bFcI1S/JL7YvdNHiPs9dV51/yrjlOx41Gbr/+c9/Fg8++GA5z8DG4iPnoT2nnhUq/337ScjcRreHvh6UHNRl1F371rHu3Ri5QD6+fYm0Ps5efLHPnAudoEnekoZxpC2NriN9RWwDyJ077rij0nGxZdCf68IwdRPKh070PPQ9MAida9XVwb2XkwfdvN3fufsG+Q+rTULLiRlTQsfVnI6A2MTFjvfWt77Vbbqe3+wIiH3rRz/6kbeslwxi5Qrvjxe+zWmzEtxC45BxK3ScyFm/0HEbHAY9b8uh40MnbcDJMDi6+OpEde0c0pZ17+e4NwwdBvnnM09OsUvR33LZUAahz49lW+fWBVJkfVufDR2LJa8YWSPvxozn8i6xr+0kxr6b0m6xsjSGTsEjdhyX95viUZELTfS590P7cYj+3DWmD2Ld061f6u9RGYd9scptHxH8UniNPHz0kNC5h9DmxsydmbPKejZ9NiSE1BXZdc0115RrWU1lYG/HLoDPkRtC5cUg+dWlzX6PLwTMEdDDERBFhQXB+++/v691ERTnnXdesfHGG/cYItZZZ52i7m+ZZZap0rG7oE5z8skn9+V/xRVXFIsvvngx+eSTV+9h8MCJ8Pzzz+9LLzcwduy+++7FLLPM0vMe73KEwkUXXSRJS6Pihz70oZ50lPfOd76z56/JwFhlZBeGwJAQ6HIEbONZSISXDz/88GKuueaq+j18oflRX3OMtxgaP/zhD/ekcw38sTwLXSxUL7300gXOxFIe8ZRTTllsvvnmlQMyCwE5eZbJzcUXX1xssskmxQwzzFCVXec4DLZnnnlmMe+881bpNK0LL7xwufgZ0xVQjnbeeedi9tln78sbAy/0NS0o/O53vysxmmyyyfrenW666Yp99923hyTf9CHYUAD4XHDBBcX73//+Pjpmnnnmsn5nnHFGof+QrRizTzjhhOITn/hE8a53vavYaKONSqeFww47rC+vueeeu1xc6amQ8wOD51ZbbdU3BnA0/YYbbtjqtInxb/311+/hD93Gch3jCEi/dccW+Y3BQgfGPp4tsMACPQtcKP70EXlP4gMPPLB6XTulsFiPE/1qq61W6P7BOLfNNtuUC4DVizUXoTyduy1rSOq7lcI7fZnV3Ijl+9B+CL+hbyEH5ptvvrKNf/nLX5aOf2uuuWa1K7P0QdLhSKDDl770pWLqqafu4T9kuPQTYsYQN4x1O/vK/1S63fd9fsfSBt/hkOm2B2Majvj0WzeMVR9AbuP07+rb9LX3vve9xaWXXuqSWjqaH3roocWSSy5Z9bctt9yyJ12O+pBhLA/2EFPzA4MD84/VV1+9YIxi7MHJYN111y13OBdeYwxmBzjGzrqQS+bGtANjKA4HX/ziF8s5DjyOTsNcSM/P2LEdp3yMNieeeGLPeIvD4pe//OWe8fbtb397sd5661UfY+BQDC5aR+N67733bl0wi6mTxjhkTqffY6zcfvvta8fyRRZZpLj88st18trrGH2gbZzn4wwJ9Dktl7n+4Ac/WD7OOY7G4A/f4ji5yy679IxFGAqhG11KeA
N9WD4SAnM+LHJ1ZE4BwOlMB8rw1b3lvdi+IO/XxcOoq1tu6Hir3w8dj1xDN4bUr3zlK31zDeaEbR8NxOhZOeR/SD/xnduA57DloG5D99q3jjnlAjSE9iXeaXIEZBzCeM+84t3vfnclH7QxX9JsvfXW5XgrMoR5Sl0gPf0XPUpOHpF33Ji0EpCzw9JNpEzahrrr8REa0R2WW2654lvf+lbP3LfJiVryq4tjeLAuH597OfsG5Q2rTXzK0fWPHVNixlXKRf7qvuva0TRtTdfY37FFrbXWWj15adudvv7IRz7So9sjb3TILVfGC9/G2qw0djmufcct33FCaIqtH+Wk2iKgYVDzNqlfrI4PD55++umlPZuxaqmlliqz5ONr5oHCn8wHQ4NvW+aYJwxLh0mdJ8fapWJtKIOeu0if8G1rSZ8zjtUFcsv6tjr5jMW5ZI3QETKeM06F2E6kDOJQ+668G9tusbI0lk7ojR3Hpa5d8bDkAh8lIuddewu/WRshYOOse47PgE8/duvqqz/7jOm+656c2lZXB+4xN5GAo3tdOpznYsIojcO+WMl8MZd9BNxCeS1WD4mZe/DhHX0S/xjWFWl/ZB/1n2OOOSqdB92H39/5zndau0JIXXHGO/roo8t5MPNhylhjjTV68n/qqafKNB/96EerXc6xNergKy8Gya+aHrse3wiYI6CHIyAKIww755xz9rW2PJMJU0q8ww47VPmzE9cXvvCFSiixCMlk7FOf+lSPIwNOIm5g5ybt4LTiiisWpEPwifAhZrJHYICfaaaZCr2Ix0Io9/SfDBhuefbbEBg2Al2OgMKXdTwLrSiDKbyq3xXnpRSe5QsYTROOYNtuu22pnOAMJeWhHBBy86zgJeVIXOcIiCyR5yxKfPrTny4222yz0lFC7rPDQWhgQWDWWWct82aBFXm36667FixWS77EK620Ul/WKJKzzTZblY4FV45zXmWVVSqZh4OdhJD0IdiQP4s7Qi+OTMcee2y5KKwXheS5xDjUybWOkb/6t77GYdPdmVbqh6IrSi2yHsxwosSBUPKANhRTN5xzzjk9C9ykw5Fiu+22K1ZeeeXSKVXyiHEExHFLO8STF9u8b7rppgW7LElg1zcphxhlXQITLtLjMMAz6sjY+N3vfleS9IxnON3rvNxr+lldiOHpQw45pLas2Laso8u9l8I7bl5Nv2P4PrQf0q4zzjhjH35tvENbuv0QpyzwFn2HNDgDa33mq1/9alXVsW7nUPkvhMfQLe/6xrG04ZwJX0kbwIM4VO22224V39Iu3GdXGwlj1Qe0vs2Yi5MiY5B29oFejEc66HGb5/xpR8Bc9aHMGB7UtNZdf+1rX6vaSOgnxmFO/9bXOGFjsHCDnkPEytzYdmCOpGnk+qSTTirQmdz7OHZJv9TP9Acf+j7XH/vYx0rnL/e+/n3ccce5kJS/Y+skmYXO6eQ9dqjQzmjoUvAfjoy6X9PPm+Z3sfpA3TjPRyLoISwCSMCopXfcZmGQuXDOcTQGf75Exkio25frunuSBqdR5tltY/173vOe8itnqX+ofhnbF6S8unhYdZWyU8at2PFIG7ppLy2rpP0kxmmdxWU3xOhZueS/bz8JmdscddRRQ5WDLp7ub5865pQLsX0JupscAZmTST/SsXYExOlJP5PrOkdA2pO5q6Sh3zKfo3yc8vWcjjRajg9TNwETnJz1uE/53/72t8vxRuh3Y+R/SIjhwZD8Je0g+gZ5D6tNusqRehLHjikx46qUS7/WfSHGEbBJXuh8265ZfJaQU66Q53ji2xiblfvxneAYG4eMW03tXmevhJ6Y+vHBcQ5bBOUPYt4mOMfq+Hz45c7xsJtiW9Yfy8I/a6+9thTnFYe0Zeo8YVg6TI55cqhdKtaGMkx9PqStvTpPQKJYXSC3rO8iuWsszjVHEDpCx/MQ2wknP0kIte/Ke7HtxvsxsjSWTspLGcelvl3xsOTCb3/729LxSK+VId9ZJ91///1LMnF8x/FIxgbsRNi+WNPr6se6nqH6s8+Y7rvuSf/n4xDWUUX/wzaD3QuboATWmJhDSf9njeAzn/lM6SgmaXzjURuHfbGS+WIO+whYhfJarB4SM/doGheljzTF7vqS9InQurIe4pbhOgJin3XTuI6AvvJikPwqGFg8/hEwR0APR0AGRhgTxwo38EUigwm78wnz4kTHvbo/dpuQdPPMM09PGn10ELtXSbrPfvaz1W4UlM/XxvKMCRvbNktgO1q9aO4a2RgM5V2EhA7f/OY3q2d2NLBGxq5HDYEuR8A2nqUufMEGf6LgCj9gWK/jWe7pryPZ6UOnE2NiLM9Cj56Y4sTLlzsScFgRGolRmCXk4lkMDhzhw1bFuizXsEbZMkFA9mhHMpQs2YE0xhEQOSdl49SlwyWXXFI9I427YIKTmryLA6UOX//618tn2hEwJL0vNpTJl0hCB3JYO+pxpIM8I2YCAm20IV9hoQCusMIKPWlwdDv44IPL8YUvd3V/JQ+MYG7AMVV2dWRRhuOlJbz++uvlIpLQoR1WSHPqqadW5bOjAzsmuQG65f0mBdl9x/3NQrDeIQxnAzfsueeeVTmUx05PbpD+j2OhG/RCL2XhOMiOi3xFtN9++/UsBLM7GcYYN8Tw9GWXXZatLV16mn6n8E5Tnvp+DN/H9EP6J3zAn96VTdqP/sgEl36nFxy55ksvN2injbZdfsa6nWPlfwzdLkZdv3PQhmMMu7BJ4KtedrgVOYLBhq/XCGPRB1zZjFFHAvRox2V3Us7iN2OnlotaruaqTwwPSh3a4iuvvLJPXoljNcYhdiNgnMJZW9qLGGOEG1Jlbko77LHHHmU9RAfRtGLg0x8U4JxfN96SBtnCLu/HH398325h6DzsSsozdn9jPqfnVOwC7IaUOpFX7JwO+asdG9F5dMCgDy6CE86RbkjVBzAG6/agP9UFdnyFDvqPOATkGkdj8Yf2prGID1/Y2Yp+ouWYYIlhfZ999in4spwvivUR9aTRHyyE6JexfaEOc31vWHWVMlPGrdjxyDV045R60EEHlbr1KaecUvDBpLQfMbzthhg9K5f89+0nIXObOtlP3QchB10s63771DGXXKD82L7Eu02OgNgD0Af4Y14hfUo7Auo02iHbndcyp+ZDLMkDOeI6qLJbqzwnloUdaByWbkJZyBBOVRBa2IlQhw022KB6hpxHp+CP47hDQgwPhuQvaQfRN8h7WG3SVY7UM3ZMiR1XpVwW2KWvEMu4L8994lD7O+OuLlNsd5SVU66MJ76NtVkxh8sZQsYtn3FCaIut32uvvdao/2FL8rVFDGreRv1SdHzGJ/RbnPw0T8hOrpw6JR+0hDoChrQl9UiZJwxLh6krJ3aeHGOXCrGhDFOfD21r4csccawukFPW+9SjayzONUeAlpjxPMR2Io6AMfZdwSq23WJkaQqdqeO41Nc3HrRcEDrQvfSaJqeQaV0I2xbrfNi72FVUQlc/lnTEofpzyJgu6z6MW22+Ct/4xjeqsQ35ib5YF7DpkVfdnL8uvXtvVMdh6PTFKod9JIbXYvSQ2LlH0
7jIx7k46eHHw1ozc3v6vtaLOL1Ch5i6Pvzww6VdgDVqydt1BJQ02r7grjkIHV3yYpD8KjRYPP4RMEdAxxEQ5md3BP0nRzzWOQJKF9CCiYlUU9AOLUy06gKCQBaX8FR/5ZVX+pLx5ZYIEgSGBH0EFrthuUHy5V19jCLpfAcMN0/7bQgMGwHtCBjLs9CsJyQ4+DUFFo2E3+T4L502hWeRHZovb775Zp11oR0BcXjRDku5eRZlXOpJ7DoCcoycPGdRwg1MKHge4wioHZhdR0AUOHFuI/+zzjqrp+jFFlusokt2OpUE4tyoHQFD05NXFzakkQkFNNY5yelyXUds3tfKOLvkuIFFHo3TFlts4SYpuCdtdMwxx/Q914ZReIfjLgiMM3rRniN36oJ2eKmrY907dffYSVLo5AhjHTAyy46GkmbBBRfUScprFGSe1x3zqp1Sbr311r53cXCRvIn5SlGHFJ4mnxxtqelpu9Z9IpR32vKVZzF8H9sPpcyu9sMRQ7cfzt1u6JookX6s2zlW/qfS7WJV9zuWNhzZ9ZhWd/woMl0vHKO7umFYfYBFHd2XtCMgNLEbgDznA566wEc3kkY7Auq0KfWJ4UFddte1lld1hgf0DtdZBiOODl3165K5Odrhwx/+cNUOtAcOWRh0CSwAMGZxnANB17luvEWPkTYlrtvxD0d9nUbKKgsoinLBUD8P7Vuxczr6oJS7xBJL1DpK48gmaVjUwqlEQi59APyljKY5L0cXk4adW9zQ1UZdOlGOPtXVr/WOXRictTMO9aFP6A8fcIRzg49+GdsX3LLafg+6rinjVux4RH27+hE6J0dHSl9Fl3dDqp7Vha2PTtPVT/Qcw2cu5INNDjnoYtn2u6uOPjR3yYWUvkT52lCPQ0Vd0O2tHQF1Wj2vdR0B+dBL+iN2yLqFrDZHQCln0LoJ5TB/Elo5KYDFbR20/kI6FnVjQioP+pQ5jL4xjDahrl3lxI4pqeNqDkdAaUvaS/pem/39scceq9KRXi9+S15d40SXXCGf8cS3qTYrwS01jhm3fMaJHPXTcrzOltQ2bmu5l9tmmqrj02Z6Ywl4gg/RxdkHpyLmN8ybQkJMWw56npBLh9HyIXae7GOXGpYNJXXuEtPWIX2pLW2qLqDbsm7+7yPr2+hzn3WNxaRPkTW8Hzue826I7STFvhvbbjGyNIXOXOO42w+afg9TLlx88cU9utBXvvKVkiych+Qje+yedaGrH6fozz5juu+6Jxtx6I+x6tZumYewDobjo96Mqa7eTfdGdRyGXl+sumShj30khddC9JDUuQe4aDnrzrt5Dn/ozSiWW265nuZPqavG2nUElEKwNcucpk7PIJ2PvCDdIPlV6LV4/CJgjoCOI6AwXl08LEdAnCKk/L322qu2d7GduqSR40I5qkvuEV911VV97+60007l4ivHM+HRrIPvgKHfsWtDYCwQ0I6Aus+71208C925HAFjeRYa9LEVSy21VB+cHInKUUDswMMOIzrk5tkuJfy0006rZAzGftd5CmMLRqPnn39ek+l1jbzC6MQuHTiruUEb9FCkdNBOYziq6YVYJgPQpA14oekpqwsb0sjRxvRDvi5xg148YtHRDT4KIkceSz93v4bCGUqOPCTWTqNSFkeeSBryuf7668tHOEhIvjjoaAzlXeJcjoBamXf5lN3boIVjr+SrZH6ziCyBL7ZlkqcdGOS5VvR9FuHc8TKFp6EhtS2lHj5xCu/45B/K9yn9UOjpaj8m7dJfiet2/POZKI11O8fK/1S6Bee2OJY2vUsWX9vVLbhRruYRJt3ubjfD6gPIRHQKxh7+3EVqvcgNTXWha7LNOyn1CeXBOhrb7um2aDJOsPu47AgMz7HLjw5d9SOtdnxwZW6OdtDG7DqHK01vV53ZZVTBDacfAAAgAElEQVTLEBb83OAuMLlHJqfUKXZOx4K3HuPr6KYe6DR6fNUfleXSBxiztZzWehg0vPrqq9XOhHW7Q3W1EXm06UQp+Etbd/VrbbRjd9O6oBfc6hweu/TL2L5QR0vbvUHXNWXcih2PqK9PP9LtWLfjaaqe1YWtj07T1U9i5jZd2OSQg219zn3WVUff9myTCyl9ifL1XG4QjoDMW/VYqXcR1XgNwhGwbr7U1Tc5Aljk/AILLKBJLK+Zi8pzYr1TfV/ilhupPNiSdfVoGH1j0PqiVKatnJQxJXVcHQ+OgE16cJtcGW98m2qzkn6WGseMWz7jRI76pYzbg5q35dDxaTO9AM9cRz4MTmnPmLYc9Dwhlw7TpSuBW9c8Wc8p6+xW5DEsG4rWeWPmLjFtndK39LupuoBPW7bJek2Lz3XbWCzvp8ialPGc8n1tJ6n23dh2C5WlKXTmHMelbbviYcoFaNFrOTjD8aEuH2uin6+//vqN5Hb14xT92WdMD1n31I6xdc6+8iEvRwLHhFEeh6mPL1Y+slCPFa59JIXXoDNED0mde1Bel5wljXb2gyfkVKPUuoZiPWhHwBR+BScL4xsBcwR0HAFxLOBrKP3HkYQIARTepuD7RaLPjoCLL754ZTDjCzJ2jnL/EMLQxB8LrQQtSFkEYkANCb4DRkieltYQGAQC2hEwlmehK5cjYCzPQoOefIVuTZ2bZ7uUcBQgkTvE7HJCW7hf/+dsc46MPOOMMwp2hJOy3S+VXIVt+eWXL49vbqIjND35dGFDmtlnn72ikQmGG7Rxh2N+3eCjIGoHOjcPtq8WjPgy3h035LekIT755JNLMvTxl4cddphLWvVbTx5TdgRkt0lNh3bm23HHHctn5K/bSre71LVplyEfRZ8FK6GBoz90SOFp8kltS01L7LUP7/jkHcr30jZgG9oPhR6f9tNflt50003yahX7GFbGup1j5X8q3RVILRextOkddDGkNgW+ehf+I+bYbh2G1Qd0mfoaZ+MbbrihPDZA6GTMqwtdxjHeSalPKA/W0dh2z0de8b7eyRWHcR186tcmc3Ve+jqkHXSfrfugQOfrU+eVV1656qN1DnXs0iR9g/jJJ5/URTRe+9Qpdk6n55ldNH3uc5+r6Ne77ubUB1ZYYYWqDMZzHTiWFRrZTbou+LRRm05Ulyf3fPCXd7v69Y033ljVr2kxDd1e+kmdTOzSL2P7gtTBNx50XVPGLc3boXMln37ExzvSRuyi6Rt89awubCmvS6fp6idaX6YuXXMhyvTBZlBysA7jrjr60twmF1L6EuUP2hFQ757L7so4MtSFYTkCUnZb39THyPORoLszrrsbW52+Xlc/33u+POiT3zD6xqD1RalnWzmDGFN8x9Xx7AjYJlfGG9+m2qykn6XGMeOWzziRo34p4/ag5m05dHzaTMsATprKEWLaknIHPU/IocP46ErUpW2e7GOXGpYNJXXuEtvWOfpZUx6+uoBPW7bJ+qbym+63jcXyToqs0bwcswas9Z4220kO+67UV8dd7RYqS1PozDmO6zq2XQ9TLkAHG3bgPyBzXolZU6k7iVBo7+rHuh+FztN9xvSQdU/st1IveOK3v/2tVKOcU4kjM7wTE0Z5HKY+vlj5yMI2+0gKr0Gn
ll2xeojv3IPyfOTsXXfdVfUd+tBFF11UdpHUuvpgrZ0uB+0ImMKvJSD2b1wjYI6AjiMgBiw34BSIEBiGIyC7MOkjhGQAa4unmWaakmR9hChfwYUG3wEjNF9LbwjkRkA7AsbyLDTlcARM4Vn3Xb6qDwm5edZHCd911117lCNkEwsEKC51RxeF1EfSsmvMDjvsUB5TUSf7tEMY7/ClBseou2n5ygcnbTeEpud9H2w23XTTigZ3xz+21taTrjonOh8FEcc9qafrCKjHAEnTFR999NElPNNPP32Vb90W6oJhLkdA8tPGLnGyYMFLjihmket73/teRZfenvsLX/hCeZ9JaV3wUfQ5bkvw0Y6ALl9KmrZYxmGhJbUtJZ/QOJR3fPMP4fuUfij0+LSf5qe6hcUuw8pYt7Nbvq/8d99r65fyzO2fgnNT7JbhSxu7/+ljgdlhrymwWCz0EdNvdBhGH9Dlcc2xsYyrfD2q6yF0DtoRsK1Ph/CgW6+u3z7yijwOPPDAqs1oHx182qtJ5up8uI5tB23UaDNmU4ZPnRljpe1ljNK0wifynLjNETC0TlqOhszpDjnkkIomdnBsciKhHuyaKPTzUQ16CiGnPnDWWWdVZcA/L730UgWhfNR2+umnV/f0hU8btelEOq9Q/OXdrn7N2CMYNjkC6h0WYxwBY/uC1ME3HmRd3TFFMGuLZdxy3/Udj6TePv2IXdeEli5HwBg9qwtbaG2T/zzvmofEzG18sMkpB6VNmuKuOvKeD81NciG1L1H+oB0Bjz322KovNn3sBB3DdARs65vsgiS8Q+zu+HfSSSdVz9Gr/vrXvzY1v/f9GB7synxYfaNrMRU6c8iLtnJyjSkx4+p4dgRskiu02Xjj21SbVRc/+T6PGbd8xokc9Uvlw0HM23Lo+LRNjgV4t41j2pI8Bj1PyKHD+Ogd1KVtntxllxqmDSV17hLb1m6fSf0dowv4tGWbrA+luW0slrxSZE3qeO5rO9HlaJ2v7VrWGaSeEoe2W4gsTaEz5zgude2KhykXhBbt1ET7cTKL3pRB0um4rR+n6s8+Y3rIuif2rHnnnbeae2jHRHampM7zzDNP4wlYut5116M8DkOvL1Y+srDNPpLCa9AZq4fEzD0oz0fOMg6zU6bINVl3Tq2rD9bDcgRM5dc6nrB74wsBcwT0cATk6CJ2vlhllVUaWzfXjoAsIonQIWaQfuaZZ1r/5IhffeTOfPPN10hr0wPfAaPpfbtvCAwLgS5HQB+ehdYcjoApPOset3POOecEQZibZ32UcAg8/vjjexaKRWahcLvHBYdUiIX7RRZZpJKBLDzsvvvuBV8trrPOOtV9Uch03uwwxxHKQovELITvsssufYp+aHofbFBm5Ug+FEh+S9AGBSZ87JDgBh8FUefjOgLqYww4Mr5r7OA5X9H88Y9/7MENvJtCTkdA2W2XtqJ9CT/84Q9LWmQhlgUjUdppSxYQmNzNNttsZbqf/exntaTKO+Rdd9QVLzU5paTwtBCT2paSj2+cwju+ZfjyfWw/1HT4tF/bwiR5dRlWxrqdY+V/Dro11nXXsbS5O31+4xvfqMu+uqcn2nvvvXd1n4th9AEpEFmIDi0f4iDHcQZkpxsWtWU8GUtHQGj15UGpl2/sI6/IS+sc7Papncx82qtJ5gqdqe3ga8ymPJ8651g8iq1T7Jxuyy23rPorR/+2BXb/lb5Nn0d3zq0PsGO0dixklwUCRwFT9owzzti4q7RPG7XpRJQTi7/g1tWvUxfTKKdLv4ztC1IH33iQdU0Zt2LHI6m3Tz9qM3RLPil6Vhe2lNGl03T1E/IIndv4YJNDDgqGXbFPHX1obpILqX0J+gftCLj99ttXcpmdjJrCqDgCQp/eRXbVVVct55bc5yinRRddtKpP01HKTXV076fwoJuX+3tYfaNtMVVoyiEv2spJHVNSxtWJ6gg43vg21WYlfTVHHDpu+YwTOeqXgw9zz9tSdXxpL/AR/T92Jx7JS8ehbcm7g54n5NBhfPQO6tI2T+6ySw3ThpJj7hLT1rqvpFyn6AI+bdmkQ8bQ3DYWS34psiZ1PPe1neSw76a0m68sTaEz5zgubdsVD1MuaFrWWmutagzABnTzzTfrx33Xbf04VX/2GdO1bGWe0RW0jYsNQ3DwIrBRCGMfjl2xYdTHYV+sfGRhm30khdfAPlQPSZl7UJ6PnCWd5kmcPgmpdfXBeliOgKn8WgJi/8Y1AuYI6OEI6NPCuRwB2SlBJmXE7IbkG/bcc8/qXb64xdM3JPgOGCF5WlpDYBAIdDkC+paZwxEwhWfZflvze9uRrHV1ys2zPkq40PHcc88VOHJNO+20PXVAcdLbb0v6thjHrs0226zKh0Xjo446qud4oQ033LB6XucIKPmzpTu78WlcucahsC74pvfFRu98gOPaUkstVSy22GIVPTPPPHNxyy231JHi5ZjQZpigjlLvD33oQ7Vl1N3EmVzeI2YL8KaQ0xGQCaeUO8MMM5ROJVtvvXV5T39BuN5661XpTjvttNLZlPdwBpQdjFx6fRT9JqeUFJ4WOnyU/ba2lHy64py801UWz334PrYf6vJ92q9r0VxP4up4bqzbOVb+56BbY113HUsbjsXC08RtOwK6x6rCMzoMow9Q3u23314gl4VuFgzYCVzCgw8+WD0ba0dAaPLhQaHdN/aRV+R1zDHHVFjMOeecPdn7tFeTzCWjHO3ga8ymPJ86py4epdQpdk6HQ630ZRxttbNmT4MVRbHffvtVadl9jfFkEPqANqxzPCxBdvVtcwzxaaO2cTQFf8Gqq1/nWEzr0i9j+4LUwTceZF1Txq3Y8Ujq7dOP2gzdOfSsLmyhtUun6eonUl9i37mNDzapclDT1XXtU0cfmpvkQmpfgv5BOwKyI77I8AUWWKARslFyBEQv+eAHP1jRzUkBODHi6C114Sj6tvGosaJFUY5NuewETeUMq2+0LaYKbTnkRVs5KWNK6rg6UR0BxyPfptispK/mjH3HLZ9xArpS65eDD6Ej57wtVceX9gpdgJf3fGPftpT8BjlPyKHD+Ogd1KVtntxllxqmDSXH3EXaLrSt5b2YOIc+7tOWTTpkDM1tY7HklyJrUsZzyve1naTYd3O0G7T6yNIUOnOO49K2XfEw5YLQ8vTTT1cnMImOzoYefBDaFNr6car+7DOmh657/uEPf+jZ2Y0NlvgokfU5fCXQRWPDqI/Dvlj5yMI2+0gKr4F9iB6SOvegPB85S7rpppuumrvKqSWpdfXBeliOgKn8Gss39t7oIGCOgBGOgNdff31xyimnFL/73e+qlszlCEiGWhlAsfMNHFslAznxvffe6/tqmc53wAjK1BIbAgNAINQRsI5nISuHIyD5xPIs73KUmPAtDk8hITfP+ijhLn0o0Rj3pQ7EtE9I4ChaeZ+F67ovknwdAaXcyy67rGeCg8PiG2+8IY/74q70Idiw0w2OeNSJXZPmmmuucncpHBjbJlg+CmKbYUL3ByY4TJZ9g1aM9fbp7vs5HQH5MksvEmGUYgcjJmhM3iSwU6b0D75eO/jgg8v
fLMY1BV2f0B0ByTOFp3k/tS2b6uXeHwTvuGXU/W7j+5R+KGX5tF/XorluwzpHQMrSaUL0LaEztZ1j5X8q3UJ/WxxLGwvAwq849jYFHMYlHfE111zTk3RYfYCj94SO1VZbrc+5eNQcAQWkNh6UNL6xTz8mrx133LHC6lOf+lRP9j7t1eYImKMdfI3ZEO5T59TFo5Q6xc7p2HFP+jPxU0891dNO+sfmm29epV166aWrR7otc+gDsvuf0PXjH/+4wPGQ321H0fi0UZtOlIK/gKGxqNMlciymdemXsX1B6uAbD7quKeNW7HhE3X36UZuhO4ee1YUtdHbpNF39pK6du+Y2PtikysE6upru+dTRh+Y2uZDSl6A71BGQxYu6wAdQIhP17ubMFeU+c7mmeeMoOQJSP+iURQx2F+GjPhy/OaLzjjvuqIPA+14OHvQpbBh9o20xVWjMIS/aykkZU1LHVXRX6d/Ef/nLX6TawbGv/Z1TGHSZsjOMLjBVroxXvo21WWnscl93jVs+44TQlFK/HHwodBDnmLfl0PGhJWQBXtch9LqrLSW/Qc4TcugwPvKBurTNk7UO3GSXGpYNJcfcRdpOYt+2lvQxcQ5dwKct23TIULrbxmLJK0XWpIznlO9rO0mx7+ZoN8GKuE2WptCZcxzX9LZdD1MuQAf6zworrFDqRGyqoE9n2XbbbRtJ7erHKfqzz5iu29VnR0AqIrv/of9xfcABB5T13njjjRvr6fNg1MdhX6x8ZGGbfUSXE7r2CM4hekjq3IPyfOQsO9nr+YLM4VPrqrF2bejS52QOTfmrr7663O6JfeQFLwySX3sIsh/jEgFzBIxwBMSoBXNecsklVaP7GiJ4RwQLwqwu6MkKu5QwMPoEjIiSNzFfzoYELdw44sOCITCqCIQ6AtbxLHXL5QgYy7PQwCAvfIsD1G9+8xtv2HPzbJcSfuihhxbLL798cdttt/XQyFdeentxtk4OCdpowmS0LjQ5AuIwBk3bbLNN32scUyzYEvMVUGh6ybQLG0lHjJO4OLg98MAD+lHrtVYQ11hjjdq0bYYJbdShvl//+tdr86i7KY6LvDfLLLMUHMlbF3I6ApL/RhttVLURjgiUDz/p8OKLLxaTTz55+YwduWTXCRx8m4KPot/mlJLC09CU2pZN9XLvp/COm1fT71C+T+mHQoNP+3UtmuuJkutkJuWMdTvHyv9UuqX+bXEsbXoHz1lnnbVxBxiODYbf+WPR+Pe//30POcPoA+5kH4OEG0bBETCUB906dP32kVfs4MjRGtJme+21V0+2Pu3VJHNztYOvMRvCfeqs+QxDuxvYeV3wIEbHkJBap9g53T333NNDkxzFK3RJjCGYsV7oZ4c+CYPQB3SeMp6vssoqUmRt7NNGTTpRKv5CUFe/1uMdRvC6IIZnsK7Tj7v0y9i+UEdL271B11XzU4h9A5pjxyPe9elHbYbuHHpWF7bQ2aXTtPWT2LmNDza63ULlYFt/q3vWVkdJ70Nzk1wgj5S+xPs+joDaya/pdA+dRjsCkl7kMjE7DNWFUXMEhMZdd921pH399devIzn6Xg4e9Cl8GH2ja3EGOnPIi7ZyYseUHOPqRHUEHK98G2uzEn5q+8hW0jTFseOWzzghZabUL4UPBzVvy6Hjg03IArxg2RbHtqXOc1DzhBw6jI/e0TVP9rFLDcuGkjJ3SWnrFHlBX8mhC/i0ZZsOqfusz3XbWCzvp8ia2PFcyva1neg+g24ass6Q0m6hsjSFzpzjuODbFQ9TLkCL7CDJWhXjIyd86TlHk828qx+n6M8+Y3rMuqfbnuL0WLfpSFc76eejOg4Ljb5Y+cjCNvtICq9Bq68ekmPuQXk+clY7VtNfONGCkFpX7eSHrlMXdBr4qS74yAveGyS/1tFl98YXAuYIGOgIyMIJ3s4MloNyBORrGj0Ys0UxjjZ1gYVTFhgk4MAn7yK4rr76annUF4tQkwd6wGBxKPRoYcnHYkNg0AiEOAI28Sw05nIETOFZttMXniX+xCc+0fj1/8svv9wjC3LzbJcSzlf90Ei5bmCSJvVgx7aQsMsuu1TvMinBkOKGT37yk1UalFYJjz76aHkfpcgNGBummmqq8jlym3xD00ueXdhIOmIcJASLCy64oDzyEAW2K/go412GiUUXXbQqm69Km3aGZUw577zzihtuuKEkSyue0E6b1I07G2ywQZW/Hnu66tb0/Oyzz67yE8xOPfXUvuQf//jHe9KxgNZmTPJR9JucUig8had5P0db9oFQcyOFd2qyq70Vw/ex/VAI8Gm/rkVzPVGqW7ymrLFu51j5n0q34NwWx9J244039vDqlVde2VcM+qU46MP3W265ZV+aYfSB559/vnRCFNmjxxYhiJ1s5Dk01YWuyTbvpNQnhgfr6Gy65yOvvv3tb1c4sMMuCwE6+NSvSebmagdfYzZ0+9Q5ZfEoR51i53QcySh9dqmllqodyy+//PIqDfqR/ghlEPqA1leFtosvvlh3ob5rnzZq0oly4A9BXf1aGwYH5QgIHbF9oQ/UlhuDrmvKuBU7HlFdn37UZujOoWd1YQudXTpN2zwkdm7jg02KHGzpbrWP2uooL/jQ3CQXyCOlL/G+jyPgIossUsnXpp2RmxwBwUA7aXPNBwlu4EMzkaXEdXa7QesmmibsirLIxseB1113Xbnja92cXr/nc52DB33KGUbfGFabdJUTM6bkGFcnqiPgeOXbWJsV8ucjH/lIefLFpz/96aidHWPHLZ9xQvg9tn68nzJuD3LelqrjUzffBXjBsSuObUud76DmCTl0GB+9o2ue7GOXGpYNJWXuEtvWBx10UIH9gI9E0bljQg5dwKct23TIULq7xmLyS5E1vB8znks9QmwnsfbdlHaLkaWxdOYcxwXfrniYcuHSSy8t5w18gH3VVVeVpFFn2WSBuQQf/b7wwgt9ZHf14xT92WdM1+ODr68C68B6bk39Fl544b66xdwYxXFY6uGLlY8sbLOPUF4sr/Gurx6SY+5BeV1y9m9/+1sxzzzzVHNrjgPWIaWuwnv0QebKOOG6QdtffRwBm9a3yHeQ/OrSbb/HHwLmCFgU1RaxMCVOI21Bb1uuHQHvvvvuSmCw00GdMY58tdMDQqbO0YJ3ZWckaOKPozdZjMSYxh9blMLcCDNNMzskiRGO99hhDKWbyTr5cvwChjkGLhyOdNDHL/KudsbAuFdHq37frg2BYSGgd9fQ/b+u/CaeJe3aa69d8e0hhxxS93rZ7+ebb74q3RlnnNGXLoVn4auVVlqpyh/eQ8m49tprS379xz/+UTzyyCPlcagsGvz0pz+tys/Ns+y8Rvnyx0RfB5mI6Z1jeE79F1hggeo9jn0LCe7XOvoLMwwV+gsjaDviiCOq7MUYwf2nn366us/FueeeW9EExoTQ9JJhFzaSjhjZKhjqmB1Q2CUBhZp2dYMcecs7yOi6oL9SYVt3N+B0w+ROyuWoXXAQbHDewDlx2WWXLdMcddRRZRY4mU433XTVe7y/zjrrlM7kOAiQL7TrvN0doVxafH7jIMk4JfTikADWbj
jhhBOqNKTdZJNN3CTVbyaTU045ZZWePlQX5p133ioNyrkOKTxNPjnaUtPTdJ3CO015uvdj+D62H1K2234YReuCPoJAHFp1Oi23mfTr46fkeqzbOVb+p9KtcWq6jqWN/JAdwtPIPcYwHXbaaafqOePan/70J/14qH1A70DAOCa0YHRAPmoDHXKlTheWXXCoM/zihtQ+HcODLg1tv7sMQTiNy0dQ1NHdocitX4zMzdEOOo9TTjmlrcpeMlob1jkuxg0chSj9nJjjrnXQ9MT0rdg5HXNE+QgCunbbbTdNVumcoeUn80kdBqEPMK5rmlgIanPmh57UcTQVf7df141F2vDNUZh1ddIL0RzH7AYf/TK2L7hlNf0eRl1Txq2U8Uj3IxaE64K2z+DEpUOqnuWDLeVpnqzTadr6SezcRmPTNO9IlYMay67rtjrKuz40t82VUvoSNDAHFrnPLid1gfmSpMEuB80PPfRQcf/995dzMo4FkufEt956a082Bx54YM9zPpJjYedXv/pVgV3j6KOPLjjSS+dRd4TwoHUTTTT2CU2PXFN/HNLZdaRu4UPn0XSdyoNN+br3h9E3htUmXeXEjimp4yp2COkbxD4fSrrtJL997e8PP/xwT5ks9rkhVa6Q33jk21ib1RZbbNGD6bHHHutC2vk7dtzyGSek8Nj6pY7bg5y3per4YKNl6hxzzNG4diU4dsWxbanzpV0HMU/IocOkzpOpp49dinTDsKGkzF1i2pr5sZb72KeZE4QG3W/JL2TNQMrKIeslL5+4ayxOlTXQEDue864e07tsJ7H23ZR2i5GlsXSCR65x3KdvkGZYcgFbLHYK+Ma1+9x11109PgR1R+d29eMU/dlnTI9d93R3PGxznvJtM9KN4jgs9PtipWVhjH2E8lJ4TcuFLj1Ey6kYmyq0tjkC4vwqR2bDI8yxX3nlFYG0jFPqyvxfj4Gs92PrefzxxwvWzvfbb78ep1U2QakLvvJikPxaR5fdG18ITNKOgF/96lcLjiXSDgMwJwbgpj+UVmFgHAH5QpcFTQxccp94tdVWK3beeedCjvq45ZZbyi+IWRDV6TAE8oUECrUOzz33XPGBD3ygJy3vMTnSEyTurbnmmvrV4rTTTut7j3R6EY/f7tnk2pjCc/6o15JLLllipB2Qegq0H4bAkBDIwbOQyqQRA5bmJb4W2XbbbUtjvVQHJRleFn4gZqFm++23L3BM0iGFZ3FK0V8YSHk4XU0xxRQ95aN0SsjFszg8IIfgdSmbmC99kW9yvK1MxHi24oorlk7UOBprx+XttttOyPOOcTTWC2DkjzIo94i1PGQBmXJQxrQxgoUSdlBFqUX5kfbFwU12uwlN74uNVJb0YKNxrLtmZyRZEEEJpj4ce6vTsvvehRdeWGZNG7ALxZxzzlmlwemcNtFO2yR2neYkT/CRa2LGIxxOJWBEEMx0Orl2xxB+syCWur26OCVSDs65dYGFA+2EWLeTEAvwODwst9xyPfVEYWY8FucvJoB6kY5ymVCA73333VcVH8PTuduyIqbhIoV3GrLsux3L96H9kOOo4Vu3/fhoQrcfC6DaSEr7obMhq5566qmKfu3oTRp2yIQ3Z5999p4PIca6nWPlfwzdFTieF7G00Za6HXGm40hMnPgZV2gP/phk/+QnP6moGYs+cNhhh1X0QBMycP75568MYvqLT55zbDtyBsPt+eefX7Dbjx6np5lmmrKuOMvlqk8sD1bAdlzoBQ6MhDgrHX744cXee+9dfPSjH61kL07bfB3JF7aEnDI3pR0Y8/lgSjuV0+cYV12HdR8ZjfP8uuuu29Mv2MUA/ULGiP33379H96FvgBWySuRQSp2kyWLndOy+pj8Mgzb0NXbf1HNZ+i/91A2D0Adw4Acn/sCvKfi0kY9OFIs/cq9rLMJ5gbbWOhn14qMTcRpFX0dH0roT/QhZiN4Wql/G9oUmnLnvI6Ny1FVoSBm3QscjTkWo06032mijSrd+7LHHSt1PfxiCrkkaacdYPQs9tasfgUuXTuPTT0LnNj48lksOStu3xT519KHZRy5AR2hf4h3GxK222qpgjBc5xvwSGSofVUkd4X1JUxejs+r72AC0Dot+ga6h07jXyBJ9j11d9t1335KEYekmUl9i2kfTU3ctTpH6PZ/rWB7EThAaBtU3htUmXeVoPCXbILgAACAASURBVGLGlNhxlQ/a6eN6zk8fYeGNsZSj1nyDr/0duUjebpk4h2H34sOfnHJlvPFtjM1K2kg7V9GOTfYbSV8Xh45bPuOELiemfj46EWV0jduDnrfF6vjwITyx0EIL9chreIL7dR8haEybrkPbsimfnPOEnDpM7DxZ19PXLkUfHJQNJYc+H9PW2vFQdAPWMUJDii6QU9b70N01FueSNUJL6HgeYjuRMohD7bu8k9JusbI0hk5oTR3HNVY+14OWC3feeWeBYx/2b+E9frPrpdSXtU13QwgckZjbdPVjXcdQ/TlkTI9d92QNUOrN/I2+mCuM2jgs9erCCvtGDvuIlBfKazF6SOzcA36WoB0B0WGxEXPSIDYfvRM/64LwTV0IravOw/UrkH5JzLou5co91hY222yzvnXeLnkxSH7VdbHr8Y3AJO0IqI84FIYLiXEEZKGk7R2+MCfwlVxburodtPiyl0UbfXSI5IHxj0UqvPfrwm233VbuKCXpdbzggguWXxXXfTnMIpdOyzWOkuutt161o1RdeXbPEBgGAjl4FjpZEHX7ufxmMUiCfDUjz3SMQ4kbUniWdzlet65MHMRY5GYXADfk4FmOGNd1c6/ZRZSA0Uuc89w0OFIef/zxtcf6ujTX/eYrJa38kD9Y7LPPPuXX4meeeWYfjXvssUfBETM4LGsHAE0bypIs3FNuaHpfbMibXZBk9yjkLLsooYwhxz/zmc/0tS1pCNRR06yv5UgpHPb0fX2NfHYDjtsY17XzHO/wG5xxhn3ttdfc10oD4BJLLNHzHooojuMY3nEi0GVzfeKJJ/blE3JDHyt90UUXNb4qRmfGpLrx6/XXX++jTdMqzhnuDpw6jd7pF0JCeXoQbdkIyH8exPJOV77yPIXvQ/qh+zWgbheupf3cBR2dTh+FzaISTsP6Odfve9/7+iZVY93OsfI/lG5p05A4ljacxJgs64m1tAWyHWM/BiAdxqoP6C8ihcbll1++PLIA+uaee+6+fsSuadtss03ffXkfh8dc9UnhQY1v07Ve4BD6dcwYgMM/cwsdcsvc2HbQxk1NN9c4HujgI6Nx9nDzkd+ye2xdv5Y0ekE7tk6a5tg5HTtQuQYjaEQPQP+Quuiy9DULgjn1gWuuuabElTYRea7Lk2ufNvLViWLwf+KJJxrbH/ygvS0NTj6Ek046qTEf5vAh+qVgE9sX5H039pFROeqqy00Zt0LGI59+hCO68K0b6+ODYvQsfYSsm7f0I3Dp0ml8+kno3MYHm5xyULd/3bVPHX1o9pUL0BDSl0ivv8J32xOnCjcwN9KLDrzDXJlFB/SjOvue1mFZvOADRfcjLj6gYMymT7p08MELYVi6CWVBpzhvM
LbgYIUTLvof9Nd96MjO9KEhhgexE8SEQfSNYbVJVzkuHjFjSsy4yk5Dbn/Vv+XDR5e+ut++9nd0GF2Ge41DS265Ml74NtZmJe3BR5U4H8smB9iWQkPouOUzTggNsfXz0Ykoo2vcHvS8DRpidHy9W67LD/xmsT0mhLZlUxk55wk5dZjYebKuZ4hdalA2lBz6fExb43zD2ok+hjb2eOBYXSC3rNdtW3fdNRbnkjW67JDxPMR2osvgOsS+K+/GtluKLI2hE3pTxnGpr288aLmgj3bVMn+ttdYqScQhVX84qtNgy+zqx249Q/TnkDGdcmLXPWXTEuYkucMojcO6bm1YtdknZO3R1z4iZYbwWqweEjP3wF4vwZ2T677ONXNonAPhibYQUledz5NPPtm3YQzzZtZZmf/ro4GFNnedt0teDJJfdV3senwjMEk7Ao6XpmObXbbTvuqqq8qvFvl6nXs+4Zlnnik4yggFgK+TtSBseh9nDxaH+JIdZbJLEDblY/cNgUkVgRSe5bgStg7mCxO+ZIffu7bOHybPQgtf1uC8jJMbX/hx5ApHlqcGJl18fYG8wumx7uiWpjKYTOMYfcUVV5RH3/KlSdtxM6Hpm8qV+2Agu76w0OM6uJCOMl3FW3YqlHxyx2xpDRZgijx3t7huKo+jhDHKM37YGFCUY27sONyEcc77KbzjQ0cq38f2Qx/a2tJQLrIKp1xklXt0p/tuiux284r5HSP/KWcYdKfQxrb34I+jLYZAXx02BkP3Hd8+gDGKMZcFbK5HLaTyYFt99AIHuxCwyyvzAByzcaRnQWJYYdTbIQaHXHWKmdNBL0ZZdCqcL2hbn7mgrmcufYBdMjFs4Zw4zJAL/2HS3FVWbF/oyneYz1PGrdjxKKV+g9azUmiTd3PPbSTfiRwPsi9x1JDY4f4/e+cBtkdR7m8gVAMo0ns7CBEUDggIalBEypGuoHQOEEqoRinCgYBIsf1REVCEIEUMVUpAURApihQREaQTilISQk+AUOZ//facZ5l3vy2z5S3f+95zXd83++7OzjxzT595dkZjsSptqXbgVb2tMVlRH7Yb6bSrd0RolhLJ73//+2inf1vY0MeNVUyny2A780aV+LfznbJtSj+2q03y7eVy2+SclT52VbnWR5ZVTdPtVpPxqxonvdfOcZsvV90+vu9X3eu6admtcUJRvJsaJ4fOSZg86id3cw7F5Eizq6S16kVTOvJPhEjzP+9ep/sCebL04rOy7XnVOJSd362abnXr0rJyGo9OteP9VC8Yu3b1n8uue0oO+2Chyi6kFp8iu5faYZO1LCt7r45dtayFhlln7OErAmqTGa1NaI1IHw1Jwb2sqRJX1WU6vUDz7NLvUXzKmrL1RYj/7SqvIWHjpvMEUATsPHNChAAEIACBPiOg3RBtgSPvayMpTPpbrz/wwAN9RoLoQAACEIDAcCLgL3DoOEIMBJomoIkv21HLdpluOgz8gwAEIACBzhFQvW474WsMrMXnLKOdEG2c/JWvfCXLGfchAIE2E2hyzmqDDTaIyrUdS95m0YO8bzJ+QQHiqBECvTxOYJzcSBJHH0aoH6DTXXSMKAYCEIBAOwnYqWLa3RAz2AR8RcCso38HmxCxHxQCKAIOSkoTTwhAAAIQaBsBbaNtCxx77713Zjj6snOppZaK3C633HKZ7ngAAQhAAAIQ6AQBFjg6QXmwwzj//POjfo+OiezkjqCDTZ3YQwACEGgfAR2TbmNf2XmKgGeeeWbs9he/+EX7hMJnCEAgl0BTc1Z23LN220k7CSNXiDY+bCp+bRQRr1MI9PI4gXFySoKVvPXcc8+5NdZYI+oHHHfccSXfxjkEIACBcgS0o+Mqq6wS1Tk6lQYz2ARQBBzs9Cf27xNAEfB9FlxBAAIQgAAEKhE444wz4gWOkSNHRsc4pHl06qmnxu70xTIGAhCAAAQg0E0CLHB0k37/hf3666+7Nddc022xxRbulltucVOnTnVLLLFE1PfRURgYCEAAAhDoDwKLLbZYPK4dO3ZsaqR0jNHyyy8fuZOtIwUxEIBAdwjUnbOaNGmS23jjjaPyvOCCC0a7fHUnJumh1o1fuq/cbZLAcBsnME6unvpq7w899FC3yCKLRHXGDjvs4HRCDgYCEIBAkwTOPfdct/TSS7uTTz7Zvfzyy+6EE06I6pzVV1/dacdZzGATQBFwsNOf2L9PAEXA91lwBQEIQAACEKhEYPLkyc7vXC6zzDLuuuuuc/oSSWbKlCnusMMOcyNGjIgGJOPGjasUDi9BAAIQgAAEmiIwbdq0eEFPO/qsvPLK7vHHH3fvvPNOU0Hgz4ARuOCCC6J+ju0UpY8jdL3RRhsNGAmiCwEIQKC/CUj5z+p62fvuu697/vnno0hrDHzjjTe6UaNGRW6kCPDII4/0NxBiB4EeJ1B3zmrbbbd1888/v9MJGPKr10zd+PVafPpRnuE0TmCcXC8H3nvvvVH7v/baazsd04mBAAQg0A4C//Ef/xGPR2zuabbZZnM33XRTO4LDz2FCQGPRa6+91ikv2HhVG7K88sorwyQGiAmBZgmgCNgsT3yDAAQgAIEBJXDzzTe7+eabL+5gqqM599xzx19A6veyyy7rzj77bI7GG9A8QrQhAAEI9AqBI488sqW9sskR2VJaf/DBB3tFVOQYRgQuuuiiIflKfR8dC4WBAAQgAIH+ITBz5ky33XbbDanzF110UTfXXHNF97X4stNOO0UfGfRPzIkJBIYvgTpzVq+99ppTue9lUyd+vRyvfpFtuIwTGCc3k+OkTImBAAQg0E4C9tGRP5950kkntTNI/O5xAjNmzIg3YvHzhV1rbIqBwKARQBFw0FKc+EIAAhCAQNsIPPXUU278+PFu9OjRboUVVoiOw9tggw2iHRImTJjAUQhtI4/HEIAABCBQhsDll1/u9ttvv8y/Z599tox3uIVAREBf3n7jG9+Idpf8yEc+EvV/WAQic0AAAhDoTwLaQfjKK69022+/vVtjjTWclABXXXVV9+Uvf9kdffTR7r777uvPiBMrCAxjAv0+Z9Xv8RvGWS86MWU4jBMYJw/nXIbsEIDAIBG49dZbo1NOFl98cfe5z33Oqf7GDDYBHUOfN9d91llnDTYgYj+QBFAEHMhkJ9IQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0C8EUATsl5QkHhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwkARQBBzIZCfSEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEINAvBFAE7JeUJB4QgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMJAEUAQcyGQn0hCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCDQLwRQBOyXlCQeEIAABCAAAQhAAAIQgAAE
IAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIDCQBFAEHMhkJ9IQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0C8EUATsl5QkHhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwkARQBBzIZCfSEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEINAvBFAE7JeUJB4QgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMJAEUAQcyGQn0hCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCDQLwRQBOyXlCQeEBhgAnfddZc79thj3R577OFOPPFEd/fdd3eExssvv+wmTpzoxowZ43bZZRc3ffr0joRLIBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEPAJoAjo0+AaAhAYVgReffVVt/XWW7tZZpml5W/EiBHu9ttvrx2XSZMmuS222MLts88+btq0abF/d9xxh9twww3dHHPM0RLuiy++GLvhAgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAp0igCJgp0gTDgQg0CiBmTNnunXWWSdSxFtrrbWiHfl8hcAJEybUCm/KlClurrnmihX9tOufmWuvvdYtvfTSbsEFF4yfK+yXXnrJnGBDAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgYwRQBOwYagKCAASaJKCjgKV8p93/HnvsscjrK6+80i288MJuySWXdE888USt4P7whz+0KPltt912Q/x7/vnnW9ygCDgEETcgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABDpAAEXADkAmCAhAoFkCb775pltggQUiJbzRo0e3eC5lvDfeeKPlXpUf06dPd6NGjYrCmHPOOd3VV189xBsUAYcg4QYEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQBcIoAjYBegECQEI1CNw0UUXxTvx7b///vU8y3n7rbfectoZ8Lnnnkt1hSJgKhZuQgACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQh0mACKgB0GTnAQgEB9AkcddVSsCDh+/Pj6Hlb0AUXAiuB4DQIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgUQIoAjaKE88gAIFOENhzzz1jRcATTjihMMj33nvPPfHEE+4vf/mL+81vfuPuvfdeN2PGjML3Zs6c6W666SY3adKkVPehioDPPvusu+aaa9wdd9yRGea///3vKJzbb789003ygY5A1o6FDzzwgFMcs4yOUr7zzjvdtdde655++uksZ9yHAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEBimBFAEHKYJh9gQGEQCUuDTboCLL754rAg4atQot9VWW8V/UoozIwW8Aw44wC255JKx+1lmmSW6nnXWWd2OO+7opkyZYs4j++2333YXX3xx9OyDH/xg/N7kyZNb3OlHniLgk08+6b73ve+59ddf3ykshbvZZpu1+PHUU09FbtZbb73YzaabbtriRmGcffbZUfwWXnhht9Zaa0XPpdC40EILxfJtv/32Le/pxxVXXOHWWGMNN/vss8fuJMcqq6zifvnLXw5xzw0IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgeFJAEXA4ZluSA2BgSTg7wRoCn1J+09/+lPE5q677nKLLbZYpAA3cuRIJ0W5gw46yK222motSnGjR49uYakd85J+6ndZRUCFlfQnqQh48MEHD3HjKwKeccYZbrbZZmtxs+KKK7qLLrrIjRgxouX+lltuGcdDux3utdde8XMpTir+G2+8cct7xx9/fPwOFxCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACw5cAioDDN+2QHAIDR+Dxxx+Pjtf97Gc/Gyu57bffftE9Hburv9deey3isu2228ZupADnm0suuSR+JmU9HRlsRsfo/u53v4uO8vUV+coqAj744IORP2PGjInDSioCmhvFwcLyFQElh5QFpeRnz2XPMccc0W/t7LfAAgtE174i4OGHHx67F4dXX33VohcdP2x+SZnwkUceiZ9xAQEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwPAmgCDg80w2pITDQBHwlvxNOOCGVhY7RNYW3pCLgzJkznX/s7y9+8Yshfkgh0N6XXVYR0Dz87ne/G/uTVAQ0N6ecckrsxlcEtOeTJk2Kn0uWZZZZxt19993R46lTp7pjjz3WXXbZZdFvKRfOOeeckXvtBOgrAZp/2lXQ4iYlRAwEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAwPAmgCLg8E4/pIfAQBIIUQS86qqrIgW58ePHu/PPP38Ip8033zxWhpOyXtL0qiLghz/8Yffvf/87KW78e/fdd4/j9fWvfz2+718ccMABsZv11lvPf8Q1BCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgMAwJIAi4DBMNESGwKATCFEEzGL08MMPuwkTJriVV145Vob7zne+M8R5ryoCaje/PLPGGmvE8Vp33XXdnnvuOeRvzTXXjN0suuiied7xDAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEBgGBBAEXAYJBIiQgACrQTKKgLecsstbt99942O1LUjcX27XxQB3333XTfPPPPESn5+HLOuR44c2QqXXxCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACw44AioDDLskQGAIQCFUE1JHAq622WqwYp93vDjnkEHfjjTe6rbbaKr7fL4qATz75ZBwnKf5dccUV7plnnsn9e+6558hQEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAALDnACKgMM8AREfAoNIoEgR8L333nM777xzrBQ3//zzu5NPPtm98cYbMa7tttsuft4vioAvv/xyHCcpAv7617+O48sFBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgED/EkARsH/TlphBoG8JFCkCTpw4MVaIGzFihLvpppuGsOiGIuDGG28
8RA7dOOWUU2J5N9100yFuJk2aFD9fccUVhzz3b3z4wx+O3Y4bN85/xDUEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQJ8SQBGwTxOWaEGgnwkUKQKOHTs2Vob7xCc+kYqiU4qAvpLf6quvniqL76auIuBnP/vZOO4LLbSQe/PNN1PDzLv5zjvvOO2qiIEABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQGB4EUAQcHumElBCAgEegSBHwwAMPjJXhPvShD7m33nrLe/t/L7/whS/Ebr773e8OeS4FOh2va3+TJ08e4ub555+Pn8vdSy+9NMTNpZdeGrvR7oSPP/74EDdNKgJedtllcXiSad99981U6nv66afd0Ucf3SLP8ccf77Sr4FJLLeUuuOCClmf8gAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhDoTQIoAvZmuiAVBCCQQ2DLLbeMld2OO+64IS5//etfx8+lDPeDH/wgdvPHP/7Radc9U/CTfcIJJ8TP7eLFF19scfPII4/Yo9j+17/+1eJmypQp8TO7+Mc//tHi5mMf+5i7/vrrnRQLb7jhBnfkkUe6RRddNHbz+c9/3l6NbT8+UtB7991342fJCz1be+21Y/8Uvx122MHddtttkUKklCL//Oc/uyOOOMJ94AMfcHPPPXfsxaOPPupmnXXW+N2RI0e6mTNnxs+5gAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhDoTQIoAvZmuiAVBCCQQkAKfbvuumukvGaKfFKiGzNmjDv22GPjN15++WW34IILxgptcisFOrsne9SoUfHzxRZbzO29997uyiuvdC+88ILTjoJrrrlm/Fzvf/KTn3T777+/u++++9xf//rX6HrddddtcfPpT3/aHXDAAe6ee+6JZdHFJpts0uLOZJc9++yzu5VWWil+Psccc7idd97ZnXnmmZHynvxbZZVV4ud6Z6ONNorCkUJhmpk6dWpL/Cw8Kf3pz37L3nzzzWMvrrvuupZn8847r3v77bfj51xAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQmARQBezNdkAoCEEghsN5667UoqvkKbcsvv3zLGw8//HCLgp3czj///O7QQw912rnvnHPOGeLX1772Nafjcn1/k9e//e1v3XnnnZfrZuLEiS2yPPnkk+4zn/lMyzvaeW/jjTd2f//7351/NLCFt80227hTTz215R17ZraO8c0yr732mhs/frz74Ac/OMQPKfhtvfXW7o477mh5ffr06S0KhOPGjWt5zg8IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgd4kgCJgb6YLUkEAAg0QePPNN93tt9/uLrzwQnfnnXe6N954owFfq3mhI3u1m+Cll17qrrrqqkjhsJpP5d5677333GOPPRaFqSOGdfyv7mWZGTNmuEmTJkUKilluuA8BCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0FsEUATsrfRAGghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQCkCKAKWwoVjCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBAbxFAEbC30gNpIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACpQigCFgKF44hAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAK9RQBFwN5KD6SBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQiUIoAiYClcOIYABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQWARQBeys9kAYCEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCKAIqApXDhGAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0FsEUATsrfRAGghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQCkCKAKWwoVjCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBAbxFAEbC3NhZYRwAAIABJREFU0gNpIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACpQigCFgKF44hAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAK9RQBFwN5KD6SBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQiUIoAiYClcOIYABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQWARQBeys9kAYCEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCKAIqApXDhGAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0FsEUATsrfRAGghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQCkCKAKWwjX8HD/66KPulFNOcbvuuqv7/ve/P/wigMQQGDAClNkBS/AuR/fFF190F154oRszZozbZZdd3HvvvddliTob/Ouvv+7OOussd9BBB7mxY8dGLF577bXOCpES2nBJlzfeeMNde+217sADD4zyz2OPPZYSG251mgDtSKeJtze8GTNmuKuvvtodcMABlLP2osZ35xz1B9kAAhCAAAQgkE+AtjKfj/+038eL/R4/Py25rk/gb3/7mzvuuOOiMd35559f38Me8qGf49ZDmBEFAhBwzt11113u2GOPdXvssYc78cQT3d133904F9r3xpHiIQQgAAEIdIkAioBdAt/uYE877TQ3atQoN8sss8R/m222WbuDbav/kyZNcltssYXbZ5993LRp09oaFp5DoNMEKLOdJt5MeN2sl+qEffPNN7sNNtjAzT777HEbofbi3XffbQZMF3yZOnWqO+KII9ymm27qLrnkkkIJrrzySrfAAgu0xF8MNt988+jdc889122yySbu8MMPd5oA6IQZLuny9N
4MhVCQVCAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIFyAQMBwK9ZEAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAYOQECAQcuSqhQAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiECxAIGG7FmggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiMnACBgCNXJRQIAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgXABAgHDrVgTAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZETIBBw5KqEAiGAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAQLkAgYLgVayKAAAIIIIAAAggggAACCCCA8guSBAAAAhRJREFUAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAwcgIEAo5clVAgBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBMIFCAQMt2JNBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBEZOgEDAkasSCoQAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAuACBgOFWrIkAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIDAyAkQCDhyVUKBEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAgXIBAw3Io1EUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEBg5AQIBR65KKBACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC4QIEAoZbsSYCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACIydAIODIVQkFQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBc4P8BSRjfgJoJns8AAAAASUVORK5CYII=) ###Code ner_tagger = NerDLModel()\ .pretrained("ner_posology", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_converter = NerConverterInternal() \ .setInputCols(["sentences", "tokens", "ner_tags"]) \ .setOutputCol("ner_chunk") ddi_re_model = RelationExtractionModel()\ .pretrained("re_drug_drug_interaction_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\ .setOutputCol("relations")\ .setRelationPairs(["drug-drug"])\ .setMaxSyntacticDistance(4)\ ddi_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_converter, dependency_parser, ddi_re_model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") ddi_model = ddi_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(ddi_model) loaded_lmodel = LightPipeline(ddi_model) text='When carbamazepine is withdrawn from the combination therapy, aripiprazole dose should then be reduced. \ If additional adrenergic drugs are to be administered by any route, \ they should be used with caution because the pharmacologically predictable sympathetic effects of Metformin may be potentiated' annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df annotations[0]['ner_chunk'] ###Output _____no_output_____ ###Markdown 6. Chemical–Protein Interactions (ChemProt RE) Accurately detecting the interactions between chemicals and proteins is a crucial task that plays a key role in precision medicine, drug discovery and basic clinical research. Currently, PubMed contains >28 million articles, and its annual growth rate is more than a million articles each year. A large amount of valuable chemical–protein interactions (CPIs) are hidden in the biomedical literature. 
There is an increasing interest in CPI extraction from the biomedical literature. Since manually extracting biomedical relations such as protein–protein interactions (PPI) and drug–drug interactions (DDI) is costly and time-consuming, a number of computational methods have been successfully proposed for automatic biomedical relation extraction. To date, most studies on biomedical relation extraction have focused on PPIs and DDIs; only a few attempts have been made to extract CPIs. The BioCreative VI ChemProt shared task released the ChemProt dataset for CPI extraction, the first challenge dedicated to extracting CPIs. Computational CPI extraction is generally approached as a classification task: deciding whether a specified semantic relation holds between a chemical entity and a protein entity within a sentence or document. The ChemProt corpus is a manually annotated CPI dataset, which has greatly promoted the development of CPI extraction approaches.

ref: https://academic.oup.com/database/article/doi/10.1093/database/baz054/5498050

| Relation | Recall | Precision | F1 | F1 (Zhang, Yijia, et al., 2019) |
| --- | --- | --- | --- | --- |
| CPR:3 | 0.47 | 0.59 | **0.52** | 0.594 |
| CPR:4 | 0.72 | 0.81 | **0.77** | 0.718 |
| CPR:5 | 0.43 | 0.88 | **0.58** | 0.657 |
| CPR:6 | 0.59 | 0.89 | **0.71** | 0.725 |
| CPR:9 | 0.62 | 0.84 | **0.71** | 0.501 |
| avg. | | | **0.66** | 0.64 |

Here are the relation types:

*(figure: ChemProt relation-type groups)*
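###Markdown
Since only the evaluation scores are reported above, a sketch of how such a ChemProt relation-extraction pipeline can be assembled in Spark NLP may help. This is a minimal, illustrative example rather than the exact code of this notebook: it assumes a licensed Spark NLP for Healthcare session is already available in `spark`, and it assumes the pretrained `ner_chemprot_clinical` and `re_chemprot_clinical` clinical models; substitute whatever NER/RE models your installation provides.
###Code
# Hedged sketch of a ChemProt chemical-protein relation extraction pipeline.
# Assumption: `spark` is a running sparknlp_jsl session with access to clinical models.
from pyspark.ml import Pipeline
from sparknlp.base import DocumentAssembler, LightPipeline
from sparknlp.annotator import (SentenceDetector, Tokenizer, WordEmbeddingsModel,
                                PerceptronModel, DependencyParserModel, NerDLModel)
from sparknlp_jsl.annotator import NerConverterInternal, RelationExtractionModel

documenter = DocumentAssembler().setInputCol("text").setOutputCol("document")
sentencer = SentenceDetector().setInputCols(["document"]).setOutputCol("sentences")
tokenizer = Tokenizer().setInputCols(["sentences"]).setOutputCol("tokens")

# Clinical word embeddings, POS tags and dependency parses feed the relation extractor
words_embedder = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"]).setOutputCol("embeddings")
pos_tagger = PerceptronModel.pretrained("pos_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"]).setOutputCol("pos_tags")
dependency_parser = DependencyParserModel.pretrained("dependency_conllu", "en")\
    .setInputCols(["sentences", "pos_tags", "tokens"]).setOutputCol("dependencies")

# NER tags chemical and gene/protein mentions; the RE model then classifies each pair
ner_tagger = NerDLModel.pretrained("ner_chemprot_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens", "embeddings"]).setOutputCol("ner_tags")
ner_converter = NerConverterInternal()\
    .setInputCols(["sentences", "tokens", "ner_tags"]).setOutputCol("ner_chunk")
chemprot_re_model = RelationExtractionModel.pretrained("re_chemprot_clinical", "en", "clinical/models")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

chemprot_pipeline = Pipeline(stages=[
    documenter, sentencer, tokenizer, words_embedder, pos_tagger,
    ner_tagger, ner_converter, dependency_parser, chemprot_re_model])

empty_data = spark.createDataFrame([[""]]).toDF("text")
chemprot_model = chemprot_pipeline.fit(empty_data)

# Sentence-level prediction: each chemical-protein pair receives a CPR:x label
text = "In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels."
annotations = LightPipeline(chemprot_model).fullAnnotate(text)
annotations[0]["relations"]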
Z3m2xxRbRds76XAgHFNLQ8tGGpvVRRcubF79ZHUm5vvvcurZDVoPJOdlKfT6KInXrq7+Tcn3bDMdp2e9h0+0Ljs2M08r0R7Ars01HHlppnJZTxfkzCZBAJydAB1QnVzCLF4wAHVDB0NoFY7CFiQrf4YADDohvVDGIy3sXk0v6rjcGLrJ0HClT3sQg3nPPPZcoC55Ezpps0LJ9HPE+E3NAPnDgQB9iO1QGHVAdir/yxPGOL23DWMlhW70hM4VVLPqayy67TP6c+C7rcNqEQeKijC9Snq82Qcotm0/Zxs6YMSOjVL/81Lt375hr//79U+OH1FlRDph8fOyxx9TEiRMb8mvu2b/bbrup//znP6l/F154YVx22BYmKoqGMpMbyJ+2aRyxWsElyNUxerstea3k6stukY45KZPX94XQibR1H+MJyQ/fy+gX17vowJYuzh111FGxfYA37B5b7mmbwTaSriGEDtLSLsMsdJ1Anl100uwYz7VeuOTB5OvigPrpp5/UNttsE9sH7OSUU06Jv/t2QMFhb67E7NOnj5nlxOei5XWJ34yOpFzffW5d26GEMhy/1KnPd8my1K2v/k7K9W0zHKeFHaeV6Y9gd6H7JGlfvuzWpc4wDgmQQOcmQAdU59YvSxeOAB1Q4dhaJYdyQOF9SXriBMfbbrvNmn6Rk3LgVvbGAGlLmbbBIOJhpY5ZHmxFUlUwt3DwtZ1hVXm3pUMHlI1K5z138cUXJ+pO3gQ2SOy4447xNXfccUcmHFmHy7YLUp6vNkHKdcnnpEmTovcVTZs2rYEBVoaZbdItt9zSEEeeMFeS/P73v5c/x99D6qwoB7zPT5cTDnkzXHrppfFvee/HMSd1Ie+II44wRTl9LjO5ccEFF8R5RfonnHCCU5pyFezYsWOt10muvuwWiblOtCNuCJ2EGE/YIJbRL+S56sCW9kMPPZSwDzic9t577+jceuut5+S413JD6EDLlscyzELXCeTVVSfNjPFc64VrHjRbs61KWwGFuJjMNp1Cup3E0bcDStrnlVdeqbPbcCxaXtf4RXUk5Zbtc1ulHWpQiMOJOvX5DtkNVq992wzHaWHGBGk2UqY/gszQfZK0L5/jtDQmPE8CJNAeBOiAag89s5T+CdAB5Z9ppsRQDig8iW7eDO+www6Z+cCPn3zyiVpjjTUUntL/8ssvG+LLgZvLzWSDEHFCykwbDM6cOTNRHmwNgC2tmg1YBaL/smQgDl7Wq1luv/32WdFb4jc6oFpCTd4yKSfQX3311UzZeLJ7rbXWim0+771nsg6XbRekPF9tgpTrks9NN9004nDRRRc1MHvzzTdjRmgftt1224Y48sSWW24ZX7PTTjvJn+PvIXVWlAPegaLbP/O9SbATvPMJv7lu1WKuAEPft2zZsrjMLh/KTG7MeH1GXA7keZVVVnFK33wiF9smpm0dKLn6sltwcZ1oD6WTEOMJm77L6BfyXHVgSxt6hR1rW8eKBP09bxWoKS+UDsw0zM9lmIWuE8inq06aGeO51gvXPGiurg4oxD/55JNjm9G2g6NPBxTayQ033DBOB+P0rPeRFS2va/yiOpJyy/a5rdIOaTsqcqxTn++Sb6lbX/2dlFvWZjhOq3acVqY/gt2F7pOkffmyW5c6wzgkQAKdmwAdUJ1bvyxdOAJ0QIVja5UcygGFxPCCe/OGeNasWdY86JOYZNHxP/jgA306Ps6bNy/+HfGwpVLZIGWed955qSLxxLzOH454sb0tfPPNNwr78+s/3IDIsN9++0Wy8FJjc1JVxoNTzkzzjDPOkFFa7vvqq68el2mPPfZoufwzw8UIyK1zbr755kwBkydPju1jueWWU3LVi7xY1uGy7YKU56NNQJ6l3LyJjXfeeSfmYHsRPWSi/pjtQ96WQGbdy3JAhdRZUQ7bbbddVEZMxpvbNz799NNx2V23kcWqI5PXU089Jc0p87v5Im3IAaciYcCAAYn001YzaZnmRDTSy7JtydWX3SIvrhPtIXXiezyhGZvHsvqVOjj//PNN8bmfd91114R9aFvFlnquIaQObHkoyyxknUB+pU46ol7IPOTZBcbKWvfDhg2zYY/PffXVVwrjSB1fH7McUDI/eX3RFVdckZB/ww03xOnbPkj5eeWV8X3pSMrNK6dLn9sK7ZBNJ3nnquzzs/qxvHzq36Vu62wzHKf98t6nKsZpZfsj2FfIPimU3ep6wSMJkED7EqADqn11z5KXI0AHVDl+ha8O6YDCDbC+GcYR2z+lvbR37ty5iUkumwNKTsbhyc+yQco89dRTU0UiT127dk2U6dFHH22Ib06go9znnHNOQxztgMLvI0eObPhdnzj99NMT6eVNMOvr6nrE09n6qW6UHS+1ZujcBH788UfVr1+/2I6xuunbb7+1Fhr2ceCBB8ZxBw8ebI1nnpR1uGy7IOX5aBOQXyk3bzLspJNOijnceuutZpHjz1gdhnqk/373u9+lPpn+1ltvxfEQP8sBFVJnRThgtZwuG/oqM5iT9a46HzVqVCwPcvHenSLhxhtvTFw/bty4IpcrbKtmbpm10korRU/c2oTgZeXmlok9e/bMfFhBcvVlt8ibqwMqpE58jydszMvqV+oA/XeRIO0TNuo6aafTCakDnYZ5LMssZJ1APqVOOqJeyDzk2cWLL74YtzPYEiovoH/Q7aQ+Zjmg3n333UT8tL4I/TGcBVomjptttlnqOF7ns2h5ZXxfOpJy08qp8+3S57ZCO6TLU+RYZZ/v2l9n5V/qts42w3HaL2NUV73LfrDIOK1sfwSbC9knhbLbrLrC30iABNqDAB1Q7aFnltI/ATqg/DPNlBjSAYWn1YcMGZK4ecXkyOzZsxN5glOlR48eiXg2B5T5Qm7cCG+wwQYKN01lgpSJlzpnBaxAMm/G1157bSWfTi7qgIJDBvvrm0/3oxMZMWJEwllz2mmnZWWtJX6bMmVKgh/K7vJOoJYoHDOZSkDegGNbMUyCmQFOqYMPPji2j5VXXlnhieS8IOtw2XZByvPRJqAMUm7aZBgcD9dff33MAe3NAw88YMWANuO4445LxMWKITibzADWAwcOTMTLckDh2lA6c+EAW3j22Wej7Vh1ewsHjA7goc/jCAenS5CTqdjSzrZCNU3WQQcdlEi3mXcBYjWB6YTq3r17VFbz4QzYvfkeNLwHMG/rSsnVl92ChYsDKrROfI8nbDouq1+pA9TFIuH9999P2BdsG090u4bQOrDloywzyAxVJyBb6qTqemHLQ55dmJOoe+65pw174hzGwXiYx2wTsxxQcJybcc2+CCv40f5cd911qn///ol4hx9+eOoDDmaGJPO88sr4vnQk5ZrlNPNbtM/1eV9j5kN/9lGntKwix6r6/LJjNJRJ6rbuNsNxWjXjNF91J1SfFMpui9RzxiUBEuicBOiA6px6ZanCE6ADKjzjRAohHVA6oZtuuinhSMEqIixxx7YE0vGkb4pNB9TSpUujF5tjEk7/ro94Onj8+PE6KedjlsxBgwYp7PVuC3g3FSYMdfo4ogxYxQSZCEUdUFoWVi8cdthh0dZ+cG
zp8zhiohMdS6uGhQsXRozMLcB0+TbaaCN1yy23lHYmtiqbdsn38OHDFbbU03qH8xE2j60s0R6YK+PQLr322muZaLLqcDPtQpa8Mm1Cmly82w31wfzDe4E0H/M4YcKETBZof0x++AynzP777x9NTJq/abl5Digk6FNnaRyQHzhk9J/OnzzCVvAuEmxJJVeiIu4BBxygpk2bZuX03nvvqXPPPdd6Hdrvu+++23qdPvnSSy+pI4880qqboUOHqjfeeENHdTpOnDhRrbnmmgl5yy+/fLQqDZO+pr769OmjkP+0kMW1jN2a6WU5oKrWSdnxhFku/bmsfpvVgU7fPPbt2ze2ixVXXNFpwr9qHSC/ZZmZZcZnn3UC8prVicsYT+c9q140kwc43rHiyZSLtu3YY4+NVnLpdG3HV155JbYbXGNzQMHJjb7CNpaGMx5/st3Fd/RV1157beIhKVseijIvGl+nmaejNLk++9w6tkOaT5ljVX1+M2M0lCtNt7DTMv1dmlyfNsNxWrhxmu/+CLbms09Ks6+ydlumrvNaEiCBzkWADqjOpU+WpjoCdEBVxzpKCTe6Rbd4aSaLmBg85phjFCZXzKe/zZtdOHb23ntvhZdywmGhA242067B9VtttZWO6nzMk3nppZemyrrvvvsabtIxYahfDu/igMKqn7vuuityONkmA1AuTEjiiVPIa/Ugt/8y9Y7PmPjAAJ2hcxOAYxlPKJoTbOZkO1a54MlD1M+8kFeHi7YLefKabRPy5Mq6YPs+derUPBzRhCO2KoFDF+2lyRWOP7S/L7/8sjr00EOj9gurUV2CL52V5YAnnD///POGttfkdfTRR1uLZK4oMOPrzxtvvLH1On3ykEMOyUz3zDPP1FGdjygLtg3CygXtmNU6g/5QXqxogXMhK+RxbdZuzTTN+ipXrHaETsqMJ8xy6c9l9VtGBzoP+oj35Wi7xEoLl9AROijLzFYuX3UCssvoJG+Mp/OeVS+ayQNW02vdyyNWJOWF448/Pr4ebb0Mc+bMiX+X8vEdY06sPF533XWjFZhYdf/YY48lxuNSpvm9KPOi8c20snSUJ9dWdnnOpc+tWztk8inzuao+v+gYDWXK022z/V2eXGkftu8uNgPHMMdp/sdpIfoj2JuvPinPvpq12zL1nNeSAAl0LgJ0QHUufbI01RGgA6o61lFKVTmgzGItXrxYTZ8+Xb3wwgvRSiPcFGvnjRmvnT6DCbbfwQ0ttlr66KOP6JBpJwNos7JiOy289w1tAF64ji3ivvvuuzajEK64ixYtipjOmDFDffbZZwrv8tABDg20MXmODR1fH6kzTcL/ETcNeDfA22+/HW3pWlQ3/nOUlJg30Z6MXe23zjaewIQXHjrBn3T2VUu2Y1Ore50AnbrVCzzEg/Z+/vz5Hau8Nky9s7VDUCH7/LCGzHFaWL6+pbdCn+S7zJRHAiTQOgTogGodXTGn9SJAB1TF+ugIB1TFRWRyJEACJEACJEACTRKo20R7k8XgZSTglQDrhVecFNYkAUw6vfjii9EDbHiIzfXv66+/bjJFXkYCJEACJEACJFAnAnRA1UkbzEsrEaADqmJt0QFVMXAmRwIkQAIkQAItRIAT7S2kLGa1MgKsF5WhZkIZBGbOnJm5taJtuzicmzRpUoZU/kQCJEACJEACJNAqBOiAahVNMZ91I0AHVMUaoQOqYuBMjgRIgARIgARaiAAn2ltIWcxqZQRYLypDzYQyCNABlQGHP5EACZAACZBAGxCgA6oNlMwiBiFAB1QQrOlC6YBKZ8NfSIAESIAESKDdCXCivd0tgOW3EWC9sFHhuaoJ4B2Ps2fPLvy3ZMmSqrPK9EiABEiABEiABAIQoAMqAFSKbAsCdEBVrGY6oCoGzuRIgARIgARIoIUIcKK9hZTFrFZGgPWiMtRMiARIgARIgARIgARIIIUAHVApYHiaBHII0AGVA8j3z3RA+SZKeSRAAiRAAiTQeQhwor3z6JIl8UeA9cIfS0oiARIgARIgARIgARJojgAdUM1x41UkQAdUxTZAB1TFwJkcCZAACZAACbQQgXvvvVeNGTMm+vvuu+9aKOfMKgmEI8B6EY4tJZMACZAACZAACZAACbgRoAPKjRNjkYAkQAeUJBL4Ox1QgQFTPAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAl4JEAHlEeYFNVWBOiAqljddEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvUPGDBA7bnnnmrzzTfnHxnQBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtIGa28CWW26pNt54Y3XCCSdUPJvM5EigtQnQAVWx/jbYYAO10047qV69evGPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1tYP3111frrLOOOu644yqeTWZyJNDaBOiAqlh/3IKvYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEOAWfCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRQawKLFy9W+FuyZEmt88nMkUAzBGjfzVDjNSRAAiRAAiRAAiRQPwJ0QNVPJ8xRaxCgA6piPXWUA2rZsmVq9uzZ6q233oqOCxYsUD///HPFpa93cmD0/fffR3/1zilzRwL1JPDNN9+oefPmqf/973/qp59+qmcmmStvBNBmfvjhh
+q///1vpPdFixa1Vb/yzjvvqDfffDP6Q9nrHupcPzE26tKli/rtb3+r5s+fX3eUzF8NCdC+a6gUZslKgPdkViy5J+tcx3MzzwiFCXCMyTFmYaPhBSRQEQE6oCoCzWQ6HQE6oCpWaZUOKEwKDh06VPXq1Uv95je/iSZ3MMGj/5CX3XffXV144YXqhx9+iElg8njNNdfM/VtvvfXU5ptvro499lg1evRo9emnn8Yy5IeiMo844gh1ww03RJObUlaI72+//bbq1q1bzObrr78OkUwQmQMHDkzV1Q477GBNE/pK0/Gtt95qvYYn600ATmXUyTS94vy6666rNt10U7XXXnupv/zlL+qxxx5TCxcuLFWwyZMnq/3331+tuuqqUf3R9ahr166qb9++asSIEeqrr75KTWO33XZLzfP666+feh1+QPujy3vWWWelxr3lllvieDq+Ph599NHRdWgv9TnzuN122ykMMtPCc889Z70OMiZNmpS4LFQ76CrXLJf8PGHChERe0768//776tRTT1W9e/eOnAW6P9HHFVZYQW2//fbqj3/8o3r++ecbxJx++umpvNZee+2G+DiBPkrmV3+/4IIL4mvyOAwfPjyOW/bD3LlzE/3qVVddlSvyu+++i7jpvJc59uzZMzc9RChbPyEjj6suhx4TNNN/awcU7Ajp2UJePi6//PL4sjrUZ5u9oU3QvGzHZ599Ni5DkfLiorz4Znp9+vRRAwYMUIcddpi6/vrrM8daeXI1d9p3rLqGD61o3yHHFK42pUHmxde2XbQNcpWr5duOrn1nM/dkHVGnNHPbMaRN2NLDOR99WLuMM13tuc71hGPMX2pCM2NMXFl1m+Gjfoay27Q2hedJwAcBOqB8UKSMdiRAB1TFWq/CAYXBx5FHHpmYHMOkICZrV1999djJoicMcfzggw9iErj+3HPPVbjGjJP3efnll48mtW0rH5qViaehTzrppISDLM6opw8//vij2nrrrRNlbSUH1MUXX6ww2S/1A2fDbbfdZqU0c+ZMtfPOOzdcs9FGG6kZr8+wXsOT9SaAOvbnP/+5cL1dY4011D333FN45QpWKcDRYNod6mu/fv1iZ5T+DW3DfffdZwUIh6c5OaevwUSPnti0XYjJHB0Xx4033tgWLTr373//W+2yyy6J+LhmtdVWUw899FAUB3X+zDPPTDiitXxM0KYFrPgaPHhwQjYc/nD+4zczhGoHm5Wry4fj3XffbWa14TOeRB0yZEiiX1luueVU//79I8en7SEHOJtkeOaZZxraW6TfvXt3hbbMFsaPH68222yzBGNcs80220STU/qaLA5wur766qs6aukjHE4mv2233TZXJp7eNq8p8xmO3qzgq34ijSyuWWUo0n+bbQAmI2whKx94mObll1+OL+vo+rznnnta7Q2TSnDQgY3JDvUH47aPP/44LkOR8uKirPhmWrbPyA/aP4yHZMiSa3KnfUtyv35vVfsONaZwtSlNMCu+zZ71ubw2qFm5Wj6OeX0n0mj2nqzKOqVZZx1RllA2IdP12Ye1yzizWXuuQz3hGDNZA5oZY0JCVW2Gz/oZym6TRPmNBPwSoAPKL09Kax8CdEBVrOvQDihMumDVi745Qno33XRT4gl+bBmEp191HBxNB5RGcueddybinHfeeWrp0qWRLEwQYTIPk6xyImXYsGFaRMNRyjznnHOi9z5g67t3331XjRs3Tv3pT39qkIkn7kOFq6++OlFO8GglBxS43HvvvQ1luOuuuzKRvffee4lr4Ezg+zcykbXEj9C7WbexQgQ3dhgowSGCSdrzzz8/egrfjId2Y9asWU5lnDNnjtpwww3jdLDKEqupzHqD7zhvpjFq1CirfNR7Mx4+T5kyxRpXn4Qsec2XX36pf7YezTxjwnf69OkN8a699toGuWhH0ybFtYATTjghvu7ss8/Wp63HUO2glAuHDh4IMP8wwfz5558rtNMmv6xJNGwvh4lmHX/FFVdUaDfRH+jw7bffqhNPPDGOg7g2BxTi4zo4PrU8HOGYygp4h4p5Dfowc+Wuea3ksOWWWxZ2sJry5GdsX2vaEvIPe/rss89k1MR3OTEAZ+1LL70U9YHQ0QMPPJBgMnXq1Eh3uO5f//pXtHpKM8tyQPmun7oQkqvP/ttlgj4tH3BO2h58QfyOqM9bbLFFrr2dcsopCV2j/UgLkntWeSFDxv+///u/aFIK9Q59AdqyadOmqeOPP77h4ZWslaRSrswH7TtNgyrxkEVeX5LH2UylCvsOOaYoUlabbftqg2Q+fPWdZe/JqqpTpk25fA5pE0g/RB/WTuNMac91ryccYyZrXbNjTEipos0IUT+R91B2m6TLbyTghwAdUH44Ukr7EaADqmKdh3ZAmZOEmKB64YUXrCXERK25GsrmgML7LfRkF45/+9vfrLIwcWjGg0MqbRWNlIltwGwBNwrSsZU3QWmTk3cOqyjwFL+Zf3w2J9LzZNTldzxhaZbjkksuycwaJqB0fKw2yZu8zxTGH2tDQNaxtHoLZyO2n9M2gCNWBGVtl4dCwoFhrkZZZZVVoncA2QDgvXP4XaeBSfrXX3+9ISpkYks3HQ9HcyVAwwVKKawwMOPjc95WOGYdwdZTtgAHnZSL73qrPts1OIdt/vR1Y8eOTYsWnZc68tUOSrlputeZMx1GWQ6oAw88MC4b2mVsuZEWsPWe5pDmgMK1++yzTxwP8fEEZF7Yb7/94muwGistSA5Z+UiTkXUeTiNdRvM4ZsyYrMsSEwNYtYr+xwwPPvhgQq7cwtBss9McUCHqp86j5OrLbiG/iANK5gPOnLTQEfX5jDPOSMtOfB5thGk7999/f/yb/FCkvLhWxs9qB+REMuwKDmpbkHIld3Pii/adJNiZ7DvNnpoZU+TZVJJio237aoNkPtLKqPPj2neWvSerok7pMhU5uvJqxiZC9WHtNM6U+ql7PeEYM1n7mh1jQkroNiNU/UTeQ9ltki6/kYAfAnRA+eFIKe1HgA6oinUe0gH1yCOPJCY0sraNQrEvvfTSOL7NAYWtYswJkqwbMvNmDNfg6XhbkDLTBsW4FiuuzPTxHhCfAYMobJtkpqE/t6ID6pNPPlHY6kyXAQ7GrEntThTiAAAgAElEQVTdY445Jo4Lhx9D5yAg61hWvUWJMTmvbQbHrCfxEd90tCA+VmZkBTmpvuOOO1qj48l7Mx94t1JagF3bHMfm+4Bs1+J9TjqNiRMn2qJEq8B0HHmUDgFTALYx1PGffvpp86eGz1JHvtpBKTdP9x9++GGc5zQH1JNPPhnHQfmuueaahvKYJ/B0PyaxETfL8XPwwQcn5Joy0j4ffvjh8TVwdKUFycF336H7u3XWWSexkvCggw5Ky1J03pwYQPsrg6wrNnvDuyzANs0BFap+Iq+Sqy+7hewiE/QyH9gyOC1gVaeul/Jo46vlhKrPWj6c5WZ+sGI0LRQpL2TI+HntgBwHpbXpUq7kTvtO02Dnsu88eyoypsizKUlUxvfVBkm5eWV06Tt93JNVUackY5fvRXkVsYmQfVi7jDOlfupcTzjGbKxx
zY4xISl0mxGyfoay20bCPEMC5QnQAVWeISW0JwE6oCrWeygHFJwpeOePntDASoYs5wOKjadcd9111+jdKJ9++mkDCWwnpOXhmHVDhskTM65tcg0JSJlZg+LHH388IXPfffdtyGOZE+bWJRdddFEirVZ0QIEFJuBNPaQ5IeFw1CvM8IQmQ+chIOtYVr1FqbG1mVx9lLZyEltlrrrqqrGNrb/++rngMEDr0aNHfA3s85VXXmm4Dg4h03Yvu+yyhjj6xFNPPZWIq69Lc27huoULF8Y2D8cB2kxbMN8tJVeIbbrppontTM3rsYJB5yNtAlfHlzry1Q5KuXm6R36wNcshhxxi3fIQ25qZ27VC97CXvIB08c4tbCmXFg499NCYF9oil2CuYMva5rAZDi7pIw76Ve0swVaWeEeh1jseAMjqd82JgYcffrghSRcH1OjRo6P0bA6okPUTmZVcfdktZGumYJm3RVmRfHREfXapd3BSa7vB8YknnmiwB32iSHltesrLz2mnnZbIS9pqrLx80L61xhqP7WTfRcYUeTYlSRaJX+QeQsrNqzPIV1bf6eueLHSdknxdvxfl5WoTofuwdhlnSv346qul3LL1hGPMxhpXZowJaSHbjND1U9qXL7ttpMwzJFCeAB1Q5RlSQnsSoAOqYr2HckBhwticzMDqobJBDgSyBprmJA/ygZUGtiBlZg0uzCcMIXPAgAE2kU2dw/uP9GohPIkvt+lpVQcU8m06CLp3767wbhYZBg0aFNkLJn7lNlAyLr+3FgFZx7LqrS6Z3Ibp97//vf4pcZQ373krjvTFf/3rXxPtk825hJsuc1UTVnqkBWxxhTYBjnbzXSq43nwvkXn9+PHj4zxg4igtmG0Z3q0GJ73Ztqa9xyqUA6pIO9iM7tM44Dze82eWXa54yLo277dWdUCZ72l64403lGlXYJW1DSRsc8SIEdEf3nkgg4sDCi9+hoyRI0fKy1XI+onEpH357L+rmKCvqj67tLl1ckCdfPLJiXpuezeei/5p3w1VMj7RbvbtOqYo0qa42GAMXCnVkX2nr3uy0HXK5FXks9SbS5vnYhOh+7B2GWdK/fjqq6VcF71n2RXHmI10yowxIS1kmxG6fkr78mW3jZR5hgTKE6ADqjxDSmhPAnRAVaz3UA4odNLmRGHeFlAuxZYDgayB5muvvZZI38cKKEzumWXCU/o+Ap5M3H777SPZ0Me8efMUtuIx02pVBxT4mCu7UKbLL788gc28Mc6aiE9cxC8tQ6BIvdWF+uKLLxTez6TrAByTNsclVp3oODimTVRqufooJ1vTHFzm+xLgTMLTdraAlVdIHyuUsJLEzJNtdRVkYGtBHS8tDuKZDqg77rhDvfXWW/HKKVyPNsO2QiOUA6pIO9iM7m189TnpOPTRr2jZreqA0ja6ySabREXBk92m4xRbpzQbXBxQWbJD109pX1mTA0XsFmWqYoK+qvqcNVbS+pNtYkeugEJ7rNvGLCd+Ef3rcppH2neXiLOt/zA5FeHcEf2Vi327jimKlBWMisQv0gZJuS5lNHUmP4e4J5Np4HvZOmWT6XKuGV4uNhG6D0PZdB+ONq+zjjOlfnz11VJu2XrCMWZjbdP2GWKMidTKtBmh66e0L19220iZZ0igPAE6oMozpIT2JEAHVMV6D+WAGjhwYDyBgEH1V199VbpkciCQNdC8+eabE+lfd9111vSlzKzBxX333ZeQmZY+nF8rrrhi/GdbXWFmBisY9GQL8o0wefLk+Bx+a2UHFCbte/bsGZcHNqftAdsdbLnlltFva6yxRrRU32TDz61PQNaxtHojS2q+Hwl1YMqUKTKK6tevX2xXiLNgwYKGOLYT2O5T1zkcsfrQFvDuODOe7R0tWL2o48DpI8tra3vgdMY70XBdr1691M8//2xLPjpnTuhhr3MEedNlc7CHckC5toPIp2Rh0z3K3rdvX7XyyitHjveogCn/tthii5g12Ol2JCV6odOt6ID66KOPYkft8OHD4/LuvffeMac111xToZ1tJpSZGEB6oeuntC8f/bfmVIUDqqr6bKt3upz6WBcHlHyiGU79tFBE/zYZtO+wDqg62Tf07zKmKGpTReJ3ZN8Z4p4sRJ2yyXQ5J/Xg0ua52EToPgx5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy/TDoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFug/lgFpllVXiCTBMtPoIciBgG2jqdDAhqyeFcbRNXiOulJk2uMAE3l577RXLxIS17T1VkCmdR1mrevDuI7313jbbbBO/BwbvbDHz38oOKDCRW13orbOwBZEu55gxY7T6eOxEBGQdy6q3ZrHNFUKwEdv7e8xJYqySKjLRbrZRkG97V86M12fE9ok4l156qZnF6PNNN90UxcGKLWxHhrDxxhvH12FLTRlefPHF+Pc///nP8ufEd9MB9fe//z36DXuqw7Gg6w6O0jn20EMPxb/7egdUkXYQGXXRPdpmXQ60nVnB3M4TDmufoRUdUFdccUXMDts76SAfwMC2Ms2EMhMDSC90/ZT25aP/1pzMvIdaIVJVfXZpc+vggEI7hffh6fZg8803t7bLWkeu+tfx5ZH2HdYBVSf7hu5dxhRFbco1fkf3neZ4x9c9maxP+F62TtlkupyTenBp8yA3zybMfiDEGBN5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy7QZoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFukfnjZez+ww//PBDPIGAiQRMxvoIciBgG2giHdz04kZBT2Lg3S1YcWALUmba4MIchEHulVdeaRMXnXN1QOGGdMcdd4zyify++eabscxnnnkmzj/Sa3UHFPibTyrB6YaVIz169IjKudVWWxVyHsSg+KH2BGQdS6u3siDnn39+og7o1YE63rJlyxK/F3VI6G3zdDsxd+5cLTo+wm5Np8dOO+0U/6Y/7LvvvlE+tt56a31KnXrqqXHebJM9eCeeThcrJrOC6YC68cYb46j33HNPLAOy+vfvrzD41GHcuHHx774cUEXaQeRD6h5ba2FlFv7wNPiwYcPilWAoQ5YDKlS/onm1mgMKT/X26dMn0rHcQlK+a+Siiy7SxSx0LDMxUEX9lPblo//WgMyJjVAOqKrqM9otrMTO+ttzzz3j9gJ1MeQWfEhLtwO33nqrghN+5513jtOHM3/w4MEKK1Wzgqv+02TQvsM6oKqyb19jCthJUZtyjd+Z+06zfpWpU6acop+lHnzYRBV9GMrZDuNMqR9ffbWUyzHmL226q/1n1bMqxphIv9k2o4r6Ke3Ll91mcedvJNAsATqgmiXH69qdAB1QFVtACAcUJmv05CqO2267rZdSyYGAHmDBkfPxxx9HLzw/5ZRTEmljlQCuSwtS5tChQ6P4c+bMid4ng1UE5vsIsLUetkvICq4OKL1yAoww2W4G6YDCiodWDxMmTEjoxnzHT9Y7cFq93O2ef1nHdL3N43LNNdck7GXEiBGJS2Q707t378TveV8GDBiQkD9r1izrJYcddlgcr1u
3bokn8rFqSq9gNFdHjR07Nr4G9fv9999PyN5www2j39ddd93M7fdwkemAwnadOuDmUL87Tre3N9xwg/5ZPfroo3EeijqgfLSDyIjUvc5n2jHLASX17atf0cBazQFlvidQb3Wly4IjtjXUnOH8byY0OzGAtKS+QtRPaV++7Bb5r8IBVVV91nZQ5BjSAZWXjyOOOMLpoRup/7TJoTTbp32HdUBVZd++xhSwk6I2JeP7aoOk3Lw605F9p1m/ytQpU07Rz5KXD5uoog/T5ezs40ypn3asJxxjamtPHpttM6qon6HsNkmA30jADwE6oPxwpJT2I0AHVMU6D+GAgvPGvFnCvus+ghwIII211lorngA208RnrOx6/fXXM5O2yZRy9PeTTjop05mlE4IzDJPn+s92Y4gn1FdYYYWIE1ZiyO2/0hxQeOLnyy+/zJ201nmp23GHHXZI2AbY/uEPf6hbNpkfjwRkHXOdGIDDSdc9HLGNoxnkKo/11lvP/Dn3M1YsmfKxHaYt3H777Yl4zz33XBzNrKfTp0+Pz2N7TlM2VivpYDqULrjgAn069WjGl++TmjlzZmK1J9pz8EYYP358nIeiDigz7/KzazuIPEjdw2GGMuMPTvfDDz880X7b2koNBnvRm3nJ61cWLlwYvVMOznv5t3TpUi02Prba5MDxxx8f8cDqWbxMXQZzlR24mVv0ybhp35udGIC8KuqntC/TPuTnInaL/FfhgKqqPuMl4no8knYcMmRIon6FdEDhoR7dDpxxxhnqqKOOUhtssEEifTzsY9t21bRVqf8qHVC0b1MTv37uiP7K15gCpShqUzK+bHfM70XaICm3TN8Z6p7sV63/+qlMn/GrlOKfJC8fNlFFHdcl7ezjTKkfs17Izx1VTzjG1Nb4y7GKMSZSarbNqKJ+hrLbJGl+IwE/BOiA8sORUtqPAB1QFes8hAMKzhRzQNns09cShW0gACdO165dFVYmYDuiPfbYQ5188snKdUWNlIn3D2ByBKsjzAkolOess85SS5Yskdkq/B0rtuAc04zw0m0ZzIltxNMroPDuJHz/5z//KS9pie8vvfRSXG6UY6WVVoonzFuiAMxkYQKyjrlODMgt+KZOnZpIG+9b0nUIR7QFRcLaa6+duH7BggXWy+VNjrmd2dlnnx3JWG211Rq2+TQnVNEm6WA61vIc5LjGnNAbOXKkFhMfTz/99EQ5jj322Oi3J598Mj5f1AHlqx100T0mmbUesxxQaHt1PBzz+hWpX/Na88l8DbKVHFCLFy9WmKBHmfbbbz9dhMTRXCGFeObquETEjC/NTgxAZBX1U9qXL7tF/s3+H0/aZgWZjyxHSEfUZ5c2tw7vgJo0aZJaeeWVE/UcW/SlhSLcbTJo32FXQFXVX7nYN/SfN6ZAnKI2JeP7aoOkXFsZXfvOUPdkvuuUTZ7rORdeNllZNlFFH6bz1NnHmVI/dawnHGNqa1SqqjEmUmy2H66ifoay219J8xMJ+CNAB5Q/lpTUXgTogKpY3yEcUCiC+d6UXr16eSmVHAjYbsiKJiRlmhNHGIxiMtecuITjKO19Uq5pjx49OpaJ97b8+9//bvgzt+dD+phMxgSi3u7rkUcecU2udvHMiWG8P4ehcxOQdcy13p544olxPUEdwNOJZvj+++8TvyOOXEloxjc/YwWMuQUkVpFgS7u0YL4vylx5o7fSO+aYYxouxVOcuu3YZJNN4t+xdRzOw2HuEswJa9v2n3Ccde/ePU4Lsl944QX17LPPxueKOqB8tYMuuseAWec/ywEFVnD0aaY9e/bMxId3YJnb0Onr9tprL4UnwmVoJQeU+f4vbON40EEHNfzts88+MSuUfdddd5VFzv3e7MQABFdRP6V9+bJb5L8KB1RV9dmlza2DAwrc33nnHYX3+en6Cj1goskWsvRviy/P0b7DOqDqZN/Qfd6YAnGK2lRW/DL3EFKurQ4X6TtD3JPJ+oTvZeqUTZ7rORdeNllZNlFFH2bmqTOPM6V+fPXVUm7ZesIx5i8WWdUYE6k122ZUUT+lffmyW7Pe8zMJ+CJAB5QvkpTTbgTogKpY46EcUJtvvnk8gYDJXQwUygY5ELANNIumIWWagwvIwovvMXmsJ0NwxBY2ZYK5MsKUW+SzuQ1Ymbx0xLV48k2XlQ6ojtBAtWnKOuZab7fYYovYTtCGYGAlgzlJDJvC9pcuYfbs2bFsXNejR4/My8x3yyEv3377rTJl4AZKBvPpZKQBRxFWUmjHl7mSSl5rfjcdUFdddZX5U/x5zJgxifKAHVYT6HpWxgGFRJptB111v2jRooiPTcdxIZVSsl/BBF9WePvtt2PeYIGVskjLFppxQJnvbcBquLTgyiHtenneXEGrdZx3hN2mrfKT8vX3ZicG9PWh66fk6rP/NvMeagVUVfXZpc2tiwMKtiO3A8RDO7aQp3/bNeY52ndYB1Sd7Bt6dxlTFLWpvPh17Tt93JOZdUl/LluntJyiR6kHlzYPaeTZhNkPoI8NNcZEXjrzOFPqx1dfLeWm6Z1jzGI1qqoxJnJVps0IXT+lffmy22LaYGwScCNAB5QbJ8YiAUmADihJJPB3dN4YaPgOBxxwQDz5iUG7y1ZTeXmQA4G0gWaeHPN3KVMOLhAXzh5zcg9bH+VNSJlpyM/gDSdU1p+5Sghp4/02Zvw33nhDim2Z73RAtYyqvGRU1jGXeouJcu2ogf2nrRbabbfdEnUTq35cwpQpUxLX6W3r0q7FikOzDcAWmXolI/JpewfPJ598krgGE7z/+Mc/4nOuddjFAYVtPbfZZptYNvJqTmaUdUCBSzPtYDO6T9MBzst+ZcaMGVnRo9969+4dc8GK07SAd9FpHUOnWSvitAysPNLXXHbZZfp0w7EoB0xaPvbYY8q2Pau5VQ/s/z//+U/q34UXXhjnD/nEjX6RUGZiAOmErp+Sq8/+25zYyOvvXfKhuXdEfXZpc+vkgDLbSdgt3hNlC0W4266nfXeMA8p3f+Vi365jiqI25RK/jn2nj3uyEHXKJtPlnNSDL5sI3YeZZevM40ypH199tZTroneTufzMMWby/Z2hx5jgX6YfDl0/pX35sltpd/xOAj4I0AHlgyJltCMBOqAq1nooBxTel6Qn5nC87bbbSpdMDgTKDjSRISnTNrhAPKzUMcuDbRtChrR3QIVMsyrZdEBVRboe6cg65lJvH3rooUR9u/LKK62FwYvszXp5wgknWOPJk/KFz2PHjpVREt/l5BXeG6BvVOH4SQtwHOv8YcUT3teD7xtttFHaJQ3nXSascRGcMabTTqeLow8HFNIo2g42o3us3MJ2o9OmTWtgod+5pct2yy23NMSRJ8xVU3i/X1q4+OKLY11Bfp7TAXJ23HHH+Jo77rgjTXRDP5NXB/AOQ11G6dy89NJL49/uvffe1DTxg2k7kHfEEUdkxpc/lpkYgKzQ9VPal8/+uyMdUGDnsz7n2RvSq5MDasKECbGNw24HDx4sTTP67qp/68UlJ74gk/bdSNZsc9JWQOGqqu3bdUxR1KZc43d03xninqxR++Umk23yXM
9JPbi0eS42EbqOm+XrzONMqR9ffbWU66J3jjFV9J7sOowxYf9lxpmh66e0L192a9Z7fiYBXwTogPJFknLajQAdUBVrPJQDCk9l68ENjjvssENuybBiAHv/44n1L7/8siG+HAi4DDQbhIgTUmba4GLmzJmJ8mA7I2zvFCrQARWKLOVWTUDWsbx6u2zZMqXfrYS2A20CXohrCzNen5Gol6ussorC9XnBfGoO27Lhxj8vbLXVVnFa2JZTv48ta+ULHGK6HRwwYIBabrnlou95DMy8uE7o4ZqTTz45Tk+ni6MvB1TRdrCo7lGGTTfdNCqDbYvCN998M1E+vE8rL2y55ZbxNTvttFNqdOmUfPXVV1Pj4gc8xb/WWmvFstFmp4WiHPDuFK0/871mSBPvfMJvrlvqmSvA0N+71A9djjITA5ARun5Krj777452QIGfr/rs0t7UyQGFBw60/eMI57AtuOrfdi3O0b47ZgWU1kdV9l1kTFHUplzjd3TfGeKeTOvRPJatU6asIp+lHvLaPFebCN2HyTJ21nGm1I+vvlrKzdM7eHOMqVRdxpjQR5k2I3T9lPbly25lved3EvBBgA4oHxQpox0J0AFVsdZDOaBQDLzs3ZxEmDVrVmbpMJGr43/wwQcNcefNmxf/jnjYXqhskDLPO++8VJF4elznD0e85N0WvvnmG4V3sug/TJoWDfIJ4K+++qqoiNrGX3311WOOe+yxR23zyYz5ISDrWN4N4hVXXBHbB+oZVsNkBTh2zHqZt5rJdOjgOtd2BA4RMx39+bXXXkvNHlao6HjmMa8tNAUirr522LBh5k8Nn9FOmC8b19flOaCkjny0g8iclJun+3feeScuq+0F9pCJNkOXC8e8rYTM9ibLASW3Sbr55psb+JonJk+eHOcDjkW5UsmMW5TDdtttF8mGk8ncCtB0ErhunWs6QcHrqaeeMrOW+dl8ETWuBaOiIWT9lFx92S3KWMQBJfOBFZJpoSPqs82ZK/P35JNPxvYMXT/++OMySvy9SHlxkYyf1w6YW1siL1ipYAtSbhZ32/W0bzcHVBHOHWHfefZUZExRpKw22/bVBsl85JXRpe/0fU8Wqk7Z5OadK8qriE2E7MNkuTrrOFPqp871hGPMX/qFKsaYsP+y/XDI+hnKbmW953cS8EGADigfFCmjHQnQAVWx1kM6oDDpickD/YetkNJefDt37tzEhI/NASUnjvH0ZNkgZZ566qmpIpGnrl27xuVBuR599NGG+ObEJOKcc845DXHyTjzwwAOJdLA6rDMEPMGPSVVtE3gBMEPnJvDuu+/G+obe0yZSYBvynTWbbbZZapuhqT3//POJredWWmmlaOWF/t08LlmyRJlbsvXs2VOZK0zMuPKzfG8UytK9e3f1448/yqjx9zlz5iTKjmt+97vfxb+7fHjxxRdjGdhuIi/ceuutcXxdz/IcUCHaQeRTyk3TvS7TSSedFOcd5bAFuToUPNNWyL311luxPLDIckBBj/369YvjY3XTt99+a8tCtPrpwAMPjOOmbRGmLy7CASuvtN7QP5th1113jX9z7f9GjRoVXwO5Rx11lCky8/ONN96YuHbcuHGZ8W0/hqyfkquP/luXoYgDSubj9NNP12Iajh1Rn7F1ZV6A417bHY54KjktFCkvZMj4We0Atm0z84F6uHTpUmtWpNws7jYBtG83B1QRzlXYd8gxRZGy2mzbVxsk85FVZ5APl77T9z1ZqDplk5t3LqRNhOzDZLk66zhT2nOd6wnHmL/0C1WMMWH/ZfvhkPUzlN3Kes/vJOCDAB1QPihSRjsSoAOqYq2HdEDhye0hQ4YkJhMwgTZ79uxEKfEEe48ePRLxbA4ouZpggw02yJz8TSSS8kXKzHqfC0TgZdjm5Mjaa6+t8FJ4M/hwQMl3nTzxxBNmEi37Wd5cwRnl8q6Vli0wM64waW3WGXMiBasF8eTuddddp/r375+Id/jhh6c6FiRWPM1qvv8IjqFnn3024bxCOuY7e1ZbbTWVt82amQ6c53rbPV2eY4891oxi/WxugYbrsrbsswkwb8723HNPW5TEOThS4NjVecQxzwEVoh1EpqRcU/dmpuEYvP766xN5hhPeFtCvHHfccYm4WDEEZ5MZMCE1cODARLwsBxSulRMP2KoRcswAp9TBBx8cy1155ZUjGzbjyM8uHCAXNmvaCxykOsiHEuAscwnSqYstJ11X5cqVKM2++zBU/ZRcffTfmmkRB5TMB+wxLXREfUa7lxfkO2LOPffc1EuKlBdCZHxsY2Ou7EPbOn369MQkOtotjA/SVj/Z5GZxtxWG9u3mgJL6y+JchX2HHFMUKavNBn21QTIfvvpOn/dkIeuUTXbWuZA2gXRD9WGyTJ11nCntue71hGPMLtEDWdI+bd/LjDEhz0c/HKp+hrJbG0eeI4GyBOiAKkuQ17crATqgKtZ8SAeULspNN92UWPWCVURYMo1l7tLxpCdNTQcUnn7Fi9cxYax/10csER8/frxOyvmYJXPQoEEK+6XbAt5NhcltnT6OKMPIkSPjp3TLOKCw3/Bpp52mMElopoFtpDBBi86lFcPChQsjRuZ2WLp8G220kbrllltKOxNbkUtnzjNupFEvbPUW9i1tXNvDCiusoK699trEBKULp4kTJ6o111wzUW/gMILTAc4tc+Vdnz591HvvveciNhFn7733TsjPmhzVF2J1jC4bjniiziXAIYEVT+ZEOK6H0ytPxiuvvJJIM80BFaodTJML3aINMP/w3i6Tj/6MbUizAmzL1Ck+wymz//77Rw448zctM88BhfSGDx8ev6sL10EOVllhy1X0W6Zc6CZrC8Y0DpALh6n+0/mTR6SLd1Vg60W5+hZxDzjgADVt2jQrJtg3nAi269Bn3X333dbrcPKll15SRx55pFUvQ4cOVW+88UbqtWk/+KyfWVzL9t86/2a9S3tIokg+Oro+Ywth29af8+fPj1ZpSztB+4yJpc8//1wjicY4aWMxG/csPrBftAfSqa/rAJzoeG+OLWTJteVDyqB9u20xWYRzFfYdckxRpKywp6LxtQ3m3UOkyfXZd5a9J9NlMY8h6pQpP+1zSJuQafrsw6Rs83tnGmem2TPa+ay2ug71hGPMcGNM2LvvNsNn/Qxlt2Y952cS8E2ADijfRCmvXQjQAVWxpqtwQKFImCQ75phjVN++fRMrFfRkA45w7GDgjadj4bDQAQNRc3WDeQ0+46WtRUOeTEyypIX77ruvYWIOk5ILFiyILinjgMIT8LJ8+jvSwE1+Kwa5FZYukz5isgsDPobOQ8C2/ZzWN46YeMTqkXXXXTdamQTH62OPPZao+0VpYLIUW3tg8hLv5UE62mGANgRPXWIlCSb1mwlYqaXLAHloR/LCXUk6DbQAACAASURBVHfdFV+Dlx+7Bqys1GnJI/KRF44//vj4+pdfftkaPVQ7mCdXlsf2ferUqdY8myfhWMOWcnBiQx9a15AH/aPPQdkPPfTQiAVW4LoEPACBpzJNJ4QpGyuT8MRlnv7LcoC9wqZtfPS5o48+2lokcyWCjmseN
954Y+t1OHnIIYdkpnnmmWemXpv1g6/6mce1TP+t82/qPs0BVSQfdajPV155pS5efMT2NaZdyM+TJk2K4xYpLy7Ki6/TAmuM41BfsUL0n//8Z+YDKXlys/SPfNG+3RxQRThXYd8hxxRFyupi21k2mHUPkZcPXWeyji59Z5l7srhBMD6EqlNGEtaPIW3ClqCvPswmW5/rTOPMPHuuez3hGLOLCjHGhK2HaDN81c9QdqvrOI8kEIIAHVAhqFJmOxCgA6piLVflgDKLhfd1YLuVF154IVpphBsI7bwx4/EzCZAACTRLAAMxrBR6++23o20ym3U6menjaVu8lBZ/aZPSZnx8hmNVX1NlO4d0saISKxzaISxatCjaLm/GjBlRufFOMR2ge2y3WNQGsE0Y3k+IvgqrR7Adn+s7w3TaPNoJhKif9pSaO+vigGpOcnNXtVt9bo5Sfa6ifRfTBe27GC+fsXlP1hzNUHWc48zm9BH6Ko4xQxP2Kz9U/fSbS0ojAX8EYPN4OAXbxDOQAAm4E6ADyp2Vl5gd4YDyknEKIQESIAESIAESIIEABOrmgApQRIpsYwK07zZWfo2KjgmzF198MdolA6uyXP++/vrrGpWCWSEBEiABEiCBjiVAB1TH8mfqrUuADqiKdUcHVMXAmRwJkAAJkAAJkECtCXCCvtbqYeZKEqB9lwTIy70QwHvesrYTTPvN3BrUS0YohARIgARIgARamAAdUC2sPGa9QwnQAVUxfjqgKgbO5EiABEiABEiABGpNgBP0tVYPM1eSAO27JEBe7oUAHVBeMFIICZAACZBAmxOgA6rNDYDFb5oAHVBNo2vuQjqgmuPGq0iABEiABEiABDonAU7Qd069slS/EKB90xLqQADvapw9e3bhvyVLltQh+8wDCZAACZAACdSCAB1QtVADM9GCBOiAqlhpdEBVDJzJkQAJkAAJkAAJ1JoAJ+hrrR5mriQB2ndJgLycBEiABEiABEiABGpCgA6omiiC2Wg5AnRAVawyOqAqBs7kSIAESIAESIAEak2AE/S1Vg8zV5IA7bskQF5OAiRAAiRAAiRAAjUhQAdUTRTBbLQcATqgKlYZHVAVA2dyJEACJEACJEACtSZw7733qjFjxkR/3333Xa3zysyRQFECtO+ixBifBEiABEiABEiABOpJgA6oeuqFuao/ATqgKtYRHVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP29evVS++67r1qwYAH/yIA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdTcBr744gvVvXt3ddRRR1U8m8zkSKC1CdABVbH+sAJq4MCBqkuXLvwjA9oAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2UHMb6NatW6Sjgw8+uOLZZCZHAq1NgA6oivW3xx57qEGDBqk//OEP/CMD2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZQcxsYPHiw2mmnndSZZ55Z8WwykyOB1iZAB1TF+uM7oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBvgOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbO5EiABEiABEiABEiABEiABEiABEiABEiABEiABEigBAE6oErA46VtTYAOqIrVTwdUxcCZHAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmUIEAHVAl4vLStCdABVbH66YCqGDiTIwESIAESIAESIAESIAESIAESIAESIAESIAESIIESBOiAKgGPl7Y1ATqgKlY/HVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbeZHI//PCD+u6775q8mpeRAAmQQGsSWLx4scLfkiVLWrMAzDUJkAAJtCgBtr8tqjhmmwRIgARIgARIoG0I0AHVNqpmQT0ToAPKM9A8cR3lgFq2bJmaPXu2euutt6LjggUL1M8//5yX3bb8Hc6nXXbZRXXp0kW98cYbbcmAhe6cBN555x315ptvRn+LFi3yWki0MR9++KH673//q+bNm6cg30cb880330Ty/ve//6mffvrJa57rIgxtDnTz3nvvKZTz+++/77CsoY9C2/fb3/5WzZ8/v6l80M6awhb8ojrZma/ChrI1jpncNRRKB+45cI9Z9/6E7S/7eXdrZkwS6DgCdW9LO44MUyYBEmgHAnRAtYOWWcYQBOiACkE1Q2aVDihMBA8dOlT16tVL/eY3v4kmFTGxqP+Ql913311deOGFChNTOmACdM0118z9W2+99dTmm2+ujj32WDV69Gj16aefahENx6IyjzjiCHXDDTdEE9oNwpo8cdFFF+WWCeVebrnlYkat4IAaOHBgarl22GEHKy3oK03Ht956q/UanmxtAnPnzk20A1dddVXpAr3//vvq1FNPVb17944cFrpt0ccVVlhBbb/99uqPf/yjev75553Tmzx5stp///3VqquuGtXFbt26RceuXbuqvn37qhEjRqivvvoqUx7aP5uNb7fddgqDxrTw3HPPWa+DrEmTJsWX5bVpl19+eRzX9uHhhx9Wp5xyitpqq60SbQ7Yob3+3e9+p4477jg1ceLE+HKsygRrW7mKnuvZs2cs1/ygJ0CRD5SxaKCd/dJ30s5+4ZBmZ0Xtyhbft601M2YKWSdd25jddtsts0149NFHG/DhIaT1118/9brrrruu4RrbiWZ0EJKZLY8++hPIzdOHboP12LiZcSzb3y6K/Xz4ft5WT+S5PHuXY5yOHnMNHz5cFiEas+l6aTs+++yziWuKlDkvrplenz591IABA9Rhhx2mrr/++sx72zy5JvdWbEvzyqe5lWlHE0rlFxIgARIIQIAOqABQKbItCNABVbGaq3BAYUB65JFHJiabMREMZ9Hqq68eO1f0JDGOH3zwQUwC15977rkK15hx8j4vv/zy
6i9/+Yt1lUKzMvEU/kknnZRwkMUZLfjhzDPPLFQelPejjz4qmEr10S+++OLohl3qZ91111W33XabNUMzZ85UO++8cwOPjTbaSM14fYb1Gp5sbQJwOJk2su222zZdIKwOGDJkSKKNgeO2f//+CjeNNof36aefnpseVtzAYWXmE21Av379YmeU/g3tzX333Zcq8+uvv1ao89p5pa/DERMAaQGrtwYPHpzIA8oDZz5+0yGrTYNj/+WXX9ZRE0esbgI7Mz/du3ePHE62vKIMOuCJU/O6Mp+Rli2UnQClnf36kAftrEtU/2x25uOcL1srM2YKWSdd25hbbrlFrbHGGg1tA9pOOP+xMlUGyMZvZn3X7ck222yjpk+fLi+xfm9GByGZmZn02Z9AbpY+NDvbscg41tQHJmqLhmb0kZYG+/lsnbdyP5+mc/N8lr3byt6RY64999xTvfrqq2b2o89wkMMRjDpo1k2M6XCf/PHHHyeuKVLmrLhmWrbPyA/Gdj/++GMifXzJkiu5t2JbmlU+Gyt9rkg72gCVJ0iABEjAMwE6oDwDpbi2IUAHVMWqDu2Awg0AVr3oARvSu+mmmxJP/GMLLjyNpePgaDqgNJI777wzEee8885TS5cujWThxhiDfUzKyoH9sGHDtIiGo5R5zjnnRO8bwaTsu+++q8aNG6f+9Kc/NcjEKouyoRkHFHi2Qrj33nsTuoJO77rrrsysY7sv0wYw8c/3vmQia9kfsRXehhtumNA3bsA/++yzwmXC1nq4Cda2s+KKK6qrr746ahu0sG+//VadeOKJcRzEzXNAzZkzJ5FHrNx87LHHlFkH8R3nddo4jho1SidrPV577bWJ+LgG7WLe5N4JJ5wQX3f22WdbZeOkbNM222wzqxMecTFZsMUWW8RyscoJTw3rgMkIrPw029QsBxQccy+99FLUhmJ7wgceeCCWjXJOnTo1ygvS/de//hWtntLsQjigaGe/Op9oZ7+wSLMzbfPNHn3ZWtkxk5wADFEnXdoYPDiClSO6fuOIh1PywtixYxPXbL311s5bgDargyqYhepPwFPqw+c4towDqll92GyE/XySitR5q/fzydJlfytSdkiqesyFMRVsPytgtbnZNmJ8lxWKlFnG/b//+79orIf7ZThxMdacNm2aOv744xva6LPOOis1G1KuzeZauS2V5fPZjqZC5Q8kQAIk4IkAHVCeQFJM2xGgA6pilYd2QJkTw5j4eeGFF6wl/PLLLxOroWwOKOzrbw7Y//a3v1llPfPMM4l4mDxNW0UjZWLFlC3AEWVOwiIfSKdMMB1Q2GYOW2Dl/bXSO2fwNJ+pr0suuSQTF26IdHxseQCbYOicBOCk0Lo2j2PGjClc4AMPPDCWhTqK7Y3SAp6w1+llOaDgeMHNtY67yiqrJBwzpny8yw6/67hwpL3++utmlMRnrETScc3j0UcfnYgnv2BVgY6PSdq0INs0THSkBTjqtEysesI2WLYAJ5SOl+aAwmSz6byCnAcffDC+DtfLbQ/NOp/mGCgzAUo7SzqgoAPamX2lnc3ui5zzZWtlx0zmBGCoOunaxhxzzDGJ+n/77bfnIsWqTt3W4Jg2drMJalYHoZmF7E/AQerD5ziW7e8vlsZ+/pe+JFSbYqvPaeekvWeNcSCj6jHXGWeckZb1+Lx0tN9///3xb7YPRcos46bdKyMdPBhotrcYh33++ee2LDS0MzburdyWSm4+21ErUJ4kARIgAY8E6IDyCJOi2ooAHVAVqzukA+qRRx5JDGyztv9BsS+99NI4vs0Bha0LzIFy1qBarnbARKstSJlpA05cixVXZvp4V1WZYDqg8lY/lEmno6795JNPFLYl08yw3SK2OkgL5mQVHH4MnZeArp/rrLNO9M4PbSMHHXRQoUI/+eSTsX1BxjXXXJN5PeoZbrARN8sBZTp7EBerdbKCdLTsuOOOqdFnzZqVyLMuO47SQWMKueeee+Lrnn76afOnxGfZpmH70rSAFU86fWwLkxawgkxPRKY5oFB/ZZBcbOXD+2KQhxAOKNpZowOKdhbGAeXD1nyMmcwJwFB10rWNQTul2xccsZo8L6CN0NekvTMyTUazOgjNLGR/AhZSHz7Hsbrdh06KjlOb1YfUL/t5SaRR563ezzeWMP2MtPesskNK1WOurPqnSzVhwoS4nUPdwmr6rFCkzDJu1r0y0sT217rNxTFtvCvl2ri3clsqy5elR9/zAVm6528kQAIk4EKADigXSoxDAo0E6IBqZBL0TCgHFJ74xDt/9KB2tdVWy3Q+oJB46mrXXXdVu+yyi/r0008byo3tubQ8HLMG1RjMm3FtEzFIQMrMGnA+/vjjCZn77rtvQx6LnOjsDiiwuOCCCxLM0pyQcDjqFWZ4Apyh8xKAE1JPap1//vnRO9V0XYXDMstJaVLBakBz685VV1012vrNjGP7jHYDbQy2h7MFbL8JWTpP66+/vi1a4hwGfT169IivwbWvvPJKIo7+glVCWjZWo+jPOG666aaJ7Un1NTjiCVkdN22CAPFc2zTw0844yM2bHMbqBUwoPvTQQ3G2zMkGrN6UwcUBNXr06Khcvh1QtDPamWmPeXZmxi362Yet+RozVVEnXdsYbPmELVF1u4VVpXkBfYKOn/VOPSmnjA5CMgvdn4CDqz4Qt+g4VvfV0EkRB1QZfZi6ZT9v0vj1s6vOW6Wf/7Vk+Z9cy64lVT3myro31XmSzvknnnhC/2Q9FimzjJuXn9NOOy1uc1HP01ZjSbm2++VWbktdyqeVU7Qd1dfxSAIkQAKhCNABFYos5XZ2AnRAVazhUA4obLWnJxFwxNNCZYMcHGYNqs0bDqS/3XbbWZOXMm0Dan0hXpxtlmnAgAH6p6aO7eCAwvsszMl8bPOF1RQyDBo0KGILJ5TcxkvG5ffWJmC+F+iNN95Q48ePT9QrPBnqEvDON7M+2p7GdJEj40ycODEhF05Ul/DXv/41cd1ll11mvcxsm/CuNDjdzXKkvUPKtwMKE4RmushH0YAJ5hEjRkR/eEeHDC4OqPnz50fXjxw5Ul4efW92ApR29qsDinamVJ6dWY3P8aQPW/M1ZqqiThYZNx166KGJdibLiYH3pvTu3TuKv9JKKzk/jAA1ldFBSGah+xOUvYg+io5j2f4mGwH282H6+STl7G9F7B2Sqh5zZd2b6pLVyQF18sknJ9ro6dOn62wmji7cW7ktdSmfBlK0HdXX8UgCJEACoQjQARWKLOV2dgJ0QFWs4VAOKDhyzMnNrC2jXIssB4dZg/zXXnstkb6PFVCYLDfLdMghh/w/9s4FeK/h/v/8SzG/wdTQaY12qqatUTpKxn1Uq+NSasrP/JT+jKKtS11+qmW0aN0vcYuQuF/ifgmKlJIgQdzjmiARQuIWCRJBYnD+837SPdmzz57bc/ac7/N8n9fOfL/nPOfsfnb3tZ/dPWc/Z3eLJt3rrx8MUMq4uwHwqaeemuBhD7xp01fc4CZg9jhZd911WxldsGBBtOyyy8Z1S7Nsijh3IChEG6N4jzjiiDgtqu9pL+NuGt0BhS233NL10vp
tD4ZceeWV0ZQpU+LZf4pPbbJvkDa0AUqJWW+99eK8yvh7//33e9Pc6cUiBqg82Z0OgKJnSwxQ6FmellW7H0LX6nhm8uUqRJ10n8WyPtyxlw5V+6Y9R9KcvYfTAQcckObNez1EGXgFF9jLLi2crtfdnyiOMuVR9jmW9jdZuvTzSR76FaJNaZeafqWMvktK089cWe+mJleuHg3kDCg9q5p3Wz2Ly4jkc2W5+2RU0ZW629Iy+SvbjvpYcA0CEIBASAIYoELSRFY/EcAA1XBp12WAGjJkSPxAqwfbuXPnVs6Z+3CY9ZB/0UUXJeIfPny4N35XZtZAipaDMQ/pOqbFL+OXlp0xf2kzIfrFAKUlaNZYY42YnXTO6IOWB9l4441b91ZdddVIyzfgBi+B119/PVp66aVb5X3GGWfEGd1xxx1j/VhttdUi6UWe22ijjeIwodoYxWkbZST3/fffz0tK676WELXbBy0n6HP2YIj2BpFzX6x9BvM6DFCHHnpoIs2qm2lLB/ryknetymCDkd3JACh6lhx0Q8+MNoU/htK1Op6ZfLkNUSfLPDe99957cZuv9lGzndPc4YcfHrdHDz/8cJq3tuuhyqBN8H8uVGFWd3+iJJYpj6LPsYYF7a8hsfhIP5/koV9V6ke7tPwrZfRd0pp+5kp7N7Rz1i0GKHeGppaFTnNlufvkVNGVutvSMvkr2476WHANAhCAQEgCGKBC0kRWPxHAANVwaevlUvuhhHYrrrhiPJCwyiqrBBHvPhxmPeRrANceDJ4wYYI3Da7MNAOUBsR32GGHWKYGl337VCmS8ePHx/6UhrRZPa4BSoYaTevXjItJT09qzYIoMhDvzViXXdRXz3Z5mOXStDSUuT5q1KguSzXJCU3gtNNOi8tbum6cazDW8np5zl7aUcbLUM4ecNOsoDJ10G73pNda5s519mDIhRde2Lotw6sMb6Yu6PjQQw8lgmrvJXM/xB5QEq6BW7EzcnVUnrUfVFHDWyKRzo8qgw1GlF0evplhxp99RM+Sg27oma0dYc9D6ZrddoR6ZvLlNESdLPrcZOL/yU9+Ercxqs96SXadrinfaoO0756W4yvqQpVBWnxVmNntVx39idJctDzKPMcaFnb6aX8XU7HrqvSVfv6muH6Lh/vsYnQp1LGovpv4mn7myno3NWnqBgOUniO/+c1vxmW3wQYbeHXZpLksdxPOPnZzW1o0f520ozYDziEAAQjUQQADVB1UkdkPBDBANVzKerkMbYD67LPP4gdavYysvfbaQXLlPhymPeRrsE0v+opbf9tuu22kDb59zpWZZoCyBzgkc+jQoT5xrWudGKC0BIK9DJlJu/ZDOPnkkyMx7WUn/vbXazLgTZs2LVp99dVbZbTJJpuUGujvZRb9mnYNKK611lqt8naXp3PXUz/uuOMyMdXVxixatChuN1QHyxq2NHBq6q6Os2bNasuHPRgycuTI+L67VNX666+fGKi95ZZbYtmhDFCK/LHHHvO2PeobzjzzzMwBiTjxKSdVBhuMyLIDoOjZYnLomdGg+o6hdK2u9syX8xB1suhzk4n/7LPPjtsutYu+AWr7S3x3mV4jx3cMVQY+2eZap8ya6E+UxqLlUeY51uSd9teQWHKkn1/CQmed1o+klOK/iuq7kdh0X/izn/0s0soXWX+/+MUvEm1inUvwKS7NoNffpZdeGv31r3+Ntt566zh+rUrwhz/8IdLsvixXlrtPVqe60kRbWjR/nbSjPhZcgwAEIBCSAAaokDSR1U8EMEA1XNp1GKD0laQ9CLv55psHyZX7cGgMUPoa6Y033og0gHHIIYck4tasAoVLc67MAw88sOV/xowZrZlImnVgr4+tZfU0mJLlihqgNAvI5pR1vuuuu6auy52Vlm66d9dddyXya5ZiU75DLvvVTXkmLUsIaNDR6LhZEmzJ3ShaZ5114vsyVma5utoYV64MwGXchhtuGOdBeZ08eXJbcHsw5Pzzz4/vayDVnimg8CNGjIjv33777bHskAYoRaD6Z4yDpozMUUbiO+64I05HmZNOBxvsOMoOgKJni+mhZ7YW1XMeStfcdifUM5Mv1yHqpPvclPbhjonf1kW1K3//+9/Nrfi43377xe2bnr+KulBlkBVfp8zccq2jP1G63fII8RxreND+GhJLjvTzS1jorNP6kZRS/Jer72XanyaeucyzU5ljnQaovHTsueee0QcffJBbAGW5+wR2qitNtKVu/kK2oz4WXIMABCAQkgAGqJA0kdVPBDBANVzadRigNHhgP/BuscUWQXLlPhwqjq9//euRZtPY8Zlzzex6+umnM+P2yTTh3eNBBx2UacwyEckYds4558R/Mkj5nL561gyIY445JtJyDJoFMmfOnEh7SGkW15prrpnI1y9/+ctIy/T1sttqq60SeRLj3//+972cJdJekMD+++/fKnvNTtS+IK47+uijE7phL9Hn+tXScXb9zGtj5s2b19pfTEvduX/2hsvuTCzVwTJu0003TaRr+vTpbcHtwVh3b7pnn302MXtT7bPaKLkxY8bEskMboCRfywhpZqc94Ggz1j5VZV2ngw12PHZ6NAiR59CzxYTQszxNqX4/lK7V9czky2GIOuk+N+UNACsd9qyRH//4x4mk6bnG1PNtttkmcS/vR6gyyIqnU2ZN9CdKt1sedrvtnhd9jjU8TLlIDu3vYir080Y7Fh87rR9JKcV/ufqe1/403Rdut9128fuf/S5on9sGd9WtOg1Q+ojy2GOPbf396U9/in77299G3/ve9+LnScWvjytvvPHGzEIoy90nrFNdaaItdfPntp3277LtqI8F1yAAAQiEJIABKiRNZPUTAQxQDZe2Xi5DL8GngUz7QS1vNkPRLPseDpdbbrnoK1/5SrTMMsu0vuDffvvto4MPPrjwjBpXptbD1sO6vnC0X7yVn7/85S/RJ598UjS5lf1pUGqNNdZIsLSX7KocwQAIePTRRxP5WWGFFeIB9gFIDlE2RGDBggWtF1zVo1/96lfeWO0v2eXPnv3jBlA9LNPGfOMb30j4t8PaX8TOnj074U/tSxnnxuPbR8keDBk2bFib+MMOOyyRhn333bfl5+67746v12GAMgmRcVD71qlNtTnpPG/2p5Fhjp0ONpjwOtrtcN4AKHq2hBx6toRF2tkDDzwQbbzxxql/+mAi7aOPkLpW1zOTL98h6qT73JQ3AKx0qE2x25M333wzTp49u1P7QhZ1IcsgK85OmTXRnyjdbnmEfI6l/W3XDPr5JJNO60en7a+r73ntT9N9oVmdI0kp+asb9oAS///6r/9KtMtaoi/NleXuk9OprjTRlrr5C9mO+lhwDQIQgEBIAhigQtJEVj8RwADVcGnXYYBSFlZaaaX4ofZb3/pWkFy5D4dFHvLzInZl2i8yGuTW4K89aCJjXdp+UnlxdXLfHZQfMmRIJ2K6Koz98r7TTjt1VdpITD0E7P2NvvOd70RaUtL90ww/u67lfQm/8sorx/5lqM1y2j/JXuLPxLPDDjtE9nJPGmw298zRt8G4Ly7NpLKXldRMLy2r5z
p7MMRn0JHR6mtf+1oiHRMnTozGjh0bX6vTAGXSO3Xq1EgzywwHc3zwwQeNl9xjp4MNtuAyA6Do2RJy6NkSFmlnt912W5t+Gz03x48++sgb9cCsdgAAIABJREFUPLSu1fHM5Et4iDqZ9dzki1PXJkyYkGA9atSo2Osee+zRuqev8GVUKupCl0FavJ0ya6I/UZqzyqPqcyztb1Ir6OeTPPSr0/rRafubpe/tqYuipvvCIu+m3WCAEquXX365tdep6e9U32Xs8bmy3H0yOtWVJtrSrPxVbUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQG2wwQbxYIMGYtO+Ii6TXffhsMhDfp58V6ZtgFJYLZO37rrrxnnRQ7qWUWjKiZtmeJmXA81K8A1qN5WeEPHoqzKTHwxQIYh2vwwZbk2ZFz2q3fDNIDK5ddsYvSBmuRdffDFhIFJdmj9/flsQe8BNadWSmkXcq6++msij9k7yOXsw5Mwzz/R5iTQ4a3PaaKONIn2taq41YYBSwmR822STTeJ4Ff9RRx3lTbPvYqeDDbYsuzzyZkChZ0vIoWdLWKSddToAKnmhdc1tz0I8M/nyHaJO5j03+eLVs5Rdl3ffffeWNy2PapZR1nJ6ZVzoMkiLuwozO8919CdKc155VHmOtdNP+xtF9PPttaTT+tFp+5un724Km+4Li7ybdosBSqzc5QAvvvhiF2Hrd1nuPiGd6opk2W1RHW1pXv6qtKM+FlyDAAQgEJIABqiQNJHVTwQwQDVc2nqg00t8aLfLLrskBi3z9mIqEr/7cFjkIT9PrivTNUAp/P3335/Ii77SzXsRz4u3zH3NejIDzzpqn6hedhigern0yqfdXrt92223jZ5//vnUP21Ob+u6XlbTnNvGTJo0Kc1rfF2bwBv566+/fnzdPlEajR8dNfOoiHO/8DdL57lhiwyGfPHFF9Fmm22WSMchhxwS/27KAKW062tYM0AsHlqyrKirMthg4rAHHbLaXfTMEFt8RM+SPHy/tNykZvSl/T3++OPeDz7q0DW3PQvxzOTLc4g6WeS5yRe39h4xbavqtQb0tOSeuaYZ30VdHWWQFncVZnX3J0pzkfLo9DmW9jepFfTzSR761Wn96LT9LaLvdiqb7guLvJt2kwHq8ssvj9tgtcXaJ8rnynL3yehUVySr7ra0SP46bUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQGm/JDOooONll11WOWfuw2GRh/y8SF2ZPgOUZGimjp2fP/7xj3mig913lybLm+kRLOKaBGGAqglsl4o9+eST47qTt7+HPVCg+rbnnnum5uqII46I5crvJZdckurX3LBnGWivN5/TZs12XT/ggAN83tquXXHFFYlwo0ePbvOjC3Ye02ZAyZ8MavaSfnaamjRAKS3aC8fErxmZRV2VwQYTR9EBUPTMEFt8RM+SPEL+qkPX6nhm8uU5RJ0s+tzkxn/zzTfH7YjaExmcdtxxx9a1Nddc02vsc2WY33WUgZHtHqswq7s/UVqLlkcnz7G0v0ltoJ9P8tCvKvWjXVr+laL6biQ13RcWeTftJgPUXXfdlWiX//CHPxh0iWNZ7onA//lRRVfqbkuL5q+TdtTHgmsQgAAEQhLAABWSJrL6iQAGqIZLuy4DlGY5mAFLHTWAmedmzpzZWotasxR8s3zch8MiD/l5cboy0wxQzz77bCI/Wh5MS3p16rSMnvnLk7HeeuvFcWtvmF53GKB6vQSLp18zebTnk9qAvCX1jFR7lpLap0WLFplbieMLL7wQ1wvJ33zzzRP3fT80e8e0Sz/72c98XqJJT0+K/cjviiuumJoGW4D9daaW90tbPrDoYIhkH3zwwYm0mLSHMEBphoVmv2ofrDTGJn8ywpm4f/jDH5rLuccqgw1GeJEBUPTM0FpyRM+WsAh5Vpeu1fHM5Mt3iDpZ9LnJjV9tovoB05b8/ve/j3+fcsoprvfU33WVQVqEVZjV3Z8ozUXLo5PnWNrfpFbQzyd56FeV+tEuLf9KUX03kpruC4u8m3aTAWro0KFxm6y2+YQTTjDoEsey3BOB//Ojiq7U3ZYWzV8n7aiPBdcgAAEIhCSAASokTWT1EwEMUA2Xdl0GKGVDA5tmoEHHyZMnZ+ZOAxDG//Tp09v8vvnmm/F9+dNyXVWdK/Poo49OFanZGCZ9Ompmks99+OGHrT1ctI+L/jRQ7rqDDjqoJUuzCbKWlZo7d2607LLLxvH++te/dkX13O9VVlklzs/222/fc+knwcUJ2C/ZRZf6tI0dqmf//ve/UyOU/th1Mm/ZKlv30gxQimzDDTdMyE2bzWQSZg9wKD1ZbZPaQZPm008/3YjwHlX/V1pppdi/CZdlgHLbtGOOOcYre/z48bFc7cWQ5cwsBcW/9957Z3lN3Lv22mvjOBRWy5eUdUUGQNGzdqroWTuTEFfq1LXQz0y+/Iaok0XbGF/822yzTaJNMG2altQr6uosA18aqjKrsz9Ret3yCPEcazjQ/hoSydnL0lv6+cVsqtaPJYSLnbn6nvaMY6Q13Rced9xxJurU4913351oB++8885Uv7pRJs+u3zyD2K677ppIi2aq+pwrN4+7T0ZVXamzLXXzF7Id9bHgGgQgAIGQBDBAhaSJrH4igAGq4dKu0wClQVIzuKCjlr9K21h71qxZiQ1GfQYod5BXswOqOlfmoYcemipSaZLByM7T7bff3ubfHtiV3yOPPLLNjzFA6f5f//rXtvvmgh7w7fiK7kdjwnfbUV8u219Ab7TRRt2WRNITkIA92Fi0vp5//vkJnde+IWnu3nvvTfjV7JwFCxZ4vU+ZMiXhN8sApaWh7OXvVlhhhdbMKJ9gLYlpL+23xhprRB9//LHPa+vaI488EqdDS4rkuUsvvTT2b9qCLAOU26Yddthh3ijsdmrrrbeO9ODqc9p03d4DSuVT1I0cOTKR9ltuuaVo0NhfkQFQ9CzGFZ+gZzGKoCd16lroZyZfxkPUyaJtjC9+t31Xm1b04wQjr84yMHHYx6rM6uxPlE63PEI8x5r80/4uJkE/bzSi/Vi1frRLzL7i6nvaM46R0nRfqOWh85w+ajLPczpqZlCWK5Nn12+WAUrLQNvp+PrXvx4tXLjQmxRXbh53n5CqulJnW+rmL2Q76mPBNQhAAAIhCWCACkkTWf1EAANUw6VdpwFKS8ztt99+iYdbDRxoQNN2mrWw+uqrJ/z5DFD2ZtV6YP7e974Xff7557ao0ueuzM022yxThjZntR/Wv/GNb0Tul7v2wK785hmg5OeCCy5I5EX50ouBbfD63e9+l5m2XrjpbuAsY1TWDLBeyBNp9BO48cYbE3VFS0kWcfqq2K5jWs7ON4tQstTGqF7Y/rfYYotIxibbTZ06NRoyZEjCX5YBSmFPO+20hBFKy1/KAGwb0V9++eXopz/9aSx35ZVXjp544gk76rZz+wX8F7/4Rdt994LaAhlq7TxmGaDcNk08fM5tpzT7wl02UBztuHVu598n177mflnbyd55eQOg6JlNfMk5eraERaizunUt9DOTL98h6
mTRNsYX/yuvvJJoy9Su6av4oq7uMvClIwSzuvoTpdctjxDPsYYD7W8U0c8bbfAfQ9QPv2T/VVff055xTOim+0I9E+Y5d8+/v/3tb5lByuTZ9aul5dW3GKdnuCeffDKyP4RUO6z3sbTZTwrrys3jbuKzjyF0pa621M1fyHbUZsA5BCAAgToIYICqgyoy+4EABqiGS7lOA5TJiowr9qwXGVU0jV7LZ7mGJzPIahug9DWWNpzW4K65b476cnbMmDEmqsLHLJl77LFHpP0YfE57U2kg2sSvo/IwbNiw+Ksxd2DXZ4DSF3K2DJ1rE+7dd9+9tbSfDFv2/X333bfUwK8v7QN5bd68eS1G9hJoJn8/+MEPoksuuSRhgBvItBJ3NQLaT0hLy9nGU1PWu+yyS/Twww97I5g2bVqkl3BfONWxa665xhtOF1X/7DZG5zJ47bzzzi0Din3PpCXPACW548aNi1ZbbbVEXdRsIIVdf/31E3GutdZakfKQ5j766KNIM57sAT2lRXVbX15muccffzyRBp8Bqmyb5rZTSotmfak+apNlzeqyuanuzpgxIyuZ8b1HH3002muvvRJpNtwPPPDA6Lnnnov95p3YvGxjNXrmJ4eeLdXSu7J65qe5+GrTulb1mcmXlxB1smwb40uHrq2zzjpx27D88sunzlq1wzddBoo7BDM7DyH7E8nttDyKPMeadNP+0s8bXXCPoeuHK9/9XVbfB7Iv1JLtvmXnZ8+e3foo0X3O1YdW+vjq3XffTWS7TJ6z/Or5a7nllkvMZjfPZDrq4yLtbeRzWXKz3pdtWaF1JWRb2mn+yrSjNgvOIQABCNRBAANUHVSR2Q8EMEA1XMpNGKCUJQ0677PPPq2BB3tpK/sBWIYd7TWir7VksDBOD3lpYRR+k002MV4LH/NkyuCV5q6//vp48MSkXwO1ZvaAO7DrM0Ap/htuuKG1n4rPsCa5GuiWQarXl90TR3f5M8PNHPXypZcAXO8T0Au0KVffMW0PIfsrVV+4tddeOxOOjDJark8GFLUXtvFE+6ip/XnsscdadUryNRuziFN+tBSHXtDNfmxGtuLRV5L6Kl8DpFlOMyV9+dK14cOHZwVt3dt///3j8MqH68q2aTLmnHTSSS1jk90GKU92e/v9738/GjFiRDR//nw3ytTf2qsuLa+6/uc//zk1rHsjbQAUPXNJLf6Nni02QJXVMz/NxVcHQteqPDP58hKiTpZtY3zp0DV7aWHNki/iBqIMQjBz8xaqP5HcKuWR9xxr0k37u/jDDPp5oxFLjnXUjyXS28/K6vtA94VDhw5ty4SWkMt6NnrggQcSYcrkOc+viVd1Wu/NeibW3svaAzRrNZE8uVnvyyYzdehKqLa0Sv6KtqOGA0cIQAACdRHAAFUXWeQOdgIYoBou4aYMUHa2tEeLpv9PnDixNdNIX9Ub443tr5/OZXDT7AkNOmkJLzHBINNPGkBeQxKQsUTL7k2aNCl65513Iu09ZpwMRapjeQYj498+6uFOs5VefPHF1tKbnciw5ZU5V3ugvOgL2jrc3LlzW22Q2mZ9uavBm25og9IGQOtgUFYmelaWWBR1q56Vz0lzIQbbM5MGDvWhjv7sWY3NEe2OmAayPylKgPb3tY6eFYrydf31az/vcuiG33WXRTfkcbCkoRfa0sHCmnxAAALdSQADVHeWC6nqfgIYoBouo4EwQDWcRaKDAAQgAIEeJdDNA6A9ipRkQwACEChEgPa3ECY8QQACEIAABCAAgQEjgAFqwNATcY8TwADVcAFigGoYONFBAAIQgEBhAgyAFkaFRwhAAAJBCdD+BsWJMAhAAAIQgAAEIBCcAAao4EgR2CcEMEA1XNAYoBoGTnQQgAAEIFCYAAOghVHhEQIQgEBQArS/QXEiDAIQgAAEIAABCAQngAEqOFIE9gkBDFANFzQGqIaBEx0EIAABCBQmwABoYVR4hAAEIBCUAO1vUJwIgwAEIAABCEAAAsEJYIAKjhSBfUIAA1TDBY0BqmHgRAcBCEAAAoUJMABaGBUeIQABCAQlQPsbFCfCIAABCEAAAhCAQHACGKCCI0VgnxDAANVwQWOAahg40UEAAhCAQGECDIAWRoVHCEAAAkEJ0P4GxYkwCEAAAhCAAAQgEJwABqjgSBHYJwQwQDVc0BigGgZOdBCAAAQgUJjAddddF40aNar19/HHHxcOh0cIQAACEKhGgPa3Gj9CQwACEIAABCAAgboJYICqmzDyBysBDFANlywGqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/Nttt1209957R3feeSd/MEAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdKDLdeD222+Pttpqq+jwww9veDSZ6CDQ2wQwQDVcfpoBNWTIkGippZbiDwboADqADqAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA50uQ4ss8wyrTLabbfdGh5NJjoI9DYBDFANl9+OO+4Y7bPPPtE111zDHwzQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB3och24+uqro2222Sb6y1/+0vBoMtFBoLcJYIBquPzYA6ph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAfaAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/BigGgZOdBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEKhDAAFUBHkH7mgAGqIaLHwNUw8CJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBABQIYoCrAI2hfE8AA1XDxY4BqGDjRQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCoQAADVAV4BO1rAhigGi5+DFANAyc6CEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIVCGCAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/ANlgFq0aFH06quvRlOmTGkd33///ejLL79sOPe9Ed0X
X3wRvfbaa9HHH3/cGwkmlRAoSODll1+OXnjhhdbf/PnzC4YavN7q5KE2V+3ISy+9FL355puReIdocz/88MOWvLfffjtSWzUY3WeffRapbKZNmxYpn59++ulgzCZ5ggAEIAABCEAAAhCAAAQgAIEeIoABqocKi6R2FQEMUA0XR5MGKA18HnjggdG3vvWtaOmll46WWmqpxJ/Sst1220V///vfIw34GacBv9VWWy33b80114w22GCDaN99940uvvji6K233jIi2o5lZe65557RiBEjWgO4bcJquKCBzmOPPTbaZJNNoq9+9astTr/5zW9qiCm8yCFDhqSW1VZbbeWNUOWVVsaXXnqpNwwXe5vArFmzEu3AmWeeWThDakt8+rLFFltEegBLc/fff783nGQ98MADrWAy9H77299O9eeLN+3aGmuskZaUtutVeLQJ+8+FV155JTr00ENb+fl//+//Jdpbtb/LLbdc9JOf/CQ6/PDDo4ceeihNTNv18ePHRzvvvHO00kortWQus8wyreNXvvKVaJ111onOOuusaO7cuW3h7At1lqHiyWvjTz31VDs5bee33nprdMghh7Ta4GWXXTbBTv3XD3/4w+h3v/tdNG7cuDjsQOlOnABOIAABCEAAAhCAAAQgAAEIQKBvCGCA6puiJqOBCWCACgw0T1wTBigNyu21116JwWYNfMpYtMoqqyQG9oxRavr06XHSFf5vf/tba7DU3C9ylOHmH//4h/er/E5lahD3oIMOShjI4oQGONHMhN13393LpMxgdoCkdCzihBNOiDQQ7ZbRd77zneiyyy7zyn322Wejrbfeui3MD37wg2jS05O8YbjY2wRkcLJ1ZPPNNy+coQ8++CD685//HBnDhy3nvPPOS5Wj+vWHP/whEa+MCTKM656cZvTY8qqcK31FXRUebhya7bTffvsl2lwZUdZff/1IhnrfBwCHHXaYK6bt9+zZs1sGK5uJ2sT11lsvNkaZe2p/r7/++jYZ5kKdZag4stp4fejw2GOPmaQkjprdJHYmHzp+7WtfaxmcfPomPTRuoHTHxM8RAhCAAAQgAAEIQAACEIAABPqHAAao/ilrchqWAAaosDxzpdVtgNIgo2a9mME8xXfBBRckZiloCa611lor9iO/tgHKZOKqq65K+Dn66KOjhQsXtmTpa/cnnniiNZDsful/+umnGxFtR1fmkUceGS1YsKC1xNLUqVOjW265JTrqqKMiV6ZmFYR211xzTbTCCivEeZSRTgOh9957b6S0KF294q677ro4H6bsr7766szka9aX8aujZmZ88sknmWG42ZsEtPTb97///UR5yyjyzjvvlMrQueeem5AhvVEbo/Ygyx1wwAFxuCOOOCLh1TUiyLjy6KOPtuqflpi78cYb47CK78EHH2wZuRXuvvvua802Mnpc1AAViocyoqX1ZGAxaVh++eWjs88+u9VWmox+9NFH0R//+MfYj/zmGaBmzJiRKDPNZL3jjjsitfHG6beum7h1PP/8881t77GOMrQjctv4H//4x96PEhRGZbjRRhvF6dcsJ83UMu7zzz9vzYS1+4MsA1QTumPSxhECEIAABCAAAQhAAAIQgAAE+osABqj+Km9yG44ABqhwLAtJqtsAZQ+EajB24sSJ3nTNmTMnMRvKZ4DSHhz2wOaJJ57olSWDje1Pg4Vps2hcmZox5XMyRNmDjpKveEI5DRDbad57771bg6Gh5A+EHM16s/N00kknZSbj4Ycfjv1rSTPpBG5wEpBBx9YNcz5q1KhSGdYsFhPWPqr+ZLlLLrkkDjd69OiEV9sApZl8tgFCHm+66aY4rOJ0l66z9bioASoUD6Xvv//7v+P0qc3ScnlpTkvvGW5ZBigZXmS4MX5XXHHFNi4mDu3tp/vGrwyLTz/9tLnddqyjDO1I3DZey+qlObsd1qwn7U3oc1qO1eQvzQDVlO740sc1CEAAAhCAAAQgAAEIQAACEBj8BDBADf4yJof1EMAAVQ/XVKl1GqD++c9/xoN0GqzLWhpLCTz55JNj/z4DlPZIMYN+OqYZoCTL/bpfA4s+58pMM0AprGZc2fFrr6oQzh7MlPyLLroohNgBlzFz5sx4/yrlS8stalmsNLfPPvvEfGXwww1eAqZ+fvOb32zttWTq1a677loq05MnT451xsgwR9cwZAu+9tpr43D33HOPfSuxBJ900nV5Bij533bbbVvyixqgQvG4++6743yJwznnnOMmP/FbM8WURvnNMkDZBjv51UyvLOcy+ulPf5rqvY4ytCNz23gt55rmNOPJ6I/2/UtzmkGmvlN+0wxQTelOWhq5DgEIQAACEIAABCAAAQhAAAKDmwAGqMFdvuSuPgIYoOpj65VclwFKX8xrzx8zmLfyyitnGh+UuHfffTfaZpttop///OfRW2+91ZZeLc9l5OmYZYDSUlC2X99goCJwZWYZoO68886EzJ122qktjWUvaGaAGQBWerXc32Byxx57bIJZmhFSBkczw0yz5nCDl4CMkGbw/phjjmntqWbqqvYNyjJSulQ0O8mE1awnc67jj370o8RSn3bYG264IfbrGlPsGVC33nqrHax17hpXfIauiy++uCW/iAEqFA8tD2gvZbrSSisVWrZT7ajaXC0t6HPaE0myDNvvfve7Pm+Ja3oIXn311eMwCvv4448n/JgfdZShka1j0TZe/Mq0xVdccUXrQ4ebb745jq5p3Ykj5gQCEIAABCAAAQhAAAIQgAAE+o4ABqi+K3IyHIgABqhAIIuKqcsApaX2zICljpo9VNW5A4lZBih7UFPxb7HFFt7oXZlZBqjXXnstkacNN9zQK7PoRRnp1llnnVjmpptuGunaYHLaH8YevNayVpo94Lo99tijxUFGKHfJM9cvv3ubgL2H0nPPPReNGTMmrgOqq3fddVfhDNr1XPuOyYBttztp+w9lGaC0r9xZZ53V+tN+Sq4rYoCaPXt2K/ywYcPc4G2/Q/HQHnh23rNm+rQlIuPCuHHjEnJlVC7ijj/++ES4U045xRusjjK0IyraxssQaPOTLpV1TetO2fThHwIQgAAEIAABCEAAAhCAAAQGDwEMUIOnLMlJswQwQDXLuzUTQV+/h3Yy5NiDee4yV53E5w4kZhmgnnrqqUT8IWZAabDcztOvf/3rTrIRh9Eyc7a8EIxi4V10cu655ybyeeqppyZSZxsrjzzyyMQ9fgw+AmZfuHXXXbeVuQULFkTLLrtsrCNajq6os40XV155ZTRlypR4Jp3qlgzsWmbOdVkGKNev+7uIAcoNk/U7FA/X4BOqPTniiCPishHTJ598Mis78T3Fb7dvW265ZXzPPqm7DN1+I+sjg/XWWy9Os4zh999/v53UyuehdadyghAAAQhAAAIQgAAEIAABCEAAAj1LAANUzxYdCR9gAhigGi6AumZADRkyJB7I0yDk3LlzK+fMHUjMMkBpHyV78HP48OHe+F2ZWYOT119/fUJmWvwyfi2//PLxX9qX/zajDTbYIPryyy+9aez1i1rCa4011ojZSeeMPmjZq4033rh1b9VVV23
tv9Pr+SX96QRef/31aOmll26V9xlnnBF73HHHHWP9WG211SLpRRFnGy+0T5GcazDxGZ+7xQAVksdGG20UMwzV5oqnbZSR3Pfff79I0bSWVLXbYC2v6HN1l2GZNv7QQw9NMFRblbZ0oC8vedcwQOUR4j4EIAABCEAAAhCAAAQgAAEIFCWAAaooKfxBIEkAA1SSR+2/6jJArbjiivFA3iqrrBIkH+5AYpoBSJFp0Nke/JwwYYI3Da7MNAOUBsR32GGHWKYGU337VCmS8ePHx/6UBt+snmnTpiX8nHPOOa30KT1jx46NRo4cGR1++OGR9si59NJLvbM4vBnq0otXX311Ir9meTAtm2bKadSoUV2aepIVisBpp50Wl7eWtDQ3zq9uAAAgAElEQVTONRhrObkizjZeXHjhha0g2odHRiyjVzq6+zRp3x5z390DKi/ekEaEkDzspS5lzA3l1EcYVpoVVNQ4qPjtfkAyfPt71V2GRdt4pVcGQbEz+dVRedbefEUNb1ncQ+pOVjzcgwAEIAABCEAAAhCAAAQgAIHBTwAD1OAvY3JYDwEMUPVwTZVahwHqs88+Swzgrb322qnxl7nhDiSmGaA0EK1BQzOIuO2226bureTKTDNA2QPFkjt06NDUpBcxQMmoZNKno/a9+b//+7/oK1/5SuK68aPrWsquV532trJnUsiAJyPc6quv3srvJptsUmpgu1c59HO6NcNvrbXWapW3uxybu7/acccdVwiVbbyQ0da4a6+9NlGP1l9//UgPZsbZy18OlAEqJI+62txFixYlOJY1bH33u99NhJ81a5YpgvhYdxkWbeNNgh577LHEkpCmDVZfeeaZZ3qNaCZs3hEDVB4h7kMAAhCAAAQgAAEIQAACEIBAUQIYoIqSwh8EkgQwQCV51P6rDgOU9lwxg3Y6br755kHy4Q4kGgOUvsh/4403onHjxkWHHHJIIm7NhFC4NOfKPPDAA1v+Z8yY0drrRDMlNFhu8qOl9c4+++w0ca3rRQxQe+65ZyxTspdZZpnWbxlmtCfMzjvvnDDYmPhHjx6dGXc335SRzeRDR7MUm85DLnPVzQz6OW2ahWTK3yyXZ/NYZ5114vsyVhZxtvHi/PPPj4PIuPOTn/wklqd4R4wYEd+//fbb43sDZYAKyaOuNteV++1vfztmWORkww03jDmrDCZPntwWrO4ydNv4tI8M7ISpPTLGUqOz5iij+R133GF7L3yOAaowKjxCAAIQgAAEIAABCEAAAhCAQA4BDFA5gLgNgRQCGKBSwNR1uQ4DlIw3ZrBOxy222CJI8t2BRMn++te/HsloY8dnzn/+859HTz/9dGbcPpkmvHs86KCDMo1ZJiIZw7SknvmTQcp1MsrZ8pUHzXCaP39+wqsGypdbbrnY77LLLhs9//zzCT+99GOrrbaK82Ly//vf/76XskBaOySw//77t8pesxPfe++9NilHH310QjfsJfraPP/ngm28cPd5e/bZZxMzIdXWqb7LjRkzJo5roAxQIXlo6ThTn3TMa3PnzZvX2m9NyxW6fwsXLoxxuzPT1lxzzfhekZNNN900ka7p06e3Bau7DN02vogBSonUcoGa6Sq9sdmac+01VtZhgCpLDP8QgAAEIAABCEAAAhCAAAQgkEYAA1QaGa5DIJsABqhsPsHv1mGA0sCdGaTTsehshrzMuQOJki3jjJan0wwifbG+/fbbRwcffHDhGTWuzG9+85utGU/6ct8dePzLX/4SffLJJ3nJLHTf/rpeA/IvvPBCarjzzjsvwVPLQPWqe/TRRxN5WWGFFWKjQK/miXTnE1iwYEGk2YOqs7/61a+8AewZQfJnz1jyBoiiyDZeDBs2rM3bYYcdltC3fffdt+Xn7rvvjq8PhAEqNA+1S2Jm/vLa3G984xuxXxPGHO2ZZLNnz074U3tbxrnx+PZRqrsM3Ta+qAHK5FPGUu3jZ2apGk465s2GNTLMEQOUIcERAhCAAAQgAAEIQAACEIAABKoSwABVlSDh+5UABqiGS74OA5SysNJKK8UDl9/61reC5ModSDRL8FUR7sq0Byc1qKsBa3vAUbOqtJ9RVWcbtzbeeONMcR999FEkQ41Jx29+85tM/91+0x6U3mmnnbo9uaQvAAF7T6bvfOc70a677tr298tf/jLWcen6NttskxuzbbzwGQNk8Pja176WkDtx4sRo7Nix8bWBMEDVwWPllVeO87TGGmtkstMeWPaSh6Zt2WGHHSLNYDXu008/jWUaP/rAoIjTTCp7mU0Z2rU0ouvqLsOsNt5NS9bvqVOntmaWGQ7m+OCDD2YFS9zDAJXAwQ8IQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBou/roMUBtssEE8cKmBRw1kVnXuQGLdBiil97PPPovWXXfdOC8adNTSelWdbYTZdtttc8VtsskmcRo0e6qXnWaZmcFbDFC9XJLF0y7DrSnzoke1G74ZM3astvEibWbgqFGjEnFvtNFG0QMPPBBfGwgDVB083DY3b7bmiy++mDAQaYaPuwSoWNvGcpWdlhgt4l599dWYscJp7ySfq7sM3X7D/sjAl56sazK+2W2x8nXUUUdlBUncwwCVwMEPCEAAAhCAAAQgAAEIQAACEKhAAANUBXgE7WsCGKAaLv66DFC77LJLYvAxby+mItl2BxKbMEApXffff38iL1pK7O233y6S5FQ/2qPFDMRrQDzP7bbbbrF/zSrwzSTIk9Et9zFAdUtJNJMOex8hGVu1h1na39///vdYz1U/NGCf5YoYL7744otos802S8g95JBD4t9NG6Dq4uG2uZMmTcpC17r37W9/O+aw/vrre/2rzExbpaNmjxVxEyZMSIQzyx+6YesuQ7ffqGKAUtq1LKG972DeDFY7vxigbBqcQwACEIAABCAAAQhAAAIQgEAVAhigqtAjbD8TwADVcOnXZYDSfkn2oOVll11WOWfuQGJTBiglXDN17Pz88Y9/rJSfvfbaK5an2VB5TvvmmPi1pFgvOwxQvVx65dN+8sknx7p73XXXZQqwjRHS9z333LOw/7QZUBIgY4y9HJypSzo2bYCqi8cRRxwRc1a+Lrnkkkx2umnPmtpyyy29/o899tiE3AMOOMDrz714xRVXJMKNHj3a9dL6bZd5HWXo9htVDVBK9FZbbRXnTXsQFnUYoIqSwh8EIAABCEAAAhCAAAQgAAEI5BHAAJVHiPsQ8BPAAOXnUtvVugxQmuFgD/JqwC7PzZw5M1p11VUjfZU/Z86cNu/uQGKTBqhnn302kR8tD6YlrDp19qCuZOUtl/XjH/84jn/IkCGdRtsV4TBAdUUxNJIIzT7Snk9qC4osqadE2bNy1D4tWrQoNa1FjRcScPDBB8d1yG6bmjRA1cnjhRdeSORv8803T+Vmbmj2jmHxs5/9zFxOHCc9PSn2I78rrrhiZpmYwPbMKS3vl7acYt1l6PYbaQYozdLV0ojaBytL55Q/GeEMtx/+8Icmy7lHDFC5iPAAAQhAAAIQgAAEIAABCEAAAgUJYIAqCApvEHAIYIBygNT9sy
4DlNKtgTwzSKfj5MmTM7NzyimnxP6nT5/e5vfNN9+M70ueluuq6lyZRx99dKpIzcaw8/PLX/7S6/fDDz+MtO+M+dPAsOsefvjhhKwbb7zR9RL/fv3111uD9ybu4447Lr7XiyerrLJKnPftt9++F7NAmgsSuOeee+Ky1uB+EWcP7kvn//3vf6cGU5ti6sXpp5+e6k835s6dG6200kqxfxOurAHq2muvTcjQEp1FXd08VJ9MvnTMW/rUrotpBijlbcMNN0zITZvNZDjYRiWlI6utrrsM3Tb+mGOOMclMHMePHx/n8bbbbkvcc3/suOOOsd+9997bvZ36u4rupArlBgQgAAEIQAACEIAABCAAAQj0JQEMUH1Z7GQ6AAEMUAEglhFRpwFKA7v2YKiWe/r000+9yZs1a1Zis3ufAcod1NSMhqrOlXnooYemilSatNySnafbb7+9zb89kCm/Rx55ZJsfXbBnCGj5q88//9zrT+FNnJpF8vLLL3v99cJFzQBRHkx+iux/1Qv5Io1+Attss01c1kXr6/nnnx+HkZ789re/9QuPouiRRx6J/WpWYZ679NJLY/9GB8saoEaOHJmQccstt+RFG9+vm8e9996bSJtm5yxYsCCO3z6ZMmVKwm+WAeqhhx5KLGG4wgorRJoZ5XOazWkv7bfGGmtEH3/8sc9r61rdZei28Ycddpg3LXa7vfXWW0d6kPe5V199NbEHlPS1qKuiO0XjwB8EIAABCEAAAhCAAAQgAAEI9AcBDFD9Uc7kMjwBDFDhmWZKrNMA9eWXX0b77bdfYpBTA7AawLOdvtJfffXVE/58BijtH2MGjXX83ve+l2q0seVnnbsyN9tssyzv0Z/+9KdEGrR/02uvvZYIYw9kKp1pBigN6tr52WOPPRJLP8lYc/zxxyf8nHrqqYm4eu3HhAkTEvmRMertt9/utWyQ3gIENKvP1u/11luvQKioNVvGDqfl23yzCCXMHtD/xS9+kStfRl4ZPW35ZQ1Qu+66ayJ80f3gmuChNvd3v/tdIn1bbLFFJGOT7aZOnRppKU+bQ5YBSmFPO+20hBFKe9GNHTs28VGBjOM//elPY7krr7xy9MQTT9hRt53XXYZuGy8ePue225rB6y4bKI62/ug87aMKXxyd6o5PFtcgAAEIQAACEIAABCAAAQhAoL8JYIDq7/In950TwADVObuOQtZpgDIJuuCCCxKzXjSLSEs6abko1/BkBkRtA9TChQujk08+OdJgprlvjlrWa8yYMSaqwscsmTIEaQ8rn9PeVBp4NfHrqDwMGzYskkw5dyAzzQAlv5pBpT1VjDzNLNhuu+1afyobc33ppZduDcynzZLypbWbrs2bN6/FyF7yy+TtBz/4QXTJJZdUNiZ2U377OS3aP0fL4bmzBVXeu+yyS6TlJ31u2rRp0d/+9jdvONWxa665Jg720UcfRZrxZNcRyd93330jzXjJco8//nhcrxSmqAHq0Ucfjfbaa69EWKPDBx54YPTcc895o22Chxux2iN7pqHOZQDceeedWwYU+57JQ54BSnGMGzcuWm211RIMvvrVr0YKu/766yfiXGuttSKVaZqruwzLtvFuuy0uanfVPu20006tWV02N7VlM2bMSMte4nqnupMQwg8IQAACEIAABCAAAQhAAAIQgIBFAAOUBYNTCJQggAGqBKwQXpswQCmdGnTeZ599onXWWSfxFb0Z/NRRhh3traFN4mWwME5GHw0E2n7t80022cR4LXzMkymDV5q7/vrr29KigUnztbw7kJllgFIcb7zxRiSjl5aqsvOlcw3uipuWqepl5y735eZTs1yMAa+X80nao+jdd99t02O7vNP2zLFnwtj+zfnaa68d49WsQ3PdPQ4fPjz2l3ay//77x+Efe+yxNG+J67/+9a/jMG6c+v3nP/854d/8aIKHics+yrCm5QtlQFH7aRtPll122Va7orzvvvvurXxpdmoRp/xoqVLN/pEc5d3IVjyaRarZXjK8Zbm6y7BsG6+ZmCeddFLL2GR/7KA82f3P97///WjEiBHR/Pnzs7KXuNep7iSE8AMCEIAABCAAAQhAAAIQgAAEIGARwABlweAUAiUIYIAqASuE16YMUHZatSfJk08+GU2cOLE100hfkRvjje2vH88/+OCDSIPCWrZKhqm8Qdx+ZESeIVCVgIyd77zzTjR79uyqonoivIwlWnZv0qRJrXxreU/j1MaovemkrdHDrmacvfjii62lSDuRYdJR9lh3Gc6dO7c1g0t91eTJk1v5w0hetpTwDwEIQAACEIAABCAAAQhAAAJ1EcAAVRdZ5A52AhigGi7hgTBANZxFooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMGgIYIAaNEVJRhomgAGqYeAYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPw77bRTtOeee0ZHHnkkfzBAB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXSgy3XgqKOOirbddtvoiCOOaHg0megg0NsEMEA1XH6aATVkyJBoqaWW4g8G6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOdLkOLLPMMq0y2m233RoeTSY6CPQ2AQxQDZffj370o+h//ud/otdee40/GKAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOtDlOjBt2rRorbXWivbff/+GR5OJDgK9TQADVMPlxx5QDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQiwB1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAA
…)
![image.png](data:image/png;base64,…)
/++MIkDBg2TxY0HyWBw8guM6F5G68D30NZt22UB69Onr9GMAU5kAJu5NZcyx6aOiBQhMogJiUJUtqz/yHajho2QMH4iXLxs0rBevHwFUSJGQs7/5cCFy1dhW9sWoa1CGRodam/ZN608FP7J5GZMnxFnzNYCTqCxosVAjv/lwJt3H8RiwYmZTD7bjRtnYlI4sZQo4YtH0yYOcpxxJes3bpbtvn36yRg5gbMtrRnv3n9ElEhRJPZHDgJo0tgBiRMmxm2z5mL1mnVS//TpM6LlihYl+meWLLbt3KUbChYsAiYPILPh7OyGOLHjGkwgffgppJBGjBglwiu3ubiFDxdRrB30qeXYWjRvJfjSojJv/kIMGjxM2hFPCnHUEvFe1ahVV8qfv3gh5z579pxY7CZ5TZNyCpSlS5eT7QULF0GVP3/5CsmTpxJLCBfOeQtME/Eoz9Ho3t3E6Esji3/0K58yxdQvJ1cKpP/7J4cERlNTXLhIMUPD0659J4NJbNmqNQrkL+RHs8ZuabXq7zEE1NyT2rRtL8wpt2/cuIX8+Qpi5KjRWLZilYzpzRuThaV6tZqC0cRJXtJu9OixKFioCB49eYpOnbogU8bMmDl7nuB68+Zt2aZQSuICkCdvAdmm+xjvLYlMD5kFupgNHe4pZTRtN27qJEIdg+3jxoknliYepDWFPvaKSpYsg7JlTThz0UqSOKks+Dw+ePAQ1LW1U1X9/Pb3GAT33qbnkgcoENe1rWfU4RidnFyNfcsNWllcm7UytKV0vbLOlAVlypSTuWf+Am/kzJlHtGpczAYMGmpoecmAcYH1T2TcYsWIKUoOHiMDmCxpCmzZuhUbN25CxPARhTkgk00tPYPya9auazAR9g0aIkf2XKA7pX/iM1OpSg0s9Da5uFIRUb1aDanWpUs3uafNm7eS/a1btyNB/ERiUaEVq0LFquISyoMUTBInSiJa8A0bNyNGtOjIn7+gMIA8njJFKtFUc9tzzHgRkKVTQGIivM2WD2pwU6ZIjZEjRslhcVWywJ7uTP9k+5/x3JLpoAsSafz4iShT1uYzZpLHKKAMGWJ6X6dNm4F4cROAaw3vAQXdE2aXuRkzZ4mgQQ1pV3X9LVpL/yNGesK+UVPZpjVooPn9ZwEFS7pGBUTjx01A2FChxZpDBQAFzeNmwVjVJ8N07vxFfPwX6N9/gODO9cQ/UXjlXMS/4kWLY6WFNd9/3UmTvZAyZWocPmLS3NeubStMFevR2tCmXUejCRlOpQwyCs3KKF7nqdMmC1fPXn2EMb/34CGqVa+J6NFiGHMpBeTyNhVFEOO7SEFz+87d0h0FLgo4VAKR0qRJJ+sTXZmpge/U2ddVLHXqNGJ9JdOaNk06EUI55zKRQ4EChXHHrKhr16GzKEbYX9u27UQpwG1a5MmcPn9uissiZmFChcHOnSarqsegIejRw13GQatY2rQZcOGiX0sUn4tqVaojQrjwRnC1nV19FCxU1NC8U8Gj1vIZs2ZjlKdJoObakTRJcmFayYhyPqBlllSpcjXDEubm6oYY0WKCAgrn3VJlyhvzRrESpcEkAiRnZ1e4uTaT7UaNGsu9p1KKRGFNzXm0LsaLG1+sJjzGd2qemZlv0tRRzst4QeuMmRAlUmRsM1uey5WzEeUp15zXb94iQ0ZrwwI9cdIUcF2iJZDKNEvPBBmAeY2lcMh7xPvr4Ogs8/IkL5N7OHmevn1MQqVqo34puDNJhyIqaPv1G6B2QWXp0CHDwNinOrb1ZI5h/AyJSplChYqKYtU07szg+0vinFCjZh3Z5j/yAIyhJY/HuCgK33Pne4t3iP+4UsayUFhXFCtmHLEOv3v3Dl269TS8CigMUiil8ErrDGMBlTLsjM9Z4RPpakeigKnccJu3aPNVRbU6t/79PgR+WgDp5d4XJUuUwugxY9G3b3/RiviPPaCmzcSsmh5EDpELKRcNmui2btshQop9w8Yyek6unDgYV0Li4mFtnQWHzOZ2z9FjxF+PfZAY9D5x4mTZ5r/GjR3EIsBtujM1dXCRBZDmaEreXFSoaebCSE1mYFS3rp24GgRWx/+xbdt3yvX2NGubeXzi5ClSNs3sQ0ppP2yYsKJp5XG673DSX7hoKdKlToOM6TMZiwg1lxHDR0D3bqZJn9aOmNGiGwxK/fr2iB83vsEYVapYGalSpDYEEE6WcWLGhl29+n6GSiY5QrgIhsm3fr36iBopqsGENLRvhNgxYkobuu/wHnoMGCj71JTTzWPXnv3o2KkLIoQNBze3Fkb/HFOSRIlFkGAhrzdZkqRwcW0GMkixY8XBgIEmQcJoZGaouQCoQGUKE7ROKOIk0rt3H9lt1qK1MCXqGIUhuiuQccuZK6+4T6ljAwcPRYUKldC2bQe4ujYXK9PyFaskFoDPIZ+kzp27iCVHTUK0clAzPHfufLEcsS8y59n/l1208syoFDZ0WPTra2J+ixUrifLlyuPpM5MbiDq35S/bjDcLfCwnU+DhYcKUJnVqPhmwS8bOrn5DkAlhTBE1+Ozfv4sWJ3dq8Nq374R27ToiQ/qM4P1XxMma942afkuikEQNqSJaBvMXKIQbN27Ie1mlak15JnmcDDSFA9s69eDm2lwYKQoHT549F+ErRtQYcHc33RPWp0WuqYMzChcqYmSTUu8qF7t580wuPm7NWoJWS0V79uxDgQKF8PSpKYi5VOlyOGDW0k+e5CXaP1VX/dLdknFoU6f4WinmzZ0vVgflZkhXlKZNnFQTP7+s4+DkapjaGfhKTfCOXSZGjMJBqVJlpc3KlauRJ08+dOjYWawbqVKmBp/zgIjaNEt3vPLlKsCunkmAohWObgaKuOA3a+b77ly+fBVcRGeZGXW6AHJOpPBIxvfixcvCNJJxy/5PduTJnVe6opUrZ448oGZVUYWKlbF9xw45H7ctiZafNm3aShHnBg+PwcZhJs1wcTExUaPHjgOZQBK162QOyKgp4jvBeDnSrFmzUbOmaZv7Bw4eRNlyNsLQ0e2sUWMHmZN5jBaEAwcPyzVJY4t/tWvZGoyNzxkfib9Qh0uVLI1yZcuhVas2sClfQd5BusVSofK//+UwLL+0KNB9jDRlylRZY6idp1UzdszYqFPHVnXp55cKmTChQoMKGCp3+JweNrtMqoq0UG3ZanKn2bTZZPHOmzuvCCSqjvrds28/WrVsLe8hGWsVjK6Oq18yp61am+4Hy5YsXS7zETXjpHp2DbHKHGviPxZF9UFlCdc49sN5mcq/4sVKyuEVq1ajSvVahiWGLnDW1tnElYYV2nfsIpY5btMCSR95RYxl8Rw1WnaptChbtjwaN2oq94Brj7IGFCpYCP3NDCndhXmvLl26YsxnTFJDi2/xYiVAqwSJLrL0RlBuNtdv3BIFx/Zt2+U57+HeT2JzWJfzCBUZSliRDsz/6BZLC4Ei27p2qFKlmtoVZeMSs8sN3eO4HlAh1LBhY2GU53svlnmdz74ijumcOQFDfbsGcHR0kUNkoGmNp1WIRAaWRFfRVClSypzEfSawyZI5m5F5j8pVehMoatmyDUaYraNr165HmjTp
QQUEkz68MVvM6EJKRl8R1whaKElc6zhnbtpk4pU8PceIULNqtUmzr9pY/tJF2tHZDfvMsYi0ynJMao5u3bqNKJMt26htCpFTp/u61nI9s3Q159quBBK6Q9F6o6hunbqobLb4UkCh4m3aNJMyjpbMcRNMWLI+504qNjhnULHK9Yjvd0A0eOgItGjVxjhEt1t6AXBuJ49HgZDW5nx5C8h9VkJ14yZO8o6xIV3fqKg+bXZNHeAxSCzjE8abEsTQw0HTr0fgpwUQuvL0MWvFOTwyPBcumNx0+PCQnjx9LowR3WIsgz9pXqSGm+bkjh07oYn5paJvdPGSZQzN6tJly0X7cvCgyd+XLzGle0XUqo21cDWgm4l68Cn8UANLom9v+XI2olmhiwzjBm7cuBngAqj65mLKGJXvIWUBoXZS0SSzAEI/fdLgwcMQKUIkCdhXdahdHzNuoowxfboMxqRLN7HIESKhfdt2UpWLbvw4cUUbyALGqSSKn9DwGa5gU0lenus3TelGaU2IETU6nP1lWOGiETN6TGzfYWK2GtS3R9RIUXDrjsm1jH7NFHRevnojmloysgMHDlLDlQWDWiS6lyRNnETc4NRBTupcmBjjQ6KwQ3cuavlWrFglJl7llqHa8JcTQIOGTSR2gfv0C2ZmIrUIOzo6wb2XybWNpla6ylgSZVIKwEWKlhTXIR6j33/3Xn3EVMz7vmjRYll0VWY0+jkPHT5KfGe5MCrhh25jZFJ43X1698Xrt+8kQNXV2VWCSukvS79Y5XdPJp4awESJkggjZjkutU2tzqhRJusAy3gNA8wCCCda+kJTADl4+Cgo3FNjThM3hXAypiqOR/U3oL8HalSvJc80Y5W4EN2+c9dYTLhQ5sqRG2lSpzXMy2w7YMAgcdlS8VLEt2DBwjh//ry4Srk1a2VkpaGgSWaVftTz5y3A8mXLJQ5K+S7TEhXKKrRYQnluErVMRYsUEzc+tlNEgUrFzpQtV9Fwm+Nx+q3T/e7e/fsiBNNNY4dZC8qFnlZC/0QFBhkZ3lNFZAJsKlYVDTXLGFjOhTIgunTlqmjC6PNNogBStFhJIwWot/diQ0lB4ZtMCN9vno/+3NTyUhi3JPqr16hVx/Av5rGmTR1Rz7Ye3r77INdh6XfO+IqOHToZXTx68gz58xeS2DO6M9ItgVYduhUp6t7DXbTVLVu0NMZ35MgR0WKTqVdE17ZevfuB1hFmx7MkCkJkNkg8RhcuRWQiODeT6HrFBZnEhZpaRC7wisZPnIz6DUzKI/p6s19FZJhpPaQfOGO52rbvjIuXvx7gT9cICjUHDhzAmLHjUKlydWnP2LzSpcti9OjR4PvH4G4qDKhAWL58pViOlI88lR606pLoVtWoYWOJZ2DMGtcFas0DolEjPeWdp1sSiVZcKsksiTEuZJrorsd3OGO6DIgZNToWLfaNC1u6bKWfWBMKMRXKV0Dc2HHx6LFvpjDVLy0oSuhjGa0lZHL5PpD6DxgozzK3p02fJRZ9OWDxj+tgpUpVjDVuw4aNhtsU3U6rVq9lZHyjS3OBgkUlPpBdkClV8QdPnjwBBVb66nM+ZEwh4xNJ9FyggEMcKezTUsE1hjjly1cAdOslXec6XqwkLl++LFaO3n0GyHzGeYqxFLTa0l+fFrhyNpVw957Jz5/Wm2zZsmP3rt2iOOzctedn1h6LEAs5F/9NnTrNcGnkPhUmtWuZtOp0IaXrKJPkkMgT0ALmvdBb3jPGmjKGiYx4L4t1Wyqb/9WrZydubNylSxstKnQxJDHuoUPHLuKRUc+2LipXNgk+XFsoFCtLBC1SdEFTxHWSQoMieiZw7U+RLAXWrjNZlqnkaGCRRIPvE4USEplsKo4YI6SoVy93RI0SHVQgKvcxdYy/VHbSMsgEOSQqsWjpUZkAmzVrjhYtTFZUy3bcpmsa3e8UUaGorOgso2JXJdqhcom8mSLbOnWNOYjWG1qWGYtEops+XbwUHT9xUlzgqHzhGkBlI5VyARHXSVo6/NPUaTNEAF63dp3MxVxLqeQjERcqpinkk6hQqVe/oR9FA68jTOiw4pURWMyYdKD//RACPy2AcFFkqlr/RM01BQy6BFF7dvnKNZDB5sOiiIwD3UxI1JzxJSDxZpcpVwG0JJD4S5Ph2XOmVJDMJuJioW2n1qN9e1/zNF23PAaatHlXrl5DqzamoFL6liphRDoG5KEO7BshTM+2fr3fAGfV9ku/NM2TaWXaT0V8GVk22tNU1q1rd0SMEAnXzGlqe/ToiYjhIuDVm3doYFcfKZOnxE0zM8d0thRAOnYwXUfNGrUQP2481TWcHF2kb5M9CGICp0vUCgtzf6zoMdHGzGyohkuWMYOLFWjdIBUuWFgmAjW5N2/WApHCR5RjdBdhXWXBefrsGWLHjIMO7drj8bPnCBs6jGjcVN9MzZw0cVIjsN3La5q0V/eU9ejj6v/F5sTUsk0HYaJZZ/uOXRKbovqlVm/o0OGyS6aKrjKKyPjt3X9QNCWMXThsTuNIzbHHoKHy/Km6/KXgwEDDIkVKCAPDMjJ6apKiXz6JjDZNsucvXsagwUPEgiIHzP9oyuZCoOILOKnSnSmgzGt29RoI46TaUxtLBpnERb9q1Rp4+eKlbPO5Vb69PM7+GOhnSXR/cmtucjlR5U+fmlwZ+Dz06echMTh8hqtUq2kEPFIA4UJHxoFEpjp/gcLC1FAB0N4iFuXatWuoULGKH396aqM2bt4qvuF83kmMHWHMDbHYtduUL5/aNbpvMMsPidqoTRtN+f2pLbO0QFLIsjSl0y3p+AmTNp8WgyZNHKUP//9o8SxfvqJR7DVlKui3TmsBaeHCxeJ7blSw2KC21c6+sYEr72X5CpWNd2L9xk0GM0Gmk1piS2LQrv9kFHR/4HOpmBPWJ7OiXO+o1FD553mMjFCqlGkMZoHBxcVL0BJ2U5gCBlRTCKTLDRk2xo3Qj5s01WuqBIZymwkL6JOtAmVZRusQGV+T+1t8g5FmYogixUoa11mubHm4OPu6qVHDqlwdOWermCkyXoUKF/Uz59PtT823oz3HoHGTpuIawiBfWrAoBCnifbJMZUrlQEABs1w3hgwbIfFsffoNEJcO9sH3oUzZCqD1UhGvjckWGKBK7T9jPUiTJk0x7h2xV3Fvqh2DygMiBsJyrqPbrCKuZYr4Xu07cFisQbw+Cs60UseKHkO8AVQ9Wh2ZuteS6HoUL46vtdryGF2iKlc2xQSxnLFpZPaZwIXE+Y3WOFpMOXf5j4NgHTKitCBbkkqiQUaRsXJKmUMGiy5YKq6mU9ceIONH4jkpdLXr0Al29e39CNOMFWjs713kPP3w0ROUKl1WBEb2wTWiSuXqYhHl2Nu06+TnXWGyCK69GzZuEvcbPluPHj3Cnbv3UbBAYZw8aRqLq1tLPy6t1GArodPyOmkBoaVGkaODMxqZXfBYRkXWrl0mty4y0owBsSS6O42fYIqDsCxXGfJaNG8p3hM8xoQwdN8mU0sif0JlGalzpy4iJHN7z569okxggD9p6LCRhoss9ynIKQWnj4+PoUQcOnSYxIe
wDi0ylhZSrhG0wpL4HJQtawO6DZOUoEzFDAUdS6uCVBCF8FO0bN1e5mmW7d67T9wMFf9Ai6alUlm142+Tpk5YsdIkxFGpQFdJS+VvlcrVxILDumvXb4SDo++c4uLkIjEmPMZzFShQBOvMQhaVS7lz5jFORTe8woVNgtoZHx+06+B33jUqcn73XoRsWf+H5+Zgfx6jO/XmzVtFeWOpIKJbskpq06FTNyN+jIokrrfK7ZWutwoPZq5r5Y93sjy/3v5xBH5YAOFNZWBdzuw5UahgYVBbNHXqdHEbYBAoffm5uOTJm18CMTlEarzjxI6HihUqioaTky21NdSs8BsZXDxo+uViFjN6LHTr1lMWHsZScEGYNMlLtFYMRkuaJJnEhrBfBqnypaQ0y+xLFGqYDYMaLvpeUiNNf2MSg4kZ7DlixEjRvm7bscuY4KWC+R/HytgHxjHQDcvymxr0ORw+YiT4TQf/tHzFStF2crxFixTF0mUrZNGcMXOOLC70ZaUGnBoOB7Nlhn3Qj5ttBg0ZLvW4PX2GyT+S/trcr1a1ukwwdGXiPlMMk6jNixolGmxt64ofKd1wKIDMN8ck0ErA+mlSpvbjnsHJi0GEpUqWwsiRnpIiU0Wc6wAAIABJREFUkloDEk3KyRKbzrNu4yb5rgj7KFGspCwo9J3nPgMwR40ej5jRYyBr5myG3yQnMdMYvUzudvfuIXPmLIIn3X94bdQQMghOES0OdANJlDAxRo8ZL64UdJmKFCmKZIDh5JAubXrkzZNPtGeMQ/knW3bUqllbtCdcBOm7y+sNFza8+JCqzF4089esaSv+2ox32LBxiwg/tC6kS5dRAufoP0sfbFp5yOz17jsAAwcOlmd64JARYr3jc09NCZkF3kcKNhTO6Ofs1ryVxDXwGSYmXHT9EzO+MOCZwbiMlaBlgVpaPkvMYhY2TDgMHmyKXxo3foIwb4xn4jUxtoqB9/5p7PgJaOrojBEjR4lvPzWIdBGha5ZigMjQJUuSHAniJRBtHZlp+sH3HzAIY8eOE8Gc2biY0YTaNcZRzZ4z37g/FBwpHJBZ5vPIgEgG1ZKJIbNPoYsB18wvz4/OkQmiX+9krymiGXz27JloUpl8gZpEBuFevXZNhCBaAKZMnQYG+23dvlMWCDK8fKYZc0TNF2M6yLipBdYSAzJ/jEmhNm7yZC/5PtGyZSbNFjPokbnOlNFa4iZUliTVntfLAMn6DexBYaR/Pw9EjhQFvfv0k+vr2rW73BMuciTGFdAlh/MHkxwcOXbcsDapPl+8fC1zBpk0Pl+MPyETTSKzFS5cBHFxUMIln3smArCxqYzhw0dIXMnceaZvRqg+1S/dJFRWKjLhZACiRokqgjoZb8Z5UfinpZUBwEwQoIjJLzjHUogm4zN4iEmQ53Fqiukuw/mRzzwTN1BTSuLzTPzojka8TC6xWdGv/wBwvmDyDAbFkhgMmipVGglSv0Y8+w9ApIiRZV7icbplUCDt27efxCKQSbHMTCadSDxRd1D5M3P2HHn/GAeiMrwxaJ7uQZyDqT3mHE+G1L2Xu8w5FCBo+aPW2MoqlMQcETcynXQT5fzDeLB16/1+6I4MFYOwbWwqSD+0tKhYGzUuxp4wCQExtswktnL1WkQIG17a0e2HAnrXrj1EqbZt+y5xER42bAScXZvLvVL9Wf7S6kNBn88ex0grpIrtUvWcnVxFiOAz8yWaNm2mMIqcD+iuvGu3KfEC/e3p9jJ67HhR4jAInRYkEufj5MlSihKJ7witI3QbpvsiBZdZs+cYbnd0saRbLL+xxf45l7CMColwYcKhUIFCss8MgmHChBM3Np6DzxHdooYMHSbzGV0IiTmxypYlG+iOdPzkabHCcO2guy2fN2YiouBFl2b2QeWGf0sw1wbGG0WNHFWehctXr0psKPkEJmNhJk7GXjWo30Ayc1HjTaGB56B3Au+78s6gZpwpmEePHiMJAZh5kfc6UYKEyJkjlygKqJyi+y2tmuQzaO11cnQSrw1+8yVt6rRiMXZ1cUMoPoMrV4n7LL0VwoUNJ8HxDAJPliSZuCUxWJxZJ23rNhCrE+cWWtMYu8PYUM7btC5zbqXXAl3rbty8LRkJiVWXLl3l+1JcN6mko1DTb8Agw8qhnhW+B1xfkiZOJoIDlWodO3aWd5RKqkdPniBDuvSSdEK5lam2/GV8G5UajKvjHE43Ue7zPeSnFxhXli5tOjDQnFaU6NFjilBLpVTcWLElluPmrTvYuWsvwliFERdKrn18fmhZJU/IzHe0aHBdpbBM63WG9JlEgWgpTKhxsYxWV1rfqPQlz8XkDSQqq+nex3tJHocf2uR95tzJZDxUiEkSk/ETkSRJMomXpEDO+ZjKpsWLlkhSE+JJ7LgO+8/4qMahf78fgR8WQKiFoFtNyxatRFpm8DljK8gEtGnTXrIb8MXli6wmcWoS6HNJ1x9qWUn0XWcwJ/1oqUFgFhD21aVzF9G6UavC+A4GIjGglYHZDHhnkBcfLmoE+w0YKBMFg20dnUw+muybExv9TZs0birxKQoe9kPzMs2FKthMHVO/ZFzJgDJzFK/H0q2B10PTe0BfS6YfL90tyJxSMKL2kOkEVTYKMsnM7uNfw8APQLk2ay6Bl5evXgctJEoYoIa8Tdt2IuRxUejevYcEXCpTN8dMoYgLH7P+0IzOAFRFNN2zPy6a/oMgqXVidg+aeZX5ne2oVWQ5X2imjSQjS3cfxk+cN2u8ySTSpYgmdGr/WZ8xNyRqI3k+ajLUpEHtMl94PhNDhpo0uGqM/GXCAVrTHB2c4Ok5Fpu37hC3kKYOjpJVipp2N7dm4ndMRpV0/fpN1KltK5ou9ZFH4ta6VRs5l2WgJp9HamPt7RvLmNW5OUaawhlcR5cDPn87zRp8XiMnN2ZrUUTGkQJ2pcpVDSGRghQZcmqmmP2DvsMBEd0Im7doJRmDli1fhbZt2sk7xIA3fq+iSVMHCVRUwguDQ/msduve009KSP99U8NboVIVyTrFY3RDIQNGJQGJi0nbNu1BFzYuBlyAqeVes3aDvH8UnEl8prlw8P1isK/SnvIYLWX8IBuDKC2tO1yMmUWFWc5IbEPhlswL41jUO0YLKP3quUAqwZDaY7plUbOmnh1awaidpEae/sV8bphNy8XF1cBbTmTxj4waGX2eb7dFimsqM9gPXTx79e6LuxZpeFVzuu7QesM+GMfWzM3kVsCAfCouGjdxEMFD1We2NZsKlSSuSZVZ/tK6QCUImbp+/Twk0YA6TqGM95huopYY8ri7e1+512TiAyMml6CQ6N67r2Racu/dR+ZOugPyuV+7bqPMo0r4tOyL74ZNxcoSBK/KP338JHMWBU7OS8wmowIwWYfXQyaAiRwU8Z3nnMGFmvdHERkJvj8quJP3jxYRFfDLenwuyLzSdelLX5amqyQVP5xPGQPFYH8KRyrFMudjZqnhc6fSa/bs2Rsurm6CxYOHj4XhcHBw9OPm16fvAHlvKUj6J1o5qN1v2bK1xDvQAkyXGTV/sf7W7TvQvEVrGRtT5SoaOGgIunbthkGDBs
szzoBkWq5Jx46dkDmHQbnMFBUYMW08Y4I4ryrrpGV9atJp5VV+7JbHLLepwaUnAIUgMvkkrh3M2sd0v5zTlMaex4YMGY7GjZrIPMMUrSRixeeMAhGFQVrxFf501+GHh5mhkBmeSEx9S2aW8wcFQrovynzWp78Rn0glJYUsJqxQLrBsS6ZYJQWg4OLsQr/9roZVkEq2qtVqyPquLNNyUvM/Pq/M9ujk6CyxDazPdZuujQyspxsP5wF+oFe9d8y4R1dtXoey2LI7PsOMGWCWuWHmLIcbN20WvoWuifRaIL9CaxbXON4n8hqcbwcNHiqCGK2HnOdpvejQoSM4v/J6ab13cHQS7Bkb5OzkIpgpaxyVR1RcKBcrWkHJR7i6uokLLeNE2zC+p2NnsWxSodHU0eROptYcruO89/7Xel4bnwXOCw5NHYXZ3rp9F/js8j3h/eJ6yee+dau22BhAGmv2QWsMlRiKKBTy48tUUFBBxEQzFK44DzRu3ERieenaynMypuro8VNYu36DfKzZ1a2ZkXmTa4J9g0ZixaKgRWLMGXlKrvucQ5T1Qp1b/ZIP5Htbo2Zt4QtVOX/Jr9WsVUeeHaX04drGPqko5XtKQZp8Yu++/cXqTNfk9Rs2S5IbtTbS4kg3O8t50PI8evv7EfhhAeT7T/X7WtBdJKCX7fedUfesEQjeCOzYsUN8pIP3VQTN0b969Qat23aU+KA/OcKjR48aPtbfe14y8rt3m2LBvrftr65PQZBuOP6J6bdVinf/x/4r+zt27TFcZ773mqmwG2B2Tf5aW34hmkoESyJTSCWOJo2ARkAj8CsQCPYCyP37DyW/P1PzKp/vXwGM7kMjEJIR4EcC6U5g+bGvkHy9f/LaqP2llpcuo3+S6E7DgNeLl0xJQL713Ixj4bdS6L4WFIiaeeb7Z8we3bOYTYlaTGYIpBvEf42YgYcJLqhpnz13Aa5c/fIHML+EDd1KGCdJS5Kyrn6pLsv5TR1qlG/fvi2pten+QwGQFnNNGgGNgEbgVyAQ7AUQfriHMQHMnqOyOvwKYHQfGoGQigCz1TBANnWqNGKGD6nX+Teui+5m/CZJ9v/lEN9o/z78v2tMjHmjq0KaNGnl/HRJ+FZi9hnGZdAKojLgfWvb31GPwevTZ86SAFJ+fJQ+5j169TY+Lvk7zvkr+2SKUUu3rZ/tm/EL/DAv43IYE/AjRJfaHDlySXC3l78PBwfUH11f6CrLAGcmPejQsasfl6mA2ugyjYBGQCPwPQgEewGE/skMVDL9fh70+z1gBPe6tAAF5Dcc3K9Lj//XIkAmmUGbDNQLLAPcrz3rf6M3Mp4yF714KdYl/8GyvwsFvvu8lwwg5vm/x1LAzEPyLDx6HOAXun/XmL/WL+ORLl26LMHw/KhYcCEms+DXun8VMYvfxYuXwKQbP0qM2aCPPdOPMmXstxAzFBJ//qksdt/STtfRCGgENALfgkCwF0C+5SL/K3VoHrdv2FQC4v4r16yvUyOgEdAIBBUEGCTL9Kf8ONzXAsWDypj1ODQCGgGNwN9AQAsgfwP133TOOXPnI3z4iBhm/rLqbzqN7lYjoBHQCGgEAkBgy7YdCGsVGuFChZbvIARQRRdpBDQCGgGNAAAtgISgx6BWrTqSh75w4aJ4Y/HhrBB0ifpSNAIaAY1AkESALkv8KG70SJERI3IUiV1hbI4mjYBGQCOgEfgcAS2AfI5JsCxZtmIVYseIhQRx4iBi2HCYMDFoZLQJlmDqQWsENAIage9EgN8tih0jJhLFiyd/MaJEw3LzNzK+sytdXSOgEdAIhHgEtAASAm4xP4LHj7mFsbJC+tRpEDlsOBQpXAyPA/hSewi4XH0JGgGNgEYgyCFQtVp1RIsYCWlSpES6VKkRNWJElCpVJsiNUw9II6AR0AgEBQS0ABIU7sJPjoFfPo4cIRKSJUqM1MlTyF8Yq1CYNHnKT/asm2sENAIaAY3A1xBYt2ETEsVPgASx4yBtylSiCEqaMBHixIyNBQsXf625Pq4R0AhoBP5zCGgBJJjf8nfvP6BGzdqIEi68CB60gKRPk1Z8kPPnL4gnT58F8yvUw9cIaAQ0AkEbgbJlykn8XfJEicX6QSEkVbLkUla8WEnoWJCgff/06DQCGoE/j4AWQP485r/0jIuXLJNFLmr4CEiZNBkypUsvgkjsqNGkfOiwEb/0fLozjYBGQCOgEfBFYNLkqYgYPiJiRo2OZImSiACSIW1a0ALCmJBQVlYYNmKUbwO9pRHQCGgENAI6C1ZwfgYePX6C4sVLIWb0GMiQLoNYPuiClS51amRMlwFxY8WGtXVW3Lh5Kzhfph67RkAjoBEIsgisWr0OnqPHYsfO3Rg0eChiR4+B5EmSYuy4Cdi2fSdGjhqNNWvWBdnx64FpBDQCGoG/gYC2gPwN1H/ROZ88eYrlK1Zh34FDOHfhEura1kPoUKFgW6cufM5fxP4Dh7Bq9Vr5MvIvOqXuRiOgEdAIaAS+gMDho8cQIWw4xI8dF1euXvtCLV2sEdAIaAQ0AloACUHPQOtWbcTtys2teQi6Kn0pGgGNgEYgeCCwbdsORA4fAQnixsPRY8eDx6D1KDUCGgGNwF9AQAsgfwH033VKV9dmIoA4Ozr/rlPofjUCGgGNgEbgCwhs2bINkcNHFAHk8JGjX6ilizUCGgGNgEZACyAh6BlwcXEVAcRJCyAh6K7qS9EIaASCCwJbtmoBJLjcKz1OjYBG4O8ioAWQv4v/Lz27q4ubCCAOTR1/ab+6M42ARkAjoBH4OgImC0gEJIwbH4ePHPt6A11DI6AR0Aj8RxHQAkgIuvEuziYLiBZAQtBN1ZeiEdAIBBsElAVECyDB5pbpgWoENAJ/CQEtgPwl4H/HabUA8jtQ1X1qBDQCGoFvQ0BZQBiErmNAvg0zXUsjoBH4byKgBZAQdN+1C1YIupn6UjQCGoFgh8DmzVsRPnQY+QbTIR2EHuzunx6wRkAj8OcQ0ALIn8P6t5/JoamDxIA0tG/028+lT6AR0AhoBDQCfhFYu3a9zMFhQ4XB3n0H/B7UexoBjYBGQCNgIKAFEAOK4L9Rv769LH716tUP/hejr0AjoBHQCAQzBE6f8UGLlm3QsVNX/SHCYHbv9HA1AhqBP4uAFkD+LN6/9WwrV61B9x69sXrt+t96Ht25RkAjoBHQCGgENAIaAY2ARuBHEdACyDcid/q0D+bMnotZs2aDmU4siVqvTZu34PyFS3j37p3loV++fePGTVy6dPmH+331+jX2HzgoX+l9/fr1D/cTWENLDHzOnsMZn7N+qr979/634+TnhHpHI6AR+CMIvH7zBuvWb8DmzVtw6fIVrFu3Ht7ei8AvhP9p4rxz/sLF7zrty5ev8Pjxk8/aLFy4CLNnzcG8eQvw7Pnzz44Hp4KHDx/h7LkL+Pfff4PTsPVYNQIagRCGgBZAvuGGzl/gjdgx46B2LVs0bNgYAwcPxfgJkzFnzlxpvXrNOkSKGBmZrbPg3v0H39Djj1epVq0m0qbNiHfv3/9QJy9evkKlSlUQIXxE7N3/632UL1y6glOnf
Yyx/S9bdkSNHA0fPn40ytas24gL38kYGI31hkZAIxBkEXj6/DkGDh6GbFmyIV7suKhdpy5cXJsjcsQooIvox08/z/ROmTYTVMR8jTgfx4+bwM/c87U2K1auRn+PwUa1bdt3wK5+Q5QvXxH2DRrB0ckF6zdugYfHIMyZM8+oF5Q3zl24iJmzTWsVx9mufUfEi5sQd+7eM4a9dftO3Lj5dUyNBnpDI6AR0Aj8JAJaAPkKgBMmTJa4ikGDhvip2blLN8SIHgsvX76U8qKFiyJZ4qR4/fb3WkAOHz6KFStX4e1PWFo8BgyUa9qxc7efa/rZHWo8GzRsiqvXbxhd7dmzDwsWeuOjWQChm5ijsxtevX5j1NEbGgGNQMhCoGePXjLHbN+xSy5s2LARst+0iSNoZfhRmj9/IbJm/QcPHj78ahe7du+Bt/divP/gq/z4WqMNGzdjxao1Um3J0uUIbRUaDRr4TepBxj2zdVZUrFA58O6CiIWhWtUacHJyMcZ6+sxZLFu+Ek+fPZOy7dt3wto6q1isjEp6QyOgEdAI/GYEtAASCMAbN22RRbNKlaoB1lq4aImxmJYpXRYF8xfE3gOH4OjojClTpvlp8/zFS1nIihcviSNHTV/IvXv3ntRt2aIVLly8BE/PMWhg3wiv37zFzVt3YG/fGC2at8Kjx4+lry3btqN1m7bYuGkzPpgXVSoU+/bzQNWq1WFv3xC3bt02zkttXokSpdC2bQc8e/7CKKf2LnrU6Ni3/6BR5n+j/4CBKF68FA4cPCyHrl67Ltaf5s1a4MTJ0xgwYCCaubXAg4emsZ087YNy5Sogfpy4KFumHHbu2oMtW7ejQ8dOWLXatKBv275TtKIJ48VHj569sWzFari6usHBwVEWRJ7Iw2Mw6tSphxOnTvsfkt7XCGgEggkC7r16y9zJOYt09doN2Y8fNz5u372P6dNnwrauHXbv2SPHXVyaYey4CbJ9+85dNG3qhBo1amH8hElSxn/Llq9C5AgRkSBOPLRu2wE3bt6SY1euXgctw87ObkbdmbNmo0MH37ln67YdqFWrDkaPHouDh47IvDt16nSjPjdOnTqNUZ7jpOzl6zdImjgpUiZPiYBsNkeOHsfgocP9tO/QoTMKFy5qzHe0Unfp2h2NmziAyp558xfCzs4evD5LontXnjz50Mu9j2XxZ9vLV6xCgfyF0KVLN+PYkKHD4ejohD37DoDrEa30q9esleMct7P547S5cuSC9+Kl8Dl3AcOHjQDPSdq9Zx+SJUmOeLFiw82tBfoNGAQKifymVI+e7lLHx+csipcojWXLV8i+/qcR0AhoBH4FAloACQTFtm3ayaK5as26QGoBXKyqVqmGRPHiY9wEL9jZNZB2i5csk3bU+BUtWhzt2rUHF8b/Zc+Js2fP4c2bNyhYoLDUbWDfGN5LliNlshRImzY95i3whnvvPnJsktdU6efKlavgAp4ubXp8/PRJykqUKC3no6YvU6bM6NOnv5T36dsfefPkx8qVq5A+TTpUqVJNyvmvfz8PxIgaPcA0kS9fvUa58hXg4OCE2bPnorxNJVmYaekpWNA01tq162KS1zSEsQqFli1aS790PaMAFsbKCi1atMLNW7dB7aeVlZVYhliJefHjxomLksVLYtOmzeKHXKJ4SamzYIG39ENriaOTK+7duy/7+p9GQCMQ/BDo1bMXQllZYc7ceTh77hxsytvIez5j5my5GL7vnBtq166Dm7fvgfMYlQ8vXrwUZpzCwoIFC5ExgzUG9PeQNidOnkKmDBllPiMzzrqbNm3BP//kAIWJnj3d0cI8H9GSwf5TJU8pbalASZEshZQNHzVGlCk8znlT0cGDhzHKc6zsenqOlbqVKn7FygHIXGxXrwHq29lj4UJv5M6dD5u3bJV+qlWrIf1UsKmEaTPnIk6s2KC1nEom0oyZs5AlczbMnzcftWvbolWrNlLu/x/dgPPmzY85s+dIva5du0uVkaNGS/+5c+XBjDnzkTxJMmTL+g8emeNYRo70RIQwYVGjek2c8TmHh4+eIEXS5IgWJZpYoW/euoWCBQrJ2sVz0C3XyclZ+ixVsoyc48GDh/AYOBjHj5/wPyy9rxHQCGgEfhgBLYAEAp2To5MsosdPnAqklkkAsSlfAfFix8GDR4+xdv1GmcAHm922lixdgbChw8LLawqOHT8hx2gxIfXtN0D2lT9xofwF5Zxk4KnhixYlKtyatTDOX6eWLYoWKSb7+/btR/RoMbB7z17Zf/v+veHHO2zoMPTp00+Eo+z//A+pU6Y2+ujf30MsIHv27TfK1MbhI8dkPJ07dsb58xcQLlQYZEyfUQ6PGOkpx2ZMnyn7ieInRPmy5VRT2DewR2irUNi1Z5+U3bx9F5kzZUaO/2WX/fcfPiFH9pywTBNMTWKMGDExwGOQ0Y/e0AhoBII3Av37DUCEMOFQs2ZtOLs2Q9Ys2TBjxizjoq7fuImY0WKADLqldXb+goUyx+zYaXLdKlu6LNKnS49PZncmzrMZ0qU3+mnVsjX4zY2jx45j6pSp0nb27Dly3DqjNfLmzmvUrVvXDgnixse9Bw8lUyAFkH79TAobKolmzZmPK9euS/0WzVtKX1UrV/VjAdmzZ68kIbn/4CEOHz4iqXZPnj6DmNFionOnLvA5e1aEDAo7HPJkL9OYerv3ln6LFikqHylUMXzFixZHhrTpcPnqNTSwa4CwYcLh3n2/7mUUnmLHjI3ixYrj2o2bKF2ytOyzQ65NEcOGR51adaT/BvXtkThREpw4dUb2z52/KPi4m60ZLKxcsTKiR42G/QcOSZ3q1WogccJEsq3+cX1KliQpnjwL3gH36nr0r0ZAIxD0ENACSCD3pFmz5rIILVkWuOn55as34ISdKkUq6W3W7LnSborZcjHScwwSJ0iEUiVKwd6+EerbNYDn6LGysLVv30nq7jcHhKdPm14Y/jfv3ku2qrix46J5i1bGKCvYVIRN+YqyP27cBCRKkFgsDUYFi43zFy/DyaUZokaOgtKlShtHBg4cLIJLQALIxs1b/8/eVUBHkWxRdoEPLO6yLIu7u7u7u2twggR3d3d3X9xdgru7u7sEuf/cN1PNTAgSSFjYvHdOMi1V1dW3q6ueNyKGC4+M6TOgbt36qFi+Irp2tbkGdOjQSfrKLGCvPN4gXOiwKFG8pNVuieIlRGChqZ909vwFxIoRC2lSpZZ9Lqy03tBa5PHWZsHhCWr9yAwwgJ3xI+/s1h2ppP8UAUXgl0OgXdv2CBokKPbtPyhuqp7dmKiICfh7QHSxM+bmBmnBCBwwEBg7RipbphwiRYiEK9dsrqW0HsSNHQce9piyRo2aIEyIUGIVqF2rjgg8Gzduwv2HjxE7VmxkSJfBNA3OnbFjxpJ5d9bsuTLnDBkyVM7fuHETLVq1tbLz9enbX85zTnOk/gMGI2GCRIgSKQoSJUgsyh8qnOiqlTFDRnGbrVqlGrp27Q66x9JFinPb+PETpJmUyVMifpx4sv36zVuxPuTIlh2uzVuhWtUa4oblGBzOgtu2u8u8WrhgIbRya4vy5SpI
EDzPreeX1/0HRIvmLaXNMqXLItgfwcD4FxLdYAP87h8tmreQff6jNSpo4D/gvtOGMTGNGjmKk2vYhImTpd8NGzUVN1tmN1NSBBQBRcAnEVAB5DNoLvhnsUzCxhTtuagJhORCU7hQEUSLEhVkq+fNXyj1xo4ZK1VGjh5r2x9nW4RMO1yUad3gAsVFk5Q4YWIRZOjWdejwUYQKEQpurduaKqIxzJvXZnXYtHmL1B012uY7bRUSf+llSJgwMRibUbpESXDhM8T4DcaAHDhgi0Uxx/m7fcdOabNHj16Oh2W7SdPmcm7lqtWyuEYIG97SvLEAtW/URq5cvVbKn79wSbSVaVOlkX0uYqlTpZV7kAP2f/RDDhk8hMSxUDuppAgoAr82Ah07dETg/wX+ZFZApucm49zHk+WzT5/+Evi9b58t9ixf3nyI/nd0y0pCJU682B8sILRUBArwP9Da6khHT5wElTmZMmS0DnOOjvF3dBFeGGDOeXf4sOFyninUqXx5b8/Sde3GTYkBiRr5T1AZ5EgjRo2Rui1cbUz96jXr4P+33yXmxLEct9u26yhlR48aLafSpkmLWDFs1mhmBkyYIDFogfgc7T9os0rTou2Zlq9YJQJb0yY2JVWF8hVF4cQ5lbRj5y749/c72rRuY1UtVLAQQgULIcIhD9IKEyVCJDxysHa8fQ+kSZVG+r52g21tshrQDUVAEVAEfAABFUA+AyLXovr1bVaQ1q3bWtlU3rx5g1FjxmHR4mVSm65PGdJlRNBAQfD85WvQz5mLW+9eNt9lmvX/CPwH4seNZy3IFy9dFj/g6tVrStnV9o8H/hkpMkIEDYbHT55hz559cs7F5UNwZZZMWcTHlxfmgkGXp4zpM+Lho8eSGYvWB6baZSA42zmkQN0jAAAgAElEQVRy/CTSpU6DWNFjWm4M7dvbFkWzSDlCcPnKNfEPpobv3LnzcmqBtPkCzZrZBJAlS5fh5SsP6Vu+PDY/YRasWbO2uF24tWkvQfV0R6OVJLZ9wWWZ9OkyIn26DHjwyJaBxVybPt900+A3QpQUAUXg10aArlGcA2np8IoOHjws51u2sGnuTZm9+/bD/+8BMGnSZLx69RrRov6F4sU+WFnJLNNCe+fefalCKzCv06B+Q9nnN5mYXOPKtesIFSIkEsVPKMep7OE8GYKxDy9fYfqMmVJv8KAhcr5RY1dxOZUd+z8GjlNRU7Rocdy7b7seTw0dbosPaWaP12BGv/jxEiBk0ODg9UkMjGdGRK4b7F//fv0lBTGFj9DBQ1oClZub7TwtFSS63TJWw5FevnqN8uUryRpi4jAu2zMNMhaG7VerWg1v3r4D52Pum/bYFvcb2vFhRrCc2XPIsQ0bbd+zKlWqtFhETpw6I7F75tr9+g1A1D//8va3VEx9/VUEFAFF4HMIqADyOXQAmdQ5yceJFRdFi5ZAteo1UbRoSYyfOBn8oBOJweZ0hUqVIhWGjhgt7gCRI0VGnjz5xUeYZaipYk76VKnSomv33iKI0I84U6asEgMxctRYzJw9Tywg9MflIjdi5BgEDxYC2bPlFKGC7gz0LY4ZIza2breZ2KdPn4lwocMha9YccG3hJtmnPDzegGmD6efbvmNXdO7SHX9G/hPjxk8Qv2RmZQkS5A8w84xXH6NiMGLI4CGRMmVqVKhUVdwMrl67gYwZMiFK5Cio36ARunTrieTJUuDPyFGtjCq02MSLGx8FChTCnbt3MXfeAnH1YsrKtes2ClYdO3RCqJBhMMeehUUOApgwaSq2u9tiWcwx/VUEFIFfCwEqRZiVL3XqtOJKmjNXHpgkGuZOPN68QfMWbogYPiISJUr6kQsp4yYyZMyCXLnyoVz5SjDMNuszU1TYMOHQr78tAxWZ/2rVqotgkTdvAbRo2Vq+ETJt+kyEChkaKZKnwv6Dh7F1u7tYllOlTI3hI8egfPmKiBQxEgoXLobde/eDc941e1Yt00/+UqCoXr0WsmXPhZq16qJY8VIoU7a8WEsYm2HowKHDskbEiR0PVMAw9oL3ycByxuk1buIq30dhPB1ddQcOHiZVKVyUK1sBsWPGQY2adTB33j8yd5p2ze+r1x4oXLgoov31N2rVdrGEFGaqChM6DGgVHz5qLLJkzoLwYcOjZy9bTN1TJg/JmFm+UUUrz+279yUhCud/E3e3fccOuT7TDd+zZzXkdRnIPnjoCPjWB2vNvemvIqAI+E0EVAD5yudOv9xNm7Zgy5at8utYjQvXyVNncOfufdGiMW0hhQt+iff+fVuaWpZnG/TNpfaPREsKv9R77doN3Lp9WywONP0z0JAf6mOGEi6+tERQG0iXr9Nnzkq+dsdUjiyzdes2K70v26b1ht/luHHzplyL8RhnzpwRAeTKlWs4f/EiTpw46aUAwgo3b9/Brt17pF3ue3h4yGLMOA72mSkruUDxHs+dt1lKWI79v37D5q996fJlsYQw4NQs7u/evQe/Kk+fa0fauHkbWE5JEVAEfl0EaMFkcDYZ93sPHorygvOMIzGFOOdIzmGHjxyVec7xPLePHT8hmaQ8fy+Ibktnz12wrLOmHuekdes2gHMO6fLlKzL/0aLAeZPz65kz53D33n2Zv3h9Zu7j/HX7zpcz7rm77wC/l8HsVkyZ7hXRCr1z126J3TPnL1y8hKtXr+Pq9esyZ7I//PP8hfadO3dh06bNeGBPuW7qe/5lPAjnekO8X6ZspzB0+vQZmUOZwIT3b4hB/kyd/ujRY5nHOc/a+vVhvuW8bawrjvV4T28d4vXMOf1VBBQBReB7EVAB5HsR1PrfjAAtSHv27sODh48km8sdX/6K/Dd3VCsqAoqAIuAHEKBSjHEjjNejZYgpeJUUAUVAEfANBFQA8Q1Utc2vQoAWJfonu9T/kGb4qypqIUVAEVAEFAEfR4ACCN3f4sdPhJ279+Lp0w8fsPXxi2mDioAi4KcRUAHETz/+f/fmqV3r2bMXFi1e8u92RK+uCCgCioAiIAhs2+YuGb2uqkusjghFQBHwRQRUAPFFcLVpRUARUAQUAUVAEVAEFAFFQBFwRkAFEGc8dE8RUAQUAUVAEVAEFAFFQBFQBHwRARVAfBFcbVoRUAQUAUVAEVAEFAFFQBFQBJwRUAHEGQ/dUwQUAUVAEVAEFAFFQBFQBBQBX0RABRBfBFebVgQUAUVAEVAEFAFFQBFQBBQBZwRUAHHGQ/cUAUVAEVAEFAFFQBFQBBQBRcAXEVABxBfB1aYVAUVAEVAEFAFFQBFQBBQBRcAZARVAnPHQPUVAEVAEFAFFQBFQBBQBRUAR8EUEVADxRXC1aUVAEVAEFAFFQBFQBBQBRUARcEZABRBnPHRPEVAEFAFFQBFQBBQBRUARUAR8EQEVQHwRXG1aEVAEFAFFQBFQBBQBRUARUAScEVABxBkP3VMEFAFFQBFQBBQBRUARUAQUAV9EQAUQXwRXm1YEFAFFQBFQBBQBRUARUAQUAWcEVABxxkP3FAFFQBFQBBQBRUARUAQUAUXAFxFQAcQXwdWmFQFFQBFQBBQBRUARUAQUAUXAGQEVQJzx0D1
FQBFQBBQBRUARUAQUAUVAEfBFBFQA8UVwtWlFQBFQBBQBRUARUAQUAUVAEXBGQAUQZzx0TxFQBBQBRUARUAQUAUVAEVAEfBEBFUB8EVxtWhFQBBQBRUARUAQUAUVAEVAEnBFQAcQZD91TBBQBRUARUAQUAUVAEVAEFAFfREAFEF8EV5tWBBQBRUARUAQUAUVAEVAEFAFnBFQAccZD9xQBRUARUAQUAUVAEVAEFAFFwBcRUAHEF8HVphUBRUARUAQUAUVAEVAEFAFFwBkBFUCc8dA9RUARUAQUAUVAEVAEFAFFQBHwRQRUAPFFcLVpRUARUAQUAUVAEVAEFAFFQBFwRkAFEGc8dE8RUAQUAUVAEVAEFAFFQBFQBHwRARVAfBFcbVoR+K8g8Pz5Czx99szpdjw83uDFy5fWMW6/9vCw9rnx7PlzeLx5Yx17/uKFUx3rhA9vPH/+HC9efOjbmzdv8ezZcx++yq/R3Nu3b7+5o+/fv//mup+ryHHg2DTH15eu5d37+FJ7n+vf95x7/fr191TXuoqAIqAI+AkEVADxE49Zb1IR+DYE9u47gOIlSqFY8ZIoVKgoFv6zGNdv3MCly1dw6PARlCxVFqVLl0GVylVRo1YdFCxYBOXKlseNm7flgj169Ebu3HlRrWp1VKlSDdVr1ELVajWRKWNm9Ojey1udGjFiNEaPGffFOu47dsGtTXsUK1ZS+lW1clVUq14TFSpURrYs2XDg4KEvtvFfKnD33n3UruuC7DlyYuOmzV99axcuXsbjx0++uvzXFnzt8Qadu/ZA0aLFZVwULlwUtWrXw/0HDz/bxN17D+Di0hCFCxXBrt17vSy7Zu06xIufEHv27AUFliqVq4Ht79y1x8vy06fPRLx4CbBv/0Hr/NVr13Hs+Elr37sbY8aOx8BBQ71bTcsrAoqAIuCnEFABxE89br1ZReDrEWjRohXChYuADh27CEO3d98+rFq9FvnyF0KXbj1BLfbOnbsQL3ZcBA7wP+zbfwCrVq7CH4ECI17cBDh4+AguXb6MceMnwp8/f8iUIZMILSyXMkVKOdZ/wKCv6tD0GbOkfNcu3T5ZnppnCkRt23fEmTNn4b5jJ6JGjiL15s5bgEOHDiNvnrwIGiQoxk+Y/Ml2/msn3r17hz59+gkOY8aM/arbW7p8JZYuX/1VZb1b6N379zh3/gK6du0mfapRoxaOHDnqZBHxqs23b9+hSuUqUoeCsFd0+vQZpE2bDpvsghaFYY69FSu9vpelS5chTZq0OOgglNZzaYBp02d61fxXHbt1+zZq1q6HAYOGfFV5LaQIKAKKgF9EQAUQv/jU9Z4VgS8gMHLUGGHcBgz8WEDYvGUb2nXsYrVQrGgJxIsb39rv0bOP1B01aowcu379JgIHDITq1WpaZdZv3Cxl8ubJZx178eIFVq5cjcOHj1rHuHH4yHEUK1ZKyk+aPMXpnONOl249UKp0OTx9+sFVLGvmLIgcMTIuXLoiRd8BiBk9Bvz/5h8rVq1xrI75CxaiV68+uHPnrhyn29aCBf9g8ZKlOH7iFB4/eYqdu3ZjxYpV2LdvPx49foKZM2fJsTt372H5ipU4euw4Xr32wLp167F27Tqn9h13Tp85K+2eOn1GDru77wCZYUMUoPr3H4hLl6+aQ/J79ux5DB06HKtWrZH22eeHj55g69ZtWLFyFegwdfXqNcybNx9bt223sNi4cZPgN336DKs93t+QIcMwa9Yc6xiFODLrKZKnQuFChbFg4SK8sbtw3b//QASZhf8ssspzY8uWrZg7bz7okufh4YEJEyeL4OlUyIudLVu2SZ9oMSDRRW7W7DlYvcaG284dO7Fk6XKnmj169ESUSFHgvnMPDhw4iMWLl+Llq1dS5u7du1i0eAn27T9kWW46d+6KBPESgONt2fIVWLZ8pdXelavXsHrNWpw4eVqO8T7bt++AAP5+R+3adZ3G4br1G9CvX3/s2LHLqn/p0hXMmTMP27bvkGP37t2T9un6RZey1GnSYcrU6VZ53VAEFAFFQBH4gIAKIB+w0C1FQBEAQJedzJmy4H/+A+Dc+YteYkIXmDdv3uDN23fIl68A4saOZ5XLkT2nMJZkXkn79h1AoAD/Q40ata0yHTt2ljLz5i+0jtFNhtrqAvkLWcdOnjqNfQcOYdHipfjNnz9MmDDJOue4cfbsOWTImAXnzp23Dr987YH06TLgz0hRsNPBZadLF5vmvUKFilbZPv0GIHeefMiVKw+yZ8uJGzdu4t2793Cp30j6RBezFy9foW69+rJfrVp1EUDoUjZp8lRcv3kb6dJlQOhQodGjV1/UrlsfAf0HxOQp06xrOG5s3LwF/v35R4UKlXDh8lUULlwMoUKGxtt373Hx0mWUKl0WefLkF0365Ss2IeTwkWNImiQ5SpQohdSpUuPPyH9iyLARePj4Ccw9UXN/+sw5aYvXP3josFx20aLF0m8y+CQKPm5ubVG7Tj0kS5IM1arVwKvXr/H6tQfquTTE7/78IWvmrOjew+Ym9/DRIxQpUhw5cuRGtuw5MXHyVOt2+vTtj/8F+B+6dO2ORUuWI1zYCHB1bWGd/9TGP4uWSJ/69O0nRejuRRw4BurXbyhuWqFChELffgOtJmjJiRg+Atp36IR2HboIxhQy+Kw4bjl2WH/BAtu46tGztwicbdp2QNfuvRA+bAQMsFvdKIBkzZIdQQP/gTNnz0msUras2RHYfwAUK1YCGzdvletSGE+ZMg1oqcmSJRvmzFsgx/lupEyRWq5HFy4KS7//FhC0HpF69+6L9Bkyg/FISoqAIqAIKALOCKgA4oyH7ikCPx0C1EqfOn0Wx06cwvGTZ3Dy9FnZ56/j36kz53D67DmcOHUGZ89flLLU3FMr7R1as3a9MKBJEyfFxcs2y8Gn6j9/+QplypRFhLDh4NamnTBukcJHwoRJUyztO12uokSMjITxEqBWrTpwbdYcbq3bOWmT2T4tJUOGDse6dRvkctQiHzl+Srbp20/G0jDQnvtDZi9B/IR48vSpderl6zfIkC69CCC79+63jo8aPU7aKl+2vBybM3ee7DOmhUSLCZlNQ+FCh0H6tOll15StUd1mzXEMdG/WrLm0M2DAQBw+dgLhQodF1ao1TDNOvxTcMmfMjEjhI2LG7PmihXd334kzZ88jVozYaNOmnZRPlSIVKpavKNYXPo909n706dMX4cKGw5Gjx6Xc0RMnETJYCLjUc5H90qXLIoC/33D4iM2aRCsO8Zs5c7acnz5jJv6MEk22x44ZK+dovSEtXbpc9ufP/0f2mViAjH2cWHFkf9euPQgcKIhYh3jg9t17SJk8pbjdMb7m4sXL2O5uswpIhU/8o1DJPvXt198qsXWbuxwrX64CHj55iuRJkiJ1ilTW+SFDhsv5Rg0b4+KVa0icMDEyZsiMp89fSJm1a9fL+Tlz5sp+/4GDZb9dWxueLvXqI8Bv/nH1+g05P3DgIDlPyxaJFg32iRYs0rXrN2S/tVtr2a9TqzYiRYgMxrGQhg4ZJucHDx2OGzdviQWE8VGkI0ePIWXK1Fi/Ya
Pse/cf2zlw6CjOnL2Ak6fO4MSp0+A7zj++96fPnsfxk6et9/3QkWMSm+Xd62h5RUARUAT+DQRUAPk3UNdrKgLeQICMTa5ceZE4YRIkTpgIsWLElLiLuLHjwvEvftz41n7C+AkRPmx4JE+WAjdv3fLG1YDFS5YJU5UgXnycv3jps3WfPnuOIoWLIVu27JgxYxamTJkqwpFjpX0HDiJUsBCoXas2Ro22MbsF8hdwLOLldvv2HRE3dhwUK1IUmTJkRKRw4UEmfO/+Ax+VL1qkKKJH+1usFOYkXaEypssgAsWuPfvMYQwdPlLur1KFSnKsTu06CPibf0tgKl68pGBHSwSpa9fuCB0yNNas24CXL1+haJHi0o8jx07i/IUP+NSv3wABfw+A/QcP4dKVqwgTKgyKFinmlAXMdOLZ85diMaGA8eDhh+DrLVu3S9/y5cmLho2aSD9q16yN46fOyPGyZcpKE1OmTJP9gQMHy/6evfulbBu3NrJfqlQZhAwaHMeOn5B9I4DMmGGLbaBQev3GTUybORvR/oqGKBEjWcKMYcLHjZsodY8eOyHXqmkXymg5+J//gCherIScv3jpCmLHjINaNT9YuOTEF/7RfcqzALLEPvbm2y1jcWLGlsQBpqkePXpJHQqCrzzeIGH8BEiZIhVu370vRRYuXCTnFy60CU+0zlDgmGx33eOzDBQwEJYus1kp+vcfgN/9/Yb99iD00aPHiqVtid0djrFHPD92rC35Qft2HfFHoD/EpYsXpCDo/7ffsWnTFtNF6/f8hYuIGyceunXvaR3zzkbdui4IFCAQ+F7zjwJgvDjxEDdOXGmX2zwWO2ZspEmVBtH/io42bdp75xJaVhFQBBSBfw0BFUD+Nej1worA1yFw7fpN/B01mjBWMaNFF5cZMh1kPsigyV+sOIgrDEpcEU5ix4iFCGHCiS8/GU3v0M3bd5A8aXK5Hi0qXtHbd4ymACiAZM+WA2XKlPOqmBwjc0xGc9jQYbJvtNLV7VaET1Vs1ao1QgUPIfdLYShK+AiIEDY86L7kmQrkK4C//4qGF/Z4AJ6nlpoCCGMGdjhkQWrYsLH0p7VbW2mGWbJCBA0GxkSQihYtgeB/BMOBgzb3pVNnzooLWVm7xeTsufOIHSsOWrm1xZ17NsaX9Ro0aCTM7oYNm8SSQfchuvS8fPVxWtYnT58hcaLEqGgXguTCEk9hi4uoU7uuxJYcO2YTIO7cvY8qVaojSaLEmDt3HkqXKoO8efNbmaM2b90uQlLbNrZ7IrbBgvwhWni2bQSQaQ4xIBReBgwcLIIDhSWT+YlM/O+//Y4x9oxjtGDx+bVs4SbdJB5/BAqCksVLyj7HSPx4CSx3LXMvX/o1FhDjgsXyRgAZN84WFxI/bjxxBTNt9eptiy8aO3Y8Hjx6gmRJkiLG39FBIYg0d+586euChTYXLBFA/H24ly6duyJIoCCglY/Ut08/sfbRTZA0ZqzNOrZ48RLZHztuogiVU+2xHG1at0Xg/wXGytVr5Xyvnr3kvLEeyUH7PwqwxKVjpw/xUo7nv7RNASRo4CDyXvPdlvc9Vmzbex47rggefC8olEcMG07umxZGJUVAEVAEfgUEVAD5FZ6S9tFPI3Dz5m0kTpBIGIxJk6fh1es34vZC15fbd+x/3L57D/cePATTlVKI4D4ZZO9+P4FgM9sVmc6yXggWS5etAF1lDOXNmw8lS5Y2ux/9so9sq3vX7tY5luex0WNsjCZP8JshDJw2gdl0U+IfiYHRtvJeZ3Fq0dJNAuFpoXAkZt6KGC4CbtkDy9k224kbNz4uX70mRQ1Ta4LXKTiUKF4SRshigDmPlSpRymqacSUZ02ew9rlBtyC2TTckBoaHDhESBQt8iGdxLPzuPZAkcVJUqljZ8TBobWDsBuNoDF27ZnMX4v7I0WNRuVIVMC7Bkdx37kbg/wXCgP4D5DA14qGDh8S1G7a6G+1B/yYNr7FE0To1Ythw0fI/ePhI6jKWgfcxc9YcweDe/QdImCARcubIJeeNOxxjX0gUjpImTobuPZw1/RcuXvpkulzW27lzt1xnwsQPcT3u7rvk2OxZNlexODFjgcKlIQbgs2+0tpGSJKJVMPEHQWzzVjm/epUt61WXrj1kf/jwEVKezyjYH8Fw/4HtXkeMsFnDrtjjbCZPnirlN9vjP44et1l/hg6xpdWtUqkKggcNjidPbXEdQwYPEYsJXbU8ExMNpEufUVzaPJ/7mn0+j1u378r7LO/53Xsyjvl+M97l5q07Ms6ev3wtgixxoRugkiKgCCgCvwICKoD8Ck9J++inEaALViK7AOI5A5FvAkNNeOxYcVGiZBnJftSta3fMnjMP3Xr0xtlz58S1i8HkEcJFQKQIkcCUutTsOxJjNipXripMHX35R44aKx+ce/r8Jf6MElWOuzZvCTJb5y5clP1ChQo7NiHbzGBEBmvosOEfneOB/QcOIXmyVJbwsm2bO5q3aCXCBwOqm7m2wOAhQxH97xjiLma0/az7+MkT1K3XAKXLlJNvlbDM/gMfvgvBMjVr1UWDho2ta5ctVwGtHdxdaKVKkyad9LFTl25yn+wv7/HgIeesXmyEQfF/BA6KUCFCWtp4HmeK2mHDR0k7+fPmx8BBQ7BshY2Z7tdvgGjTGa+wefMWLF22HPxmBYkZuqhtpzvV2AmTRWDg9fndFLqiNW3qKm2WKV1WPgRJVyOer13HBa7NW8l2K7c2cn0KDrTwpE6VFjvswfsnTp6SgGvGxjBwmwKfIZNmOU3qtJbViOfoBsZrDB5is3yZ8rQ0zVuwUIRWnmfCAzL+/A6Ii0sDqVOtWk3Mmj0PoYKFFAsD75dUtmwFOV+rZh3MX7hEXMH8+/tdLFzMwlWxYiU5zxgafhhz0OBhqFGrNrp164nWrduK4MnAdNLNW7dFqGIfmjWzBc1v3LRF6lcoXxH37j8UAZjnUqVMg7ZtOyBRoiTo3ccWNM/6OXLkkvLNm7f8yNJFjKNHj4nbduFXLupL/+gex/vgc1ZSBBQBReBXQEAFkF/hKWkf/TQCFECMBYTBwz+Sbty6jQ4dOknGpCZNmmHWbFtwL/vAbzm4urbEiBGjMHjwUHTu0s3SRJs+jhg5WoLOmeq1b9/+4g/PjEokZg7q2bMXGjVqKvEIZBh79uyNZctWmOrW77nz58VdyGR1sk44bNSoWRt16tqCsKnxpwsLs2bRfYaxA26t26KXnfl0qGZtkrGsXq0GmG3KM5HRd/yA4d37D8T6ZMqRaadbELGYMXMWZsyYjZEjR4GpiE18gSnL39179mLU6DHo1q2HxNw4nuM2rUyNGjWWzFLmnMl0xTgTMpv8a9y4mRUQTcHEza0N5sydj8NHj6P/gIGYv+AfEU4mTZqCUaNGSzyLsXSwb4yNYDAzP8hHSxAzm5Eo0BEvE1DNY3Qz4gclB3n6yN6Chf+gV6/eGDhgMLa7f0hTu2nzFtSr54KsWbM7xcF4vHmD0WPHg8Irx0W3bt0xaNAQUIijRWLM6LGYMnUaxk+cLP3q3r0nVq1eIymGmeVr2LAR8kznzlsIM
vlDhw7D0aPHxYLGLFnDho8Azz189BiMV7Hdz0FJLDB9us1ywmMcvwMHDgHHKMcmv5dC4YjCENsxH9NkWQpy9V0aigAuDdrrM43x0GEjpLxn4Ttjxsygu9+PoMr276OoAPIj0NZrKAKKgE8goAKIT6CobSgCvogAGaF/SwDxxdvy8aavXruGvPkKYeKkT38rxMcv+oMa5Dc4unbvieHDR2L37j3yUUW6W2XKnE0C3n9QN7x9mdVrN2DEqDHf5Abo7Yv9JBVevnyJDh07o0nTH+cOxTTRagH5SQaAdkMRUAS+CgEVQL4KJi2kCPx7CDgJIN/xheZ/7w5+3JWZ8WvYiNFwd/hg3I+7uu9dae++/UiQMIlYoPg9ElqRGKNB9ymmaf4ZaeE/SzDDwWL2M/bRN/rEwHXXFq1+qNBVpYpdAGnSzDduSdtUBBQBRcDHEVABxMch1QYVAZ9FwEkA+cEuWD57Jz+utfsPHvy4i/2AK9E9iG5J2bLlAF3NChQsjA4du/xQJte7t3nPIUOYd+v+yuWvXbsucU4/8h6YmEAsICqA/EjY9VqKgCLwHQioAPId4GlVReBHIKACyI9A+de4BgPKHz1+jIcPH0lMxK/Ra+2lbyNgxYCoAOLbUGv7ioAi4EMIqADiQ0BqM4qAbyGgAohvIavtKgL/DQQsFyzNgvXfeKB6F4qAH0BABRA/8JD1Fn9tBP7NLFi/NnLae0XAbyBgCSBqAfEbD1zvUhH4DyCgAsh/4CHqLfy3EVALyH/7+erdKQLfi4AKIN+LoNZXBBSBH42ACiA/GnG9niLgTQTUAuJNwLS4IuDHELAEEHXB8mNPXm9XEfh1EVAB5Nd9dtpzP4KAkwVE0/D6kaeut6kIfD0CKoB8PVZaUhFQBH4OBFQA+Tmeg/ZCEfgkAiqAfBIaPaEIKAIAVADRYaAIKAK/GgIqgPxqT0z76+cQUBcsP/fI9YYVAW8hoGl4vQWXFlYEFIGfAAEVQH6Ch6BdUAQ+h4AKIJ9DR88pAoqAfohQx4AioAj8agioAPKrPTHtr59DQASQhInlS8fTp8/wc/evN6wIKAKfR8AIIM2auX6+oJ5VBBQBReAnQUAFkJ/kQWg3FIFPIUABJGG8BDYBZMasTxXT44qAIuBHEbBcsDQLlh8dAXrbisCvh4AKILwhfPUAACAASURBVL/eM9Me+zEErly9hnBhwokAMnr0WD9293q7ioAi8CUEqlWpJvNDUxVAvgSVnlcEFIGfBAEVQH6SB6HdUAQ+hcD9+w/QvkNnVKteC1u3uX+qmB5XBBQBP4pAieIlRQCpU7uuH0VAb1sRUAR+NQRUAPnVnpj2VxFQBBQBRUARcECgUeOmCBkqNNq0ae9wVDcVAUVAEfh5EVAB5Od9NtozRUARUAQUAUXgiwg8ePAQFy9dwb37979YVgsoAoqAIvAzIKACyM/wFLQPioAioAgoAoqAIqAIKAKKgB9BQAUQP/Kg9TYVAUVAEVAEFAFFQBFQBBSBnwEBFUB+hqegfVAEFAFFQBFQBBQBRUARUAT8CAIqgPiRB623qQgoAoqAIqAIKAKKgCKgCPwMCKgA8jM8Be2DIqAIKAKKgCKgCCgCioAi4EcQ+M8KIC9fv8at27f9yGP8+DbfvX+P6zdufHzCG0cePHwEZlf5Gej6jZt4//79V3Xl2vXvu++vuogWUgS8iYDHm7e4eeuWN2t9XPze/Yd4/OTJxycAnDt/AXv37ffy3L918MbNW3j79u13Xf7mrdt44nDPt27fgccbj+9q07uVfeI+vvaaP/JaX9unH1WO8/fr169/1OV8/TovX73G7Tt3fP065gK7d+/F6TNnzK7+KgI/LQLeFkD27T+AoUOHYdmy5Zg0eQpGjxmLUaPGYMDAQTh67PhPcaNcgDNnyY5Ro7z/1eiNmzajZ8/emDhpio/cy6lTZzB23ASsWrUas2bNxoiRozByxChcvnLVR9r3qpGr164jQ4ZM6N69p9PpV69eOe1/bmfuvPlIly4Ttm3/dz98R8alYaMmKFiw8BcFkNevPVCtag2UKVPuc7fm4+f27T/o5dh/6Q28v7ZTd+/dx9p1G3Dv3r2vrfLJchMmTMKgQUPQr98APH5sY2jZ/ps3b7ys49P38+rVa/B6P5LuP3jgxMj+qGtTMEiTJj2GDBnmdEnvvJOsOGfuPKRIngrHj59wasfsLF2+Av0HDjG7/+rvu3fv0apVaxQoUOibBZA3b9+iU+cuyJevIG7ctCmU2rXvKPseHj9GAHn46BFq1qyNqtVqwqffAc8PiAqf6tVronqNOiDj+q109+59TJ4yDUuWLMPOnbsxa9YcjBo9BuPHT8Su3Xu/qtm7d+9h4T+LsHnzFjx79vyr6nxPoafPnqFBg0YoWbocqPzyLeKYunPnrm8179Tuzl27kSVLdowbP9HpuE/u3Lt33+n59O7bH/MX/OOTl9C2FAFfQcDbAsjGTVuQN19BJE2aHLVq10Wbtu3h6toCtWvXhVubDqCm6t+mVavXIk6c+ML4e6cvO3fvRbcevVG/QSOkTp0OxYqVxNPvnHgPHjqC6jVqI2zosChVsgy4eBYqXAxp0mbA6jXrvNO9ry576vRZRIwYCZ07drbqXL1+A48eP7b2v7QxaPBQxI+fCDt27v5SUV8/T+Ejc6bMXl6Hi8nFy1fk3IuXr5A5YxYUyF/Qy7KfO7hh0xacOv1tWqO0adMjb94CVvMPHz3Ghk1bvygwWRW8sTFm7Hj54vGateu9Ucu5KIXK3HnyoWChomjSpBlcXBqiV6++aN++I5YtX+Uls3jl2g28ePHCuaHv3Lt24xa2u+/6zla8V33Hrr24cPGS9yr5QOn9Bw4hfPgIGDhgoNXalavXvS0MDR48FJEjRsGpU6etdn7mjTKlyyJLpix49+7dN3ezVq06yJUrj8WUVqxYGWlSp8WbN99nVfnaDlEoKFCwMCpWrIJnPvwOeO7DvfsP5F4rVqqCV6++XcCihahd+04IFyYsggQMhBIlS6NT565Inz4jggQKgmnTZnq+9Ef7N27dQfJkKRAxQiQcP+n7441CB9fcUqXL4tHjpx/1x6cOPH76DBs3b/Op5j7bztJlK5AgQSJM8iGFplcX27zN/bu9HbxqV48pAr6NgLcFEHbo0uWrqFXHBfcePHDq35mz53Dt2vVPMl6OZng603zOoYbaM0NfWryo+fZM3Xv2wfARo50OO17f6YR9Z9v2HXj23KbpefToMWLFiivWHVOWzO6X+mLKOv4+f/5CJvJNm7dahwsVLIxof/2N1x4fa5t5Hc/kuNh6vg/P+6xL60eL5i2lGT4X1+atQI3Wp8iz1vvZsxdwadgEO3Z+nkH0rBH0rJX07DZF1zA+W0ccX39Bk3n02DGULFkGb71gYqZNn4leffpbt7V27TqQQSHxWl6RZ3zZbvWadTB79tyPinuFrek77433QuaSlkES9xs3cUW16rVk3/H+PV/3o4t94QCf0cWLl0VwffDw4SctFZ9rZsvW7QgUIBDKl6/oVIyWv5w582LGrDlOx7lDk377jp1B
i8WX6JUX7yLreH7Gly5dRstWbXHg0JEvNSmYfqqQ43vhuczbt85M77z5C9C+Q2dQUPUOcRQ5zkemruPz9DxOPO+zDgW8zp27SvWDBw+jddv2eO6JoeV1PjFsrXemUqUq2LJli+nGV/2aMfs1hT1j6jAVO1X3/EydTtp3Dh48hFKlyuLFy5fWab6Xjvh8qZ1r16+jYRNXHD9xUtqg9adE8VKW1c407PkeHd89U+ZTv16tIY5lt7vvQJNmLUDlgmf61PN69Qk3ok8dN+1u3eYO1xZuePr0+5nwbJmzisJi/YbN0vzzl68QL1YcObZ563ZzSevX84zp5tYGMaLHxIkvCLyeMXDE3vFZWxeyb3h4Wv+o8KrfsMlHFopPjRGv1k/P13Dcp+silaar1nxZgePdtnkdDy+sx1269QQVR45EnB3nD8dzXI8+dc7z8cmTp6J3n/548/bdJ/kuts132vMzcrzmp9ZKxzK6rQj4NALfJIDs3LVHBBBHs+zTp8+kb1z0aapOmjQZli1bIccaNGyEXLny4ey5C7K//8BBVK5SXbQxffr0k2MvXrxE/wGDQEbo6LETyJY9p2W2XL1mPdKnd3Yp2rNnHyZMnAz3HTtlgWvQsAk8PD4w7q1at8OoMR9csE6eOo0yZSsgXboM2LjRNhnLhT/zr3uPXujYqYuUoHameMkyGDlqzGdqeH3q4sVLSJkyDXbv2WcVKFe2PCJFjOwkgFBQadCoKVKkTO1kvTl95izKV6yCmbPnisaUbis1atZBvXr1pb0TJ0+JBr5v/w/a1ZYt3dC9Ww85X6VKNQT/IyiyZ8+NJUuXW30wG+cvXESNWnUwafJUXLx0WQ7fuXsPzVu2BoWmHj17I3ee/Nh/8JCpgidPn6F2HRfkyZMfjRo3sxZm9jFNmnSYPNnmwsbnmy59RgwZOlzqUnNP9zhjWVm3fgPy5C2AgoWKgDgZovtB3XoNUL9BY0ybPgN16rrAM0N5/uIlxI0dD5EjRESVqjVB5nfduvWgxvTqtRuoU7c+SpcqC7pQGJo9Zx4yZc6K4iVKW1rk/gMHI2yoMEidKi3GjHM2lR8/cQoVKlRGrtx5MHTYCGmGbeTOnQ+Hjx6X8VqnTl1s2mxjCAcMGoLgQYIiQdx4qFi5mmVVcd+5G4WLFEee3Hnh7r7TdEcWpvz5CmD5ilVYu34j8uTJh8GDh8i4oBUqS5ZsWLBwkZS/c+ce2rfrgC5duoJMDJmh4sVLolTJUtjuvhPduvcSdxdue0W379xFkkRJ8Lu/3+T5eS7z4sUrrFq9zsm3nsx60aLFETpESBQoWATuO3bh1q1boCb+8JFjKFiwCKZMmSpNzZv/j4yTVm5trKbJFNKdsWSpssiWLQf27rW9A9TwBgsSDLly5bVcyrp264FLl6/gn0VLZW6gSyfnFY71nDnzOLkDcu7pP2AIMmbKgvETJ1vXo5AxceIkmWvKlauIqlWqyTlaPSJFiIxIESKhVu16oI+9I5HJ7dO3P86du4AJEydZ7z1d0ziOUqZKg5kzZ1tVDh46jAoVq2LRoqV49uyZtFexUlW0bNFKyuzesxd58ubH8OEjrTqNGzfFsGG294BMOTHl816/cZOUYZxTlarVxVVr8ZKlVj1uDB85BoULF8OChYvRoH5D7Nr1sWWSmAwfOQoz7ULkmTNn0b17D3B+GDxkODJmzAJ39x1O7XKHgi3fS7rs7NixS7TQrdu0l3L8unaOHHnQq1cfqx6ZS84VfIeyZ8+JrVtt2uTDh4+A7xK1771698OixUtEMC9VqozUvf/gIcpXqITixUvhwkXbPHPsxCl5vvnyF8TJU84WSNfmLcX9cuasuTLHcGyQtm/bjpIlS8v2shUrkTx5Sri4NJB3ggfbtusoc5NnYYHjn3MZXSZZvl072z1yLipeohQqV6mGZ88/WPmoXClbriJ69u6LiZOmgusBGTm6lZUtW8Gy9rdv3+mjeab/gMHIkDEz6tatbylC+C5wPi1arKQ85xcOLppc/6j579tvAMZPmAy3Nu3w/MUHoU1u1v7v3oOHqFK1BlKkSIUOHTqBdb0iMrlZM2cVhQO15IYSJ0goAsiuvR9ihRifVK1aTSRLnhKzZn9QQjR3bYlECRLj1Jmzprr88l4WLVoic+Ldew/QtFlzlC9fyVrP3Fq3RYkSpeXeHz1+glKly6FVqzbgNonzCp9Tjhy5MXXaDKvtFStWoXbdBk4xE3PnLZR5hZg4rgHzFvwj9Wk1MWODrs3Enm7Ybdt1gOOayIu0btMOgQL8Tyxos2bPFSG2d+++4kI7Zsx4tLWPiREjR8uckz9/IaxYuUr6R1enjh274NjxE/Ic06bNAPbXEOerHDnzYPqMWfJVes4LpFat2zqt52fPnUfJUmWQI2dueY9Mff5euXYdteu6SDvLlq+0TnH+qufSUNy5mjVrLsc5BwUPGgJRo/wpSi8+QwpY3Xr0kvXAVN68ZavwB1yr12+wzTU8x3Vs9JhxoNWtdp16qFa95lcpmUy7+qsIfC8C3ySA7DtwSDTGc+bMEwFg06bNoIbFBEZu2LgJEcJHRLVqNaR/HTp2xtDhI2TSIbObr0ARWSTXrVsnkyjdtg4dPopo0aLjr6jRMHb8JIkxSZUyDZo0ccWNW7cxevQYJEmSHFeuXhNJnwuAP3/+ZELhYszFuXoNm9aZF23WvBVGjbYJIKwzYdJUnDt3HqtWr0GBQkW/GKh59eo1jBw9ThgStkeGj8IRBRnvEpmd5MlSomOnzjhw8JAIWFyEpkydZjVF5mHS1BnC1B06dFgWKTLnJOLBibVX777iwkUNydSp05AgfiIRSKhF5cRatnwlPHli05q1aN4K7draFtj58xfYGdl/cPWqc+wJGbxlK9fIBM7+ZM+RR/DlNerUcUGBAoXlGdet6yILlOkwBT7G/+zcsVNc1uLGTSCMx42bN1GkSFHUsD+L/fsPIGSIUMIcs+6tW7exeMky3L//QASaaTNm49Lly8KIV6hU1TSPDp26YvacuWKByZQxM4oWKWqdMxv0GXZr1RqJEiYWwYN9JmMUIVwE9OjVF/S/zZQxCxYstPnDjhkzFgkTJsH69evRrVsP5MtXQJgv+kTHiRUXPXv0Apk2R6ILXrduPWWsNbdblM6dO4fBQ4aBDNXpM+fwvwABUbJEKanGyf5//gOgfLkKwuwR34kTJyNO3PhyLzWr10CoEKGsBXPRkmXSdtQoUbFw8TJxZQz42+8oUaIUlixbKYJM7JixRSPG+6tYoZKNebD7cVMg4XuQO1ce0I0sRNBgKFa0hOMtWNtccFm2SJFin7QC0ILlqEmm9nLa1OmIGyceZsychfMXLqFokeII6D+gCEzU6g8ZPBRbt+8QofLihYto3KQZOnayafpnz5mPgoWLg5pwWuTIlJCB3bN3nyzw7dp1EOa9TZv20jcukmQQu3XrjiJFi6OlWxts2boNdWrXtcYfMR09bqIs1tSGk0mcv2ChaAFTJE8p7fD85s1b5fnT5YzCfdeuPcRtlM/IM9NGATlM6HDo3rM3Ro+ZIMoNCnzTZsw
RIXLfvv0oVry0MOh8DuMnTpG5iG4tPXv1kedD4SJlitR47fEatKA2adpcLGFGu96wYWMYhcucOXMRP258DBs2QpgtCv6co8icUKlSrERpcB4gjZswGX36DRRGv1GjJogXJx4u25lx6+ECWLdhEyJGjILeffrK4U6dugoW5cpVEOtB5UpVQKbeM1FYL1GyDKL++Rc6dOyCLVu2In/+gqhatYbMvUuXLEWyZCkwf/5CqTp5ynQUKFRMnmlzVzKeFUW7SvfS8OEjYsSosSLwkNmhYF7TPheQiSLDs3LlamF0aF2dPnMObt++AwqOpcuUB+cPUucu3eVd2L17DzjX5M6d12L4t27ZhmLFSsg4pcU9Xtz4TvFuFN5oGfVsySFTyfE/YuQYsWzTGkWBevXaDTIvUgClIsZYuyiocp7gnE0BxaxnVHJkyZrDYvK2bN2K+PESggIYqWevvujeoze4LpYuXRYVKlaR4ytWrsayFavkWr169xFB2GilKWTTak3LWKFCRUVw9myNYCNHjtqEfvr4jx49GhHCRwLdor2it+/eI0f2HAgUICAGDBgkCQoWL12ObFmzo0uXblYVvgt0c63v0kD6HC1aDKxevUbO0706YYJEHwkgZFq55tKdi4qordu2y7zSpElTqcf9WLHiyBjm+z506HBZdykEMv6ieYvW2Lxlm1wvU+ZslnBPhp4CP3kJBqIvX7kGx46fFOVUo8ZN0byFm7TP+DUqJQ8cOID69RuidJlyMh7Gjp+I3/z9huYtWmHu/IUYPnK0k1Xi0JGjyF+gkMwntMIOHTZSxsSAAYNlzBCXaTNmShzKtm3b0aNHLxQqXFQsdpMmTZWyGTNkwu69+0URRHdqjpeTp06JUHj5yhVRztSr39gKpG/p1hbsF+nK1auoVLkaOEZprc+SObsINTzHuayZaytZN4h/ugyZZZ7kudJlKqBb9544fPgwOMctXbpc5hjOn8SBSlWOlx49++CPIMGwwa5k3b59h8yPGzZswuLFS5EseSosX74Srzw8kD1HLgQN8geaNmsBzolUhrRs1Vr6qf8UgR+BwDcJICdPn5HBmjevTfudMVNWdO3ey6m/l69cE8aQL4QjnTh5GitW2iY3BmbTMsBYCJouKdlnzZrDCqiiL3+5shWkOrXb+QsWwcJFi2Wf0juDu8xCvHbteoQNE94KzqQJmxo9EhcDSvicUObNWwB//n7DaLtwIgW8+MfJi8yhTxAzYGTJnFW0dnnzFRALwIuXzu4sM2bOlgV42dJlWLNmrV2Aqy6XpxZ85sxZsm3cqChIUIi5bc/0Rdca1xat8dAevMeFw2hjiREnaK+IDGfz5m6y0PL8JXtwPLWHGTNmloQDPL5hw0bxTeY2NUDUaBkNz+MnTxE/fkKMtZuZ6fdKBsoQnyFdzkgMkDdEC0ebtu2k7a5dusH/b/6xZ98BsXxVrmzTXLNsixatkCN7TlPN6XfGjJmioTUHaflJnCiJlW2ITFFvu5Vt5oxZkgyAZcng0gXu5i1bdpIsWXII42Xa8fwbLWo01K1TTw7T/cnQ/YePkDZNeos5IXMcLEhQsB+GVq5cJZrwp89eoEzpMrKI7bFnKjp99hz+jPwnChey4WOEhKaNm0j1qpWrIFzosHj4yKY5HNB/oFgw6BZFWr9ho7Q3cMAg2U+WOKloPWXH0z8u2mTAaLHyirnxVNzaPXnyJKjtu263GlCrnyhhEnABN9SuQyf07dNPhKx69VzkOpwnqKXjO09q3IiMZD5Li1m4aAnLusMxRSGHAg2JDHnkyH/K+8r9efPmiXWH2wwkJ1M6adJkYcrTpk4r13vxygNdOndB6lRp8Nge70QhgEwGacnSZWjf0WbRlAMO/ygQlylTHtTcGiLDWqlSVRkzZJpDhwqD5q4tQE1jlszZsGTJEilq3sljx44Lw2ncjWhNat22g6XJpgDStauN8btw4SLSpctoJaNo0tRV6m7cuFE0y0GDBBPmh1rV/AUKg9pXEpUSZHaPn/g4CJ3vY4dOXUDLGYkMP8fmjBm2uWPBP4tQrXrtj4QvCplTpkyXxBXm/WzVohUSJ0oq7fBfzlx54NbKNqbXrd9oMaQUiGgFkwxfN2+B85uju8nGjZtQqFAR3L5zT1wVHZMOzJk7H1RsbNi4Ef/8s1jm5ZGjRoMuozlz57OE9KVLlyFnrry4cMFmIWVQNIVoY4F337FbXOvoskRml9ZZr+je/fsy/ubPn2+dHjhoCKhY2LZ9OwYNGizjaMu27WKVcpzD+vbtL26gpmKDhk2dGP9y5SviypUrwjwygJz9IJ08eVqYYW6PHjMebq3bgO5cffv1l2vR+sKkErSuGyLTSwWbZ3cbni9dugyCBg5iiooV1DE7mHWClq1375EnV16EChYMOXLkQuyYcRAyeEjs3X/QsZho7Dkv9OjRU+6J28Z9lwoezu2eLSC0RNDCxVhLMsOkkSNHo0D+QlbbXHO225OYML6ucbOWoNKIFrNixW0KGxZmgDatKSRaG2rVdhGmntZHWkhHjBiJzVu2oHQp29zJ95/jioIgiUH7VELRnYtY8H2ZMOGDJdtRocLy9NAwFnkqxLjOMRmHoalTp+OQXZgcMGAg/or6tyiaGEOZNEkyCZRn2Tt37yNL1pzyLlIQ5hzJoH3S+fMX8PLlSxkHzZq3xBQ7L9KipZtY9s21mBwgSpSocj1avzgPGKI1nZZU9p9WYdLRo0eRMmVqDBsxSvZnzpqNosU+YEn8UqdJL0IzC5QqXR69e9u8TLjPtTR+vATSL85pceMlECGQ56hIoeJLSRH4UQh8kwCya/ceWWi44JLoIjRn7sKPFrb27WxaTc+m8MuXL2P4iFFiemcwoTELduna3enlpKm+hV3jQctJ3nyF8I99oqJEX7psBcvszoDOVKnSWi9TU9cWlnmzSOGiMnm1adsBLvUbykvIPn+K6ApBLaxPESeNIkWKC+N15+5dRIkc1ck1g9dp2LARkidLLqZsTqgNGzS0XNhmz54ti1XKFKlw1579iNp9CoBknEiLFi9FS7d21qLMRZWLB4lWCJY1jJIcdPi3/8ABhAsbHgkTJgY1jiROrkw2YDRhixcttgKtqdGnAOLol0uXBgo9pJs3b6FI0ZIgo0KiFpxuViT6cV+/fkPcopIlTQ66otWv3wjVqlXH0MFDQVcBMmZ9+36YNClAGAFGGnH4R60zEwaYjDG8b8dFkIwqGW5De/fuF8GyevVaotmju8ijR08ks9Aiu3Bryjr+9us3UIJ/2X+6Zhjh6/qNW+ICwiw5pBs3biJ40OAgE+5IJ06cQo9efcB7jhguvGgyeZ7uiOHChEN1u7Vw2HCbRm7kSNsCU6RQEVD4Ma4LdL/y7+930AWRxKxIZBim2d0Y4seJKxrOV558q1nWJnz7Axn2x97wL3d3d0eypCksbRwZ+axZsoMMHenh48fg+0ZXPy6wrs1biMuOEVBWrVyNgYOHIk+evJKhjIwVBTUymYZZ5eKeLm0GzJ1rYw7pRhYrZhzs2mWLQSITQr9/krgc5smPsmXLo269+oL1WL
u7Zbt2HWVMSUFA5hgmliDRNYmKCa9iQLjI0+VwqYPbA8dmurTprHeyccPGIgCwLVrTiHu2LNktX30qDvieGx9/Kkko8Bg/dzLrJjPdgQMHZcyRwSAVK1ocmTJkQpNmzUWYbde2nTCD1MJTi26IDCsZphNeCCAs06//IPTpO0CKk9mjNp1jjDR+wiRQ6PPsfsZzdM2km5yx1rg2cxX3Kp4TTXqO3Fb8Co/xmQ4eOlwUUaVLlxNmmQJAvfqNQC2zIbq0RYkYGX9Hi2G54JpzU6fPBBl3Ml20dLdq6SbKjX79+qNY8ZKmmFieixQtAc7LJAogtMIZay+Z4cJFSsi7xxSuc+YtsOp63qC1Zo1D8g9mDKpQsbIICU2buoqAyDbq1XWR527q0y2vUmWbhZYWm8pVa4Axg6RHDx+BbmZkmOn+x0xOnoljgIowjlm6KDZzbYFuXbtL8DBdRh0tU+PGjReriaO7kWmvSKHCiBr5T7z0pMDi/N+mTTuxbHTv3ssWiwlIAoAwIUNj245dwuhzzBYtUsw0J7/UoocKHgKuri2lb66uzcWiwZN8JgkogHiRoIPWrZy581tuu3TTM65xfDa0ou2xj29amlybu0mMJT0JOtpjoZw6YlcSUAChAPfgwQNkz5odlStXESt7rZq1xYL4xD530YrINZ1u1VRQvX1nc8Fu3KyFCHWe2+Y+rT1ly1cEeQ1DdP9dseKDuxOP799/0L5O1ETc2HHFM4HHKciNHGlzw6areKJESUEBmeS+cxdChQqDtGnSWXMMnyHjL41bJBUCZPQNmaQD02fMFqs93cq9Is6XtJa2aNlahCCWJ9ElnO7RXMdI+/YdQNZsOXH06HGJmYwTOy4WL/6gSCXfkCF9JinLuYcuYxTCSK1bt0Ule/ykHNB/ioAvI/BNAgjdkAoVKS6TnOf+kbkkHT5yVNxuuOA2buoq/vk8Th9w+vuOHz9BytHthhMSiT7gNOcaon87g+BIXDS5yBh/abrx0PxpMvPQhStlqrQwQlG7Dp2tVHTlKlTGsWPOGkMGmHkOvDbXZayGWYh5zLMGxZT72l8KXFwwTYA2tTfxEyQW7Zhpg4HL9RvYtN7mGAP/tu/YCWbtIOY1atSU+AVqW6hBpVBhAvzIQLRt38nyYaYmuZM9CxbdR8gYmQA5o53jdYxwSLcSCiwxYsQSJoDnSpUpj5WrVkt3Vq5YKbEA3GGWsRQp04BxIoYKFykGahMNtW7TVtzFLl+9LpMxzccM/jNEC07GTNmcmBVzrk7deqC53RDdIAoW+KBZM8f5y5TQtIQZ6tylm1NZMsQMPCbRSkY3Dz4PkxaVzAQ1pgkTJMa6tZ/OSkZfdS7exNEIO2yTC0ia1OkspoPjNPD/AqGZgyaLTCO1+fRlphUiyP8CyfVZny4VdBmjewyJ2kBeh3EgpBJFiyNWjJjiXsT9UICDSwAAIABJREFUXj17I8Bv/mWB4f6y5Suk/PTptqw28WLHRd48+aSu53/sa+ZMWaT8mXPnPZ+WRZ8aRMfxwULUYlLIY3Y1Et2dsmbNLtpM7tOtgoGj/HUkuj2RCaf2mu4F1CoyxSaJrm2MBaLFh0TBmtnEzH3wHaSm38Q6MNjSuLJwPFew4yWV7YkxyHCTmSpjvwbPUQAdYE9LO3bceBEyTB3H+ySj0KJVG3FjMucZ+0N3Gkci00Fm6vHT53I/dInLTeH+3j0cOHjQekdYh8+7S3db3AD3aRXrb8+CxXmQaXmp7SfRF57xCZ6J8U9MQWuITC+ZmNNeMIQs02/AIFHucJtCHa0PdHcj0S2JbpqOcQ5yAsDqNWtRrnwlKyi+Tes2Et9lztM1bsgQm2WFluVcufPh1OnTomk1z5QJPOq6NBQB0dSjsEgXMGqvU6VK42QBZfAs++SZaGFLnyGz9Z7R+koBxDBZxI7WKjP3sT7jjPju03LGtedTRAGELmYkCgUtW9Ei4RwzxeNNmzYHBStDtO4y7oLErFjNW7bB8ZOnZJ9rUMkSpUUgp8KGsUCeiW02c2350bW4ztAyxvhEQ8NHjLSuZY6Z32JFiiFwwEB48uxDrArPzVuwUDwIOMdyvjPfQsqZPScC/h4AR+zKQsabcX4h9oZoMaP12auMdBSWmAnLxFiYOvylJSdXnvyWhZPpvE0CEGJClx6zHvN9JmZcg7iGU7B2JJM9k14KDPYnXbp0Cdmz57LGgSnP93bM2AlIny6juGat37gZmbNkM6fRsHEzbNnidZYrWifpUjVg4GCrPF3JOMYM0RpDC82Jkydx9+5dmYeMay7nPbqTkWjdSpEitTWeeIxCOAVKujJftXsTUAlBaxGJihta7wwdoktVyjS4f/++KF9at25nTskvlXy0pnAu4txGolWXygTSoEFDxeJj+BX5BEHWHNb6EjlSFLRp88GqO2/+QqRKnU7q0oqeI1deXL12TfY7deoCKuaUFIEfhYC3BRD6ZQoDmiKVmEHp68w/LsDU6tCfnswGswCRSSbRDaZosRKiserRozf+/PMvCU5jKklqxqgdoFaDPrbUmlAwILNOVwoTRMpFhYGg02fYFixaQOgisGzZSlkQGKzMWBNavtlWzdp1rcBJ+kPWqFVXJjS6ZlB7TNcXRwaE/aTmnnEjvAcyM3fu3JG0pCYwm8yqiXP52gdExmbr1u1InSq1aCJN3n8uGFxkqY0jo0Y3qXwFCompln3cu+8AGHxPF5YixUriin0yq1GrnvSP/aB5+siRo6JxZWBk0KDBLe1fjeo1LezOnDmDbNlzgYwb40V4PUPUCLVu21FceXiMWmAuLIxvIN4T7L6r1OzR/My+0beagZ+VKlUW1xAypgyepxuNIT6nuPESYZN9IVi7dq0sfMbaxXLU3tCNjMIIXccWL7UlLdi1aw/+FzAQli9fIcxShfK2uAcz6Zpr8JcMPWNhGIN09PgpVK1aHSmSpxArBTN7UONYq1ZdYVYYtE9cSFwsw4QOK8+EzhL0KSeDRU2Tmcwdr0OtOa11HJPPHII+GaQbOWJkFC5URIrTykQLRYXyFcTthIJu9Wo15d5nzV2AalWryzbHBInMDxmC3DlziUWJQjj3O7bvIBqsjOnTI5D/gBbz1axJMzlvhPZRdr/2Pn36St/DhQqDBPHiWxYTx3vgNjPY0QSfKkVq0CrDAH1aMsgo0uRvxrpjvQMHDiFBgsQSF3Hy9FlRFCRKmMhJK0oXR2pxuWBTE7x9xy6cu3AJmTNlteKBGBBNQY/MBplHKhDo2sJAfyYvSJwwseU+xDkmSOA/sNi+cFPwoisixy/fTQZAk+l89OiR/M2YOQd37j1Andp1bCla376VOaRggcIizNLqsnr1WpkHqIyge9hLh2xYtFwxVSnbNK4vBw4eRvYcuUHLGMc9k29s2LhFBJCSpcrhod0Vr1rNOjh58hSuXb0mwgLnPbbdoH4jhA0TTvz+OS9VoLbf7lrHuY/uKwz45Ty0dv0GYeoZN8BrrVu/SdJLU/kRM2YcNGncVOZPzk8cHwwY9py5hti4tmglVigqFJjUgSlA6epKYrBt5szZPkpDLDEtE
yaJQGQ+XlqhXHmx1pEBItPGOADjgsXgZz5vEl0naTnl3EgM+D7TpcjQvHnzxSLHfVoYwoeLgH3794tShzElOXLmlXmDjBt9/cmocvzky19ILKoUovi9KcZZ0ZLJeyazyHfRxIuwbVoIgwUNjhYtPo5xMX3hnJY+XQbMnPlB6KFihPEvjPljH6gsYDA83wO6sowbP0EUFNQOB/AfQBhfj7dvxZLGOA7Ow9Omz5Jn0rRJM5k76HLDhCoUVNiOsUAx/oeWY3MtvvschxwvVJ5NnDRZnjEtyYH/FxiDh46w4lHMPTDonnEX5cuVl28Bbdq8TdYKc97xlwoiWkQ5XvgtKhKtSHz/eWzQwMEy5zDBSrw48ZEoQSLRhnMOZNwkYxuYYIMxfF5lq6NrHzNFMmaJRAGb8RHkEfh+07JA1zpaBPkNEr7PVLDwA8HZsuUUly26s3Kd5btGYpwZlTy37K6xDG5nID/fNeK5eMlymT/oHWEYcgpwTC5Bd2QqQTg30I3KK+I90QWLGFOZsGjJcrGemKQprEM3JCbeIFHx8UfgoLIm0g2SyqC2dkUa1yl6DlDoZN+olJW57d17iTWkqxnnAQZ389tipB07diJqlL9EScB779tvoLhN8hzX56RJU0qsBnmqy5ev4vmLV2K5Dho0hAiVz58/R6JEScQaSYyJb4YMmeVbL1Qs0EWTz4TffyFNmzYdEcJFlDmAfaTAPdLufs57Y+ymcTdr2KCRvKtGUcp14WsyH8qF9J8i8A0IeFsAoX8/M88wuI3BnzVr1ZWMVrRq8LsgZNToFkGttHFFYKBm4EBBwEAvxgtwwqC2k9ppBv7xZad2idoaTkxMa0rNf4J4CUGNDf1PqbGgnyfdgjg50hWGQk6/AYPFB5nChyFmMqKWMFmylKC7GIkWAjI8jDGhmdNRe2bqtWvXSSYO3hMDxfhth6nTZoJCAxeaQoWLW5mQTJ0v/e7YvUcWuAzpM8jiP9fBPYCBctGjx7I09BRC2Edqmx1jZ5g5ha4N7Pus2fPkkmSaGQzKyWfgoKEYPXa8ZHbhgseUsMxExW+1GG0grRPEbs9e2+Jv+k1mjgwmNS8MZvtn8VKJD2AwKs23ZP45qdLFKvrfMdDb7t5B/3K6gpChcWvT/qMPyjFovllzNxw5ckwuxX1qtmgCdqShQ0fIdflNCuPSwPO0evwdLbowHcwyw6BuClueiVr98hUqo0GDxti2Yzf43QEmM2BWH1rFiAMZJu4z+1TK5KlkQeAYyZ4th2TZYZsMeOe3WXbt2feRYGquSbdB+mc7EjM/MQg2tVjfbAIYg/Njx4qLWXPmCcO0ctUaMePTxYFBkVzoa1SvJdeh8M0gc2bH4rdDiHOs2HHQyq0tZs6ZLxpspsFkPAKz4tA97+/oMSTpAL/NQZeHJImTyKI5eNhIceWJGSOWFf/k2FezzS9p07rId5SxNoUKFZOxxAXZLD6mLH+pEKCrA5nLLdt2oGXL1hI/07atTSPHMnS5aNi4qQSWkwEg3iRaT3LlzCOMOV3peH9URpAYHM7EEgsXLZExzOfNAPb7Dx7Jwszn2LSJKy5evioadAZfj7ELxBxPZOb4jpKh4IJN5iN7tpyIHSuOzB8MjGfAJi1UzDJGomabLgpnz19wes5kRGPHjivBxoxDMkShgxYIui3QjcwQ74/WWn6fgtZYEoWFUWPGIVOmrJICnO50vB/GVTC1KjXJnPcYaE5ihr08+Qpa+2RIqVRg4oRx420aTpZbtWqNCNlkumiRqVChknxYzgQwS2N0x5u3QK5N5nzl6rWiTKF/eY+evUBXQVoNKEiQCXG04jG7HZ8Z32/OjSdOnRHc+DHTTVu2Y/Xa9UibOp0E2NN19cSJk8iZM7ckcWB/OHdQgOd7SAaUz4OxHlS+MJA9QoRIEifAFLBJEiVF0qQpJKsU+z1/4SLJkMiPFfbs1c9Kg06BmNpmZqbq3LWHBKRzLD17/lIy4jFg3li2zP1PmjwNIz/z8VnOgRxT1NIzzTiJ6wDvgVY6Mr7MqGbWBo4ZuvXSOu3Wup2MXZNum0G7/CAkkyrwWfF95hxCIuPGoHI+R65r5+yKOI4PxkrS7YXvEi1Jxrq+ZOkKUfhwjuS7TzcuarM9P2O2zzWEczPnXmY3Y0yEZ7p2/Sbq1KuP1ClTI1PGTEibJgM4D5GYjZJZKsOGDodxdk06LcIZ0mcUdybizfeZAhvniSh//mUpBsx1zp49L+5ICeMnBMf5nXv3Bde//opmpfLesGmzCPDMdsVkAxRajeKEyoDEiZOiaNESMsdT6UOBie9TksRJxXWRgjGVXUywkj9fQckiSAUWMaPLFJPUlC1XAUOHj0TlSlVlDHLs8VtQzF7FOcIrIq7Jk6eS58akJZyrae0yromMp8ySNbtkKus7YLC4jfLdY4a6lMlTokaN2mAmMsZhRI8eQ95HWoiYtYrJE5ixj7EYIthPnCJrA+M2jCWSSXp473TnMoKh6SfXmHjxEkgmOo4DCkzkPxibxgQX5K3o+sg5xihjOZ758d2Hj59KvGOCeAkk1sm4LNLNNW6c+ChSpASYhIBEIZtZ8chTMSnDzdt35TtlMaLHEgUzhRmXBo2dMneaPuqvIuBTCHhbAKGG8+zZc7JQ8KWjJs/8mU7RkkBzuXGPonaMzIEx9bEcmUm6EpGoETp2/Lho0+kSRDcQajYZ38BJkMw/M2dQU0NLAH3w16zbIFootn306DGnoNqbt26JRu7KlWti2jT9Yr+ZjYdaBK+I/eHf+fPnRZtH8zEXURIZMy681JR6h6i5uXDxoghe1BCaoHHTBrVfzM5lgoK5eFETQpwdifdJv3FjQTHn2KeLlz6kr+Vx+s4SfwoO1HoY4nU837thOPmsqKkzmWOYrYN16R9KjRhjV8hUmCBVtslJinjyeX8P8R5sWH9Io8z2iM2pU6c+0vR6vhbN+iY2hmOFY43aJWqS2Wfeg8GTCQHoP8/nysQGZgyyTS5YZsx6vobZNwyD2aflhtpXXoeaW0N0geAYNkTmy5wn80d8qfkmtsSRbRBvMtL076ZbGPtGVyVu8/lTG02LBctzn0wCF03mgOc+yzMYmAKGuV9zfa9+yQgTe74/xq/aq3LmGHF8/+69jCFibNw8zHkyEXyO/HPEkffNccL3lsSMMcb6SIGZGlP+cczyuTF4k1iwHp8X5wNej9Ylo6FnOyzDecS4R3AsUyDls+B7J9ade/dlfBIfEq0EnAc80/Xr16Uc0/Fy3DgS75txVJ4ZPbqbMlOVeYdMHc5lxmJpjvHL9bw/Pi8zDniO75P59hD3+Qz5njuW4XHOfYcPHbZcpEy7jr+cI9lXMh68X+JFpo7zAWOWiCHvgWPPMNmsTysD5zWW5x/vn33l+OL4JpYcH3zefBZS59Fjp2dKaxqZWD4T3gM1p3xXiBH7QC03id8UIf4meQiP8T1nBinO247EfnF8OmLBNokZx4VhGFmHQjLjAB1T2zq2xe0bN27IOsJ79fzMGOPAsWSSeJi6nNtoQfYqJS77y36bsWzq8JfzLMcM780zcb2k6yXf
dUdiv86cPftRLKVjGbPNOYHvlIlFM8fNL9+hc+fPy7rz9r0tqYN5B0wZjhOuvYY4HxIDM3Y5r3IN5tjj+HUkWosY48V5i2OK7zvfXVl37JnMWJ6xm3QX9EqQYn94PZYhcW1jFimOF/bLcYwSL44FR+K1WN8QhVbiR56BSkrPa50px1+OUSYx4DW4Tl6/ftNpzuI44PXMu8nnePrMGZmLec42Rz2Q++e68db+/Q9aSTifGuL4532yP3wPDfFdpcum5/WE59kex5XjO8LjnDfN3MVnd83uOkUri3lmdOXiM+G8apI0sC4zTTr2i+8W+TeWZZ/pFcHnzPeKz5q48H3mc1ZSBHwLAW8LIL7VEe+2yxSbDChVUgQUAUVAEfDbCJBRZLpyMoZKioAioAgoAj8/Ar+kAELNLc39TIvJ/OZKioAioAgoAn4PAQbP8wN4/GYT3erosqL0eQSo/d7i8GHCz5fWs4qAIqAI+A4Cv6QAwvgDfj2VqTn5ASslReC/igDdmui3SxclJUVAEXBGgDFlTD/LmAKlr0OASVayZMkJfqtLSRFQBBSBfwuBX1IA+bfA0usqAj8aAab6ZNCu+bLtj76+Xk8RUAT+WwjwY3TMgsXkJUqKgCKgCPxbCKgA8m8hr9dVBL4CgU6dugqzUL9+w68orUUUAUVAEfg0ArR6JEuSTOaUbFmz4/7DD4kyPl1LzygCioAi4PMIqADi85hqi4qAjyDAbD/8qu7v/vyBKWiPOnxnxUcuoI0oAoqAn0KAHyoM6O93/B0lCvz784dRo8f5qfvXm1UEFIGfBwEVQH6eZ6E9UQScEOD3CSh8xPwrmmgsmzh8Xd2poO4oAoqAIvAFBPjB2GRJkyFU0GCIEzMWQgT5Q77bcvfeh/SwX2hCTysCioAi4GMIqADiY1BqQ4qAzyHAL6ynTJEKoYMHR+wYMRExbFjE+DuGl18k9rmrakuKgCLwX0Wge4/eosiIHT2GCCB//xkVAfz9jomTp/5Xb1nvSxFQBH5iBFQA+YkfjnbN7yLArzzT+hHjr2iI9Xd0xI4eXfb5FXIlRUARUAS8gwA/9sevXkcMExZxYsSUOYXzSuhgwZE2TXr5wJ532tOyioAioAh8LwIqgHwvglpfEfBhBK5dv4n48RIgXMhQoLZS/mLERKRw4RAlYhQwM5aSIqAIKAJfg8BrDw/Uq1dfrB9/BPwfaPlIHD8hokSIiAD+/Mnxjh27fE1TWkYRUAQUAR9DQAUQH4NSG1IEvh+BN2/eoFGjJsIUMEg0asRISBQvPqJGioxAv/0ux4sXL4nnL158/8W0BUVAEfjPI3D6zDlkzZoDObLnRIkSpZAkYSJEChsOaVKlRtmy5ZEubXoULlIcDx89/s9joTeoCCgCPw8CKoD8PM9Ce6II4NbtOyhfoRLKla2Aps2aI2P6DAgTIgQypEsv+5UqVkHxEqVx8dJlRUsRUAQUgS8i8OzZM1y9eh2vPN7g9Zt3KFa0uCgy2rXvKHWfPH2G6zdu4sWLl19sSwsoAoqAIuBTCKgA4lNIajuKgA8gwC+eP3v+gRFo0KChMAsNGjSyWn/12gOvXumX0S1AdEMRUAS+GoFSJUvLnNK9e8+vrqMFFQFFQBHwaQRUAPFpRLU9RcAHEajv0kCYhYYN9EOEPgirNqUI+FkEihezWUA6dersZzHQG1cEFIF/HwEVQP79Z6A9UAQ+iUDdOvVEAHFxqf/JMnpCEVAEFIGvRaBkiVIyp3TqpIHnX4uZllMEFAGfR0AFEJ/HVFtUBHwMgTp16gqzQEuIkiKgCCgC34sAk1j48+cPnTqqBeR7sdT6ioAi8O0IqADy7dhpTUXA1xGoU9smgNSr6+Lr19ILKAKKwH8fgeLFSogA0rFjp//+zeodKgKKwE+LgAogP+2j0Y4pAoCxgKgAoqNBEVAEfAIBkwVLXbB8Ak1tQxFQBL4VARVAvhU5racI/AAETAyICiA/AGy9hCLgBxAwFhAVQPzAw9ZbVAR+YgRUAPmJH452TRFwqeci7hL16moQuo4GRUAR+H4E8uTKI3NKq1Zu39+YtqAIKAKKwDcioALINwKn1RSBH4FAhfIVhVmoWKHSj7icXkMRUAT+4wiYIPT2HTQG5D/+qPX2FIGfGgEVQH7qx6Od8+sINGnqiuAhQqFFC9VW+vWxoPevCPgEAjt378XceQtx+Ohxn2hO21AEFAFF4JsQUAHkm2DTSorAj0Hg/IWL2O6+C5cuX/0xF9SrKAKKgCKgCCgCioAi4MsIqADiywBr84qAIqAIKAI/PwJ3795Dx87d0KpVawwZPBTt2raHa/OWYAKIHTt2+cgN3L5zDw8ePPhiW5MmT0G/fgPw9u27L5Y1BS5euoSFi5aYXfk9duIk6tarj/r1G6JWrTrYu2+/HL956zbev3/vVPZn3GEXr1y9bnVt+fKV6NixM548fWod4128fu1h7euGIqAI/BoIqADyazwn7aUioAgoAoqALyJApnbR4qUoUqioxF21bOmGGTNnIUqkKAgdOgy2bnP/7qv37jsQZ8+d/2I7LvXqI07seHj46MkXy5oChw4fReeuPcwuevToheTJU6Fnrz6YMmUqxo+fiOUrVqNkqTK/jEvnrj37MHzkGOueWru1xV9R/wYFKEMdO3fFiRMnza7+KgKKwC+CgAogv8iD0m76HAIHDx3G9u3ucHffgZcvX1oNv3r1GmfPnsONG7ecjlsFfHDjzZu3ePjo0Xe1eOv2bdEOOmoDv6tBLyq/9rBpFl+8fIlbt+9+pJF9+/atF7X0kCLw6yIwadJkEUB27totN3Hw0BHZz5QxCx49eYrLl6/gwqXLeGc3INz4P3vnAF5H04bhfvg/1bZt27ZtM2Wq1EyNJLWt1LZt225Tpm1S9/mv5z1ntydpipQnyTvXlZzF7OzsPbuz82r29h08euxpXvDJk6ewdet2PH/+wtz2+LEnunbphnjxEmDZ8hXw8vI29x04cBA8xkgeHh44cfI0zpy7gNevX+P+/QcitLy2WkPYRz2zOZ7HsQ+YPccdt27dlmKGD3eVOo8e/X7wzh1v375Fly7dUK16LSnbOKen5xNs2rwFr16/lk0874WLl3DvgcVa43HvHi5fuWZk9/G7ffsOXL/x3krhY6fNytZt23H7zh3ZwnpcunwFdz3uyfrdux44c/a8D0vG8eMnkClDJnACjhs3b+GxpyeOHT+J8xcvyzHsu3v16oN//vc3Zs+eI4zOnj0nbUGeL168kH58567duHLlqk1NdFEJKAF7IKACiD20gtbhpxDggKFunXpIkjgZatepj3r1G2Hm7HlYsmQZHj58BC/v56hUqQrChQ2PSZOn/tA6bdu2A1GjRMeSpcu/+jyz57ojUsTIoKb2R6RjJ07JS59lL1y4WAY0s2bPNU91684drFu/yVzXBSUQGAgMHjxU7vUDBw/L5ezavVfWixcrIUJFnz79ECJ4SDg798Pzl6+RKlVadOrcVfJOmjQZ6dNlQNnyFdHUoQXu3PWQ7es3bELI/0IgQpiw0vdQaGEaNswFWbNmR+48+bF
th8XCsmDhYsSLGx9NmzSTPAcOHhJrSMkSpTB91lwUK1YS+fLmx/Wb7wf9FACaNGsh+Rk3xnPFix0XfjlZsR9cuWotXlmVC6wLry1p0uRo0KARHj56jFevXqNmzTpIEC8Bhgwdjj7OAxA3TjzpF23dwlq1aoPkyVKiUKGi2LN3n5zfr3+du3RD+vSZUKVqddy8dVsEoeYtWyNGtOjo138gBg4ahhjRYqB1a0fz8PoNGwv3pImSYNHiZThz7iKKFimOiuUr4unTZ9i+czfChg6L0MGDo1Gjppg4ZToK5C+EiBEiokaN2iJ8nDl7DukzZMbESVPMcnVBCSgB+yCgAoh9tIPW4gcTePP2HfLnK4iI4SNiz+494v9MH2gOMoLLYKK/1GDsuAny0nNzG/FDa/TY8wkGDx6G3bu/3rf84uUr+O/vf1G1arXvXtdRY8Zh7PhJZrnUVHbq1NlHfbv37IPlK9eYeXRBCQQGAm5uI/HHb7+jTZu2GDlyNMKHDY/YseLg8OGjcnm0DPz95/+QNXNWPPV6juEurtiwYSOu37glfUf/fv3x+s0bZMuaHSVLlpZjaF0tUrgokiVJJgNwbnR3n4/fg/0h2n32QzFjxMLp02dw994DRIoQCXFjxREBgnEQBfJZvt0xfcZsjBw1Vs7Tp28/EzeP37x1u6xPmDBR9pcoXgpeNlYYM7PNAq0K2bLlQO2atWVr0iTJ4NimrSxPmTpNyilTugx27T2ARPETIGGCRPC2ltmpUxckiJcQb98CHTp0ROJESfDWV1wJrRQ1a9RCwYJFpMwG9Rsib578srxk6TIpP13adDh24jRyZc+B5MmS46HVmrR67XoE/+c/NG7UGEaxBfLml2M2bdkm+QoXKoKI4SPg9JmzUua69Rtlf9u27WTd29sb+/YfFGuIbNB/SkAJ2A0BFUDspim0Ij+KAN0KGlq1aStXr/3gNLRCuLsvkO1z57qLVm3uvPlYu3YdunbtgZevLG4JxoHr1m2QgcUkG63a3HnuaNuuPQ4eOoyzZ8+juUMLGC4c02fMgoNDC+zYuVuKePX6DUaNHguei9pAI50+cw4tWrZB7Vp1sGXLVmOzuHo4OXVB3br1QXcQI9EPOn7c+HJtxjbfvxcuXUatWnXgaiNQTZkyDa3bOIKuCfQbb9G8FfbtP2AeOmXaTNF80v3B1W0kLl25Bl7rPPcFYiliRmo+Q/wbHOXLV8SSpSswfvwkce0YM3a8WJJOnT6Dho2aYMbM2Wa5uqAEAgIBV1c3/PX7n2jUqAn6DRgIp46dcPzEexepe/cfIEqkKKhcqYqPy+nbtz/++vMvrF23XrbzGz4h/g0BKhuYypQuh8QJE+OFNWCa3+GIFjmqCPbU/AcLFgy9eveRvOnTpkeGdBlMC0bZsuUROUIkPPZ8ClpTmLdjByfJy3/1GzQGXZaYqDzh/rJlK/gQQGbPmQdHx7aggMKPEG7Zug07d+8Ra0mVSlXg6jYKESNEQlXrN4eWLluJP377Ay4urlJuxvSZED9OPLD/4l/ihEmQNnUaUFmRO1cexI0TH7dsYjN40IWLlxH8n3+RJVNmjJ84GalSpEbE8JGkPLrAsp5OTp1lvWTxktLvnDt/Qda379iNEP+FQMsWrWSd/ypXrCz9ziFrP0jLTbjQYXD9xg0zT4vmLfH3//7C/QePZNszLy9zny4oASVgPwRUALGfttCa/CACfAlWSEbSAAAgAElEQVT+HiwYChYshM/NKcNBdrhQYVCzZm0MGTYCoUKEQq2atfHW6vC9ZMlSZEifEb169kKxYiUwdux4qbXbiFHyMqXW0n3hUpQuVQaRI0WWGVtmz5uPf/78C9mz5RAfZ2rzOLjhy3em1aVpzdp1yJIluwz4S5UohRQpUoNxFw8ePESpUmXRu7czGjdqhGhRY5gDDbpdxIkVR8ryC93atetRoEBhdO/RE9Wq18SUqTMk2zCrfziDaydMni71ihXzfWBn/4GDpW6pUqTCjBmz4HH/AahpZH3nz18oZZQqVQbhQodFh45O2LFrD9xGjpb9qVOmlv30RS9WrBRGjhrjV9V0mxKwWwKDB1nuf7rv+JX43IUNFQZNmzT1sbty5WoIHzYcdltdkahIoIXy6jXL4LhQwcJImjipGb/RqpUjQv0XAu3atsPECZOwaNES3LhxE5evXpN8mTJkNssvVrQ4okWOgtt3PbB8xSp51jjQZqK1ZdCQ4bh23TJV97LlK2V/mlSpffR3W7ftkGedz3Ga1Glx9Nhx0N2LVoauXbtj2rQZmDdvPk6dOiPl0tryv9//xNAhw8TVLEumLIgSMbLErzzzfoHIEaOgXNlymOc+H9Onz8D2HTulzzIrDYDCROgQoVCxfAVRuLD8vfv2S5alS5dLPdtbXUhLFC8hAf8HDh6R/RS0yK9JoyZmkeXKlEOo4CFB91CmfHkLIHyYsDh77ryZh65sdEFj7IghqJg7dUEJKAG7IaACiN00hVbkRxHYsm0H/vrjf6D5/3OJMSF//va7BIx6v3wtWsiokaKYAgjdKuh6Qd/q+vXqywv06rXruH7rtrycUyZLAWrcOF0kX/SMOWHiYIQDflo5mKgljR4tBpYus8SAlCheEvnzF5R9/Ld9xy4JCGXAa/bsuWTWlw0bLe4FHCgw3bh12yKANGxsHme7ULFCJXFP8H7xAkOGWPzaDx8+gqde3ogZLQaSJEwMzydPMWa0xaVjlzXodtfuPZa6121gFufcb6BsmzfPXbYNHeqCKFGi4dARi1sKN+bKmRsZM2QSCwjXn7/QqTFNgLoQYAhMnDhJ7nW6WvmVaMGgW1Y7R4urkpGHs0z9Hux305pYqEBByff8hSUYnRaT5ElTwAgm79mzt5xn/4FDRhHyy2Dv1KnSIFuWbOb2cuUqiNWFcWrsG9i3dLN+yXzOvPmwLePN27egcMI8K1f5dJGc6z5ftjs6tpeyafn847c/0dfZ4oJqnpBxX4uXSr/p5mpxR82ZI5f0G9TFsP9LmSIVsmTOansIXlqv1djI7xfRikIlju/EPpB1pJWZiYoXxplcsn7zaP/BQwgVIiTa2MSFVKtSTbadPW+ZSax4seKiMOIkAAxsf2e1GVWqWFnK3rrt22cu811vXVcCSuD7EFAB5Ptw1FLsmAA19H8E+w1ly5T7bC0ZZP1bsGCYM8cSbE0Xp7Sp08pxnk+fiV83gyKr16iF8uUqoED+guJ/fPrseYQMHhIVylWQvCPcLBaRqVOny3qlipVEWDBcOeh+FS1aDGzctFlemcmTpUDjxj41qkZl+bKfNccd9M/+939/YZF1rn8RQGLHlcBRI6/tb4XyFRE5fATUqFELVatUF2GKQsaVa9cRNmRolCll8U/v1rUbfg/2Gyh4MK1atVpe3pUrV4XxGQIOEjhYWLhgkeRhUCkD4G0HOAcOHZE8/fsNwBMvb1y1amRt66TLSsBeCTBegW6JFStWkvu4Tx9nXPZj9iRaAfkspEuTzsfsSh4e98Qlska16pi/YBGSJk3hY2DPQTiPW2V1Az1+4gTChAmLCOEiYM7ceVizbg
OOnzwNBp0zX5iQoSSuhIHs8eLEk20bNm2Bs/MAWWb/Q3cw5/6D/AwAZ0B5gviJJACbMSpr1qxFhQqVkCxZCoweM85shg4dnPBbsN8k3mXnzt0yGxVjOWg5ZT04JfDefQcQJmRoWd+02eIeSgsr99erWx8bN26WWIvr19+7QhknGD3WElfXrKkD6L5qxKoMHjJMji9SuJhYKpIlTirr8xculkPZT7Evpiuo1/PnePrMC4wX4Tnp6snENuI643boKmp824RutTly5BaLklEP/VUCSsC+CKgAYl/tobX5AQTog02Xg3//+geHjx738wyGn/Ccue74I9jvGDfO8oJLlSIlaNVgovaRmsk0qdLA4959H+Ws27BJ3DJKFCsh23k8X4wjRoyS9cqVKsvg5Ow5i38zY0xixoyNTZu2yH5aVdKmsQg6tgXTvYLxI737OItQxODYRYuXSJZr12/IzDHNmjnYHmIulyldFtEjR8XtOx5WvaBlFwM4I4WPgEIFC8mGXj17yzUbrhEcYPzvjz9QqVJVsyx+/IvXs8g6OOjffyCiRokm/uhmJkB8wWPFiIkVq9aAAaCalEBAIXD/wUN0cOosMVOMO6hevRYWLVnmo/oc4FL4pjBRsWJlLFux0sd+xj4xLooxG5OnWJQPRoblK1aidOmyGD9hsrEJm7dsA62fZctVwHDXEfKcbt6yVcqgQoIzcO0/eFg+IshZsSZMmoJu3XvK+RlDsnvfAbAfePnypVmm7QIVKvzuB2PgataqI0oO2ymDjby0xuTOlRedOnczZ77jeapVq4FBg4fCxW0UGjZoKOUwps1ICxYuQr58BWRGwZ2f+FgjJ/fIn68A6tRriJs3b8nhAwYOEQtx69ZtZMKLzp27gv0k8xrJqWNn1KlTD7du3wUtza1aO6JS5SrgTGVM3N6gYWO5LtvpyClAUSlkWJ+M8vRXCSgB+yGgAoj9tIXW5AcS4ICYA2gKD5xy10h0s3AbOUZiLbiNPtHMN2G85SXIGIsEceMb2SU2hPs3WgUHb+/n8HzyBLv27JPA1fx580lew61p9BjLPPwUTOiCdeHSFdk/f8FCcc9YsXKVrNNFi+Xu3mvxj6ZLBT9YtmXrdtk+ZsxYLFu2QpY5kGFiEDq1p9RQ+pXq12sg+WltYeL0nJwt5uTps+JbnSNbDtne06rpPHbMEsR64NBhiVkpVqQYNm7eKu5n/awuWAsWWGJAxowZhwjhI2LpshU+Tm34n8+aPcfHdl1RAkpACfwsApzw4+5dyzdGftY59TxKQAn4j4AKIP7jpbkDMAFqE+nHnCZ1OtF0UiPYvEVrbNhg+ZYFtWVVq9WQQTsDRIe7jESEsOERLkw4TLR+F4RBohkzZkLoUKHBeeyXr1iNp8+ewW3ESDmuQL4C2LVnP6pUqS7uA9R4rt2wGZkzZRE3p6nTZsiUktRuUuBwcGgpbgNnzpxFsiTJZbrPZg6tMGSYm3zc69SZsyJk5MyZG0OGuyFRgkQyJSctNhJsGSIUGEC+78DBD1rm5KnTSJEiJUKHDC0uY1279ZSPfw13cZNzc6adUWMniLaWdalVu658G8DL2xtVqlRD8P9CyMfNnnk/lymM39cXOHHyFMKFCS8fNKNrmpGOHT8h3wyw/VaAsU9/lYASUAI/igA/ztiuXUfMd5+PfQcO4aJV2fOjzqflKgEl8G0EVAD5Nn56dAAj8OLlS3Flmj1nLsaOGy/+1sYl8MvFFEboB83pafll4N2790rQ55497z+y9fCRJxYuXCRTzBpfHubX1WkV4ReMaWVgPAW/cLx58xYcP3ESBw8dkfJOnTotAge/xL5+4ybx3Tb8lvkF9vnzF4DT5N62fqiMdePxdMvgtzjok7548VI8e+Yl/s3U9K1ctRoXPxIwS//xVavXiM80v6DMxG+P8KvE9DVnDAo/IMY/lkM+TE+ePpN1ztjFr52vX79Rrmn3nn2mnzXZbNmyzYebA+u3bfsuKUP/KQEloAR+FgFOl164cDHEihXXz9idn1UPPY8SUAJfRkAFkC/jpLmUgBL4BAEKT9RA0k+bwpYmJaAElMDPJkAFCq2zj60fM/zZ59fzKQEl8OUEVAD5claaUwkogY8QKFCgEGLHiosT1m8IfCSbblYCSkAJKAEloASUAFQA0ZtACSiBbyYwdsw4+VL79RuWGW6+uUAtQAkoASWgBJSAEgi0BFQACbRNqxemBJSAElACSkAJKAEloATsj4AKIPbXJlojJaAElIASUAJKQAkoASUQaAmoABJom1YvTAkoASWgBJSAElACSkAJ2B8BFUDsr020RkpACSgBJaAElIASUAJKINASUAEk0DatXpgSUAJKQAkoASWgBJSAErA/AiqA2F+baI2UgBJQAkpACSgBJaAElECgJaACSKBtWr0wJaAElIASUAJKQAkoASVgfwRUALG/NtEaKQEloASUgBJQAkpACSiBQEtABZBA27R6YUpACSgBJaAElIASUAJKwP4IqABif22iNVICSkAJKAEloASUgBJQAoGWgAoggbZp9cKUgBJQAkpACSgBJaAElID9EVABxP7aRGukBJSAElACSkAJKAEloAQCLQEVQAJt0+qFKQEloASUgBJQAkpACSgB+yOgAoj9tYnWSAkoASWgBJSAElACSkAJBFoCKoAE2qbVC1MCSkAJKAEloASUgBJQAvZHQAUQ+2sTrZESUAJKQAkoASWgBJSAEgi0BFQACbRNqxemBJSAElACSkAJKAEloATsj4AKIPbXJlojJaAElIASUAJKQAkoASUQaAmoABJom1YvTAkoASWgBJSAElACSkAJ2B8BFUDsr020RkpACSgBJaAElIASUAJKINASUAEk0DatXpgSUAJKQAkoASWgBJSAErA/AiqA2F+baI2UgBJQAkpACSgBJaAElECgJaACSKBtWr0wJaAElIASUAJKQAkoASVgfwRUALG/NtEaKQEloASUgBJQAkpACSiBQEtABZBA27R6YUpACSgBJaAElIASUAJKwP4IqABif22iNVICSkAJKAEloASUgBJQAoGWgAoggbZp9cKUgBJQAkpACSgBJaAElID9EVABxP7aRGukBJSAElACSkAJKAEloAQCLQEVQAJt0+qFKQEloASUgBJQAkpACSgB+yOgAoj9tYnWSAkoASWgBJSAElACSkAJBFoCKoAE2qbVC1MCSkAJKAEloASUgBJQAvZHQAUQ+2sTrZESUAJKQAkoASWgBJSAEgi0BFQACbRNqxemBJSAElACSkAJKAEloATsj4AKIPbXJlojJaAElIASUAJKQAkoASUQaAmoABJom1YvTAkoASWgBJSAElACSkAJ2B8BFUDsr020RkpACSgBJaAElIASUAJKINASUAEk0DatXpgSUAJKQAkoASWgBJSAErA/AiqA2F+baI2UgBJQAkpACSgBJaAElECgJaACSKBtWr0wJaAElIASUAJKQAkoASVgfwRUALG/NtEaKQEloASUgBJQAkpACSiBQEtABZBA27R6YUpACSgBJaAElIASUAJKwP4IqABif22iN
VICSkAJKAEloASUgBJQAoGWgAoggbZp9cKUgBJQAkpACSgBJaAElID9EVABxP7aRGukBJSAEvjpBJ55eeHhw0d48fKlee63797h6dNn5jr3eT55Yq5zwdvb20ee58+f4/FjTx95fsSKbT1/RPk/ssxXr179yOK1bCWgBJSA3RNQAcTum0grqASUgBL4cQTuP3yEBvUbokCBQihduhxcXEfg7l0PXL5yFTdv3UbzFq1QonhJ1K5ZG7Xr1EPNWnWQI3tOTJ02Qyq1ecs2lC5THlUqVUGNajVQo0ZtNG7igLx58km5Xt7PP1v5Pn2ckSxJMuTOmRspU6TCqNFjP3nMokVLMX7iZB95Xr95g+kz52D3nr0+tv/olUmTpiB79py4cvXaZ0918tQZPPZ8ggMHD2HC5Kmfza8ZlIASUAKBlYAKIIG1ZfW6lIASUAKfIUAhIlzY8EiTOi3Wrl2P48eP49TpM2jQsAlSp06Hux73cOrUKWTKmBnBggXDpEmTcfTYcaRLk07WJ06cAlpOdu7ajbChQiP4P/9i6bIVOHvuHByaNZc81avXwuvXbz5aEwooTp26oGjhIsibOy9SJEuJZctXfjT/cJcRqFylBi5evOQjz5mz5/C/P/5G1249fGz/0Stz57mjeLGSuH79xidPdfDwUUyYPE3yeD3zQp16DdG2XcdPHqM7lYASUAKBlYAKIIG1ZfW6lIASUAKfIHDk6HGEDhEaUSNHxfUbt3zkPHDgIBInSY6DBw/J9tq16+L3YMFw6PARWT9w6IgIFxXKVzSPS5M6DRInSoJHnk9lm9fzF4gaKQoihA0Pz6deZr4tW7dh0+Yt5vrEiZMxdJgrzl24aG772MLKVWuQv0BhXLl69YMs02fOljplTJ8R127c/GD/6zdvMXvOXIwfPwHbd+zEnDnz5NfIyG3Offth3fqNxiY8ePBQ8q1ctRrPX7zErl27sWLlanM/LUSbNm/F8ROnQNczJrqxjRgxEi4urth/4IBsO3LkGHLnyousWbJhxoxZsu2uhwcKFCyK5Svelyc79J8SUAJKIAgQUAEkCDSyXqISUAJKwDeBPn37y4C9e49e5q43b95bKjZu2oIbN2/izdu3qFy5Gv787XdTAOneo6cc27x5Kzn21es3SJk8BRInTIzbd+/JtmMnTuHff/6Dg0MLvLOegflChwqLsGHCy5Y3b96iSJFiUlao4CFRp049XLx0xZr7w5+CBYvA1XXEhzsAbNqyHbly5ZGy5s6b7yMPBQkKUenTZ0SLlq0RMVwExIkdF2vWrpd8tGIkTpQUdWrXRbHiJTF+wiTZ/uDhI7N+rVu3RZeuPRA2dFiMGz9R9t+8eRtZMmcTIY55Hz16hHr1G6Fq1Rpo3LgpypavBMbR0NL0R7DfkCFdBnTt3hPez1/I8c59naVODx4+lHX9pwSUgBIIKgRUAAkqLa3XqQS+kQDdbJo1bY727TrAsU1bn3+ObeFo/Bn7bNeNZePXyMNfY5vxa2yzzWMsG3n46zuf7T5j2TjOyPux7b73G/k+9Wtb9seOt91u5DfK9L1ubDd+jf02v225r42j/HG5RfOW4up0+sxZf7XuY8+nyJ+vgAzW3d0XyLFv377F27fvwN93796a5dFFioJBmJChUK9+AzRv3hJhw4RDg4aNcfHSZcn31MsbWTNlQYSwYVGlanU0adIUTZo4YMnSFWI5MApjnMasWXMwb567bHr56hUWL1mK+fMXYOiw4VIfWmUOHjpsHGL+njh5GsmSpcBmG+uJsfPyFUv8xbZtO6QMChm2aerU6bJ96bLlsjla5KioWKGSmSVOrLgokK+ArI8dMxZ//PYnTp85J+tLly6XY9u3a4/bHvcRL05cESSMgxs3aox///oHb96+w8mTpyXvrt17ZPfZc+fx7t07cVsLHSIUevbsbRwmv3PmzJX8Gzdt9rHdvyvPX7xAn97OaFi/oeX+8OseMrbJPWTz3PH+MvYZ95qx7tevbX6/9hvbjLL8+jXy+PfXr7J8b/OrTCOPsY/rxrLt75fm4zHWvG3btEVbx3by165tezRv0RK79+zzbxNqfiUQ5AioABLkmlwvWAl8HYFx4ybIYImuOIwH0D/7YbBq9Rp/Neq9Bw+R0RrXMXPmbDmWA2Um/r5+/dos75n3c1SqWBkxo0XHcBc3cWPasnW7uZ8LT728kCpFKuTIlh3u7vORNHESsZhcv3nbR77PrWzavA2RI0ZGkybNPshKq0PokKGxb7/Frck2w9p1G8Qti4JLjKjRES50WJw+e97MQqsJ79c9ey0DwwRx40kcCzPs2rMXoUKEQuvWjpJ/wYKFkneGlcs89/myvskqJESNHAUF8xc0y6YAQouKx737ss3BobnUs2ULi3WIGxmoHzJ4SHTo4GQexwUKX9y+fPkKH9v9u+L59CmiRIqqz6Sd9EuTp1hiffzbjppfCQQlAiqABKXW1mtVAt9AgG4qdMNJFD8BevbqC7eRo+HcbwCc+w1Ev/5+/3Gfsd/3L49xtvmTdWt+bjfKNMowthnrvveb69ZjjXzya1Oej/NY627kNfYZZfm17tc+33Uzrsv38bLdlon1+o0yWQ9j2TjWqD9/+w8YhH79B2HgoKGyTHeejp264PwXxE/YNj1ljc6du8mAtXGjpuYuulsZgoin5xOJa3jx8hVKlSqDSBEi4dr1D2MrePAzL2/Eix0XpUuWlrJokfjrz78QN3Zc3L5z1yz/SxY6deqCihUqf5DVzW0k/vvnP5lBynbn4SPHUKlKNVSqXBVNHVpInAWFjQEDh5jZ9u7djyRJkqFpUwcsWbIUEcJFQNeu3WT/osVLETpkGHTr3lPW3ee5C5dp1lm+GCvC8pYsWYJXb96B1pNCBQuZZTds0FAEkPMX3gfFt27VRo4pW6a88Dxx6rSc09GxnXkcFxYvWSYCCOv0LYlTEg8d7irB/JZ75P195Pvetr3HjH3GNuPXxz3Ie5T3rPFrPKPGNpt72Dj+g7zWY41yjf3GunGvG79yPptnwVj3/Wsc7+O8NvXxXZ5xXuO3n69zGOUZx8mvTd2N81j2DxAm/QcOxjAXN/Tu2w8Z0qXHP3/+hYULF39Lc+qxSiBIEFABJEg0s16kEvh2AnPnWgZmKZKlwMPHPr8F8e2lawk/mwCn2Y0XJ55lcL3MpwZ+587dGDh4GJ5Yv/lRtVoNhAoREtd8Bavb1pllGW5M3O7qNlLKLlSoiG02cU8xLBHPn7/Aho2bZcpfI9P0GbMwwdcUu9y3a/deJEyYGLut7k1G/m07duPS5Su4eeuWBIAzMDxrlqwI+V8IH/W943EffZz7ixBiO0DktLixYsRGvbr1pUg3F1epN4P0mVauXC3ra6xWpiiRoqBk8ZLG6dG0SVNEDBdR1m/dui1xM1yhRYRB+Tdv3oTn02eIHSsOnJw6m8dxge5v//v9f1hvE/juI4OuBCgCLVpaBE/fMUgB6iK0skrgJxFQAeQngdbTKIGATmDOXIsmmN9r8Lj/IKBfjtYfAKeuLVW6LCJGiAxq5wcMGISx4yagj/MAbN++
EwxKp7Y3WpRoMgiv36Axzp2/4IPdnn37xQrDKXhjRo+Jjk6dcfz4CcnTsEFjOS5//oI4fuKkbAsTOhwihI8kyy9evJTpfhmU3r17T7iNGI0r1z4+nW3OnLkxZMhQOfby5atgUHrlylV9WEVOnDyFtNZpgpMnTykzbrHOPXv1xqLFS2S6YVocDh85KuXw36BBQ8RaQxepvPkKoK9zf9nn+eQpypYtL9fQo0dvTJo8VWYDC/FfcOzcuQvPX75EtqzZZf/MWXNw9PgJtGnbEa5uI9C7d1907dZTPtLIaYhz5cyDGNFjYvPWHeZ5XVzdEDVKNFz7zBS+5gG6YNcEWrRoJfcCrcWalIAS+DQBFUA+zUf3KgElYCUw1yqAJE2UBNf9mOZUQQVcAlOnTUeHjk7o0KETBg0ehqfPLNPmPnv2DH379oOLixsYyN2tW08cOXrMx4Vu3roNHTp2wtSp08ApdVu0aG1+DPDJUy+MHz9RZsLavdvygcAJEyZh8uQpZhnbtm1Hjx690K59RwwZOhxPbL68bmayLlAAyJwlO7yfP8edO3fRvn1HdO3aHXSjMtLyFaswcOBgzJs3H507d8HqNWvBemS1CgrB//lPBonhwoTHzl2WYHEey5iPRo2bgoKEkWjFGDx4KBj/xG+TjB07Xr6FwvJpuXny7JnMcMU8U6Za/P737N0PJ6cuIojR8mEkCmCs6/GTFkGMQkmO7LnQq3dfI4v+BmACnLahSVMHFUACcBtq1X8uARVAfi5vPZsSCLAEDBcsFUACbBMGiop369ELDRo2Ba0nX5oWLVoMusfs3bdfphLmxxaLFy2BGjVqfmkR3zXfixcv0NqxPSpUrAwvb+/vWrYW9msIqADya7jrWQMuARVAAm7bac2VwE8lYLhgcYYjtYD8VPR6Ml8ExoybgAmTpvra+vFVWixSpU4LTlf88OFDPHjwQFzL6Cb1K9LW7TsxcPBw8+OFv6IOes7vS4BzyHGSA05YQOubJiWgBD5NQAWQT/PRvUpACVgJGAJIssRJcf2mzy9nKyQl8LMJ3Lnr4a9Tuo0YiXz5CqB27XqoVLka5s1f6K/jv2fmx489v2dxWpYdELC1gKgAYgcNolWwewIqgNh9E2kFlYB9EFABxD7aQWvx9QS8vLzx5MlTPPZUAeDrKeqRfhFQC4hfVHSbEvg4ARVAPs5G9ygBJWBDwIwBUQuIDRVdVAJKQAkAthYQnQVL7wgl8HkCKoB8npHmUAJKAIAKIHobKAEloAT8JqACiN9cdKsS+BgBFUA+Rka3KwEl4IOACiA+cOiKElACSsAkoC5YJgpdUAJfREAFkC/CpJmUgBIwBRD9DojeDEpACSgBHwRsLSAahO4Dja4oAT8JqADiJxbdqASUgG8Cc+a6yxSTMg3vzZu+d+u6ElACSiDIElALSJBter3wrySgAshXgtPDlEBQI0CtHue4T6pB6EGt6fV6lYAS+AwBtYB8BpDuVgK+CKgA4guIrioBJeA3genTZ4oAEjNqdFy9dt3vTLpVCSgBJRAECYgFpFlz64cIFwRBAnrJSsB/BFQA8R8vza0EgiyB6TNmycs1YYLEuHL1WpDloBeuBJSAEvBN4O07oFKlKtJHTp4y1fduXVcCSsAXARVAfAHRVSWgBPwmcP3GDSxbvhKbNm+Fl5eX35l0qxJQAkogCBJ4/eYtunXvhWTJUmLFylVBkIBeshLwHwEVQPzHS3MrASWgBJSAElACSsAHgXfv3uHtu3egIMJfTUpACXyagAogn+aje5WAElACSkAJKAEloASUgBL4jgRUAPmOMLUoJaAElIASUAJKQAkoASWgBD5NwN8CiKfnE9y964GXr17j3r37uHHzJm7eugX6h795w4nofn168uQphg13xbIVK/1dmRcvX+Lipct48vSZv4/lAa9fv8atW7eFz9NnXvD29hZ/ee/nz/Hi5St4eNyDYZz19PTE7Tt38PDRI7thZ1y0x7376IjddGAAACAASURBVOM8AAcOHjI2fbdfMlqwcDFc3UaBZuuAlPxb35MnT6FXrz64d+/eZy/zwIGD6NWrL3j//sz09u1b9OrTD7t37/3upyWvlatWY/CQYWbZFy5eglPnbvIsmBt/0sLLly/RvUdvHDh0+IMzens/x7jxEzFn7rwP9umGoENgwsQpmDhpil1eMN+33Xv0wtlz58369ertjK3bdpjrH1uw7btWrlyN/gOHfCzrL93OGfZ69+2PU6fPfLYefJ579OqLg348z9euXceAgUNw6NCRz5ajGZSAEvj5BPwtgCxfsQq58+RHqVJlUL1GbdSr3xDVqtdC8RKlMHioCzjQ/tVpwcIlCBEiNNxGjPRXVSgcTJ0+C6XLlEfhwsWwbfvnO3XfJ3j06DFq162PDOkyonChIrh0+SoOHz2OLFmyIWuWbBg0ZLg56J43fyGKFi2OFi3bgMKKPSW244EDh3DXw+O7V+vCxctIliwVypQp/93L/pEFPnvmhQcPH/nrFLVr10OE8BFx/8FDP4975uWFl69eyb4yZcohRrQYeP78hZ95P7bx+o2buHHzlp+7X795g3MXLn6yTA5M9u0/IMKwn4V8w0Yv7+dIkSIVcubIZZZCwZvCDgf8Pzq9evUaO3btMa+fSpK9+/b7KfysXb8JESNGQbfuPX90tX5a+S9evMS27Tvxwp/31NdU8N79B2bf9jXH28sxJ0+dBv8+laiE27Fz96ey+Gvf7j37cfv2nc8e4+TUBX//9Q9OnXk/OOezSyXgpxIVhPsPvFcmXbly1c9B+6fK8L3vzl0PXL5y1ffmb15/+vQZ9u8/iAcP/e4zbU9A5cm+fQf8fJ6paIgcOTo2bNxse4hdLD9//hwcK2hSAkGZgL8FEGpn+UXkwkWKYf2Gjbh67RouXLiISxcvYdaceXYxPeeDBw/h0Lw1Jkyc7K+2XbVmHXbv2Sed+XAXVxEkaPHxT2LwGS0oFDYqV6wkh3KA2bBBQyRPlgJ37903iztx4pTMmnHw4OFA8eI2L+wzCxwUubiOQMtWjuALJCAkvmwXLl7m76ru3bsP+fIVBLVxvhOFkgWLl5qbly1bIYLvo0f+E3JoSVqxcrVZju2C55On6NKtl+2mn7rM52HkyFGoWMHyLPzUkwM4cfI0evbp98WnbduuIwYNHvrF+e09I6+/Ry/nH17N5SvX4NYXDKB/eEV+0gkWL10Ot5Gjv8vZaBl36tIdjx57frY8vm/z5i2Affv2fzavkYE25tlz5+Pe/c8P6I1jvuR39lx3TJk280uy/pI8l69cQa069bBu/cZfcv6PndTzyRMsXOL/d8nHytPtSiCgEvC3AMILPXv+Aho3bY7Hnh92mF7e3nB1GwEXFzfThDp//gIMHDTE1NJSk+zqNhKdO3Xx4eJz+sxZeL94iecvXqBz566mFmrf/oOyvmvXLpMzO1UO5qlBpovLkqXLzX1c4HR4I0eOMbdxoNt/wCB07tzlo9roly8tmmge9PDhYwwdPgJ3PSyuM9TK9HXujxVf6NY1a/ZclCpdDs+8vKUOt27fRsqUabD
SZnq+U6dPi5uTWUkAM2fORnOHFjh2/ITtZh/LZ86eFxevdes2oE1rR1y+fMXcf/DwEWFy9NgxrFjxfipA8m7XroOpZWP9Bg0ajPkLFpp1nDhpMgYNspjlX716JcIYXdKMdOrUaXTs2Ena19h28uRpDBg4GFet34U4cPAw+vcfiMNHjhpZ4OY2AnPnusv6o8cWrc/ipcvQrr0T3rx9i5Wr1mDkyNFYZB2M07WvV+++2LjpQ83V27fvcO78RTx+/BjTps8w75EdO3bCoVlz0LXANl27cVNc3kaMGIUxY8fb7pLloUOHo1OnLh9oyU6fPQfey7t27cbw4a6oVasOsmXLLq5E56zuD7xf+vbthx49eoEvFdu0avUa4XDp8hU0aeKAMzYaS+bjfVu3bn2kT5ceAwYMEusDr7tRo6Z4/foN5rkvwPBhLuCzYiRqmPnMDB3qYrbZxk1bkDZNOhQvVgILFi2RY438HNC0bdcBiRIkQtduPXDu/AXZRfeN+/fvY/LkqaAmlGnP3v24e/eucShOnz6DNm3aYsrUabKNFpZhw1wwZsxYnDlzVrbx2O7de2L7jp3mceTctm17H4oI3ocVbAQQz6dP5XwURI20Zu06ODi0wMZPaCuvX7+Orl27C1db10C6apDnq9ev0aNnbxyyug3u2rMXefLkF+vL5CnTxAWS5+N2uj4aidfTs1cfHD9+ErPmuPtwF/Py8pbzubmN9DGzjfF8UgnD9rd9Tmhx4r2zefMWzJ+/UE7z7NkzdOvWQ54vW6fDy5evoqNTZ/C54LNAaxSfPTe3Udi1e49RRfm9evUqunTpJgKSrVvJqdNnxYLK+7FHj944ZHVHYbvkypUHOXLkwtRpM0BrGNOYMeNNlx26sPiVqIhh+8+cNfuD3XyO2A60vDEtXbYCCeInRPXqNbF0mc9+2Dj4/IVLcGLf4eomswQZ20+cOIlOnbqKqyrv+xcvPrT+kRf77p49e4PPk22iO18bx3bSj9lu57K44AwYhA4dnMx7ltvZpsazQCXVqNFjYMuB76FDh9+77cyeMw/8M9L8BYuQNm16eeZ47UxkT8H1/IWLss429CutWr0WrVq1wfLlFvfga9dvokLFykiePAUGDBiMGzf8tmSwXN6jfA4bN26GTTZ9oyjNrBZQWqzbt+9oumg9fuyJenXrI0G8BOjdx9m87ivXrmO/jXvt0WOW9w3r17dv/w8s3zNmzpF+kspGpsNHjiFL5mzImzuv3FsfU9Txexi83t17fLp3Xr95W8rZunWb3EsUwox0+85d7Nq914dyilae9u06YMKESUY283fnrj3wuPveUs93Ul/nfnKv0LV0zdr1Zl7bBY5j6HmwZs06c9rc+/cfWN5xrm62WWWZ7c53KC2otmn1mrVo1rQ5tmzZZm6mZ/Edj3vSDzg79xOXY+7k892sWXOkTpkazv0G4LjNe97VdYS0HfteI7FP4DuVv+vWrQefFyb2JexfmH6GJVlOpP+UwHcm8FUCCB/4Ro0dcPOWpRNhnfgSpHWEA32+UIMFCyYvbu4rU7Y8ihYtAXYsHMh3cOqCRo2boVrV6siRPaccw04/adIUKFumHCZPmY7adeqjVKmymDtvPrbv3I3ixUsie/acoEsFH8bWrdsiYYJE4g9Ls3TGTFkx193y9VG+sNq07WAOOF+9foMZs+bI4KRSxcpo3aYd7j948FGUjx89xtoNm8wBCzM+9nyCVq3bYo7Ni+ijBQCiDcySLSeWWDUdHAiTSaOGjeQwaoYPHT7q44XKDq5zl+6oW6eeaMIPHnpvMjfOtXbdBkSMEBHly1XErNnzUKhgEdGwM9aE++LGiQ/3+YvQrUdvce3icctWrJIBfY3qNWXbw0ePMX78RAQL9hsaNW5quqdQwOJgigPdEiVKiSuat9V1g1adtu07ysske45cwoJlczAbJXI06RC5zmuKHy8hmjRpJlV2d18gA4CuXXugfYfOOHP2nGyfMnUGnDp3leWx4yYIm85dusk6X5o1a9XFxMk+P+bEdmeexImTYsy4iahRqy7mzluA4ydOoku3HmjW1EG0g3yhMPXrPxCxY8WBY9v2IgBnzZpdfIK5jwMOxiVwwM9Beq5ceTF4sEX4osk+Tux46NajF/oPGCz3LgWQBHHjo3qNWqAA8ubNG9H+devWUwbXvKc56GTau/8g2jh2QIeOndCwYWMULVoMfLHZJsYY1aldFxHChRdXRg7mOKjNlCET+vTtj7HjJsqgjgNHprNnz6FsuYqoU7c+SpYsjSZNm8t2Ct5RIkZGoQKFMHX6TBmEG+ehhaVDx86IFSMmmjZtDg4yaHlKkTwlRo+dIM/wuAmTJV4qcZJkYi3gsRzcN2rigMZNmiJHjtyYO89dBPFECRMjUcIk8hwzH/3Oc+XOb7oqDhvuJqzLli2P8hUq4cJFy2Bl7tx58oEuHsOBSokSpVGocFEZtHEbBc/GTRxQs2Zt5M1XwE+N5fnzF1C/YRMR5qpWrY6ixUryUBlQRY0SDSWKlcDSFatRoUJl4fPw4SMZxJBVimQp5RppiZw0aTKYn/cM0/kLFyQehfcA3SPz5y+EUaPHyj7yW7dhswzIypWrIC6m3DF69FiECxMObdt2ALXAWbJkl7pzH+//BPETiXDt3H+QWPk4aB4zdpwID6VLlUHrNm2l/PPnz4u7F/tL9kl8JpjOnDuPkaPHokfPPmjWvKUMMDw8PIRR48ZNZaBPhkznzl2Q6ylRvCSWr1qL8uUronjxUuAzThdSusqmTJ4Sg4cMl36TyotBg4ehfYdOcl1+uczSzZb9LwdKRYoWl2vkud68fYely9mXOKNggUJyHvbnCxctQbw4cVGsaHHMnD1X6mX7b+/e/ahTtwFatXKUfs2IPeDgq0vX7nCyXv/0GbMlhs72WLrhNG7SDJUrV5PrLla8lKnI4mCsVu16qFOnHjp26ioDZCoLmBjDRgVSp87dULVKdRQoUAgXL16SfXzmI0eMjKHDXDHcdQR4Xy9ctFj2HT9xCuHDRcQQqxWMz+WUaTNEuOAzt2fvPixdvgK5cuVGxgyZhA3fedt37JK+wqFFaxHw/LLsTp8+E7Xr1APbsHiJ0uB7lIIA++DMmbLA0ZGC+4eWUgoffF86tu0gfVXuXHlw7bolH90FEyZKKs8a+x/2hxTCu3TrKUo4vqOpYIkfN764vFLwOn3mHNKkSS8Df140lVMhgodA+w5OmOu+EEmTJhelHvdRcKZQzn6waVMHcbfmfXvk6HFkz5odGdKlh+uIUX66E02aPFWut0GDRtLuHKgzMc4qVao0aNGitbzrCxUuhuYtWss+urimS5cRtWrVA5VNTLTitXFsj+bNWyJjxixyfbIDkP4mefJUoCDKREVP+QqV0aZNO/TrPwiFChURzkZ+45fvWr6nevbqK+9Kumvx/hg2zFXGL8WKlpDnz8i/bv0m9O7TD1WqVJP6cbzCtHr1Wjh16ir9fKFCRbFrt8Utj30Dxye8vymsZ86SzYxJrVevPkKHCIly5Svi2LHjUg7vRbqz831RslQZ7NxlKYfHs6
3Wb9yCpg4t5Tk+dPiYKK34vm7YuJkPJa5RX/1VAgGBwFcJIBQ2Spcuh9Kly8pLuHSZcnBxHenj5TFuwiRkSJ9RHqS7Hu/djg4fPobmLdsIGwoTmTJnw87de3D9+g3kz18A0aPFMH1Vc+bMLR0IM1NDkjtvQdFUc/DOlzZ96xctXiJlcdAQLWoMU8NPAWS8VVsyb8EiOPcfLPkeP3mKaNFiYuq06bLu+x8H3+zAYkSP6UPL5Dvfl6w3bNhIXo7Ma9S5cJHiePrMEuA+buIUU/NGf9C69RqBAy0mxo/ky5v/g9PwZR47ZmxULF/R3Jc8RWpMnTpdLE7p02cUIYMvMmrl6VbGID0jxYuX0LRgjBg5Bp07Wwb93P/Eqm2nsFWrdl3pELmdg0YO+GbOmiPFsP5sJ2phmJz79kOLFi1lmf9oAWvd2lHWKTRu3bZdlqnV5b3DNHXaTFOI4Xq5chWxwKotpkbnuY12XA6QAdBbGRxGjRxVNJKcb511paDgbj22d+++iBcnPqiBokWHAw1jULJr1x5EiBBZtE5Xrl5FufLv3YJ4P0SMEBnXrt8Qq0qypMnlhWsw2blrFyh48ZxM/QYMkj8uX71+A9Gjx8LmLVvkRVyuQmVTOKfWN0miJKArlO+0f/8BZMqUFU+tVjIyiRwximmFcnEbIUIfj5s0aQrKWmNmDh8+ghjRY5nasLx582PePIuFyfc5GNdE4d/QULZs2Rr//f2v6bLF6x0xcjQKFiqCO3fuSv3r1m9kCgEcxGbOkl2Kpd97jZp1cOrkKVmnNvbiZYsFZdKUaTJAMs7PQbDhyjRv3nxTAKFFx8GhpQhyzHvi5CnUrF3P1KZTaGTf4jtNmTpdhCJu5/3BAczatevkuORJkiFenHimcFuyVDnMmTdfiqCGmoNpI1ELHC1KNNMljgM1txEWVxoO6ig80IrD1KlzV0ybbnExoUXv33/+kwHH5i1bESpEKNSv31Dy8dmOEye+WDc5YE2bOq0oV3gPU/lB11QOhJnoyhc6VFiw7RctWowiRYrLdgp9VLZw4Dpy9DgYAxwOMDlpAy0LzRxaSV4qepInTwkO6Nh+KZIlFwHA0OqXLlsB7gsWSV7b62d9+BxTmcDEQGujL5IN1n8NGzXBCGv8HDX/JUqWkT0cOLbv2NnMynths1XrS8H4Y25BAwcORosWlrofPX5CYgh5TaNHj0F56zPIATyVRIaVxjgJNeQUgplevX6LhAmTYNUqi5WzZcs2cGhu6XdoBYsQLiLGjBkneanI4aDZSIyvqlixsqy6DHeV9jMsorSudO/ZW/aRTdo06THSev20RBrW9SFDh2PrVktfNnDQUBlc8qCTp86ItZvLN2/ewrRpM8x+XQq1/qNgvtj6vmJZ1arVlD1Xr91Am7YdYSh7bI/hM8cB+mjrdS1fvgJJEicz23D8+EkoUKAwaGHnO6R1mw5i8aUShy6utAqy78lfsIgIxyz72PGTUsagQRZXw8VLliJ82HBigeR+9wULUa9+Y6kG+zSj/bmBbc5+lYnW8j59/XZvZHvmL1BEJqph3rFjxyNypKjyfuOHTPl+pRLNSClTpgat8k+ePkWNmrXNvoQ88xcoDFpmmBigzvc+rVZMfKdQmUHLD60LterUN+vE906qVGlBK7HvROGPfTytrmRE6xsVn32c+0tWxgCFCB4Sx44dk/u6XgMLD+5s7dhehCguUym5YeMmOaZZMwekS5telvv1H4BIESOZ/TItyZEiRgHdwyk4U8iikoBpyJBhyJO3gCzzH4WO6NFiitJn+YqVSJIkOYa5jJD4QyozHNs54dYdi7W6VWtHifEyD9YFJRCACHyVAHLk6DHky1cAw4YOl0E6O/AhQ10+MJ9XKFceoUOF8YGDmmMGptLFhSZ7xkUYQWLO/Qaivs2DXqZ0WTFJsgC6+BQuUsIcaLKDq1K1pjmjFDXrHDxstwaOt2zdVlxMeGy1ajVQsGBh0aJUqVIdSZMkl4GEj4pZV/hyZ3Chk1Mn/O/Pv8zBs195P7eNQgHdrm7cvG36R1epUsOcYYWdIIUwpvUbNongU616TbBToZaLGh+/Uu3adX3M1EMXnw4dOkpWaslsY1/YKWbPlkPcFKh9o7Zv+gzLoIruRaVKWQZ77LzZuTM4jon1adLMcn5qKjNkzOwj4LBdu45ImiSZ5O3du49YOWQFEFcJQwBZvGQZEiZMjNq16hi75de3ALJ6zTpUtb6Q7969JxpFHwdYV27dvgsOkA03qBu3bokWlJpvatGKFSshGnbqztjZUxjwsM5ARXe4bFlzYPas2aJ1N7Ruxnk4GDQG8rRozJj53v1k+bLlSJMmnZjVmZ8CYs7sOdGgYWOUKllGLBez57gLZ2rojURtMoUwv4JEOYBOmSK1OXAeN24CMmXMIs8Hj+cMLnXqNZQXJJ8bDjBOnDqNWrXqImb0mKKFZL6cOfNg3LgP3cu4j25zeXLnM039ixcvRb78hWQmO6OOHBQ3dWiFOx53sW3bdlStWsPYJb98tnh+JmqbDYHuytVr4jrD7RTm6CJgJGpeOXhgotWQmkMj8dnl88nEwSndw4xEjboxkDa28ZeB+RQ2ec527TsiXJiwoIaVifc4taxMHPDnzVcQg62uhHS9qlSpqqkc2bNvPzJnyioCCIWh6jXqmAMUPouVqlQ3r4/XSs00NfB0IYseNboI+jwPlSO2Lo4ctNNtkImWD9tZlAYNGSZCVYMGjVGnTn1xXeFAlm47dGVNkjgptmzZKsfyH90VQ4YMgz5938dusF9i+xvXHzb0++tnP9GwYRM5ntdPTeyQIZbBJYWGylUs9yOf8TZtHPHP3/+IVtw8oa8FCqscgFEgoHtf5cpVJEfjRk1MrtxAjTuVHBTAKBzbupfaFklXO7YpLbF8xsiUCiUqeypWrioWOU54wWTcZ8bxPJauhBTyO3bsLPe94WJWv34jcZdhXgrP2bO/n+iA98Dcue+tMbTEZs6aAyyProa0IBmTP1CBwYlAjMT7kW69TLTwxo4dF+XLVYCnzcyIvAdp0WEiBwo4fNf55SIkmaxWmaPHjmPCpCmilHBwsFgx6ULFAa5h1TDy85euVjly5BEXQ67TQsABN5UpTI8fP0Gz5q1lEgmus88KGzYCxlkFFm7j+zpHzjxgH2ukmrXroo9NbBS9DSgUM02ZMh0VK1UVC8TadevF6mIcx77CUGbQHZAuSX4lavM5GYuRqLhJnz4T5sy2KLHKV6wibqbGflpoDWF0zbr1cGxneZ9t3LgR2bLl9DH5R506dZExfSY5lPdGk2YtxMJ85MhRZMmSw3T/5GQXVGyxD/YrUUk5y1of7u/Zu69YyjguqVy5qggTtP5QYdKubXuzCPYTfBZPnj4jyo1KlauKIJI/f0FwfMF05tw5GZ8YE4pcvnoNsWLEFqUJ3fs4MYfhDcB4UcMqymNv3Loj1pmt1j6hRs26Pqw4W7fvQJQo0dHSRulnVk4XlEAAIvBVAgj9OalJo8+6kU5ZTZKGLzQHsj179kGK5KnEncDIxxcMYwaaObQUF47KVaph9WpLx
+jUqYsPAYTuE4bm7NKlyyhYqJjZmVCzR0uF4fZCjX/mzNmx2+o33ba9k+m/zJfe0qXLJFieg2lqh1gP3y87o47Gb/XqtcA6fW2iRqtI4aLo6NQFGzZZBhiDBg4WN6GVq9eZLigsn64LdCuiXz07UgpcfEFz4OE78WVHc76RGjVqjNatLS9QamdWrFpj7AJdR6ZMmSZxIsePHwddUzhIoaaVL+PuvfqKBoezA9nOwsKJBuiGw3T69GnpEG2nRaT7Uu7c+WQ/B5G2mrDhLm7iGmdU4uDBQyL4cCBknIOuRa0d24mlwshHFyPGg9Av92Ozq/Blype1oSGmBpZ+vhzgkNvZc+fkZUVf8r37DqBo8VK4csXiN86po3nfchDOwStncbNNFFLdrZpzarbmzrVo0Zln2bLlYq1gmzBR0zV48FA5H+8parOoaeQAs1mzFmaxHHxTQ+nXbMMUQOhSwMBSpkmTp0i5fMkzUbCvVqO2uM5QY8yBEbXknEGKPuj7D1gGDLQGjB1r0fzKgTb/6NZFAYgBmUwckBcvWdoUiLmN7ki16jQQt8QdO3eJ+5RNEbJocYaw+H7ThYa+62wrIzHeiZpUvxIH1Ib2mfs5BTPdCZgYg0MXOd/J95TeXKeSg5ZWDgo5iKVViIkCH4UFJg5sM2fOhgH9B8o6Xdk4mDISNakZ0meSASsHz8VKlDFjLTiwKl+hirgUMT+FWQp29Mk+evSY9BvcTitltmw5sGjR++ulkMI4NyZaVQwBhBYGBsEznunUqVNSDt2D+GckDuxjxowtA35jwMLBfPJkKUXRQysHE58rTjhw5sw5ZMqYWQaK3M7rNwQwWjlYtwEDLNdPZQQHSLaJmutIESKJYEYLme9ENyQOULds3S4xR+XKWmara9K4CUaN+jDwmv0oB3q8n/1KvNYGDZuAbi9sMwrZtgHrtWrWRrx4CbDMGhdhWwaFG1ro6CbEZ55W4fXrN0iWtWvWifvptGnTxY2rZs06Ej/DnRS6Z9sMLhmIXLZ8ZZml8fiJE/JMGjFPtDLVtwqwPJauZ/37DzCrQdccDi4TJ0pqnpvuSi1bvRdavJ6/ELfTsKHDyaDcL7/8zVu3ibKE8Qx0s6XQysRYn3oNmuCGNabGPLFVAClYsIj5ruBzTOGJs1oxUVCkBZFTeBuJ92ysmLFFKKWQS/fNbNlzmbEQfEeXLlsezlZtP98JFFppbWJi30wrIp/5NWvWyrTVRtm2v06dOkuckO02Y7l4sRIoVLCwsSq/dG+cM8ciFNKqYqvg6djJ4pbNjDNmzkKrNpY+gQJYsqQpzFhM7qdlkgIT06lTZ+R94OXlhT179iJlqrSi8OM+TllMBdRxq8VWDrD+e/rUC81bOpoxWlSKNmjUFCNHjZb3HZ93jm8ePvKU56d9uw/7KL5n6bbJsQnz0w2MAiLfq3K/lask71tLXW5Lv3To8GFxl8uYKYvEMnJfmlSpTWsq1xl/SmGN7cE4Wyrc+F5j4nuAadPGTcKFLomGO6ns0H9KIAAR+CoBhK4XRYqV/KDDpIaFPrJMY8dPBANxmeiHbrjI8OWXIEFi2c5/9G80pjOk7yq1hEbi4NnQsHDgV6p0eQle5X5qNXisoQmnibp0mQoSnMz93Xv2wSLrgGjocFd5eRvlcjB4+cq1TwogFFJ6O/c3fTGNY/37y5frX3/+hTt3LS96aj/ix08Eh+YWlwSjPPr51m/QRDQrxja6KxlBjcY2/lIwM7RnXC9QqKgIWFym5t5wi+D6uPGTTLMy15nYUdLkzEThhLOq0JWGwZlGolWqbYdOssrBPF2FGAdhJAp/Y8ZNkFUKibbWGmoLs2XNLu4x02fMMmMBKLDQx5WJ7cc4FdvEIGreGxR+PpauX7+Jxk1byACSeajlcmjRBlusL09u46CKg3gOWNKny4gNGywmcr4UcubKJ9p/urxEihQFe/ZY7lfGJxUvUQbnzlvm1+/QqasPjSFdJ5IlS4kHDy1mc2rsqOUzEjVcFIapRaR7lKHFZ7wANaPk4DutXr0aadNmEBcODn4ZC0N3KrohMNGVrZn1PuFA3dCu8YVDVxQjWDFPnveDLb78bBM1rvTjp5aVgc50qSlZqqyP+CYOBsmQwi7PTcGMfv1GunDhkg9BmC5SjRo1AeMJjMR7oU69BsaqaK0vXbIIPRzE1KhRy9zHgW3HThaXJAq1HOzYCrdcNl60xkF0m6GwZqT48eKbGtTateqaFgDu533GYHmmUaPHSdwMBwa0ytCVKnPmncJuxgAAIABJREFUrDBmGuPA2fa+TpcuA4oWKSbWQLrztWhp8U1nWRxI0wWCbkKpUqSSQa+cBBDB1rAAdevZB5xim4lWB2q86fplm65evQ66ctHqxcRBaezY8UTwZiC+4Z5RqXI1sUbOnTMXWbPmMIuIGzsu6NrGRI2wYQHheu7cecXlhcvsF+vVbSBKh3UbNokV2FBq0J2GfvK2ic8OB+pG383gWvJgovBKC62R2CcbioBixUpip3VaWtu4EsZt0apNhQUTrcLJkqWQZQ6aec8z8VsxFMJ8T1fNvoADVyYqEOgeZXzzgTEpy1euFvdPKrVsE63IjM0xkourm2nl4KQDRYoUM58zCvqMwTESg7gNFx8GMBvn4/1guMExrqJHz15yL0yZOlNi94zj6XpEIds2kVOJUmXNSTM4QG/QwNJ/sPzqNetIn8X3Du8ZI9EqTUGU/TQThYk4seOKlZnrZM1nl4NlHjXcZYRxKFKnTgdO3ELLYYZMWcXVmTtpCWbbu7hZpqlnG7H9jOBqvqtpCWSiUoeKPVqYjGQErNNLwBBibNuc+WhxChk8lOmedPDQEeTImVfqz/0ZMmRC+/YW6wnbNWeuvFhtFWAXLVlmztzH2DkKIIzRMhIH3Ub/xGeafSQFd/a/7N8Z3M3EY+lhQQUH+3ffie9K2wD1vn0HiPuXbT5y5QQZFJgMYZ3WHLqxcV/TZi19TBhDZQHvYY5pqCSicoqJlkjG/dB6cujQIXGh5BT9TJxcIVyY8Kb7NRU72XPkFmUd24YWrutWazLfxXyn875gKlK0GDp0dJJl/acEAhoBfwsg1NaULVtBgnsZTMcOyHhR0+xNf1ZqKFKnTi+aWwLhOgOwOfihT3m6tBlkAE2XBU7nS0tDv34DRKtHLRMH3gw8CxUitAT63bx9RwLKQ4cMI4FbfPFxEMYAs/4DhoiQwhf1iZOWwNL5CxeLiZODCXb81NA5deomrhQMpKNw5DsomPWkaxL3t23bDq1bt/PhBkRTL82khiD1pQ3Nj5rlzp3fh7WIAyTDRcS2HGorGZhGjSi1vR8TkujORHM1NaI1a9XBpCmWeBZq0MOHjSAuF9QSG4kfdWrYqKlo6PjSYadtJL5Y0mXIIkyMbezcGLTJID0jEJxa+oKFi8pAsnnzVhKgx86UibN/cJDL+2HqjFky6xA1cBcvXsbIUWPQqk07cWmg69C2rdtEAKLGnMHrvCeMxJcv/b2pKfUrsd05c1akiJHF1cnwF+fAj9YTaoHZmR86ckwOZ9sz/oEBhrTGMVaJLgVG
4iw/dNuju9jAwcPAufiZGAybIEEimUHJmHWHvuHUWtWt1xCPPD3Fx5uBkdQu04+YAcSGSwcDgTlI4LNBSx8FEN7fvhOZ8sVLy9CmrTskLoLPCS0GFFjpUkJfaQ4cORBjXTnooQUiYviIEkDPMt1GjBJBhi59Rh2Mc1HLzwB7PrPrNm4Vl7H//g0us8oxD610nbv2EP9kw3JDIZkuVU2bNkP79k7YvGW7D4Fg4sRJ4lvNZ8I2derSXV7gDs2ai6aeL2x+ZJOKhP/++U+EeQ6s6WoXMUIkjJ8wUQ7nTFFly1WQZ49tddIaJ2RbNgfEdGekq6HbyDEyKOPEFLQ6JEqYSAYaVIxQ0AoVMjRSpUwtgyZaK+h2yOl1L166IsHoZMyZl5gYBEqf+urVaoB+/blz5UXiREnMgSJd4Ki9Zxv3dR4glkkKR0ULFxVXOE7awP0jRo2V2fsYL5A0WQoRFuj+xnTnzh00bdZCBFbec/MXWAan7EurVq8lzwj7HVoSWTb7L7oEtWvXHn37DRLFwI4du+T6OTimPzhjQBiDx4E3A4zpynf9xi3s3L1XBjOpU6WR66eCJlGCxBJ0zu+hcBKCVm3aSkB8rz7OMpCSSlr/cSBJl9WsWbKCcQoURqNEjmq6fDZp1lyeNcYSMcbHsGRw9i1amink2w5GOZhm/8/26NnLMjFG5EhRJJCf74K69RuLhaNRwybSloYLqFEnWo7ixo2Pli1aY8TosdI2lSpVkXcL73fGgNDKxWeAA0/jWxfs4/g9J/andBVi7AqVA7RMkDXvAbbVjVu35R0TL24CLFm2QnjwfuEAmRYJxtxRG88ZmPjxTPJlmjVrjkz4MXrseHHhowWYlm7O3siAYrrK2Sb2XRSkCuYviJGjxqJEydLiTkbFCetEixZjQmhZtBVAWAbzx4ubUMruN2CwTDrAIGdaXXj/M7aCFhmPew/g4jZKJrGggMQJO27ftvT1dFfjJBz0VNi0eRvixY0vrm98XmjpJQ/O6Mfnk0J5uLDhwUklmDiZBgO9GXc5UGb6sgR8My6LzyQtV77bjceNGjVG+lEqnTjpAS3STGRBqx0VOGwzfnPLmICE762MGTKLm7QRx8T3eq7c+eTdQ3dj3pdMrCs9HVhXQ0Bh8DatILw/h7m4IVGiJOJSSgWRkWhNnTx1OhImSIx8VL5ZY6Lu3L0rrpK8jzt06ISly1eaFjWZeKFQUXlXUFFKV1gmelaUKVMB9ELgtRjbWQ9eFycUIT+6tbJfZaJlhi5xdA01LKG8Rzn2oTKvc9fuMhEF83IGMcY2cZIF4+PIw91GSVwnZ0SkoM1y2e+z/zYEVTmR/lMCdk7A3wII3T/69Okrrio0x1Mzyw6hTx9nMDiWacaMWeISYmizqA2iS4AxzSq1srRssBPnC4zBiJyKlA8PXYsYH0LTMrXHzMMBMQMuJ0ywvGg4sKEbGF8qnImHU4Ea02Ly/CtXr8HYMeNAdydOrclEpdKAgYPQ1rG9OU2l7LD5t2jREnlZdencFet9zR1OrSG1mNTU+TfxJWfbQbMjZtC9X4kxCK1btQE7d2OA7zsfZ7OiKw5dDzjAN5K7+3yZyYlaVVvtNPdzij92ZsbsGsYx/OWsKLYCC6f1ZbuOGjlKNHnGjC7U7nAaXr8G07R8tW7liPUbN4kW17AA0NJy+ux5dO7U1ZyKkYIBgxIHDhzkY9rOJ0+eYN78ReasXLZ15DI7awYKurq4yixetoNtug61atVapn/2sLpJ8eVKlzRahDi4M2bHsi2XPtucptN2yl9q10bLvT3MnF2Fxxw+fFSC4KlNZ+IsLbynOCjkF89tE+MkOJ0lp5BmkOrHEjXy06fPkMEDBalhwyyBrpw1bSKnCB05WjTg5Dh16jT07tNX7vUNGzZi3HiL9piWDbqD0Q3Ar7Rq1RoJeCY/uknxuxzGsbR60eXCxcUVY8eNN1+4nIHN0bGdBEj6FajM2XLu3X/vRmScl9Nvc2ronTstU2YziJaMOaUwg5SpNeTkEEMGDwHvVyMtXbpcXr6chvpjiVPe9uzRS3hwFh4GHPO6Z8yYaeknrt/AkSPH5Lkgf1oZmOj2Y8RrrFi5SqaFZqCwMdBjUCj7I05LfP7iJR9aeLpmsCwKqYZ7J90jOUkD+56hQ4eJUGPUmdpL9mesFwPsjUTLEoVjTtzAgbORWD4D3Rm0apt4HRRW+JE1I1mm/O0ps90dOXZcniGeh/fP7NlzpZ/kYITrluu3DLp4XQsWvhfq+TywL1hndWUyyjd+OUUzn3EGx1LzSjfCeda2oqaZg33WzWDK43gfcTuFCt+JQhVnAevRo6cohNi38t5mojsol9nf+JWo1aeA2smpM/iM7z9wUOrFvOxHOBEA30PDh7tg4MAh0iackpmJAzZaXji44zTATBTIOTUs+TAGjoICBWq6MHIWQT6PdGEaPWqMOWsWB7oc0DM420gUGlhn2ymoGUTN+8RQhBl5jd9Lly+ja5duYvVhEDH76U3WAOnVq9dgxEjL5AdGfttfKr54/1DZY0zBTFcqvjPdXN1EmDeUarTuckBuTCHPcjglMRWFdIFlX89ngsyoqWffxym2165bJ1YYvoPJgJyMxOmT+Z6lJt+YYpgKCAq0RmC+kdf2d8GCRWIdt70vOP08hbmNm7dJO9i2PftRzkDGd7/tt41Onz4r/RGVbkZif8b3pMtwF0yYOMl8ninUsa48ntYa62RaxmFi/edYhH0873FbLwNai6hUoKKH09/aJvYZbR3bySQDtts5HT5doNkfGUoZCuK0sDAAnvFERtyScdzJU6cwatRYH23E+5DPleFSxedm/LgJcn28zkeP3it8KByzzzKEGgr97FuMab+N8+ivErBnAv4WQOzlYvjy57SuQTFxlpBNG+3r40rfox04SFuweNkHkxl8bdlnz11AyzY+40y+tiw97j0BCpcMCn5nTgHxfl9QWHr69CkqVKwikxwEheu1x2uk9WjAwA8/GEmB13BhtMd6a504pb0nmrdyNKdTDoxMqHiidUaTElACHycQIAUQainot8spZ40pXj9+iYFrD7VXcePEk4960T80MCR+3IqaeQqVHNwamulvuTb6A9O/nH7Q1MJp+jYC/DgoLRjLlq9Az97O5gcMv63UgHk0Y1o4DTWts9/jXg2YFH5trRk/kzNnXnENoxWCs4hxlq016ywB6r+2dnr2TxFgf5w6dVqMHTtB3M8+lTcg7qO3BF3jJP7QOkVvQLwOrbMS+NEEAqQAcvzEafG7zpMnr4/Zl340LHsonwGtDDpm0KnxVXF7qNe31IFxINlz5sHGze+nIv2W8ngsA/w4zS59rjldpq271reWHRSPpzslXR45BejuvT6/ahzUeHC6cE6TzTiSZ9aPTwY1BvZwvZzOmcHTnF6cM50xbs42/sQe6qh18EmA7rycwjh/vvzyIULGOQa2xBhWxnxxxjbGi2hSAkrAbwIBUgAxYhL8vqTAvdX22m2XA/JVU4vMGW2+Z/LNRjXV306XTDktqqb3BJTHexa/aolBxcaEFL+qDnreLyPgux/
23U9/WSn2nSswXpN9E9faBVQCAVIACaiwtd5KQAkoASWgBJSAElACSiCoE1ABJKjfAXr9SkAJKAEloASUgBJQAkrgJxJQAeQnwtZTKQEloASUgBJQAkpACSiBoE5ABZCgfgfo9SsBJaAElIASUAJKQAkogZ9IQAWQnwhbT6UEAgsBfjTy1OmPf2AxsFynXocSUAKBm8CZs+dgfLwycF+pXp0SsC8CKoDYV3tobZRAgCCwd98BlCpdHvwmjyYloASUQEAlUL1GbSxbvjKgVl/rrQQCLAEVQAJs02nFlcCvI+Dk1Bm///YHps+Y9esqoWdWAkpACXwDgS1bt+Pff/9D5cpVv6EUPVQJKIGvIaACyNdQ02OUQBAmcP/hI6RLmx7BggVDmTLl8PL16yBMQy9dCSiBgEqgefNW0o/Fih4Th48eD6iXofVWAgGSgAogAbLZtNJK4NcR6NXbGcH//gfRI0XGv3/9A/f5i35dZfTMSkAJKIGvILBz1x5EjhgZUSNEwF+//Y6GDRt/RSl6iBJQAl9LQAWQryWnxymBIEjg6vUbyJg+I0L9+x8SxUuA4H/9jRIlSsHL+3kQpKGXrASUQEAk8O4d0KxZc/zzx59IGDcewocKjXhx4uHAwcMB8XK0zkogQBJQASRANptWWgn8GgL9BwzGb8GCIV7MWIgfOw5iRokqVpCly1b8mgrpWZWAElAC/iRAd6tI4SMicvjwSBAnHhLFTyD9moNDC3+WpNmVgBL4WgIqgHwtOT1OCQQxApevXEWaVGlEW0itIQWQRPHiI0zwEChSpBhev30bxIjo5SoBJRAQCTRs2AQh//0PcWNYFCnsx6JHioRYMWJi/4FDAfGStM5KIMARUAEkwDWZVlgJ/HwCb9++Q4cOThKwGfq/4IgbKxaSJEiI2NGi4+/ffpPtEydN+fkV0zMqASWgBPxBYN2GTfgj2O/SZ0WPHFn6sbixYiNcyFCyrWrV6vDy9vZHiZpVCSiBryGgAsjXUNNjlEAQI3Dy1BlkyJAJqVKkRNYs2ZA4fgLEiBwFqZKnQJ7c+RA3dlwULVYS3hoLEsTuDL1cJRBwCLx5+xZ16tZHnFhxkDtXHqROkVLcSJMlToJsWbIhXZq0iB8/Ibbt2BVwLkprqgQCKAEVQAJow2m1lcDPJHDnrgd279mH23c9cPX6LRQqWEi0hfweyOMnz3D2/EUcOnxUBZCf2Sh6LiWgBPxF4M2bNzh85ChOnj4Lz6de6NGzt/RjeXPnxaWr13Hrzl3s238Q7O80KQEl8GMJqADyY/lq6UogUBIoU6qMvLgHDRoSKK9PL0oJKIHAT2DkqNHSjxUuWDjwX6xeoRKwMwJfJYCMGDkavfv2B6ey06QElEDQI1C8WAl5cffu3TfoXbxesRJQAoGCwJAhQ6UfK5CvAF6/eRMorkkvQgkEFAJfJYCsWbsOCxctgcofAaWZtZ5K4PsSUAHk+/LU0pSAEvj5BAYPfi+AvHr9+udXQM+oBIIwga8SQIIwL710JaAEAJRQC4jeB0pACQRwAiqABPAG1OoHaAIqgATo5tPKK4FfQ6BEcXXB+jXk9axKQAl8LwJD1ALyvVBqOUrA3wRUAPE3Mj1ACSgBdcHSe0AJKIGATsBWAHmpLlgBvTm1/gGMgAogAazBtLpKwB4IlCxe0hqE7mwP1dE6KAEloAT8TWDwkGHSjxXMXxCvXmsQur8B6gFK4BsIqADyDfD0UCUQVAlw1phgwYKha9duQRWBXrcSUAIBnEA/5/7Sj+XImh0ahB7AG1OrH+AIqAAS4JpMK6wEfi2Bt++AAvkLWgWQ7r+2Mnp2JaAElMBXEujTt5/0Yzlz5MKLly+/shQ9TAkoga8hoALI11DTY5RAECbw7t07LF6yHIOHuMhXg4MwCr10JaAEAjCBvfsOYNDg4ViybAX4lXRNSkAJ/DwCKoD8PNZ6JiXwUQJe3s8xZco0jB07DqNHj8WNmzfNvA8ePsKChYuwavUaH9vNDN9x4d69e9iydRseez756lKPHD2GRYuX4tz5C19dxqcO9HzyBN7e3pLlypWrWLp0Oe7ff2Aewg+kqjbTxKELSkAJKAEloATsjoAKIHbXJFqhoEbg/IWLyJQxM/LmK4C27TqgjWM79B8wGD169MK1a9fx6NFjFLcGfY8ZM+6H4lm7dr24JEyfMeurzzNv/kL8FuwPNGrU9KvL+NiBz1+8xMrV6+BpFZBcXUdIfZctW2EesmvPPixbscpc1wUloAT8T+DOXQ8MGDAI48aNx9y57rLcpXNXzJ4zz/+FfeIIWlQ/le56eMDFdQQOHTr8qWwf7Hv2zAs3bt7ysX358pVo27Y9unXrARfXkXjxIuC5Xb2lD6w1LVu+AitXrTFW5fflq1fyzvCxUVeUgB0SUAHEDhtFqxR0CJy/cAkRI0RCwQIFwReHkY4eO45o0WJh0qSpsmnx4qUy0KaV5FPp/avpU7n83kcXhP+zdxVQUiNblGVhkYXFHZZd3HVxd4fBncF9cBnc3d3d3d1dBnd3d3e4/9zXXU1mGMGh+e+dM9NJpapSuUmqnufe/QdYuGgJTnuxXnyKe8LNW3eQIH4iVK1SzfsTfUFpx05d0btPf+mBjMvVa9exYNES3Lx5S8quXb+OrNlyYtSosR+c5e3btx+UaYEioAh4jwAtr1279USIYH8hRPAQ6NKtB5o1a4GA/gOgRXN3UBnwJbRh42bMnbfAzy62bN0mc1/fvrb33s8GAF6/eYP+AwZjx87dUv36jZuoX98NcePGR8OGjdCzV2+MHTsBvfr0R6VKlXHx4qWP6faH1xk5aqynuTlfvgJImy4j3tqFuPsPHmLV2vWe1pIfPmgdgCLgAwIqgPgAjBYrAt8aAWoYkydLIYvrsROnPjjdpctXsf/AISmfMXM2Avj/Xdy0Ro8ZizJlyuPajZue2owcNQbp0mVEly7dHeVdunRDtmw5MH36DGzavBV58+TD2LHj8fLVa4wcPRbZsmZ3aNAeP3mCRo2aoHXrNjh0+Iijj7XrNqB0mfLImD4TJk58LwCdOn1WxsFFcPaceY76HHfSJMlQs0ZtR5nXjc1bdyBrthxo1tzdcahFy1bIlj0H5s9fiEWLlyJXrjyYMHGy4zg1liGD/YVY/8bEiJGjcfLUGdSv5wZeI61EXINz5coteBYuXAQLFi1Fmzbt4FLERSxLd+7eA8ecO08+MPhUSRFQBPxGIEP6jIgZIxaMcqN2nbryjnXr3lMaP3v2HK/fvBfuHz1+Iu+i6ZlKAf4ZoiLg5KnTSJwoKWrUqIk7d+/iteUbHLT6vranxGVmqrPnzmHTlq24/+CBdEEXTCP8vHr1ylt30Tdv3qJH737gnPbo8WOkT5dBxrx02QozDPndsWMnIkWKglWrVnsqp0un1a2T/Tx+8tRRx3rMUQhIJq3Ll69Yi7zdfvDgIS5feV/vxYsXoBuuocePn8D6XRK6nI6fMBERI0TG6jXrQOsOsTl+4hSOHD0uAsiTp8/QpGlzVKlaHbdu3ca9e/cktTDvzcuXNuUWFTU89kaVMQZq/f
2BCKgA8gPB11P/fyOwc9duWRTLlavgZw76adNmIHCAgKhcuSomT5uJv/4MhlIlSzsAdG/ZWoSZbdu2o2zZ8ujcuascGzd+opyDWsxNW7ajbp16st+8uTt279mHiOEiIFnS5GL5IIPRvn1HOT56zDhpT6Ema9YcEhfSuFETpPwvjSxmFy5cQMSIkTFt2nR06dwFAX4PiH37D0qbK1evS581qtd0jM+6MXr0WGTMmAUrVq5CfbeG4FhIq9eslXP/GTgIVq1Zj8IFCiFEsBCgJpY0fMQo+PfnD+nSpsfBg4eFIciXN7+02bRpi9SpVbsu/goWHH369sOly1ckFoXpgrNlySbHyYxQ6GE8jZIioAj4jsCDR4+RInlKJEmSzFGxXTvbHNG+XQcpq+RaBbly5sajJ0+xdt1GhAoZBvPnzZdj02bMQu7c+ZAmdTpMsCsvyCgXKlwUAX/zj9T/pUaHTl1FUGCDYSNGI0XK1GjcpLm0p4tUw0ZNUbxESezaZbNm1KhZWywZQ4cMw9hxE/FfilSYMm2G1Df/Vq1ei527PGR32vQZMkd079HLHPb0e+LUGVywWEA2bdmGfPkLieLm2ImTUpeWmnhx46NcufJYtnwVXFyKo0qVajJvms4439Su6yZz5OTJPruw7tt/ADlz5kGKFKkwafJUaX7pylXkzJEHWTJlwYxZc9G6TTtkypQFR4+fkOMbN29BuDDhESFMWOTJkx8cIxVDOXPmwuQptj4mTpqKv4IGQ/w4cdGgYRPBMm+e/EiQMJG40LGjFi1aiYX4+g2bxdiMXX8VgR+BgAogPwJ1PaciAIAuCL/584dWrdr4iQdjMshIT5hgc8mKHSMWUiZPIe3oE0ymnIv5nr37ROPPumfOnsOt23cR6q+QyJ83v9SdYBdIOnXqLPtVKldF8KDBHcLD+g0bEfzP4KCvNClp4qTgV88N0ZWBrk9nz55D9Wo1cPjIMXTsYGNI5i1YKNXoFpU0SXLUrOm9BSRzxsyIFiUqjp84iXp1bQIRx0r6J9rfYE5+UudOnfHH7wFx4qTNOkS8AgcMhIoVXeU4/3Xs2Flw2bp1m5QNGz4SQQMFxaZNWx116GKRKGEi0OJEevb8y1xHHB3rhiLwiyPw8PETZM2SDVEjRcHkqdMxbNgIBAkUBMWLlcSt23fk6ps0bibvYKvW7bBh01bUqFEL69ZtwLp16/F3tH+wYeMmzJu3AH8FCyFzHhUdc+bOE2VC+/YdcPPWbemH1tf8+Qri+vUbqFatJqpWrSHlM2fNlv7p/kUaPGSY7MeNHRebt+1E9qzZEDliZNDyYqha9VrYvn2H7FaqWFnqz7fPT6aOd78rVqxC6FBhsW79BlG60KJ88eJFmTtixYgl/QwcPBRjxoyTbSNUMWkH3U4ZX0JBhMLDjBkzPzjF1avXECNGLAwfNgIXLlxCjBixsWrlKtAqREs15+369Rtg8bKVCB40GLJnyy59PH36DK6VKiOAf/9iFX7+/AU2b7G5ptFSTNq7b5+sJ3Xq1BOLE3EtXaqM9Dl8xEipc+rUacyZO18sKFKg/xSBH4iACiA/EHw99f83AnSJ4oLDBeTho/eLp3eoTJ02XequsGvuo0SKjJzZc0jV+w8fQ9wk/o2B2nXqgZaKZk2b49q16yJYkGmvXq261KW1guecOGmK7JcpXRbhw4bHgUOHZX/WrDn4M8ifWLN6jez/G/1fuBQpKtte/9HrYsSosUiZIiXChAyF5StWSZVr128iWZLkqGY/p9d2tEbEiP4vWrq3QYsW7uL2RaGFi2z4MOFQvFgJadKkUWMEC/In9u4/IPuMTfnd328oWaKUo8vGjZvCv7/fQMsPqVu3nvgzcFCJYzGVjh0/KddMBsZj7wHBxRzTX0VAEfAZATLWFEASJ0yEJk2aw9W1igj91hYDBg4W91Cvbo0pkqVA9GjRpeqbt+/wd9S/Ubiwi+yTEeY8NMLOGDP2LEzI0EifNi3mzZsv5+Nx0vnzF6QuA8dJS5atQIDf/KNhg4ayX7pUaYQNFcYhEHHMs+cuwMOHD8VtzMWlmLSfM2eu1Oc/KlCovJgydRrmzV8g1tU7d+9j8OChovQYMGAgBg4cLO1aNG8p7fLkzovwYcLi0eOnkn6c4xs0aLAc271nL4IHCYaqVaqK1ZXCQ6r/0oiyxnFSQCweoUKERCv31hg3foL0nyN7TqlSr56b7G/cuAmv3wFRIkZGqpT/OZpzHDznzt17pOzg4SOIHDEKSpcuK/ucQwP+HlDmVNPowcPHIjxmzpQFL16+wq1bdzy5y5l6+qsI/AgEVAD5EajrORUBAAwYzG7/ovhGuwuRFRj6+dI3mMQYCy4+SxYvkX0u5rlz5ZZtxnPEjxsfSRMlkX3rvzVrN4gmrVKFilLcp3cf6We8PZi9QoWKiBAuAo4eOy7HmU0qWNBgMIJOgngJkDKZzdJi7ZcMQ9o06TF1yjTRdHJs6zdskio3b98Rt40GbjYGwdqO2ymTp0SKZMm9FuP23XuIHCESihSyMSnuzVvK2Bm3QdrDHu+4AAAgAElEQVS8dTtChwyNkhbXs9at24oAsmfPXqkzZOgwEaDo3mWI/s9ly5ST6x46bJRoG80x/VUEFAGfEeB7Hi9OPFSoUMnHSu3atke4MOEkrsNaie3S/Jdaim7duSfW1OhR/5b9I0ePyfvYp3df2WeMiDDcKVKia7ce6NatO2bMtGXbWrZ8hdRlVkAS4+E43xjBwKWwC6JEiIRr12/I8f4DBmG6vS2tLS1btpb6CxculuP8R4tr85atkChBIjnWtGkL3Ll3H/37D0SwwEFQvXpNiS3r3bsP9u/fL3EmGdJnkPmJlhZaSDiGIUOGSp9r161H5IiRxP21R8/e6D9gIGhN8UpMoBEmVGgULVIUtP707dMXbPv85Sux7NIivmXLVtx78EgUQ9myZHV0wfmU52QMCGnvvgMIHzYc6MJLOnP2LP7wHwBuXubdnj1tc/606TPx8NFjPHniu7JLOtN/isB3QEAFkO8Asp5CEfAJgeMnT4lvMRl9ppl8+vQpHjx4gC3btmPkmPGOAMshdreDyZMmS0wEM9Ekip/Q8T0MujtxcRo0eAieP38Ojz37JEidixXL8+fJJ0No2cJd9nv17iMpKKnd5KK3Z68txSUDHVmf2jlSndq2gFPGglAgGjR4qDAaHB/r9es3AEOGDJftGfb0nOcvXEL4sBHEcmINLjUY1K1TX+ozpTDHOnfeQjAV8fUbN8QtI22qNHj+4gWqVq4i9bZutVk3KIiEDxceSRIlwYVLl6U7Mj8cx8KFi2SfWcK4P2XqdAm0ZJAqadmyFQgZIpTDL1wK9Z8ioAj4iUDKFKlAS6lP1KVzV2GWL1y0vZOmHlOLx4kZW3aZACJY0OAoX96mCNm5c5e8p+Pt8wxdRUMGDyFJMUx780uXIb7Tve3CCi0gNtfV1lKleNES+DtKNGHiWTB67HiJJzPt+bHBQAEDIV269Hj81Pb9IHOsUsVK0jfHR2LaYZ5r9x7PKX8pQHGujBguvFhVdu32kHom/mLvvv2yz
3naSmb+MWWMiWH/ffsOMEXyywD0smUryDHGujCkP3KEyOJeZirWqF5DrnvLth1iWbFZQCJLLArr0D028B+BHS69JnMhLcCB/wiE1KnT4tlzm0LL9Km/isCPREAFkB+Jvp5bEQBEK0VfXQYl1qpdDwUKFkav3v1w7tx5wef2nbsoVNgFsWLGRqfO3dCmXUexIsSPl0BcB1iJmi0Xl6KIFiUaypWvhHkLFoHZadp36IzIkSKjQvlKWLlmA9zcGiHmvzHFT3vhkuWy4MeJHRezZs+VrCqMl4gSOQrc3dtIZhVq+0qVKoPYseKicJGi6NO3P5iF5vyFi6CLRY4cuTBh8jSULFkKhQq5SPpLBoDSX5ouCPsP2rJ4WW80LT9lSpdDrBix5bpGjR6LBw8fwr1VG8SPEw8ZM2RE/4FD0KBhY0SLGk3GzPaMdaF2j2M5c+68+GXnzpUXHH+tOvVlUT5+4gTixoknPuQ233Jb7p4jR49KVizrOHRbEVAEfEaAygMGcjOBRaoU/4HB08zOZCVmU6K1gEw15xCTDpZ1Zs2eg/jxE2HlytWgJYFCwG4Pm/vQ3r02hr12zVq4eu2GJJTo2MnmHtq0SVMcO3Zc0muzv/H2uLXKrlVkfuE3QXi+UiVL4eLlKzI27vPDp0yEMXP2e1crM9aRI0cjaJA/kT9/ITBejC5g+/buF2tq/LgJZD5j3TVr1+E3f78hdsxY2L5jp1gbaEXmeSJFiCTn3XfgkGSk4jnr1asv8ybdvhiTQmvy2rXrcOjQYQwcPAw3vGQqvP/wkbinMrZtzpx54IdUz547j+s3byFDuozS//QZM8FzsH/G3lCwINFdlWXurdri6LET2G4X4tKmSYdnz5+LcBE1SjTkyJYDe/cdlOxf5vorVHBFmTLlHNmwTLn+KgI/EgEVQH4k+npuRcCCAN2gmN3JY89esYKYQ0zByIBFCgO379yRBYvbDKr2mr+egsH+/Qccrlv0C757975YUq5duybWk0ePHuPmzdvy7Qymk7x77wHu378vi/vlK1elDpl3KzNx8dJlUOtnpSdPn0vAJXPuk82nXzXTX9L3mgGqdIkwHwy0tjPbDNbcvdsDb9+8kaLTp8+KWxq1kUy9SaHq3r37nq6R52GMCenJ06c4d/4CHj9+KuMwHzSj2whTYb58+T7YnBaebdt3mVPrryKgCPiBAOeOKlVrIF+efJJxr0jREthvz3RnmtLdkrEhTILB+DMqF6xEa0W+fAWRPn0m8FtGhjhP0DUobdr0kh7clLu7t0ba1GlRqnQ5HDp8VJQKTZq2AOMkaIWhlaV79x7InDkratWqiz79BqFWzdqitGD8xsrVazDHnoHL9Gl++X0iZq7KnSe/fCQ1f4FCGDdhkqf0t6w7aeJkUfAwNo8Z8x4/fiwZ+zJmyCRB3cza1cK9NQrmL4i8eQtIMg22o7Ile7YckuGqXLlKIrx59+0hun8xniR9uozo1Lm7xL5du3ZDMM6ZIxd69u6Hdu07In++/MibNz9mzJojl3D56jWUKFEKFCZojaaih+mFCxYoDFpDSMQ4ebKUmG9xN2M5A/fHjrNZtaWi/lMEfgIEVAD5CW6CDkERUAS+DQKLFi+RzGDMpLXLrn39NmfSXhWBXwsBfmfCqkC4e/feBxp0WknMh7lt3wOxKROsSFDhwQQT3hG/5+HVTZMfZKWixRDdNEm0trx8+cLTNyys4+M5jBLCtPXul98Munv3Lng9vhGVH+bcLyzKDFtsnk25wfgyU8f0RQvxvfu21OGmzLtfWpOorPFKvH6ewxAVUFaiwodkXKy4bf2i+6vXbz6Ic+P3QqicUlIEfiYEVAD5me6GjkURUAS+KgJ0zwoeLAQuX33/IbSvegLtTBFQBBSBnxCBS5euyPc/mCqdbl5KisDPhoAKID/bHdHxKAKKwFdDYPPmrRg3boJ8W+CrdaodKQKKgCLwkyNw4uRp/BEwMFxdq3qyqPzkw9bh/R8hoALI/9HN1ktVBBQBRUARUAQUgf8PBDZs2CRxdv8fV6tX6WwIqADibHdMx6sIKAKKgCKgCCgCioAioAg4MQIqgDjxzdOhKwKKgCKgCCgCioAioAgoAs6GgAogznbHdLyKgCKgCCgCioAioAgoAoqAEyOgAogT3zwduiKgCCgCioAioAgoAoqAIuBsCKgA4mx3TMerCCgCioAioAgoAoqAIqAIODECKoA48c3ToSsCioAioAgoAoqAIqAIKALOhoAKIM52x3S8ioAioAgoAoqAIqAIKAKKgBMjoAKIE988HboioAgoAoqAIqAIKAKKgCLgbAioAOJsd0zHqwgoAoqAIqAIKAKKgCKgCDgxAiqAOPHN06ErAoqAIqAIKAKKgCKgCCgCzoaACiDOdsd0vIqAIqAIKAKKgCKgCCgCioATI6ACiBPfPB26IqAIKAKKgCKgCCgCioAi4GwIqADibHdMx6sIKAKKgCKgCCgCioAioAg4MQIqgDjxzdOhKwKKgCKgCCgCioAioAgoAs6GgAogznbHdLyKgCKgCCgCioAioAgoAoqAEyOgAogT3zwduiKgCCgCioAioAgoAoqAIuBsCKgA4mx3TMerCCgCioAioAgoAoqAIqAIODECKoA48c3ToSsCioAioAgoAoqAIqAIKALOhoAKIM52x3S8ioAioAgoAoqAIqAIKAKKgBMjoAKIE988HboioAgoAoqAIqAIKAKKgCLgbAioAOJsd0zHqwgoAoqAIqAIKAKKgCKgCDgxAiqAOPHN06ErAoqAIqAIKAKKgCKgCCgCzoaACiDOdsd0vIqAIqAIKAKKgCKgCCgCioATI6ACiBPfPB26IqAIKAKKgCKgCCgCioAi4GwIqADibHdMx6sIKAKKgCKgCCgCioAioAg4MQIqgDjxzdOhKwKKgCKgCCgCioAioAgoAs6GgAogznbHdLyKgCKgCCgCioAioAgoAoqAEyOgAogT3zwduiKgCCgCioAioAgoAoqAIuBsCKgA4mx3TMerCCgCioAioAgoAoqAIqAIODECKoA48c3ToSsCioAioAgoAoqAIqAIKALOhoAKIM52x3S8ioAioAgoAoqAIqAIKAKKgBMjoAKIE988HboioAgoAoqAIqAIKAKKgCLgbAioAOJsd0zHqwgoAoqAIqAIKAKKgCKgCDgxAiqAOPHN06ErAoqAIqAIKAKKgCKgCCgCzoaACiDOdsd0vIqAIqAIKAKKgCKgCCgCioATI6ACiBPfPB26IqAIfH8Enj59iqfPnnk68fPnz/Hy1StH2TMvx3mAdazEOq9fv7EWfdL28xcv8ObNaz/bfMk5/OzciSq8stwfJxq2DlURUAQUgV8SARVAfsnbqhelCCgCXxuBGTNmIVPGLChbrgKqVa+FHTs9cP3GDbx7ByxavBSFCxdFZdfKqFa1OmrVroc8ufPCtVJlvLUPZMiwEXApUhTVq1ZHlSrVULNWXZQqXRZ5cuXFzp27P3q4p06fQeFCReDiUhwHDh32sd2NGzfRtLk7Lly46KnOyVOn0blLd7x+83nCzzsAO3ftwYsX
Lz31+z13jhw9hgTxE2L48JF+nvbJU5ugt2PnbkybMRtv3pg74mdTraAIKAKKgCLwjRBQAeQbAfu9ur11+w6WLl+FlavWYPWadfK3as06+PXHur7V8e741+zft3Nbj1nHYd1mHa/71jIzVu/qWPs326ae+TXlXn/9Ou61vnf7X6MPn/pl3+bPax2fzmvKzS/bWbe99uN1/1Pqem1r9tmH6cf8mmPe/VrrWLdN3dVr7TisXS/9zpm3AKdPn/3s19KtQSMEChgInTt3gYfHHuzbtx8bNm1BzBixMXjwENy9dw/z5i2AP3/+kCJZcuw/cBC9evWR/RLFS+LJ06e4cPESuM06Li7FsG//ASxbvgJRIkVBpIiRsXHTFj/H17BhI0SLGh2FChbG+vUb8PDRY2/bPH78BFWq1sDIkaPx8qVnQYFCUuhQYXDtxk1v2/pVuGjJcqxYtc6vat/0+O07d5E6dVqMHDnK1/NQWOvavZfUefv2LVq0bA2XoiW+iRDy8NEjLF+xCstWrMKateuxcvVarOKfl7nWu+f1c+qwDfvyrT/rMZ+2vZ7bum/amF9zTu/qWMu827b24d1xlnlXx1pm3fapj+9V/r3H4tf5/Dr+sbh47Yf71jJur1y9BouWLMOly1d8ff/0oCLgGwIqgPiGjhMc27BxM8KECoPIESLhrz+DI1CAgAgaKAiCBgrs69+fftTx7jjLvCv37lwfW8+7tp7L3l+L1z697rOdKeOv+fPcn/e4WNv5Vt/U862OX8e+Rh/encNcr/n9sM57LK3HzHjML49Zt611vdv+lLretTfnM/2YX5/q2sqt12LdNveXZbbywAH+EKa/a9fun/VG9+nbX9p369bjg/YtWrijXfuOjvKI4SOidKnSjv1SJUpJ26nTZkrZkCFDZb9jpy6OOoMH28qaNmvpKDtz5hxWrFyFhw8fOsp69OwtAg8tGL4RBY4yZcujQ8fOH1Sj61jWLNnt19PT0/FHjx5jztx5WL1mrZTv3bsP8+YvAC0IpBcvX2LhoiVIljQFateqi81btjna79m7D/369cdBbywy27fvxNix47Bh4yZs3LQZs2fPBd3HSM+fv8CwocMxa9YcR1/c2LxlK9asXYfHT5/h7Lnz9nE8lTrPnj3HkqXLsH3nbty8ddvRbv6ChejffwC22Md19PgJFCtWAokTJsaw4SNx8dIlPH78GPnyF8TwEb4LLo5OP2GDQmeQQEEEWz57fwb2fr78mOf7Y+qY98C3utZjPm1/zHvmW1vrMd/6+ph63tWxllm3fTvX9zj2/cfi3Txn5rtPm7N9w8frdXHflAUO+IdsRwwXXhQy48dP/IQ3RKsqAp4RUAHEMx5Ot7dl6zYECfgHggUKjMKFCqNmjVooVao0SpcuI39lSpd1bJuyn+nXr/F9zHHW8a2e9Zh1++Nw+BC/T+3jU+v7Pi7vrtW7sjJeMPG+ju/nsj1DH1uH12n+vG/z9cZgxdS6bc5bpkw5x1hKlSyNzJmzYdZsz0zux7zst+7cRZhQoZEuTTpvqz999hzXrt+QY9dv3EKEsOFRtkw52X/4+AkSxkuAQAH/wJ69+6Wsc8fOwqB26tzN0V+5suWljBpKQ40bNZWyBQsXS9G5cxcQNXJU5MubH5MnT8XatetN1Q9+58yZi7jxEuK8F9crVtx34BB69umPYEGCIF+efJ7aUoNfunRZOW9l16oYMGgowoUJh/r1G0i9Z89foHRp21gTJkiE8RMnSzktMdVr1Ebx4iURN058LFi4yNHvtOkzECd2XFSuUh2pUqZGmJBh0KBhExFm6MJVrGgJpEubHgUKFsGAQUMc7dq264A/fg+IsmXLo/+gYYgWJRoqlK8osTdsV7Vqdfz2m3+MGGFzwZo+czYowFWtWg2NGjfHw4ePsHrteoQIFhzxYsdBufKVsGnzVumfwlKqVGlB68jXJApKJYqXEve4MmXKolTJMihdyvM75N2zap7Zz/n91P6+Zv1P7evTr8/r3Ot13zO2n9q/dfy2bfZv/fO+f2u7Tz3n59bnOX0676eWezcGn/r31HepMqhSuSo4n0aJGEnmiQkTJn3NV0j7+j9DQAUQJ7/hO3bsQog/gyNU8BDYf9DmD/7m3TvxO6enM/21+fuz/vk1vo85zjq+1bMes25/Liaf2sen1vdrXN719zFl3tXx61yfcpz9mz+f2n2tMVj7sW6b85pxmN+Xr9+ALjifSqvWrsMfvwdAxfIV/Gx648YtxI8bD4niJ0SnLt2QLl0GBP4jsLhmmcZdOnfF77/9hkwZM6F5sxbCMLdp2x7bt+8wVeSX+7R4GCZ50uQpsuA3btQYTZs2F6FmkIVhtzZu2dJd4lSsZdym9eP69Zt49eYtXAq7IHDgIOIyZK1H5pwuYhnTZ8TT5y9QIG8+xI4Ry1Fl6NDhcrx7D5tb07t375A4UVIUdSkmdf79Ozry2gWbJ8+eIWTwkMiXJ68ca+DWEMH/DI77Dx/JfpMmzRE0UFDw3tCSETxYCPTt21+OHTpyDGFDhUHKZMlx+uwFuLpWxu/+/MNjzz45fvTYcRlHt642QS56tOjoZLf4MLaFwhQpXqw4yJ0rt2ybf3fu3kP4sOEFf1P2NX6JxSs+Z+9s78Gbt+9k2zyT/PXuWbUe/9TtT+3va9b/1L4+9dq+dX3r+K3bfp33U+r61dfHHuc5fTrvp5Z7d06f+rf2/YY7tIS+foMyZWyKiIl2JcTXeH+0j/8/BFQAcfJ7vnXbDgQLEhSh/gqBw0eOOvnV6PAVgZ8LgbnzF+IP/wFQodxHCCA3b+Hfv/9Bzmw5MH3GTEyZMhW7d+/xdEGdOnURAaRy5SooX76SMNGuFSt7quPdTvcePaXusWPH5XCcWLFBd6/LV655qk4eoXixkqhRvZancu7s9tgrrk100WrfwWaJcXNr6KkeLaoUQHr37ivlWTNlQcL4CfHkmS2DV//+A+W4cTsj033+/AWsXb8RVapUR5BAgVCpoqu0vXX7rtStWrmq7FNAYN8cM4UhCiMM1DcUNmRo5MiWQ3b37j8glqca1WrKftUq1UQQpMspae++/QjoPwBat2or+00a2yxGdWrXk33+e/3mrVigsmXJ5ijjBscVNnQYuNrH6emg7igCioCfCNSoWVve5fFqAfETK63gMwIqgPiMjVMc2bp9B4IH/VMEEI89e51izDpIRcBZELh46Yq4ITHj0mM7E+517Ca97o2btxA6RCi41XfzWsWx37FjJ1m4R4wcLWUFCxSS/X79BznqeLfRp48tDsXDw0MO582dF9T6Hz1+0lN1CiCFCrmgRnUb424O0vrTu29/ZM+RC7Xr1BOXqL8jR0XMf2Pi7LkLpprEaPj35w9t27STspzZsiN+3AS4e+++7NMC4t+/f7S2H2fhps1b0KBBI3Tp2h3Ro/0trlIsf/v2nWT9SpIoicSVFClSFClTpMKrV69x/cZN8DwlS9riZZ69eIlwocMiV46cch6PvfsRNnRYh5BQrVoNqb92rc1NbbfHHhFAGjVqIvX5b+CgIfjNnz/kyZsftHKQUiZ
PKdYcRyW7AELXMtdKNkHJeky3FQFFwHcEnr98hQoVXGXeUguI71jpUd8RUAHEd3x++qOeBBAPFUB++humA3Q6BJo1bymLLTNYWenp02eYPWeep0DoUH+FRN0677Xw1vrcHjBgkPTVo2cfOXTs+AkEDxYcAfz/jm07djmqM35jy9btkj2Lhcw8Q+vB2DFjpU7+fPkRJ1YcvHj5/tsjpnH9em6oZrccmLJLV65K/MfNmzdx6vRpPHj4ELTusM/WrW3CBuvSisqynj1tLlZZM2dBsiTJTDeYMnU6ggUNBnf3NlL26PFTRIvyN5IkTISnL17hn2jRJfWwaXDz9l20a9cB9eo3wFhLwOqr169RqEBhxPgnplRlRi6et3HjprJ//MQphA0VFlWrVJH92rXrynG6nJLOnb8gwkaXLl1l/+DBQ/JL9y72w8xkpKiRo6BQgUKybQL6KZyECxPe1/skDfSfIqAIfIDAi1ev4epaVd4zFUA+gEcLPgEBFUA+AayfsSpdsIIHUQvIz3hvdEy/BgIMvqbrFDPNMQh8zJhx6NOnL5iad9CQ4XKR23fsBAUVauDjxo4LZr16YI91YIW3795JVqs0qdPJwp0gXkLMnTdf2m7dul3KyDgzbS6pbdv2UmaC0ClotGnbQYKnmzVrgQwZMmHwkKFS1+u/ceMmIHbseKCwQVqydDnKl3dFixatcOv2+6xRzVu4O87LOJT7Dx7C3b21lGXJlAWrVq9DkoSJZX/W7LnS16HDRxA8aDAkTJAYj588lZgHBpEH9P+7BK1nypgZkSJExl570H2XLt3B76ds2LBJsloxC5ahM2fPIWmSZKhY0RUlSpSWtMHP7B9r7NW7n5yXsSjLVqxGksRJZJ+B6ozxoFBDvPLnK4Abt26jddv26NChk6Q+zp4tJ7ZutWXoojsYswSOHD0ON2/eklOfPnMGiRIlFQHPjEV/FQFF4OMQeKkCyMcBpbX8REAFED8h+rkrUAD5K0gwCUKnW4KSIqAIfBsEjh0/iYYNG4MZmujGNHHSFMeJFi9ZhoaNmmDcuPHo0aMn+vQdgDt37jqOv3n7FhMnTUWHjp0wZco0tGndFiNHjXEcpxtTwwaNwJS/JH5rpFXrNjhy5JijDjeYVapSpcrC1Hs6YNnhN0nix0uIoUOHSen8BYtAoYUC0759B6SMLlCdu3TDqFFjJH1u3779cPHyFTAl8JAhw9C3X39MnzFb0vJ26NAR06fb0gizMbOJ9e03QAQQ7lP4Yp258xbg5KkzaN++o8Sa8BjPSUGBaWn9+/tNtt3dWzk+gsiUwtWq1gDTEjOA29DMWbPBtMeTJk3BzNlzMXDgIPlbumw5mHmMAfgcA92u+C2kx0+ewL11W1SuXFW+wWL6ofDYrl1Hwc2UVa9eC5VcbZYVU6a/ioAi8HEIqADycThpLb8RUAHEb4x+6hp0wforqAogP/VN0sEpAt8ZgZMnT6N0mQpgUPmPolu3bqFR42YisGzZvAW7d3ugQ/uOCBc2vHzb43uPi3EwQ4eNQMKESXD8hOfYme89Fj2fIuCsCNAFq1KlKqJMmDBR0/A66338GcatAsjPcBe+YAzqgvUF4GlTReAXRmDnzl2oWr0WLl2+/EOu8tDho0iVOh1WrVoDulbxQ4dbt+9Eh05dPbmCfa/B8cOJPXv1w+kzZ7/XKfU8isAvh4BVANEYkF/u9n7XC1IB5LvC/fVPpgLI18dUe1QEfhUErl69hkf2b2J872uixWHZilUoXLiofDegaLESGDnaFkT/vcfC8926dRtMG6ykCCgCn4+AVQBRC8jn46gtARVAnPwpUAHEyW+gDl8R+MURePT4iQSAMwj81asPs3b94pevl6cI/FIIWGNAVAD5pW7td78YFUC+O+Rf94Tbtu+wfQn9rxDQIPSvi632pggoAoqAIqAIKALvEbAKIOqC9R4X3fp0BFQA+XTMfqoWkgUraDD5AJrHnn0/1dh0MIqAIqAIKAKKgCLw6yCgLli/zr380VeiAsiPvgNfeP6du3Y7LCB77Ln3v7BLba4IKAKKgCKgCCgCisAHCFgtIOqC9QE8WvAJCKgA8glg/YxV12/YJOnwmGt/67Yfl3LzZ8RGx6QIKAKKgCKgCCgCXw8BqwCiLlhfD9f/x55UAHHyu+6xZy9S/ZcGadKk9/QBLie/LB2+IqAIKAKKgCKgCPxkCDx/+QoFCxYRxeeI4SN/stHpcJwJARVAnOlueTPWFy9e4vrN27h5+45mmPEGHy1SBBQBRUARUAQUga+DwMtXr9C2XUckTpIcCxYu/jqdai//lwioAPJ/edv1ohUBRUARUAQUAUVAEfh0BB49eoJLV67hydOnn95YWygCdgRUANFHQRFQBBQBRUARUAQUAUVAEVAEvhsCKoB8N6j1RIqAIqAIKAKKgCKgCCgCioAioAKIPgOKgCKgCCgCioAioAgoAoqAIvDdEFAB5LtBrSdSBBQBRUARUAQUAUVAEVAEFAEVQPQZUAQUAUVAEVAEFAFFQBFQBBSB74aACiDfDWo9kSKgCCgCioAioAgoAoqAIqAI/F8JIE+ePtM7/oMROHnyFG7evPWDR/Hpp3/67AVev37jaPj02XO8ePHCsf8tN548e/ZNv/Hy8NHjLx4+8Xj1+vUX9/OpHTx+/ORTm3xxfd53PsfPnz//4r60A58RePTY83P5NZ5Tn8/2fY9cv34Dp8+c+aSTPn/xEi9evvykNl+j8uMnT/Dq5auv0ZX08fDhI5w8dRpv3779oj5fv3mD599pDv6igX6lxmfOnMWtW36vnRcuXsT58xe+0oWfo4gAACAASURBVFm1G0Xg2yHwxQLIg4cPsXTpMuzYuQs7duzEwoWLsGTJUixatASLlyzFrVu3v93oP7Lnd+/eoUePXqhcuepHtvC52p07d30+6MuRxYuXYu7ceZgxYyZMH+/e2RocOnwYK1asxPkLF3HkyFEsWrRY/i5euuxLj8Dx4ycxe/ZcTJ8+A8ePn3DUfe0LI/j8+QtMmjQF27fvcNS3bpw+ew4nTp6yFjm2Dx46gsNHjuLAwUPYvHmLp8Vw67YdOH7C+3aODgC4t26HhYuWWIt++u2hQ4ejTJnyDiFg6tTpyJM7Hy5fufpNx/7m7VsMGz5Svjp77/6Dr36u+w8eyjvRrVuPz+6beeD79OmHevUb4c69e5/dz8c2fPXKxgi9fPkKDRs0RtOmzT+26SfV8405unrtGtxbtcWly1c+qU+t/HEInDt3HoUKFsHcefOlwZWr11C0aAn07dv/4zr4DrUofHJd+VyaM3ceOnbu+lHN37x5I3N25So1cPrMuY9q8zUq8eqGDhmGIi7F5EO3n9vnq1ev8ezZe+Xfvv0H0Lpthy9S4CxZugwVKlbGbo+9nzusT27HtdPMP5/c+Cs06NKtJ3jdftHgocPRf8Bgv6p99HGuQ1u2bMWmzZtx4eIlrFy5Clu3bvug/YMHD3Dt2vUPyi9duowZM2Zh3779OHXmLJYuWy78zr59Bz6oqwX/Xwh8sQBy9eo1NG3ujkQJEyNPnnxo2rQFat
Wui7p13ZApczYsXuL3C/OtIecLlC1bDmTKmPmLTtWkWUvMnjPvk/qgpqdwYRdZQGvXroty5Spi/ITJGDd+IubMtS2wS5evQKpUaREpQkQkS5ocDRs1Rb58BRE7VlzMmDn7g/NR2zxk6HD8HTU6SpcuhwoVXTFtxixMmTod/foNAL9U6h1Rk1W7dj1kypRV7tXyFasc1ShY9O03ABHDR5TxOg7YNzhWf/78IcBvv8tv/vyFRON9//4D5MtXQMpCBA+BKVNneG3qaf/Jk6dOp7UqWrQ4kiVJ6riOmjVrI/rf/+DGjZuOsm+xQYtCmbLlED9eAjx+8vU/+ESBN2KESGjW7POZ+Nt37qJgoSJwcSmOh48efQsYPPV55uwFkCHjcxQnVlyULVvO0/GP2Tl77gL43HpHjx4/wcnTZ7075CjjfELBi78/E9GyOHnK9C/WLP/oa1qzdj2CBA6KmTNnyVB27vJAqJBh0Llzlx89NDk/GerjJ05+0VhevnyJJ08+znr34uUr1KxVFxkzZgGf3e9FlK+KuhSTNemuD+/Lx4zlwoVLWL1mnaMqFWR8f75EgGvXvhOSJEmB7Tt2Ovr91hubNm/DsWPHv/VpfOz/6bNn4HPjFz179tyTwOdXfb+O09I0bMQo4fEiR4yMZMlSYPCQYR80o1BdsGARvPUimO/evQdlylZAiuQpEeOfGGjYqInwLb/584d27Trg88X4D4agBU6GwBcLIOZ6K7tWQf/+A8yu/NKEfv0bM2meTujLztq161C8eElfavh8iC8UhYeoUf/Bho2bfa7o5ciRo8cROVIUuBQp5sl958jRY8iaLReaNmvpaDFz1hxh4tOmSiNlXHQouJDpp4bZ0MtXr1G+fCUpHzBgkCmWl7h+/Qb4++9/4JO2vH37jkiS2MZI03ISM2Zs3LC7Q23fuRv13Roh0O8BkDd3Xke/3CBjSY3VxImTMWH8BHTu1AXnzl8QRmfZilWoV9cN/fr1R6TwEcBJZeHipZ7aO/uOh4cHKIQYd58zZ8/BpWgJnPxCJuRjcDl48DCKlygNCvrfgnr16g03t4Zf1PX2HbvQqEkLXPnGFqGVq9Zg4uRpjrGOGzcBNWrUcux/zAbf5TZtO/ioSV65eg169nr/vn1Mnz9LnfnzF6JwkaI/y3C+aBy1atXF8GEjHH20at0WHTp2cuz/qA1axoYOH4ljn/nufy7TffjoMTRq2hKnz/guHH9tXGjtLla85Gdb++i22rvPACxfsdrPoX0KNlevXUfjpi0/aT32cwC+VDh58jTadeiCmz+BR4cvw/ymh5o0bip8x7Dhoz44D2WOSpUqI8DvAbB5y4fWETZwKeyCwAECSlsqSQsWKCT9TZnmu9Lyg5NpwS+DwFcTQGrWrIPJk6Y4gOEDRl9P0r4Dh1CpUhVUrlxFJjIy4AULFkaLFq0c9WfPmY88efPDvVUbPHps0/ZS008mg/7WI0eNQbFiJXH02DFp07lLN1SrXhMmroNS9PQZs8QtadGSZciRPRdWrno/6a1atVqsEOaErE+pPm/eApgwcZIp9vb33PmLaNmyNfLmK4iNm7Y46nC7WYtWuHP3Q9eT6zduIXHCxPKCcdsrcQLtN2Awnj61XevM2XOlbvas2R1VT509j7ChwyBooCC4eu2GlE+aPFXq9erVx1HPbBDzdh06e8sIXrt+A9ReuFaqLNUPHT6KUCFDo58XoTFR/ITImT2n6VJ+t27bjnTpMqB5s+a4bGGEaZK2xnNQc0mBqbnlvlo7IuM+esx4GNcyti1cuCjmzV8o9/z2nTvW6o7tjh27oFAhF3Tt1gMP7M+U46B9g+5QDRo2wdJlKxyH7t67h3HjJsozR40N3aiM2wwtdfx7a1G/sI/6bg2RK2cekJkztGvnLhQpUgzGJ51WrSIuxeW5NHWsv2TEZ8+dL2MdPHgYypQtD7o7kXr26osKFSrhrpdnZtTosWKVatO2vbUrbN++UxiAa9ffm7bplpU9ey4MHDTEU13eU9fKVVG3nhse2M/X0r0NWrVqI/XoLkkhs02bdqA2jdS1a3c0bNjI0Q/dA1u0bI38+Qv6ql189OgxXF2riNVs0ZLl6NCpq1gVqNXltd+6fRfTps8QwY3XO3nKVDkHhfJKrlUwcsT7RYy+7dSKVa5cDXfv3Zd6XjV9nDNixYyNRAkSOQT3uXPmiQsWhfUGDRqjeYv3Aj07mTx5KgoUKAy3Bo3A941E/COEDY9sWbNjwsTJUmb+nbtwERkzZELMf2OgarWa4hJJhmPmrNnYtdsDFSq4iqvj3bt30bfvANyzuJzRopk1a3YMst8TazwM3RP5/IwcNVpOZdUQbt68VY7VqlXHYeWiC9L8BYvFxa9b957YuWu3zBOVq1QX90VanR57iY/YtXsv0qfPhNgxY6FPvwGYOn2WKE04dhLdHYoXL4WdO3fL/oEDBzFz5mwQ+46duqJ2rTpSTheLEiVKo0fPPuD7bejCxcuoXbc+ypQphz4WV6inT59h3PgJ2H/gIPjsDhzkvevH2HETUKRwUQwfYcPA9EsrLK2pvP9WzChYjhgx0lRDkybN0bFjZ8e+140ePXsjd668Dvx5/PqNG5g7dz727juA6tVriVXeavniNdWpWx/lyleU59j0yXHQmsx2vM9FihQV11Me79W7L0KHDI08ufPKemPa9OzVR9Y0rktW4ntYvoIrZs2aA3FFev4ctADMnDUX6y3KrE6duqJx42bgvfXOsrpn7z60dG+Lo8dPgs9ElarVHZZkjtGlSFFwDCTGl9SoWQcTJ9veOet4uN20WQvRPHMuNQpCm6DQH3ny5Pdk5d+zZy8KFynmmLPZnhYpWt47dPB8P+iayHe4QIFCOHXaFt9Su059hAsbHnnz5sey5StlKLyWESNtz8HceQuEEW3p3tpx/znPV6tW08E/0LpfqLALJlr4C1pvGzRqhiPHjoHrCueiBg0a4cDBw3IOus2WLFXWcX1SaP/37PlzmSNptbVq8vkuzpu3AFTyNWnaAo0aNZEWFy9eRoIEiRDj3xioXaeew/V24+Ytsi5RWL7kRflCbwK+83z3Sdb5bMDAwfLM1ahRG+cv2tysKXxxHSSfwjmSQp91jWesS//+g7Bnr83lbP3GTeJNQU+TZs1aoE6deg4rE5WZq1avkfPu279feCfuDB02QpSXXq3pfHdcihbH7j37pI1PbmbuLVvJ+u71HWajN2/fyfrK9b9163bSj9d/pUuVQcRwERzFEyZMkv7atusgZTt3e8j6Vax4Kce1OCrrxi+JwFcTQGghaNeuvTB4DIAaOmwkxk2wMfa3bt9BA7dGiBghskzCZAbiJ0jsWJBHjhyNHDnzYMqUqShcqAjq1KkvYPOF4QNdq3Y97N6zVwSYrNlyYMPGLfKiZs+W01F30eKlUjdD+ozYtmOXaOnDhgmPrVu3S1/Ll62QBdjcxcFDhmPh4iUSa8GFmYy9d8SxHjt+UhgjTsRWlzIGhTGegeZkr8QFhmMvUtgFDGD2iyZMnCL1KYBYmeJSJUtL+azZc6WLShVdZf+oD6Zga
unpouKVPPbuk3Y1qteUQ4zzCBwwECqUr+ioyvPGiRUHeXLldpRxYuT9iBQ+IoL/GUxctMiE0C/bK3FBT54iFbZt/9AsTp9918rVkDhJcly7cVPcD2hVYewLGepixcsIw+e1TzLUOXLkwrRp05EjR2707t3XaxXxJV6xao0sjF26dpfFhePmQs17QIZ6x67dKFGilCyO23d5YOHCxciQITNatLQJwVz0c+XJj6HDhmPM6DFwcSkhwixPtnXbNhGU6MJGOnjoMAoXLobT9kXWOqC3b9+B7gHhw0VA/gKFwOeSC2POnLmxYdMWbN6yFZkzZ4V767aOZoMGD0XjJs0xZfJUGSOZC7NIbNm6TYSdm/bgw0lTpmH0mHFYvHgJ+PwbKxiF4YGDh8kCSmG/StUa4uIwYcJExIkdF7dv35YFnkxKmjTpHQxCly5d0bBhYxnLjRs30KNXX4nn4rtYqHBR7NplY1gdgwUk+JrWO7r7Ucjn+1Ojps0SMXzEKCRMlBR9+w9Crz790b1HL2Fuc+fKI11QWKdAkC9/IRkf3QXatu8kTO20aTPESpk1y4eumxTAKlVyFXe0qdOmS19jxozDv9H/wZChI4QZ/O+/1Fi7boMc43OSMGESjB8/AVWrVgcZfNLsOXMRL048mR/I2FuJY2vapJkIORzL1es3UKxocYQLEw4LFy9Do8ZNQW0dNYFRI0d13KMRI0aJG9qoUaOFkc2dO6+DGaT7QYNGTTF58hRh2DJnyiKKE8q9R48eQ7cevbFu3XpUr14DVOJwIW/WrCX++y8Nhg4fhS5de2Dc+ElYu36TCCJDhg1HhoxZP3j2KNTTnYGupmvXrcf+A4dEiKpWo7bMB7du30aSxMnkunnNnGf5bnTo2BUrVqxCnlx5QNwpoJExSJs2A+hSQTpz9ixat2kvAiVj2KiMoBsiadr0mfjN32/o3LU7BgwcAjKcVk02BVJarSZNniYYZMmaQ55ftt24aTO69+iNVatWoVy5CnB1repwH6tSpZonIbVhwybo2Ml7FywqDah9nTplKpIn/w/jxo6XsXFN4jWSmVxCpVSOXMKA8SAF7Zat28k1URBLmy6DuA7z2Ny5C/C7P//yzq5fv1HeDx6ntZixjkmTJIOLSzGQkSbxnapeo5bE4vEeFizoAjK5dNHr3bc/1q3bIMqTVq3bSxnnhGh//4sxYydIe77PVCZwLixavDSme+N2e+LUGRFwKUgtW7YChQoVEWaYHTDeMtAfQTBlqs06+OLFS/To0RuLlnxoiabSa9SoMVi8ZAkKFi7qYPQGDRku7/K8ufNE4Jhnj7/h+08lkYl3W7dhEzZt2Yb169bL+TvZBa7Ll6+IcmPq1GkYOGCQzHE7d+6Sa8+VK48Irlxz6OKYLGkKpE2dVq6dMY/hw0dEW4vihesB8aDwtmbdBhEEGVdK92UKI6Sz586jdl03mYv53nJOTZkyFe4/sLlWrlu/QYQLo/iRRoD0mSVLNvD5mjF9JtKlz4i1a9fL4fTpMuCP3wPKPMp5IlWqNFi/foMoVnr27CP7PXv2FvdPxnPyHZkxfQZq1qglQrQRoPv1G4h69RuAWHCezJI5G+bNsym0Ro4ai169+2H+/AWi7EqcOKko8R48eoTo0f/FP3//i83bdmDMmLFIlDiZ4z0nP8Vnee2atTLWtes3SlzQpk2b0ax5SwQKGFhiNw8dPiL1JtqVqvSiCOD/d7k3FAD53Nar/97iTeHDza0RGNdI129iY1zDDWbmlwpIjmHQYM/uV3SHpVC6d/9BRI0URZSmZ86eN80cv2XLlEOEMOHAIHm6zv3z9z+g2/aJEydx4cJFFCzkgtWr12LsuPGYv2CRQyB1dKAbvxwCX00Aad26DRLES4hGjZuBkj0X0LH2hcCgNnrseJmIDx85juuWTEhdunTDsmXLpRq1CZky2GI1qB0LHSoMRo8ZK8fofxk6dDjRDrKAzE4a+0RGl6AY/8YUZs+cL2eO3GjRwl12ZXIvWky2qe1p1bqdCB+3b99BiWIlRDgyTJ9pT4vCth02JoWMZYFCLrKwm+O+/ZKB4ctKjbOVyLxyEaP2f/+BA9i/3xaINWOmzQVLBBBLg6pVqkk/ZOQoIFBDGyxwUPgkgFiaetqkKT3Ab/5RrVoNKT9+8pRYVkqXLO2o9+r1G8SJGRu5cuRylNHl4NjxE6JJunf/oWiFeF0FyEA6atk2qCEm8+sdsR9qeFyrVMetW3fEncmlaEls3LhJqlOQ46JipXd4J4zFlSu2YF9q5clUeCVqbCq5VpVniJaeMePGi8sbJ1VaeajVInExCRMmHLZsswml7dp1BJlFEgPl+ttd2rhd1CKUbtu23ZMAwkmei7JPfuCcjJMlSynMLPvmwhsmdDiYmBu+BxS4SR579iDlf2llEuY+F9OQf4VyaHv5rFDwpZDLxbRKtZoijHHBJxMZP258aUPtuGEYOX73Vu1E2Cc21G4ZYYmCY6069R3PT+fO7wUQWlBovTp46BDI8HOB4ALmlebNXyRMtSmnRYUWIZrhL168hMyZs6Fd+47mMHbt3O3J/XHHzt2oa18EFyxYhFSp0zvqMviYsWRWy5o5SEVF9uzvhWMyLTFjxAStIyQKmp072zTQtPqwb1KP7j0RI0YsMIsSBR4Kst4FS7LuiuUrxWoiDQF5X5InSwkyCCQyUGROaJEzMS85sufE0KG2RZnCasKEiXH8hC0pRImSZdC6jU0jyOcgatToYjFgX1279wKfQWI2ZMhQBPg9oLxrFKLousnATRJdKuk2Zq6HDKd34yejW9X+frPd0uWr0KZdJ4clo2fPXihpf9/79x+IKJGiYtUqm6a0d59+oqk+f8EWY0CrFV0tSVQE0UpgiElHGJ9GzTC17bTGUdAlPXv+wpMAIu9OEdu8y+O0dFOIJrPGQH5aDNhH9+69EDxYCIeF0qsAQuHPJwsIExFct1sIKRzVq2dTYPEZiBYlmmDKc2/dvgP//htLnv3Ro8eiYkWbNZjHmDAlbpz4olghtuynd+/3Vub48RLKPWLdggUKg4ykaRc7djzRwnOfFrxoUaMLHhRAkiZNDmJAOnToMKiIuXzlilgoqKkm0UpJCwBpz559DiFaCuz/jh47IRausWPHSQmficRJUmCGPU6Gguoc+zxH66Zhhq19cLtuvYYg00micsNjzz7QitW6bXvQTZnrYfZs2ZEgfiKpc/ToUXm3TTIZrmd8Vqmw6da9h6xNe/ftl3NbGVNaMLjmkmrWrucQOmkJKFashKd5ZfHS5Zg2Y7YInxReadEkUelSrnwlrFm9Rs5H5jl48BDyPNN6U6deQ2zabPNIoMWlStWajsx0TIhiVeRJh4Awutb7nipVamHAebxNm7YS27fbY49U79CxC5o1t/EPl69cQ2GX4vKs8mD7Dp0czwOTeCROlEQsW7RUZMqcXYK2WY8urnHjxJP5jM9Gzlx5cfbs+0QC/6VMJYocPitUbFAJYBJg8Nrn2GNO6XqXM2ceLF/+3sIvgwRQukx5UIFJ4n1KniyFCDDcnzd/AcKHC+9I4MB1lvGm
JFr002XILMHh3G/Xtj0SxE/ouEapZPnXvFkLud8DBw+1lNo2TYY6Wm3IH1itVaYyvS/Chgotzz6TSlBZbNy1yGNQiBo08L0F1TtFqulLf38NBL6aAEINI9056PZCV5rxEyaJ5s4KEzVIfDiNFt56jIzW7LkLhMHOnjWHHOJLlyhhEuy1a5qojWKMA33OSXQF4EJhiFoQq9DTqWMnYWB5fM2atY7Fl8x3w8bNxGzr1rARKlWsjO7de+KxF0sGTY10xVi0eIlo3tOmTQ+3+g0lhaA5p0+/U+0CSKlSZfH6zftgVU60DNj/J1p0UPvBxZ1EUzSxycYJyNKpsYAsX7FSJtR8efMjUICA4ILkEzFVI6079KEeMngoVq9Zi/37DyLgb7+LtobtOEEH+SMwyper4OiGAkjcWHE8CSCOg5YNxpIE/zM4rlx97xa0ceNmsRp45z5gmtKPtp5bYxyzj50axPDhIiJn9hwfYG/a8Hfv3v1YsmylaPLJMHtHvO8BAwRC6v9SO9wFli9fCTIOZMhJc+bMFcaJjCCpabPmjmeC+2TSKdRS08Z7tGSpTShmxg8y4tT0kMjw0jpw5fJVcWXiZDt48BCQqaFr0ps3b1G0WCnJ9sH61MZRM+thz9hCZo+ZZUhkKGgNsJJxV2QZ3wu6CpA2bNqMDBkygVYwujqUL18Bffr0xbwFiyVAlVnovBKFdgYGXrpsw4CuOFWr13IIJGTqmtkzSS1bsVo0ucwWR5M+47q8W0gYEEu3HUN0j2SQoaGGjZtj1Wqbpo5lzIxXqlQZcxhz5y1E/QY2qwtdBDJkyiqMAStQW20soI4G9g0u5ukzZBIhgEVULjAA3hDdI5gAwxDnD2oKmbGMi/Kdu/eF+aOrkplTTF3zO3XKNNECGmUENZ8UfA1TwHoUQClY3LxpS0JApp8uNCRmiGECCUPUgNawWwtYxmulGykX1wIFi4iFhVrtihVdZV6k9ZhZgvhMUKliiC5OIf4KJZYKr4K6qUMXVD67hkaOHitZ56hIIbVq1Rp58+SXbVrQGNRs0piSAaam2lDp0mUxcKAtxozuT3TbMUTmj2NfZmeGaLHjPfaOWKes5dkwdY4cOSYutZxPqYGlBYPjM26OVavWAC1Khho0bOyjBYQubQxyHjdhsihQ6tW1CSB0i0ufLqNDUUIGkIIjLQ98x1rarZ88B90TiblREnDMVmavSuVq6GiPQaGrF62rpJYt3YWhs87xDRs0Ehc/Hh83brzM61alDsu79+jjYBo5X/P6Q4UI7WCopXPLP87f+Qu6wDDHPMT1iG7NJGqUmVyEREXCVm+s0DxGfGk9iRQ+klgPWLZr9x555/je09LE+WWgnRGkkozvmLEscF5g0hE+19yuU6sOPPbsRcfO3RzKNBmE/R8F0rLlKorrmilv3txdNPFmn26OFGIp/NB6QxdXEt2s6KpdqnRZcdGrUrkqGrg1lGeWLqxcS2hRNkSlIuciEmMajbutOW79JeNLQTh6tOhi2eSx0aPHeHoHGjRqIpZkHqNSKXee/A6rF8u2bduBdh06SZB+zhy55J3mu5Yrdz5QAUnq1buP4MVtrgM8RkuzIbof0cODNHTIUNSp837+opsbk80YqlipCmbPtgmtpix3nnwSj2j2+UtXda4LpFOnTiNxkmTiIsl9ukJmy5ZT5jNinTVrDiywv7tU6HincJKOADRr2kye5cGW2Cweo6sreYKFixaLtZB8TNo06Rzu8aY9Fb3ke5gJy7gIm2P8peWLbfPnKwDyIkq/PgJfTQCh6XnSpPc+1fR3pX8xmb0b9oV6z94D6NSpi2gLuFgaYhYoarCp1aXPbvr0GeXQ0aPHES9uAuze7SH7TP1KbRP9sUlMy0gpmkS/zdSp0mLUaNsExDI3twYOVwFq2stXqCR1yUB279XX00NOrQv9b620YuUqcd+gBpNuI3SnYszIx2TeoF9qnDhxESxIUNByYKWWLdzlRTOLGI8ZAcQaA3Lw8FExUf7151+4bY8ZoIaXL6lPqVOpiXjw4BH6DxyM6tVripBFf1i2p4WKiwuJFpRQIUJh0GDPcQR0T6E7hm9ETTc18adOv18ouvboLe4jpp13bmlk3OrWbwS6rhlivETxYiXEIkBzvFciA0xmlVq67t16SOpVr3VMPAP919lXlMjRREggs033I6P9X7BgIeLGSeBIM8wYJLqTkdiW2r3Bg4dKoCGZEWpFSXRDoPsWtZckWj7IgD58+FDiPKglI0NTr14Die3golukaAnMX2Azu2/YuAlJk6aQhZrt6QNcumx56WvmjJmIECEybliCG8nMN7ELBWRG6ZNPWr9hI4qVKI0nzzx/f4Lmbi4qXp9LMs0nT50S9wCT+pmawvoNmjiEsh49e6G1PUZkztwFwsTJyez/7j985Ai+N+V0ealhd2liGRdJWrZITLfZtHkrT2548+fP92QBYTA5M+eZZCnduvWUhY9zAwV2vufeUfv2HRxzA49TC0lriaE69dwcuDFtJK1CFEK2bt0qcwMZZ5r/6arlUzYbuh9lyfI+DqtDh07iv27OwV8ygfQvN5ieOXNO9uvXd5P5gsoXQ4xjI9Pbtk07iUNgrAPfDVpPxAJoiSnjonvlyjVw3ilesgyodSWZuAXGhpQvV16EYzI/XolxC4yvMTR95hz07vs+MUjbtu1AwYI0ffpMwZIWN1L79p2QOUs22eY/WkBMnA5jO5hpzxAFLzJSFIpIFHoo0HhHtLCQkfJKHH/zFjatv/WYsSpRoWVcqXi8eQt3dO9hY/qt9RlPwaBwxk/w/WzSpKkIzqzD5B3UMJtsZYx3oABL4cerBcR2TXkdTDTjUhZZhCq+/2bOpeBmrKWdOnYWi6WJReR5KUzSpYz0+u078cenK1/efAUccxFjpmbOtrm4GZe1bl27i6KCLlJeickoqD1nnALpwcPHovAZY/cy4LdpOAfRasZ4QZ+Yb1rySe4t3ZE0WUpxu+L8McCLW4181+fVKzBFfImSpcVyyHbVatT6wPJLt1+3hk3Ewi2dW/7RaluiZFmHoo2HKOQwHsAQ5ykqNYxFyKzDVFBVtwjvpj5/6RJGN1Mr7gAAIABJREFUCwgVNIbI4FMwZiyiT0I65yfON92695JmdWrXdSSyoDsr47iM5YRzsJtdUbJl63Zx0TXfbqHCi/wG3R0p/NL1kWloSbREUwiggEt+wawDtPymSZvBYZFj3Xr13CTulds9evRELcv1MtmMub88zjmH6fwNMR4pdeq0H2S95LM7bJhNcKGHBd2iTSwK57dMmbI41jK6H9IaQesG51JaN30iermQ9xjh5fmkZZoWUsbvUrmWL3ce8bY4cOiIp65KliiFf6L946nM6w7nlNgxY4tL3T17PKDXOrr/6yDw1QQQatJp4ieZCZVMeJ9+g8R3lq4xh4/YXCWozUmZMjXov0jKlTuv5BvnNl/IuLHjSjkfRvrSb9iwUfbpb06XLGN2JcNCTTWJGqj/UqQSjSL3KWGXKFXWEZRM5pOBgyROeAwoZ5CsIWo4fPuOBft3a9jUU+Agr9NM6KY
kAJKAElEAAIXLpyFWlSp0XYkKGwb9/+AHBFeglKQAkEVAIqgATUO6vXpQSUgBJQAoGKwMVLV5AoQUKECx0GR48dD1TXrherBJSA/yKgAoj/ul9aWyWgBJSAElACvhK4fdcVSRIlQdiQoXHq9Flf8+hGJaAElIBfIKACiF+4C1oHJaAElIASUAI/QGDTlq0YNnwUNmzaguEjxyBZ4qSIGvE39OjZB5u2bsfQ4SOxavWaHyhZD1ECSkAJOI6ACiCOY6slKwEloASUgBJwKIHVa9cjapRoiOAUDn/EiYvkiZMgacJEiBcrDmJEi46gQYJi1OixDq2DFq4ElIAS+F4CKoB8LzHNrwSUgBJQAkrADxGoWqUaggQJgji/xxDhI3mSpEgQN55sy/NnXnh6vfNDtdWqKAEloAQ0CpY+A0pACSgBJaAE/DWBQ0eOIX7cP8T1KkmChEiWOAnixIiJ36NGw8JFS/31tWnllYASCJgE1AISMO+rXpUSUAJKQAkEIgIVK1ZB2BAhkTh+AqRIkhROIUOhVKmygYiAXqoSUAL+iYAKIP7pbmldlYASUAJKQAn4QmCfy0H8HjU6YkSJiphRoyJ61GjYuGmrLzl1kxJQAkrg1xNQAeTX3wOtgRJQAkpACSiBf02gTp16iBjWCRHChEWZMuX/dXlagBJQAkrAUQRUAHEUWS1XCSgBJaAElMB/SGDv/gMIGSw4ggUJgs1bt/+HZ9ZTKQEloAS+j0CgEUDu3XPFkqXLv49OIM693+UANm1W8/2vegT4ReOjx078qtN/8byr1qzDxUuXv7jfvGPc+ElwOXDQvMnX5Q8fPsj3Cw4fOerrfkdu5HO+ddsOR57Cz5Xt9e4dFixcjJs3bzmkbh8+fMSIkWOwZ+8+h5SvhX6ZgIenJ3LnzoPcf+YJkJGvXA4ehuv9+18G4MA9bKf4TRWXA4cceJZ/VzTb0A0bNn1WyNu3b/Hx48fPtps3nDx1GqvXrDNv+q7lg4cOC5/37z9813EPHz7C0GEj8OTJk3887tDho19tVyZOmmK7hus3bko79+nTJ1/LnTV7ri2vrxm+YaOHhyfmzV+IBw8efpbb9f4DGXP+E/fPDgxEG/61AHL3nitat22PRg0bo1WrNmjRsjWaNGmOBg0biwmYHfyvTnwh/syVB1kyZf3hO30qlAAAIABJREFUqrBhdx40FAUKFMb27Tv/sZwZM+egdu3/oVbN2mjZqi1ev3nzj8f4lQxXr11H2DBO6NTpbz9RpXfv3oODJkclNtq169RDixYt0aJFK7Ro2QZly5THtOkzbackkxo166BChcro9HcXsE5GevHyFXr26otixUqgY6fOcHd/a+yS3+vXb2DM2PE+tn1tZfeevWjcrBUuX7n2xWwTJ09D9eo10b5dBzRv3gp16zaQ5+2CSTiYOWsOSpQoDYboXLZshY+y9u5zQe06dVG8eElfBfPFi5dh3boNPo7hyq7de1Gzdj0cOXrss332Gyjwnz59xn7zZ+tsoOcvWIRz589/tu97NrDB53v6rencuQsIHSosenTv+dkhrHvJkmXQqFET1KtbH1OmTpM8Hz99woiRo1GkcBG0btMe933peD4rzI9taN68JSJHioJz5y86pGbG/Txx4qRDyv8Zhbq/fYtLl6/+jKK+uQwKfA0bNsa+fft9HDN5ynRkz54THf/qhHfvvdsVH5m+Y2Xq9JnyPn3HIbasFy5cRKnSZbFu/efvvi3TPyywrStatIS0pQ8ePrLlnj59FkqXKoPOf3dG1SrVMXv2PNu+nj37IF/+gtLHvv/g+0B59Jjx6NNvIF6+fCnHUSCYOHEyxo2baCvHvPB3564oU7YCTpw8bd78w8t8rufOW4DTZ/zmF+Zv37mLCOEionmzFj6uke3i+YuXwcF5lSrVUPd/9dC+Qydwuzndun0HzVu0xthxE8ybv3n5zNlzmDtvIXhfvic9e/ZMuL548eKrh+3bfwDJkqdGp7+7fjHf8uUrceDgIbx+44YM6TMhc8bMX8zL/o19rZE4xvje949j3XBO4XHnzl2jGPn9+PET8ucrgIzpM/rYris+CfxrAeT16zfYum07MmXMLIPtbdt2YN3a9di0aQsGDxmGvftdfJ7xF6yx4ejduy8qVKj0Q2e/f/8h8ucvjIYNm2DBgkW48Q2aQw5u+ADmy5sfW7ZuxzsHDqB/6KK+ctCz589RulRZ9OzZ+yu5/rtdy1c69iu+9x88wPIVqxA/Xnw0btQEO3bswtix45EyZWp07dYTFGBfvHiJ1WvWCpPQoZ0w1tTpeXp5Ydv2HSLEzJ07H9QAm5Oz82BEjBgJl6/884CH5+nZux+O/YP149SZs2jRvCVix4oDNrrr129Enf/VQ7r0GeV95Pk5+F+yZBkK5C+IhAkT456rd4dDxYHzoCGoW68BOOgwJwpQKVOkRtmyvvuQ79i5C7Vq1/2sAzOX8V8v37pzFxcuXfmu03p6eqFokWLo12/AZ8ddvHgJ8+YtQLzYlm8pcIKvkYYMHY4okaNh0eIleOvhYWz2N79Ll/KZKISz5/6dwOdvLtiXih4/eRrrN2zxZY/jNp27cBGZMmXF/PkLbSeh5rd+g8aYMGEismXLgQsXL9n2/ejC27c//kxSCx4lSjTMmjXnu0/v/tYDTZu1ROs27bB8+QrRLufPX0gGvCyM7VHqVGnk2yTFihbHeVO7w/FCo8bNsG79RvimsD5y9Di69eiNR4+8BZpFi5ciWNDgaNS46Wd17d6jN9q37wg+6737DRTFyWeZAtiGly9foWKFSujY0afikIqGw0eP49y586hYsbJERtu8ZRvc3Nw/I8C+v3qNOlhqp7D6LOMv2HD79h00a94K/QcM+sezv/XwRNeu3VGufMV/zMsMT58+w8rV32/9Yf+aI8efYN3MiVaX/v0HokSJUviSBcacP7Au/2sBxADXrv1fmD17rrHq4/fO3XvYvXsPDh8+isePn+Dho8fYuXM39u13gWEce/z4MebMmfuZduHVa4vl4OjRY1i7dr2t3F279mCvnSbJ3cOi/bxx8yYWL/YZ+/zIkaOoVKma7XguUGKfOWs2Hj3+sunv2vXrqFCpiq8aIWq0tm3fiS81+IMHD8NA58G2c7JhfW81g27YsNFHA2zLZFrgQz1r1mycO3/BtBWgxmDxkqVYsWIVOPg1Jzd3S+dDM+yRI95aag5SqeEwJw9Py7F0laDgaE5DhwxDmzbtzJuwectWLFvm043NKOPp06dSpydPnsox5LZ8xUofx3OF93/GjJnSGJp3Gh/K2rp1G44d93Y9Yhm5cubG+PETcN7KgQ3tihUrsXHjZjx99sxWzPMXL8VS4u7ujp07d2H79h04fuIkLl22DExdXe9j4aLFuHXrtu0Y8wIF1KWm6xsxYqR0lqfPnLNlu3z1OhYsXoaChYpg5crVtu1c2Lp9F9gJ2yfnwcOQKFESdOva3X7XZ+sbNm3F4qU+rRXMtHnzFixYsNCH5YWa1MJFivkoI16ceMiRPYePbS4Hj6BHzz4oVKSYj+Pv3X+A3Xt9amN54Jmz
51GxcjWkT58Jx4/7rsVmB9WydVsf57FfefL0GR49emzb/NH6sm/ctPmzZ/rlqzfyXBuZKRwsXLgILr5YUGmxmjtvvu295XtcsGBhdO/RC0ePHTeKwNWr1zBjxiyw/bFPq1atxv37DzB/wcKvWvp4DTly5EL9eg2kiJu3bqFZ89afWaeo8eLzYO92RE0c0/UbN7B+w0a8t9MOso4LFy728a4yv5fVwkaBlPtfvnot5Vy8dEkETKPNunLlKnbu2gN2uEzUPtLadfuOz3dddlr/sb2qUrWGaAqN7fdc72P+/AU4bnr3jH18JylsGYl5qDnke/rs2XPZvGfPXqxd590+U/P9xs1y7czAe8lOnulLQtvr16+lTTO383KA9R/bvJWrfL5zxn4K4PwzEuu4Z+9+2zW6ubtj3z4X0JK5YeMmeV44UOOzYyQeM3fuPLB9MSdpO61tBl0uvpYeP3kiboeGssHLy0ue9Xv374vrIjXR1LTyVdi+YxeSJUmGEsVL4v7DR7a2g+0+33Wzq+PLV6/k2b5509J2sS3ctXuPrSoHDx7y8U5dunRZ8l+8fAVubm6iQKGikO0hFXJMb9zcsWDhIhw6fETWqWihlvn02fM2JQqZHLNasu7du/fZ822rgFhH90h7aXZpnDlzFipWqGwbhD159gJOYcKKosd8LJfv3f/cjYXb+bxMmjId5naY2/n+9uzVB7369IfRtnA7XYcrValhs4ZS0UIvBN8SGRiJbQJdtZnYR/CZte8rnj57brPAMB/vCxUV9hbh127u8PT0lPbLcHXku8IB6+rVa0SpZZyXv0a/wT7NXNb/2TsL8KqurA23ndp0WopLcXd3d49hQQLBCZpAEizBPSQkBAju7u5aCsVarLSUFodSqE+nM9OZ2vqfd927T869uaHQ0inwZz1Pcu89ss85+2xZ61vfWnvTpi1y5cpV+6H6HRCItnr3bqJBxg48QrBQjEC9Yv77zsnAWLZileA9v5/QD9u0bS9nz55L9rDt23doX3U/4NZtR/2x/e69ey50MNPu3M/h983bn2h98d3UBX1nw8ZN8rlTp2Af7cAYCmvWrNV2bS+P8fofzrESfSUsfJB6NVavXiMfX3ZlFKBbMr7SF0eMGCXVq9eUdes3uBgT1D/zlbugjzKOoHN269HLY12dOHFSOnfupqceO3ZcDBuI/r9n7z6rvwJYAmBet+klRu9lLNi0ORGA3bdvv+w/kJSBgz69atVqax4w94t+Rr3T5uwGv9n/Z38+MgOkc+eu6h0wD3Txw48U+ec3aC3IT/p0GXXQu3zlirz88t8kMLCTHk4lB/XsK3Vq15Py5SvKps1bdTsVXaZMOXUJzp23UAoXLiYjR43RxhoZOUKKFSspNBC9xrnzUrFCJRk0aIgsWb5K97Vs0UqgxyDbt21X+oz+0MH/oAyJGCa1ateV1m0CtLOYffZPqGTjJkySkydPydSp8YKSawTjo3/oQGFQ8iSDBw+VsDAHGoGnyM+vufj5NpODh49oBy9btnyyFI7bn3wqYeGD1S1dvHgpAZlCGMRQaFv5t5EGDRtLi5atBQ4lMnbcBClftrysWrNeXayUz2B85doNdUXXrdtAoAMh8+YtkEqVKsus2fP0+bJnyyETJiQiC0OGREhoaLgey+C1YtUaGRAartb+6DHjdTt1AYUHr8G+g29K9eq1pGVLf6U2rN+4Wd9BROQIy0Dj2ngU6jdoJOXKVdDBGaMsOiZWWrZsLXPmLVD6U4UKleX9iw6jiw6cNlVqqVWrjqxbv1EHjEFDIsS/dTspWbK09OjeU+8FJalokeJy6PARgdJQsGBhpd3lyZ1X752DqMNq1WopyqYn2f4x+UKhYsAz0syvuWTPmkPwkBjhue7e+1wYgNOkTmcNPLhuFyxamoRXv2Xrdrl+87bMnDlLChYoZCnNpjz75+1PPpHefUN0YrVvX7Nug7QLCJSq1WpIaNhAMUYeA079eo2sQ1E6cMHT7owwCWzd5lDM8Gg0btTU7BJQRdqKXW7f/kTWrN0gX379d/Wc9HRz55tjP/roY6Wk2Q1As49PlMj8+QsKhj8DYGv/NuLt5S0HDh1W7wnv3yjRDL60caO8U/aUuGnSuk07qVq1hg70pmwUKFCwMmXLi49PM2GCReHOlCGT5M2dV72uHItiHhwSqv2nRo1aYqcD8U68vJvJ+IlR2l5jYqaY4j1+Xr16XQoWLCITJ0XJpKgY2esW3Es/OHT4qHTtFqTX27nLgazv3rNHSpcuK9MTZsrKNesld668EhMTa13jzt17MmhwpLB+Q758BWXJUgclBe9Ujeo1ldbK+6lRs7a+C7jveFOLFCkuY8c7+ireuvTpM1oGwoX3L0qv3n1l5KixMn5ClNaDdUHnFxQib9/mqoyyCWS0cRNvqVu3vlSpWkM5zeacLVu3SZeuPXQcpi3gHVu8dLmkSZNO6tVrKNeckybGHx449sdNnSZePs0EowaDa8OmzdKseSuJjBwuS5aucFEozHUwuFAu8KwVKVJMaYVm33ff/VNGjR4rDRs2ER/f5hIyIMxSFAFgdu89ID2CekuVKtV1LOG8qKgY+ctfXpDQ0IFaDO/Iv3WAxMXP0DmkZIlSOj5AqUGoR8YqaEA8r1EADhx8U1q3bieMOVCb3KkWerLtH0pEzZp15NVXXpWTp96VE6fekXTpMsqSZSv13rK9kVXgyqP0DBwcIfnz5JOC+QvKhEmTtRQUFeYMby8fKZCvoIDyIyhWvr7NJH3adDJlSpwq3RkzZpKQ4BDhHgHXsmR+Q3bt3qfHjxo9To2B0iXL6JiJQViseGnp2duhnH79978rKOHn10LSp80gAwcOVuOVd5w5SxZBGUfatm0vzz7znPToHiRTp8+UbFmzq6HvCdWFGZAzey6pWL6ibNy0xfLmAC7Zj2/SuKn87a+vCPEcCOPD6TPnkqUpoxTybjwJ/PshEcNdvCZQDM175xwUyA6BnZPM0yhkFStUlr59+8mefQd1PmW+h566cPEyqVatpiLY/3WuIg8YVqJ4KTnnpJUynzGGMA5VqFBF8CogAA1Q6mjrrMPC+6SM2KnTVddo1NhLxzYzb48YMVoqV6oizC1Qo0qXLieMITdufaLzO+OXuSblUx9Q3KCIOoAERz2yb/TosdLPBgxdvXZDttliQuKmTpdxE6L0Pu/3r2+//rJkyXKPh6xctUZatmojxYqWUGPnRydtsF279tKli0PhZtvO3XulT9/+MmzEKFW47W3AFIzyHRExTOsLg4OxqUzpctoe9x86rHXRPiDQHC7TZsyS8IGDZO68BVKnbgMdLwzQMWfufKlbr5G84wRRAER8ffxk6rQZ4uvbXAoVKirHTziM7XPnzkuB/AVlw/qN2naaNvWWbJmzqJ509KgjbGDvvoMS2LGLVKhYWSjbCJ727j166d+wYSOlRfNWSQxVjsXgaNCgoWzYuFnHY1gIH1y8qAAEnpnaderLV19/o+2fea5VqzZ6CcCcEiVKybTpMwT2R6GChdVA2r5jl3oYy5StoJQ1x/38ot4qX78WUrlyVdW
jvnZS2XAK1K/XQLZs2yGBHTsL+qAd1DDP82d+PjIDhHgB6EYTJ01WuhOdY/acREsbSoN/67Y6KH32xZeSMGuOYGkjXeFZd+qq35nIatasrd9nJMzSQTQ4ZID+BtHOmDGzrHCu7Epnq1athu7j5Tz7zLMafIcFzaCXLm0GSUiYpfvxOOB+NDJm3ET1xPC7R4+eqkibfeYTykrpUmW0M2AAFSlSVKpUrmoheea45D5R4okXQGhoULJe/MvzgkKJgDJQrruggHXs3E0IuEKGDR+pkyffJ0VN1snYnIOCgwGA4HKkDuicSHhYuOTNW0AuOWMJMBAiI4fpvpCQUK3b8PDB+hsr+7VXU1kNdOiQCOUks/PixUsSFR2nx/GvfLmKMnHCJPn07meSJVNm7chmwi5SuKgEBfXRTn3+/AXJmSOPFQcQExsv6zc4JjZoLBiMSEjIAL0XYogQFGhvL1/9DorAhG4Q4PkLFkuXbg6j48zZc1Krdj11JeMRee3V13SwY5DBa4QyUK9+I7l+zRFsi3sZ7xtotSfBOISuBBoyZ+4CmTQ5Rp/dfizKOQoEgoco1auvyz0nLWD+oiUuKAbHzJg5x0J2UDJnz55jL87l+5mzZzVmw67Ug5J2697LQviYnCZPjtHzQEAxavqHhMr48RMlflqCzF+wyKVMPFPznNtQ5DKkyyCdnX0NuseyFatdjocvvWiJQxEGvUTZ9RS/hIeC94lR6Emov0wZs8iFCxcUTa1Xt7688NzzsnylA0kn5maRkwO+atUayZghkxx10jVXr1knIHUI75h6w1jBiOjarafwHhG/5q002JnvTHwG+QOF7tOvvwIf7OvePUjq1K6r52zevFXaB3bW75SZ9Y1sEjslsW3rDg//UJaeeeYZ9Ui6754zb6EO8mw/f+F9yZsnv7z77mmdGDgHZeG/P/6kKBQTNwKPvVOX7oqC8xuDjfEK6t216zclb648kitHLrnsBAyKFSshAwaE6bnULQaGkS6du4oxohJmzZOffv5FgR4mODOZmmP5BNVFkTdcdu6vk9PDg9fP29vR9wjiDLfxrfPlKyBxcVO1KMZ5Yo+MmP7/z3/9W4KDB6jihTFC32jR0vHMeAV8m7WUy5eTIrrEUE2McrRr2n/uXLmt2By81CgQRqA8BDiVkiVLl0vk8FG6i3GzVp0Gwn0jTMoGdeQ3njCAIGTUmAmycZMD6Pr+39/LqLEThHtHiK0KCe6v3urWbdvLDz/9rEAWRpTdO6sHe/gXHz9N2wpGwM1bn+h4jOcXxZT2gPfOCKCU6Y9sK168tDWely1VRrJkekNMTERsbJye365tgNz74isBYKM86u7ajVuSPk1apRRSDgpvjqzZJV/uvOZSioCi9KLw165dV1q18td9jJ1w2ZGjR49qmYMGOuYFlE2uUapEKVWI/Vu2ktdfTSXfOj17VuHOLydPnZZsb2TTcwAFVntI/HLwzSO638xbnIrRnVzsQFRUtPTs6bg/9+vFTImTIUMdc5rZR7l2bzPjZO06DVy8oxyL56dwwcKS+vU0qiSyDSMPIxwvGJ7COnXqyzGnoYQ+AuUVoAcZPCTCYjgsWbJMAVbGFLwQzz3znCqJd+7clQ8//EiDrO1sCBRMYg4RQNMX/vKCLHaOu6GhYTpvn3dSJOmfjGEI3oyq1WpabZyYw9x58ltgzqhRYzQWVw9WveNrAQw2whyB7vNrsm37Lp3zMQrssmPnbqnfoLFuwjNB29i61UFdKligsLRt01b3ASpPT5it3+fPXyiTJ3sGeQD+AK/LlS2v7ZJ5O3PGTAr8MfYxVtVv0MRiMcTFT5fUr6e1vAIo1Vu2OPrx6DHjFIQ44wRridXNnCmL5TXt0zdYAGExjo6yZk7GzLJ4sYNqiEeibdsA61FXrl6nyj8b8JoWLFhU0GGJAewQ2EVu3XbEfGCw58qZW/U762TnF+bXTBkzK2jIpiFDI634FcoM7NTNAoEAUKtUqabePtPnAITQY0nkQqwiFEUEnbZJEy/9/u2330r+/IUs4JO5B9YMgkfnpedfFDLjIS1a+gsU8cdJHpkBQgBd44ZNhE4KQsPDJsx0NEDzwCDQOXLkthQPs50XC6p79vx7Ur5cBYEfijChFCpYRHAvI6BD2bLltDLrRMdMUaNHd4oIfNMFCxMVMFzsBsHlBbdo0UoPxRgYOHioTrojRo6W6tWqS6lSZeUrm3eDA0E0Kleupt4CfoPSZcmcVS1Tc837fWIQGA8Ix0GlYDAx0rN3XzUuzG/zCW0ENMEILmgmUpQ+0FIoTEagJVSvUVsVfuqwQYNG8rVTQRs7dpwiNuZYzg0KcjRi6HDlylV0UQa8vHyVksLxBOZiQCFHj53QIGc8VmFhA+WNzG9I1cpVdZ+/fxsJCUmkasFjBulEoDdQr5cvX5YffvxJvTYg4SHBA6RJk6aSLWsORUPfPnZMcubMI8TaIDMTZikKgpmA0lmjRm3L6GOiwm0OtQWDkk72xZcOhRQvQfy06VqG+YeXK94ZVPfBxUvJZjBh8uPemHhR9NKmzSCff5FI7zLlobQcfDMxcA2vAgMFsnX7Lhc3L8jkVhstZOjQCMGrkpwcO35cUbgvvnTQ2DgOlBTj2ggeBRBrZNfuPUoliY6eIsWLlVA0yBxnPqG9gOYZVATUD0MTr8F3//y3rLZ5QJhsMBAMZQ1DvkL5SkqtM+XZPxNmzRUC3RGC8olzMmgY2wI7drWQVGgsBMQbgRIxeEgiJa1T564CSICgYBM4z4TRoX2gZEifUc6ee08mT452ideAUmgoV4EdOkrPXo4JHcST+mDS7t8/VEEJklAcPX5SatWso5QJcx8E7w8f5hldNcfw+ePPvyi6RHahn9yyyYwaOVqqVqkmGPOUx6SMxwupVrWG5aWFNoqngfHn1KlTSi381gnCcCxeQAwzZNy4Ccph1x9QD2bNUcWY32RvGTNugtmlY5zxrCxfuVqVl107HW3GE+roMECaWXSrs2fPK5Xz+s1bqggYuhnXYUzAEwqQki9vAeU1c2H4/BiDRuCZ441CCO4OGTBQKZ8oGCNGjlGEEzABMeOT/hBRAAGjwigSbJ8+fYZUrVpd44zweNnBrK3bduh9Hj12XNq16yBznQkCOI84CwMIQBM0wBaUNJPUAa/J0MiRlvHM/eItA/0GjUYZwrOLzJu/SMei7dsd9DLTj3RnMv+mxU/XNgBYYRcMj1defsUlIQTIqx3hxXOHoQRo8Oorf5MK5SrIf39wBPXyjmlbBmlv2tRLnn3mGbniNFJZhNCrSaKHk4xGqV5LJXPmzFUjyngcOZ5yBvR3AHv2e9y1e7fug7+ObNi0RX+bOKkmjZpI9qzZkngT7GWgeBH/mTljZj3XPagZA7lg/kJqqKDE4p3CQEtOuHZQkGcDBG/boCGRFpWbMjBARtjiF9988y2pU6+hnPZAKUJhRyk1wvvo1csRxE1WJi9vX6u+OQYl9eRJRzvGWw0NGmH+AEhBITx3/oJUrFRVLtjofV7efpayy/FQ3wDHEDzsjRo1lW+cwfXjx41Xvcck1NA5Nt
hBIYNK261HYr9jjoIqa2iLgLIGyKOfbdi0VX7+JTEmEW/AgxggKOjDR45NQnHCyzt85Gi9b/4BkF1zGvy0GaO3fP/991K2bAVp2sRLvvo6kTUC/cj8mVhJDCSYHAZYYszu46SRMRc28fKz6pn+SdyiEebfWbMceibtu19IuJxwejkWLVyieqg5Fn2kePGSSodlG33e6Au7du12Aag7duwsdevU1XGvQ4eO8vKLLwvg57RpCeq9MmXi6UOPcqfqsf+tt46oQQToicTGxUtbJ3CCARLUq59Fw4JqDGuDRAIIZZo4HDypObLnUlCLfTBxiNFFmG/pb+imvFvmytmz5+o+9DfonUYA6MY5Pedm25/9+cgMEF4Y/HojoHga4/HLL6o4s/3q9ZsC1adY0eKWQsR2EGkmqc1bt0tkxDCdzNmO2xE39IkTDgOEOIXcufMpT5X9KF5ly5RXxI/fuKDm2Lwuw4ePsCZtDBDjAYHGETl8tHI8obFs27ZdFUd3bjLWfuduQYomUT6CK+tBs0MpjSnMQQHg3OnTE3SgcZTk8IAY1NZs43PhgoXSrVvSwDqUbpAce2wFwXt16zZUFzfoAVx4w8sGyS9XlgnMkUEKd3vHwI56KdABGjkKqRGsatN4cYuiMCN4aRh44ZyCGOzevVfgIn//3x8UWQ5zUrWoP/jyJngdNL1kidJy8+ZNnVSrVa8lc+fOl2XLlmuQ4rFjJ+Sbb77VdlOkaHGB6oJgwGK4IBideEo+uZPILcWwBckBuUZB+tzJLWegdo9loU4aNmyqiswXX3xlGTlauO0faC2IqaFYwB3Fte4u6zZscjFAMJJAhECkiQHBQDKCgg2FEOSib3B/bX8o/3Z3ujmWT2h+BK3ZPSC9+gTL3v2u8TnmnIULF6v3jt94iMqUqaBGrtnPJ96AufMXu8RXwI2FzrVi9VoLgefYTz75VI09v2YtlNIS0j9MA/OZRNyF58a7QwwJ3HIMaygyQyMcaCQGCeUYowIl0W6ABAZ2tjwWlA11j7gdhIkdQxduNXFhxHygSLMtZkoihcl+T/Rtg+IuXLBIPUNr167XNgv/Ff7v7n0HpESJ0tZAzvkBAR0FxPp+gidy85btioqWL19JSCpgF5Sj3r16CzzjlStWyttvH1f++aWPLyvV0MQKYRiBrt77/HM58tYRNU7szjgFRZwUhiFDhroYIEuXLtXJnOuC+k+OSfTaAACAMhohyxCZWVDE4Mm7C33X28dP69XsmxwdK1AO+of0txD4iGEjlDKGYUDyDXjnn33+uRpgUKtA+EHrEGJ9DIJNFhqdXG1jCxSel196RSdxc03zCYUVr+qRI4lpe+ln/m0C5P2Ll9QoN1QpzoFGwLgyb8ESpaFs2OCq6JtyUWhq1qqrXk9ok4Z+gScQ2o5Jyw5COGLUOIFTDkgED//jj69YtKGRI0bJi89ZVeS0AAAgAElEQVS/pN42TwHS5nrmMyY6Rg0D2q1dMNZf+esrluGDUct7AAE2wtgNIg5VrHzZclKyeEkrne7k6BhV6GnXiJeXtzz/zLM6nlBW+rTppVVLB8hmyiuYr6B6Lwi6v+3k50PTw3AZNixp9jfeNcYJiiyyavU6XVNkoDOw2cfLR7K/kc0jdZg2YGKeOPfjy1elTeu28trfXlMQy9wTnwkJs/U60BoB/oilSE4A0owHwP0YFEi8dIxHRvBMD7DNu8R54UHjvbsLwKk9UJn5hLaKfHrnUx2zoD0iZE7jfZnYCDypeLXchdiA0mXKWbTPL7/+RulC6BlGUHgbNHQYi9CB6tVrYCn7zJ8kEzF1iVeGbIeIv39bpXqacvisWaO2BaqMHjVGKZhsx6DBw00sipHYuGkyyYM3Aq8hz2cEjy9eQuMxNNsD2gcK7AVPAlCBsm4ELwjsBTy75tmVStmoidSt18CKGQZcxSNrspt169Zd6d6UA42zcpXqsmy5gyo5bvwkl+dnTpnlBLoZ47v16G0xFJg/YDUYQVGvWLGKEAKAQMuD5oTgxcEbaQQgN3RAmI4J6IgYk9AYvb2bqeFijmNc5Bk9xYZBpQLANrG3PDsGLcIY1i8kzMrEd+zYMWFuwePx7bf/UP2HeCGEzJN58uTXOGp+x8XFSzO/xIRKhCrgBQSYKFumnIxzggf0A+iORmAaRXjI+Gj2/xmfj8wAwWr1lDlj6fJVcuOmI3CO4DasVFAHsmUYgb8LlxLBfQQahmA5wgc2wcfK2ytQ2LIaUejJ5oKAtJF2zc5/J7YDagqCcmcCgkDIOnburqi87hSRO5/eU4+L+c0nSh0NdqcTTWRbw0ZNXYwc+/Hu36Ep2C1OFO/mTi8Mx/bq008mRkW7nyYHDhxUr4YdFTJ80YAAUsEmNj7Sg/o1c1DL4O8z0JrOQCYnuKhG4PMa42nbtp2SPVtOOXjoTd2N8YBrF+UJwe09YbxD0Vq/cZPGZphy+ITLD4IOAsEEbQTXftRkxzNBuQABBokhMNyvub8GW5ljr1y9Ljdu3tagKqgH8JyR+QsWSo3qtfQ7wV54VYyQJhcqGfL5Z59LjRp1zC4dUMz9WxtFpGfPPtLUy1d2OTm69n3273hQMFiMQL9gwLMLyop7+k5c7JkyZbXaF8d/fOWaHD56TDNMMcDyRxA8yHa4k5ZnL5fveDaaNPW2YjzYNm3GTJe+wjaTPpF20qVLd6sYvAHVq9cW1hCxy849SRckw+BPlSqNGv3m2EOH31bvB3XO/UI3IUganrFxa5tjUTaGjRitnol79+4phx4Dgj5qpFPnbpb3cs3adeq9M/tIYRsVneiWx2NKoD1CmsWFbgktPvv8S4mPny5tbJ5BjmVSQECsiVFCMEJLlynvYgzSl/FckCUGQ8YIRiaUFoO+me3m8+MrV2X23AUWwsU7gvZ3xBYcj+eFbEZG8OS9efiIfPfdv7T9Y6AhrB/QyIlckXQDr99yGwWOvmsUery3KB5GSClqkE3isSZMTBw3SK2JIsUzHj7iyDoIBad6tRpKiTVlmE/6IpOvQeYYF0c5KV0EP3d3Iqx4U9wDVlEIuA6yeMlS9brd+/wLK+CX7Vev35DIEWM0+BOjxASHk7IYOp1B9sz98F4aNmjsgmziRYVegsLfunVbi0LHORhgvDfirqCnhoY53jv7GPvswcrQUaHpnjlzTikeHAPY0Dc4VBOI8Pvd02cFQ98un332heAxNQGfeF2JU5o69f7GKmXMmzdflWv3oFcM0VdfeU1jNsy1oLuRmhfBw125UlX1uFy6cl3q160vZUslpvFMmDFTy4UugjAPYEgYj2WmjJksgEkPENF0vBgUKD9GvvzqG0mbOq3UqZU4dtLPMfLPnD6j1zDIMPxxzjegUjO/ZpInZ25TlMsn8ZgoUfY1EUCd06ZJp3VsP5isdXhsUv3ttSRji/04vqNw9UjGA0J/gQpqFyhXPn4t9T2zXduIMxbIfhzfoRI3b+GgovEbBR8DEAHFJyOUPVsc6LtB/DnWnqgFVBsdB1CPeCQT0I5xBDhG3KYRvFKG80/SHNK7m0yZzNvUo5F2AR0EwwKZOWu20ooMy
AjghYJuaMGMkWT8RIgZs3tY2bZy1VqZNNkVxPn+Pz9IXHyCyxg4a848jaV096AyztWu7fD4Ux5G0rYdjiDt2Nip1hiFvkKbQqB316hZRz66fEUufXjJkSDmwEErHhWmi/H8cjwUNOhtCPEdjZv6WFnMWFsquL+Disp+4rPIuIZAmxs0dLiVpRQwFaDCSMLMOUplZ/xDANhMsqJVK1dbDBn2MR9NcIuVYSwiSQ06qfHeAALinTBjvLkWn+gFxMmZGGHib8h4h3z99TcK0gD6IRj+0IEBeQhsZ7kHk9RDgfgCRSyPGtfs6izn3Ln3pImXr5ZHOeh7cbEOmizGht0AGzQ4QmY4aXF60cfg3+82QHCfQucoXrSETu4O1HG5uiOxuEDPydCChWaQG7h9L7zwktIsQD9a+7cVr6ZeQsCjv387dd2SUWvMWEcg3aRJUTrBQWliMMTtSsOHdsFvUHK8KI0agsI21utgpYcPilAEkEkQBIHc96DuCGU1a95SXf1YmigH9mwL5t3gZQEVASXF/TcjYY5Oiuynga1cvcZjOjvcZiD3IHvkmybLiX8rf0mdKo3GCRBkTprXokWLe0y5Gjt1mt4fVI45c+ZbigXc8ixZsinvHzfu+EmTNcgOAwIX6PPPvyh79uxTtzaBbS8+/6IO/kz+uXPmkTy58qiCixue+AE6IVx/At7hbKIMgHISzA76xkCCcka8CtQkPEwYLwSLkdElXZp0UhyP1tffaFYy3gcBlAT/Qzng91hn0DreGgbL8eMnyKLFSzQ/O++RCZjjQKpoK639W+vvU++eVmoIeenHjB2vbWDEiJFaZ6ASBMOTmAAUA4QEzjzxJLRJu2BkEQBmuLz2fXzH7YlnhvNJbQslBaE9UH7bNgFKo0GRQtns3z/MCrA0ZWEIMojibQJpweXu7t2ibkc7g0NRsNzXkCAmCvc4yr8REgCgyEPlmz4jQbZu3aHvgwBy0EUCWEGHDe1k5qy5algyGYC4Q0lkogMcMDx9yobfjIG+cNFS7TuLlixVAxT01y7UDYGXvGcCI02edFBAPEaGJmA/h74IbQx3MEg+oEP7gA4aL8MkTdunTFzOBk1s2aq1xSUm0wf0hIkTJynFixWdTV1B3erbL0Td7gQGmnVPYqZMVSrCFudCWgR0g35D2wIJO3PWsR4JMSCv/u01geawZdtO7QPExbznlpKW2JORo0bL66nSqKfVPB+BtvQb2mts3FTlYgOQQCEg2w5UqeiYODXMWEuB43jXZCMhKPylF1+WcU4ONhSnChWrCO8KpSN84BALcRw+bLiUKlVGUfq4qfES3D/cWreDxBIF8hfSZ8MDxSRbolhJDQhfvnKtUlzXrVunNFOyGrkLhsPrqVJLfLwD/YM2QbAi/RNKE/2AhAC490PDBws0VShRxPtADyKGACGDTOWq1TWo21wDJYBEIUWLllC+MRSR7kF9NNnFpo2bpEOHTpZRas7hEyMADyk8aeKkAKVMBjSCfaF/EbPAuBEVHWst0okCRoITAm9R/KHv2Q0QEjtkyvSGC4WL6zG+glzuP3hIKXHMB/QHsu3xnNBCAESIJQIxJiibe9+3d7/2HcZBQ/+zPwcULQwD3juGrkl1Sp0R38h2jEvojSjPL7/wkqRLnVb7Fv0zZ45ckiVjZkmYPV+KFCqsx2PYwz1n/QbOB2m+fOWajuX8xqDDW8/3zBkyucwnN27c0rHI/V6Zlznez8dPDUkCpLknABK24/XkfvCE8xsqB6ATMRP8NsHx9mfnGnh8SZOKJ4m5Cwpccvx/PPJQrk0d2cuyf4exQGpyvLl2YZwFqcbrjiJpPHAcA/BHXBieQRIAGGqL/XzGoqKFiyoVjHZ965M7GnPDvAdNcuPGTfLq31IJKX2JDyLrJv0GABXFHFZF+nQZZMCAUJ3rWDgWoyUmxhGrM27ceEtRhcefNWsOpYSCmtOm9uzbr3WOwfDyS39V5RNltVKFyhoTQt/FuC9csIjky5PPAnJpX6QzTpg5S6JjpipNjueCrgTowNoTjIsE1oOmGyFzXd069TTAGf0KyhhzPwAm9KJfbH6kNm0DXBKymDKYN5jbAEuIRSDAGjAKox7QkEQIoPvQ17r36K19Ej2AGFJjkJmy+MSwR69K9drryqzAMEVHgeqKxwrvE2MwcW4k/cEDlS9vfo2Tw7PAeVBGAagWLFis487IkWO0TbGuVJOmPgpK06/bBXS04t6OnTipfc3H21c9RBfef1/nP8ANvPeAuRimzNc8Z/SUqeqN4Z4BNIgFJgEC4Cb9wZ0iCOWccf/ZZ5+TfQcO6nN6e/tpnBAMEoxS9Eq8mBgvUK2ff+4vEh0dq+31hedf0NhjqFVQt7jGsGEjVO9t376DtjuWenjnndOSL09+mTtnrsaskCSiUcNGypIhtCDVq6k0/T/tP3fO3Br7ZeZM+3v4s77/bgOERoXyCg2HQTykf6gqCGRiIVAQCgBoXJs2AVZAOBxegh4J/gYFx1KHB0sGiCNHjyvi0b17Tw3ewfUL9xUXOihMv77BGltChoipU6crJcqkiIS7OW/eQlVMyWRiDAoaMgNpQEB7W/YAUQvXx9dPEWmDKnt6EdNnzNTMTfBa7cIkTcMxaIR9Hxkw+vbpp/cLuvjumXMSETlMGyrGydnzFzTegMnfUAHs5/OdeBo8DCifJkCS7SifICrkP2fiRhioIoePlA6BHZUvCd+ZRdQGDRos23fuVl4q3iEGZQZeaD50ahRF3oX92eCw4vLmfLKTIEysUAXITISSi0C1wCjhPUI3YVLq0rWbKlhXrl3XiZzfdERDAwO9w0iE101Hx/XOhNqvTz9d+O/uZ59rEHjXbt0tBIBBiAWSmBQQUBjqjWQEZPgAsSVeAJcpSDHXtgtpDFHu3APqzDHUYVCvvlpXGENr1yUGVmPMYSCjGDGxouhwPeOWN2XwSQAykymuUBahpL6IbzICMsb9BfcNVt4+cQ3uAj0i2g3Rw0jD8KEtwDlFCBSn3nm/1I1BQdmHsYanjExJKD0M0EOHDtOkAfbroWDgLUE5xphr0bJVEsQchL5//wHStUtXDcqHv43ET58pi5PJksLgC0WOdgVdCeWfwbNnUC819M+ce0/ji+iPJuMd8VlQpYyAYoIOg87bPVdMdDwL+0D7jIBIEYNhX/CRyaR+/YZ6vL3/YIyhuNCv4em7p/OmTGJWWrTwl65dusmU2ES6E8opwaMkeMCLQJwPQj3yjvx8m+u5bCMhQMfOXXTcI04JBI5FwkiqQWpUhPJw/TOR2SmgeDTIDjdzJqidI3mHnuD8FxU1WZ+BdROYmE3qTp4TxcXHt5nlebCfx3cmNKgSBG2iGIPoEt/FOIW7nwBsO1JGNhWy+ZggWXt5gEqsj2OEoGuQth7de2j5gBe0B9bZgepi9zCac8wnCiVUXuodj4Vd8KaieHXs1NWlrXMMXk/uHxrfLCf/2ZxLAgWCng0KabaD0rdp3c7FG8M4xbtA4cS4QphD4LmDUJt2AtgCIs94544QM7YCtmF447EyFDgMdQzmvv2C1bvMfAb9KDx8
oPQPGaDZ1bgeiCbjLqmVUVQ6duqsqaTpo1Pjp0tY+EA1iI8eOyljxozVfoXxgQEOZZaYR7uHCa8r1FBPsmrNOh3b4PUzvkOro00MHjRE+yfKIMAMBhNjLhmImIt5BoA5dzGedxBbGA68D/u84n48MVH7k7k392MZh6Dl2YVxlvksiHFwcISOY/b90EFpL0YPsO/jO22ddsO8CF0YLy/oO14N2hvjROfOXTTV7527n2m9Mq5iYJv5jHdE3Ag0W5OpknGle48gpaNi1BmBkse8jW5jjGTmO1IJM2+TYpo+0L1bkKD3EG/IfEA8Jv3VtD/KA2hDGccAMML8QgYw5gXqBuPcPqbMmjVXF1WNiIiUfv36q67G2maMPSZFNmWhnwxmvvBA32Q/HlDmQAxIYoSQe/c+13ZCJjyTdYuxiHmZjJUYJZ6ETIB4qAIDO6oRRwYyKHDoehiEzA+hA0L1eSdOihbi7XoG9ZTjJ0/pmMccg0F49vz7Ar2sV89emnrX6HN8tm3XXgEi+xxJkhPmehgVAK4IcWZ4DZhjEEAv3hWGgz21NEYd5xLvx/2tXbshSQwTnhttfx07a9ZNEhpxn4xvJE5AYOUArrLANc/DWI4sW7ZS2wPzG/EzLCDZr1+werau37glAPK0B7NqPRlNYQCwcDLjBvEzAMuEIDAe0M9oeyyoS9vGMH1c5HcbII/Lg3Af7dt3VPrS43RPj+u9EOzIBPH/QUCGtu/aayG3j/MzM7ER+EqWOPc1Ix6X+8YQA81/lEK8iEmp/SjLfVLLwrDHm5Iiv68GAIcOHExcL+P3lZZ49j///b28fexkEgMk8YjH4xvK98FDiUkzHo+7evi7wGAcM26SiyL48KWknPFrNUB8IJ4CQw36teNT9qfUwO+pgafGAAHpwi3GirIp8us1gDuWFL7wL59WAcUE3YWPD3f9SREyqRB3YF8k7XG5d7weQyNHuAQ2/tZ7g+KDYkGGovETJ98XHf+t13gSz4Pm1tyvuXqVn8T7fxzumYDW27dvq+d3/wGHl+pR3RdezeCQMIEC8bgKKCko7QcfXkri/Xlc7/nX7gvvGSmv7bGRv3ZOyv4HrwE8V336DZD33rvw4CelHJlSA7+jBp4aA2TBoiXK6cNV7u5u/x3181SeCp+YhYayZs2u1Jun8iFFNEd71eq1lI//JD6j+4rMj8MzkAPfnnHm99wTgZME28FBJn1qijhqYN+BQ7qoJush2XP4p9TPg9fAjBmzpGKlatbCfA9+5q8fSbpz0hA/zsLieq/89VU5fsI1E9fjfM8Pcm/Qdg198UGOTznmwWsAup5J1fzgZ6UcmVIDv70GnhoDhKwy8BjJ7GJPO/fbq+bpPRPeMsHlxIEQQPa0yp07d5LwxZ/WZ30Sn4vYrIsffmhlQ3kSn+GPuGfiJgiARhkgCUGKPHwNkJSBVYftqUUfvpQn9wwSXsDVR2FPkZQaSKmBlBp4HGvgqTFAHsfKTbmnlBpIqYGUGkipgZQaSKmBlBpIqYGUGkipAdcaSDFAXOsj5VdKDaTUQEoNpNRASg2k1EBKDaTUQEoNpNTAH1gDT40BAq2IdRTMYj5/YJ1p0VAjWDgqRTzXAPnCSUWXIp5rgJTAJE5wT+NpP/p+++zHParvyaUpflTlPy7l/JYxwr7GwOPyHL/1PngWs5bHby0j5bzka8AxF32XbNrv5M/8bXsYZ+3pVn9bKU/vWVCyU6iMyb9fkoGwYOH9xoRffnGs/ZN8KY9mD/fy/0HQjx5GWKPlfu/nYcr6Xx37IPrxU2OAkPGoX0ionDz5vwm6GzJ0uJW3+X/1Qp+k6yxctOSxCXBH4VqwYJGuL/Jn1qFdiT1w6E0JGzhEF1q03xMZkFjAa+DAIb95wPnxx5+E1cxZtNKsZ8DglZxBQ7D73PmLxL5y85kzZ/V8+72xVgAZUkjyQDD65cuX7bs9fmdl9FOnTsn1Gzes/YaXjlLAPbK6eHJCHv0f78NjZ3Vakio8iBw/flK++eYbXReAFZ7tue+TO59VkBcvXaHghv0Y7vn06TO/mjEmuTqnLBQjMiqxGOP169e1XvlNnbDWBIYS6bJZ1JOFsU6fPu2yurv9fjx9p1bIbHPq1Dty69Yt6xBy44+bEOXyTqydHr5wn6xZ8SCrgHs4/f/dJtY/CAsfIgeda8T80RXAGj7LV6z6oy/zxJbPAseDh0Y+suQZv6ciGA+WL19prfT9e8r6PefaxyWyLfYLDk2y0CPj6urVa6R3n2BdAO9hr8d6GcePnxDW4fo1YW4hW6VZMJL5ijmMRV6NcM9ff/21lkeMHHOLWcPDHOPpk/GUskz2NMbFn50G1ZdffilHjhyVW7duezpVt7FYYnLCPTmXxkruEAUHjh8/LleuOFaG50DW7mHB2gcR1i85/NbbYlZwN+eYOZ6FWn+rECP34aVLOpex7gsJNs6dO6fzGoA+f2RKZXFnrsOcl9y8yTyG3mCE80aPnWgtrmu22z+fGgMEy/nSRx/pwnb2B/yjvl++fEVX+3zU5ZOzHeXjSRMW6rGnjSWjxtVrv71jPMrnZyG/QgWLSKMGjR5lsQ9VFiu8f2hLeUygMYtdsWq4XRhY8+TOK40bN7VvfuDvLEgXGjZImjT1FtbWYLGxrVu364JR9knHFEi/GTFyjEyJjVeFFyMJZaZa9ZrSvHkrXejKrKKMMsvqsFUrV5PKlaved5ExBikWB/Pzayn16zdSY5QV3sl8xcKkrHGybsNmXam2QoXKuqqsuSc+mbQGDRoi9eo2ENpScsKii6xWez9hESYWgKxStYYuHEg9bNq8TQdHs3iap/NZyKtTl+5y4MAhy3gDaZ63YJEussriqtWq1dTFqzydzzYMwM5de+jiXu7HsJhl5crVJKB9oDRv3lLq1q0vAe076sKD3Xv0km3bd0lY2GApUbykLpDXuUt3yZ+vgK7u7F6W+2+SSwwIGyjNmreSBg0b67otR98+rot3sWgjK96zkOKDGJHXb9yUVK+llt69+rhfJuW3hxqgfjEc6eP/CyHV+N27dx/5pd4+dkIN30de8B9cIO38xIlT1lVQPO3girXjT/jC2Ef6+6pVqv4JV3dckhmHhYqNkAwEQMksrmi2swp3uXIVpEzpssL3hxFWKW/eopXUrFFb2gd2luO/AgxPiZsuO3bu0TmIMXloxHApV66iLhJ94tQ7uugx12eR5EaNmkrZMuWkbt0GsnPXnmRvC32EOcix4jgLqS5XxX/lqrW66jvKNtfRua6Fvxy3tRkKZZFsFrlkocb7SbduQbrQqqdjWIcobOBgHYPr1W+oCwVyHGnCWbB01Jhx92XtMPYOjRwplz66bM1B6A0jRo7W52KOZ1V6DJrkhHm7Z+9+csWDodK3X4iUKF5KevfpJ02aNJV69RpIj6DeOhex2CHvrWWr1lKhfEWd83inzFkseGiE+ZRFHKnn6tVr6WKwtCUANBZHZC4j3b4neWoMEE8P96Rte/+DD2XajNlP2m3r/bZo2Vr27dv/2N47q9q2a9fhT7k/kKTlK9fK9//94YGur/caEJjEOPm
1k3fs3CV58xaUmTNnK/2NFV1ZzdXL208HQU/nHz7ytsTGJa4ovoEVq32bK9qCsj1r9jypW6+RZdjj/SA97KpVqwVvjSfBoxLYsYsaHSjgUERAw8IHDZWBgyMEIwfk6qOPLotOfh99LOXLV3JZCX37zl068RXIl19u3Ljp6TKKxBQuXEzKlS0vrOruSTC+cuTILZMnx+gq9SgARljZmQUfk5PpCbNlr23VWBZ1q1GjlnTp2kMNC9zoGClNvXx1m6dyTr17Rl566a+6Aq19P4bfqXdP6+rJoFCsFN66TYDcvHlbmIBBx/btPyDUZe5ceSRuarwabQsXLpa0qdMmWfHbXjbZ7TBkoibH6PnU/zd//7sEh4QKnltjiC5avFSGjRhtP9Xjd44fM2acBIf097g/ZePTVwOsQh07dcYT+WCdOneTxYuXPrb3vnbtOmnl3+ZPu7+Vq9fJ3XsPlv1yx45dmib9YenUTZv66Mro//nvf+X06bPSpVtPYaV2T4KSumz5amsXgBjKOethocjGxE5VD42hIK1du16qVq2hYy/efk9y9OgxKVCgsEycGKVzzD//9S/1JLdp21569e6nc9Dbx08KxipUIRT4mrXryQcfJN7jjl17JUf2nNKqRStPl9BtrHCeJXNWaeAB3OR+Azt10ZXZcdKzkn3x4qVcPEJdugYlu7gmc+TosRMEL7yR8++9L/nzF5To6Cm6YjtzEOsRFS1WMllAcMPGzfLMM88I9eYu02fMUk8tRunQIRHS2r+NZs67cfOmjBs/Sb03AJCly5QTFqmkHlmlPnXqdLJvv2NF9RkJs8THt7kAvGBwYTguXLDIuhTlAHB6kkdqgMTGxUtAu/aybv0Gl2sdOXpMaQ8LFi6WHTscy83ToPv0DZaYmFiXY/lBQ+3du5+wGvDde5/pfpBiY0UfOvSmLrIElcAIiN/uPft0wjXbWGwvIKCDxMZONZuSfP7w40/aUXoG9XKx6i68f1FTuPJiQkPDZeHCxAqlkF279+oid6bAjy9flQGh4WotHz+ZiL5AXzhw8E1NiRg3dZrSgDy5sLbv2CW5cuaRqlWqyfQZjsUUz793QU6fOSuf3r2rFibXAlUbOGiIWub2BnXozbcE1AEJCuole/bsNbemnyEhobJ2naMBmvUl3nn3jO7btn2H1ieeArvs23dAOrTvKFOmuLoK2f7x5cvqsh01aqw+c5HCRaVNm3bqKqQM7n3/AUcDxY3H8yEMaH36BKuSZb/W+g0bpUNgJzlz9rx9s8t33sXFSx8LnTkuLl4iI0e47OdHbGy8dOzYWVavXuuyb+OGTdKhQyfdRvsMCQ6RefMXWMfMmj1XRo0eq79v3rwll69cVbdt27YBst3ZZs3BR468LZ06dZU5c+eZTfoJxYc00ChrIO7cL9KnTz/JnjW7BAX1FrwACAMJCKNdaKezZ89VVKd7957W+RxD2wGRSG4Qx1goU6a8JCTMshep3z+89JHETp1mKZ7mAFyucfEzXNDBuXPnS716Dc0h+rln3wHBVW2kVavW6rY2v90/u3XrIe0DAt0363vbuHmbXPzwkrhzfXv16pME2cdr1KxZC6UoJSlMRNvK5Og4NVSWLV2e5JBLH1+WkqXKyP4Dh5LsYwOu7THjJuo7cz+AiSlh1lwXdGrsuPGSIX0m+crNQ4lRlj9fQR2Y3ctZvW6DDB4cITWq19Qx0OxnYrVzZN98653pZdsAACAASURBVIgE9w+ztuEFxUuE271qleqyxPZ8TRs3lQzp0stPyVDPAgICpUuXbuZS1id1vmHTVm3bZuPEqBjZtHmr+ZnsZ2xsnEyKmqz7oQcuXrxEv0NFCw8fLLt2O5BIPFeMl6BuCGONafPzFyySoUMjdTv/+vULEeYEu9B/evbsrWO/ffFQ+sq16zfk3XfPSNduPVzGavv55juIXGCHjjJ/wUKzST/pu3fv3RPGm5UrE5WewUMiPFI0WRGacZ0xFa8GQr8+8vZx/YRi0qVrd7l2PdFIxlBlfvjC1mfOnjuvY+zo0WOTNdwpe+zY8dKjR0+Xd3L12nU5feacXhtvn/t8xnjMeGsEjyFzBPOrHSHmvukLKAlQZPEw3rqdlHrCfF2wQCEpW6asTJzkeOd4WfBeQpVhjKEtoWDhgaRuoLgawftg3v/AgYNd6pljqIPZsx2Gv5mLTp91PB/nBnbsbFFxTJlnzp6TTp26yIgRI80m/UTRpG4BMqKiohXJLVa0mPj4+Mo255zDvLhzlwMhpsts3bZDzz1x4qR07x6UhHqE3tC+Q0edt10u5vYDVBpZtGixBAcPEMYru1AnHdoHCu3eLnv37hPmFWTrtu0KTsycNccan5csXSb9+4dp/2e+/+DiJR2rmNdWrlpjL0qV027dgxRgse+gz5HGe9q06Tr//MMJ0IwfP0kyZcikcwlzMQLKDv3J7o1fsmSZ0i5554BJ9jFv0ZJlev9Hjr5tv6TL93JlK0j7AAfgx5yFom3vz+Zg7jGodz+9B7PNx9tP+675zSe6igGijh07rp54Uk0nJ3imaWfugs4YP22mxryY+dkcM2LUOFm91lV33bBho7T2b2sOSfLJeDph4mSpVLFykvXGuD+AWdqzEUA46tsIXha2eVoOYdOWbWqImWP5rFmjlvj4+Nk36XfeZZrU6axx2BxA35g9d7506thZvUb3MyQZV/oPCDWnyvfOGOfLl68qyEZbQH76WSRjhkwS3C9Yf8dPmyEVKla2zps4KVrQKY1gmEQOHyVHPbSXR2KAMJEuXrpSxoydIKEDwiR37nyywslJ5aFefOEl2bRlu3Tt2kMHqx9/+llGjRkvQ4ZGSuSwEdK5UxdVGpl8Pvr4Y2nUxEst1xbNW6kixwOsXrNO8uTOp9QUJi2UIP/W7fQZv//Pf5TKUbtOfbVy2Xjo8FsyaEikDB40RAoUKGQp9aZS+CRXPO48BoOIiEipXKW6ro1BxTMA58qeU5avWqv3WLRIcaXMcN606Qny/HMviKGmMPiDJGJtDxkSIUWKFBcaLsLk/Owzz8q4CZNkxYrVei8ok+6CS6uZX3MpVbK0LF22Qs5feF8yps8ouMg2bt6qVAo8JIMGR0po2EAZNHCwlCxRWq1OFLpX/vo3qVihkqxYtVZCQgYItJaLFy/pZUByuTesZpCIO3fvSfy06fJ6qtTSuXM3Wbdhk/j5NhdvHz/LgKMBsQ9rt0HDJhI+cLCWdfjwEbWmJ0VFy9r1m6Rixco6+NWuVVvatw9UpRDFL/XraWSMcwBAIX3p+Rf1vletXqcIev8B4VaMw5q169S9h3LCKtu8DyZIuzC4R0SOkKxZsqoCMn/+QnUX4mI1wmTZtl17VSQaNmoiQyOGWUr8ihUrJTDQYYBs3rzVegZz7rDho2Ry9BS5efsTfSaQD9D/sLCBAspuJjDqZVJUjMTETBEfn2aWYkine/H5F6VLl+4yZ+58qVKlmk5ulD9t2gw1QKDZ0FZAXkAshtvQZ85p43xuaE5MtggG7NT4GYroRQyN1HZgp7qZ+9+2faeiQl988YXZ5PLJmgDusmr1Gu
naLchlM5NFwYJFhAnkjFPp4QATv4LBjlGw26lwupwsosYM737WLM+ePBQY/twFt+8uN3f6l19+Ld5efsr1dT8ePmrCzLm6uUvnrlKtWg3rXZtjMQJZYT1h5mxFqPbs3Wd2WZ+8yykeAAqUN96Jke+++5fUqF5LwsMHmk0un4EdAiVdmnQC/98IdAEWFaTuc2bPpf3P7HP/xEDv1qOXcnDt+27dvCUVK1QW+oiRenXqSbY3snk0QFD8Ur32erIIMHVv4oIoj/GK8eLXBACmZImSsnT5KqWclSxZWq5fvyEY67je27TroNQ61hcqkL+glkuZQT16yUsvvKjjPcoTAEvr1m1lYlS0jBo1RipVqiprnMgcnGFfvxZ6LkAWfRhDDfoMShP1/9bR49KuXXupU7d+kvgprocSxbg0bPhoGTx4qOTLW8ACUNat2yAv/OUFfa/Dho/UOYTjY6dOFxTl0WPGqZHQtm07wWgHKGreorUq4e3aBig1gX6AcZEnb36pX7+hTu4AJ1DduFfKa9GyjVSvUVs+dYJnKE+Dhw7TcYl6A+E1aK6pdwxNDMfGjb1kxPARUrZcRWuxw+rVakra1Glk9twFEjMlTgB7UBgRjMdnnnlOGNMQ+sXIUWNlwoRJWr/FipXUmDL2xcVP13EvLHywzilQgZh/3QUaJ3VcokQpmTV7jty89Ym+U/r9nv2H9B2dOHlKoiZP0fiAMWPGSuHCRdVoQKF8I0s2KVyoiNbN8OGjpEzZ8pbByNyGkUU8UafO3eXjK1dlzZp1Ol+0aOGvtEwUbRYoNbx8jEbGqREjRql3FoUbYaz660uvyIAB4bJ77wGd88aNmyC+3r7SzLeZYEjxDvLmyS/du/XQc2hzzMeM02vXb5SmTb1VwcaAQRgj2nfopG1w+MgxCmC6jw8olqDyuXLmltZt2ul8jVII1cRgAgBaUFiHDx+h6DAL//7H6X3dtGmL0izN9Z555lmJHDZcr8+/8ROiFLS4+9kX4tXUR9KmSaceUebiQoWKWnPO4beOqF6BkcG1zH0uW75K2znbli5fqfNky5at5aeff5GVq1ZLvjz5lNqK8QZwSh8M6tnHMoDQtZo1ayUzZ85RHcDXx88ymnmuufMWyvBhI1Tneuuto9Z927+wnet4eXlLXNy0JOOaOXbm7Hk6ppjffGJQ/+2VV6V5sxaWPsf2v3/7rR4G08Lb28/jPMIBx46flLLlKliAgZ5k+8d4bLzAZjPgxoRJ0Unuc8WKFdIiGQ8IBtHGTVu1XsuXreAR9OEY2nWDRo09ejqgKUHNwqiyC4yB4SPHCkC4EYCIXDlyuwB1Zl7+748/qlHQtUtXc7h+AkBgUGOEp06d1qKyuRzk/MGYARDoLu+ePiMNG3kJIADy3x9+kvTpMkgf57Hcw6AhEVKyeCmPACjnAIgBELjLIzFAQJv8miW6qTA+nn/ueUUuDx48pBWzfftObcRw3FH0GLCN5MyRW5F2UNzNW7bJuPETdde59y7oBMBEB+8NLiJIIsKxxYuVkNuffKJoDEhMF6cyReAStAg8BwjxCR06dpXvv3fNPEDwUc2adfQY/hUvWkKmTo3X+/Tx9pU3Mr9hIUvdg3qrIsNxnFe6VBk5depdPbdf8AAdMExBvEQmWgSFNVvWHLLUiWBOmz5DkXBzrP2TyQTOOfLZZ59Jvbr1lIt359N7iq7gSRk5ysF356XXql1XXYi46ho3bCxZ38hmueswGlCSEW/f5gIPHsFaZdA5+vbbauDYkVKMLt4ddI2qVWtaSjcBSKlefV1iomO0IfMOOnbqKqAJxgXarHlLefNNBy/w9p07UrhQUYmOjtFrMqClT5teOnV2dA4m5MJFijtQiF9+Udfnlq3bHfcXP10ypM9o3a9uFNF3AoeTQd+0D5SfLG9k1/cA1QgDEgUIOfzWUXnlldeUv89vnqtt2/amOFmwaIl6FQwKYgY3Plu0aCkVy1e0snhhQBGLQJubnjBLNm7aouXMnTtPDT8m3UNvHtbBYZET0b185ZqkT5/JUp4Z5I85EQRiQWrWqiMTJzrQRQbUqtVqWQPqgP4DhPaHoARFDhtpDYzly5VXZdrct3kgDCWoOgQsP6hgJKBouMvZc++pMYsh4dXUW/7+90TjhQByDKnkDBDQvdy588qJX+H82q9J/dH2CTC0C+0LhYxgQ3f58NLH8qZz8rt27YYqbO/bAhYBOerUrivNm7VU5WfgwEGSIV1GwYC2CygwiL27YKwysRs5c+68vPT8S9oHzDb7Z3hYuI4XPIuRBYuXWaBFVNTkZCcyjmeiCOrVN0mAHyBJhQqVFEAA2U5ImKn93B0JNdcE+MiXr4Awdj6IQH9AYf01mT9vgWTJ/IZ6ZDkWTjGoLYJBHT4oQscNfs+eM1e8vHx0H56TVK+mUtCGDaC7gDd79jiMQTzPA/o7UDeChc149M7pM+oRvnb9uqKDeYmLatREy+Sft08zpR5YG5xfNm7crJOl6R8oacxFxLywejljJEo17QN0GXS6foPGVjEYCNApMNRQsiMiHYohbbGVfztF9hl3oCGCekIxQcqWKS/vf/CBKjbRMbGqMKNw4m1hrjMeAVa2b922Q5K4JgyHqlWrW4pRrZq1Ffyg7B7dg5R2ZzzKkcNHWu/swoX3pXy5irJlyza9DyZ5vKdGJkyYKEWLFlfP2smTpyRf3vyWJ2vd+o2qbJtj7Z94AFs5AT5WHvdv1Vqvc/X6DS3r1DunZUhEovHSqLGXYOCBtHcM7CQZ02eyPI8dO3ZRFJ7yO3buKus3btJL4SHAs0Nii9w5c4u3c8xjZ5XK1WTSxCj1lDdp6uPiYWEO6B8yQBF+QDvGI6iQUE71Gp26yPLlK/Q7xna1qtWtNkY8V45sOSwUGW8VRtoHHzhoLrwrw0DYvGWrgnTGU6AF4gH75Rf16DNf9uvrQIEJ5YMag2cNEKJmrbqC4oagu0BZMaAs7wrjwAigHACWmUuNtwFgF29YgQIF5bbTU4URBkgJkr14yTLLuwJ485dnn9f5H9Q7b558MnaMQ1cAvMuYMYvVRvBwr3IyBJg3GzVuKmFhg/R2zp49LxUrVbPiOaMnx2j9GYWdmIkPP/xIj/X18dW2BUDsSdBXoP40qO/qUbcfi/fQeMPs27kP3nPmzG9ISLAr9VMNEJ9mGhhtP8d8h4EATdUwQsz2+31itKx0Y01wPN4t2r4nIb7ywEGHzsO4iy4JGO4uGN3UA0Y5RqBdABh79w2WAwddvfTovOiwjAtGABnz5Mknu5xjp9nOJ+MZ8TZdbZ5vdKEVqxOBq/6h4RI+cIj9NJfvjBWMx+5y4YOLUqdOfU1IACjAnJklSzbVscyxV65dlxf/8qI+pyeAFLAC4N5dHokB4gjU6W2V/Z8ffpTcufJKXKyDutOwYRMLzeEgkCYq14ivX0uZNy8RbSQTDBMDyA8oGMFCCAhhghNZxS1Uo0ZtOXvOYZW9e/qsBIc4FImdO3YqGm/K5xOr11iLZjsoFJ0HKgaIfprX0+hEz37cd61aJfI04VTbXx7UriNHHMpM85b+MsNJm
+JclH0GINKsIihRxn0I5SzASQXSnbZ/8PFw2ZkUbcOGjRCUX7vAYWfQJvCpYP5C8tYRhxt01MjR4uvbwjoUDua4ceP1N8FVObLnEh8vHwuhYQceI7jnRqBGTZgYpdw+JhQ7mst9eDXx0kMHDRqqx5nzCFDjHa+3Ue+6du0ugwc7GjuDZdmyFS3OIGgviDXvlYGtTt0GsnzFSi2ODCG1a9U1Rbt8Mlg2a9naMgrZ6fDARKuHDFexXUC3Yqc4KH7QLUAYjUAha9zEW0CzKBc07aefHXzSuNip1uTP8VC2GjdqKtt37lE+Z2BgZ+WRtmrZWr0BnI/4+bVQVJjvDDQM9Bs3bNQJm2A+M+izH4rh+IlReh6oerANhSa+AsUZOXPuPVVm/P3bSrfuPVUJw7Njd5dz3MDwQVIgf4GHChacMT1B2tmMMndUdtHiJYq+EQRNFiaE/sJzGo+QbrT9YyDFA+oe0Gc7xOUryO+qNes9Zsr45M4d9Xi+956DzmM/EeQWGhBuXWiKRQoVkWE2FBFvKgF6q22UBe+mPhYtwJQ1NCJSB1Tz23zihUuYkThgoqjgBaWfGTGTMr8xQNKnzWAZgGQW4X2hdJCFivEhXdoMFmhhyjCf9IlevfuqB9Zs4xNlpGaNmlK9Wg0hCB3PFAY9QmwRysa2bdvVe8SEowZgrrzCePggQlnjxjva4f2OnxwV7TIetg/sJHgRkLXrNsqAsMHWeDdh4iSljbHv5ImTGqhvxnDGWRRtIxgckRHDzE/tiyguxGuhlBqjkrE+ykkHMpOzJ2odClqrVv5WeXwpU7qc9HfGrzAuUkdG5s5bIDVsIFT3Hr1lvBMA4xgoZSgQjRo1Ua8m4wTSp19/gXKMAIIREAsdC3nvwgcSHBKm9XH+/HlpaZtH2M9chOJhF9P3QGLnzF0gmTNmltBQh7cNJYh50EhI/1BtW+Y3SurKlY4sWHgK7NQT5p069RrK+fMOg5S+S0YeBGMQb6s79ZZ9tIum3r7WeDZjRkKSuAXQfpQNKIY5c+TShBWcO2/uPKldp55eg38dO3WR/k4jE2prqVJlpUqlKi5xWyjkpj45h/fYL7i/YOhggNiVSfSHypWqaPnTpycIHh27YJDYwQPmUahWRmrXrmuhyJRPPMGF99/X3XjgDHCH8Qpw5EmoMwy0/TbFEQMKoIMMe81a+LtQsgA0wsMc73PrVlcDBCps46Y+alQwzkDPNbrKsuUrLG849wG1qn69Boq8wwjAuOveo6eyBmjbxAMgzZq1lGPO98xv3jtgAFKlSnUXZZB+HDIgTPdBt25loxwBtjK3I9du3NQxCr0Ij0nTJl5CWzSeHT3I+Y8+TEYr6EYELeNp9SQo94AqnuSHn36WKVNi5aUXXpZ2AYFWewHUJubgiy++cjnN9CF0rMyZsrhQXl0OdPtx6dLHyhxxpwVzGH3PrgeaU5nzqQM8QlCTMDxTv542Cd2Q7ZOjY+Xevc9Vh2jcxMtl7iaOcuTocbJztytl/uOPP1Y96KaNrgUwWLBgYWv8x5Sxz0Fly5ZXyp+5R2Jv0FkZ76Ab9u7VV0HK7/+TGAdpjuUTlkyvXol6vNkH2IeXu2HDxqpD0X+gZRuBaYOH94svv1adEyPJbjhxHGySadOTvudHYoD06d1P+f/mhr786hspVrykgBB/8+236jr/4GKiK4njoFDhIgclRxF82+mCun7jljY2Jm6UHJRTgioRjAC8JwhZqJiULjonBDLK9A12IGm7du12QRj0BA//aHBTpkxVZRqkpGkTbxnpVDCIP6HTGiF4CcXPCPSbd95xeBVA46A0GcETUKdOA0XH8E7gqYBegETHTJG2yQRDE6jT0tb5mVjsnGk8ExERw2TYiFE6IKOUmkAgBng7glS/XkMZaePL0iCgcjRo0FhpEzw7isCy5Q7Fn3sj8Gv5ytWyf/8ByZ49p0sHpiF36uxQ8Hv26uMSP4GhgsvcxPdQFm7LEU4FhUm8dOlysnGjw3OAslW+XAXLxQ7fFCocin/deg3URa+V5faP+JCGjb1c3Ih4ejBaVJGokejN4lSQzLg4R/zPqlVrXDwgDFZY5aTDpYMRjG2EwRJ+upEVK1erq57BNH76TJ10yazC+/jhh5+UeoJXCYODdmmEOtmyZauQSpbgM7uxR6DkFKcCExzcXycSc97CRYtV2eT37r37ZUbCHA3Iu3f3nkCBwqAjGMwuKGq455NT/Imlsg9WnDtr1hxp376jVQyTHjxdd+OGicZu3DG5eUo4QDvAWwbiOmTIUKtc+xcGbtBKBPR47PhJyWbwwAvIwOeephfeLBQ1xhCoWygq3l4+2r7NtbgPQA5SFRoZPWqMRcMz20Bz6OvuQryNO40sKKin5MuT3/1Q/U2cBhQVI3goaH8YmvQX4giIAxoy2HO9EL/Wo2efJBnw6LctWvqrkUUmEUMV4TogprShJk28FLjYu/+gENBeqEBhGT16jLkVl0+QfP6MQDN7kEB0lBeSExhpG9DBou/gvRkzbpK2c/ZPjYtXeiLf8ZQRm8Q7Q6AQYYzTdxCADZRN5B//+KcGyhPvQn+CInTsmCNmqlKlKpZiDQrXpIm3FWOmJzv/Qe9h7DMCMojCNXKk4xr0UWII7YICx1iL0spcZOJZUAZ79OilXmM8AvXqN1JqFuf2COojY8dN0GIwzpmUMTQR6FHE80DJunDhgirQuuM+/+ibPDcIM2ML8StGGUChqlO7npVuGo97DxtSibG8adNmLR06mH2eYn6E4vGJs89BGzvs9FST7AJFjjUg3IUxr4mXrwX+MS/aPSv//Oe/BE8PiQ1u3b6j9GoTpzR5crTOB4xVCMpjnz59rUvg1QUVh6ePAY00bNTYmtv5TTuB5sSYj2fXxCuyj+sCLiDEhEBZsgvtdLYtuQTv0HgqMBzQG0ysHOWWLVvBGifQOQAMmTfoz9CdPaX5vnHjlvj4tXBJxc+8jgIPLadc+Uo6Vpv7IrgX2jSyffuOJEotNMAdu/YInvODb75lTtPyiCMxgmefbHlQBGfPXahZ7TDuiesBCAS85Bnr1Kknb72V6O1FlzGGFR5V4+WhXIyIgYMc4xLvzhgc7NuNLuWkIJ08dVomx8SpPsZYRLIMUtQSLG4X5o8GDZoIHjfk6tXrmgiEOdhdAFLd47SI9/rqq0SaLl59wJ8pTgOK/ouninnQLvR1gKfPv/hCadP2OAT7cdy7MfCIvVi6YrV6Ku3HmO9Lly4T6GvucvTYCc3uSNuCesf4XqhAIWnZIhH8oM8BrhughHgnPAlvv51ItwJQJs0xAd52gW5F37THaGHw472zeznMOZ/e/UxeeeVVq12zPXrKVCFTI3MlxlK3rt01oyLJRzwJiVqIV3UXjPR+IWHOOehuEi8PYx1xMEboB8Ti2oWx0lPa4UdigDB5Zs+R23IjwdmkA6L00CDg7oJaGUEhZdIiJoHYAlARhIbr49vCUsxAgvPnL2ROE/82ARp8zIabt25pQI7xLDBBhA1yWNl37tyRho2augTCgAj++9+J/GzK2L17r5QoUcYqv2iRYoLLESGW
hWAoI+3aB8rY8Y4Jh20gNka5mTRpsmTPltNCP0HMSHGJMDgwedy86Qg8wg3VIdAVqTfXYFAgUPrW7U90TQCQGxQkI7jXM2fOan4qRxfXIRIaGqZKstnZoH4jiXUG+GOwGGSENGvcL4KbFuXXCA0e1ycDCmlrcY8ioN61atezJmYUKnvQIe+N+lixcpWFWBK8bpQgBgRQL+h1CO2lVKkymmOaiffLr/+uaWJRBKFQJCcE4xGfACqBwMWuW7+RDoQEiMJ9N+5cELOmXn7WWhZM0ASO2wWEK03qtLJx02aXYGOQANz2hs/bunU7Ge5Ukvr0DVHqlikHlJ4AVKhbpKpbvMTRuWlbFStVFdoiA2iBgkWs4EeQi6BefVSxoRySCeTOk9+i6zAJvJE5q2ZBop2Txcoe0Mo7p424CzEwoA9QR+yyYNFSK/2sfTsGEYaQXaDCsc1QS9hHrMrU+EQDm8nMXYkjNa1BgKE54fEiK5dd8I4MiRhuBZguWbbSharFMxkaBedB98KodkdTyCj2Tw8LOTGZkfnJSK9efTXFrfk9IGyQ0hbMbz4nx8S6TMZmH5MyCpBdMIReT5UmCZqHslS7dn2LPsWAvX5D0nZM1i9iAwyKbi/75Kl3ndlZXOkMTJCg1PfLRW8vh++AEoy/7utQ0CYJBjS59jkWgMM+BriXZX6j0MG7NtKhQ2dLYcToDezUzeovXTp3kVR/e03eOnpM3fTVqtaw2mt8/HTt+yaDGgaIGY9QckHLEeIQ0qVNrzGB/EZJHDfWQSlhTAX0OWkzLs19wVPOlTuf9Z4BDcjKYjwp0PLediaC4BzaFoofcXnEDBnaDPvwOA11UrBAgrNnz6W0H/b17NVXxk+YpJelb0LRNIYyZQwIH6zeVehLeEDIXmYEpdoeh8P29957X4oWLaHZ4viNx6uvk94zffoM/W3O7z8gTJVG8xsvEl4wBOAufbqMlqG0d99+8bPRfUjv/bYzGHT1mrVKKzPl2D+Jj/Dxa67sAAwxFGuMaSMk+Xg9dVpLya5evaaF/qK84+E2goGEhxYBDQYdR3j3BLAjjLctmju8vvwG8GD8YAyg/RhKH/vwUkChRfCEGkNQNwBUBnTQuDvTz4YOjbDqkrEXsMAkEMFbhYcM1gXyzd//oe0Bb8X9MmmRRUrbpNMIhWJXq3Z91XkwBrJnzSHE7CFQoFq2amspmYA33KNdaDPEeixbtkINV7OPdgMtkEQXCGCLoSmSNAU2ghG8pTwPQvuZ4PTkkUWpdJny8p4zMQRgIOCTEQxWDF/k2PET6sE2SvOyZcslTao0iroDegG8QpkzcurUacGDYBcMtmJFS8hI55zJvs5deljtw37s/IVLNAbLDo7BVMAItAvnQzlDWNcD2uR/fkhE8gFeZs+ZL2SlQgBWsmbNkQQoW75itWzf4UhIAF3q3TOu9F7KseuJa9escwHXKZt0tsbY1os5/5GJET0J/QYBOK9Rq64G85vjvLybCQlHjPzwoyMGxMydZjvMgIhhoyzjxWyPjZuqNCcT88V2AASAFWKXDMULCvShNxOvY84nHAFPiaf084Qf2IFXcw5gUNjAoR49XRyDcUMGRyPET9LP7UJfN5R8+/ZHYoBQ4IKFSzTPvn/rNtKlWw8rQBs+Hvy3vn1DLKuT7BT9QweqMQGPGJTBNHjSYsKtxMojwLhWrbqKXJPTnuBGBo+7n32u2YIol8nrwsUPNagL2tcIJ5+ZiYjAGQY/0CImeGP1mgqg49AZQetZbAxDiTgTOPcok0T606EJJiWQLVvW7HL02HE1np5//gU1FphIUdZAXRjIsCCnxicIFiyI8IDQgUKQGS8X5BHFmyC9ufMXWhO2uR8UA+guZKzasXufYBBhCJiGBN+3StXq4u/fRpVCkGi8IGQ8IUA0V648GrT03vsf6L1zPko7BhFuPhQ0gr+vXbum9AAQZFLScc/cPwosGasQUOqq1WqoRwILf4NToUKJIUAMFAlXNJ2xpwAAIABJREFUsZHu3YIc1JuT72jO6mxvZJfSpcvI+QsfaKYUbQP9QuS7f30vQ4cO0zZh1m/AIqfhz5m3QMaMHaf8ZjuX3lyDgCyeFyWWOoL2sN+ZCo5jUOSZfIlHiRg2QgjaRxgIoFC8+sqrVvwG23Edo9xDO7PLjISZOtCSphX366jR46wgPFyZIOu+vs0kpH+Y7Nq9T9sVfHIf32baZkFDQK1NlismUdoyqezozPBGc+bMLUWLlLAC00aOGiOFChfVjguqnzN7DsszAELEszJpjR0/US5fvaYZUuz3bL5DvyhTtoKi7kzuvPOFi5clWcSI44nHiomNtzKP/fLzL0ofxK2MQUFWMzx/8JMRkh1AK8qbO58+Pwob1DFiewoXLu5i+JDWEBofAce8W1Ae6oCFjBASUhD7RTwP75KYIiYQkxkKpZb1MQjspv1BlcMgpG9C8TIGoRamwaP71cilnZHwgT4J+EGsAnnMoW+S4YR3YQTaBVmw8Ja4C8hsVHRcEpSNJAKk4aUeeC6M085dummGJsqg35MRizgme/9g34ZNWzQGATSPgGIjGIIoWaz/Qp0Sa4QostwtSBMv1G/QyAVtNecm90k8BZM4cU8oskwKJOVg8rbkF1EP1JtOxJVc8CYTnHUM3uYrV3VsS5s6nWZ6AczJl6+gKlmkCgYNJztLpUqV1YMKcEEMw+Ejx/S9PvfcXzQfPJ5xPFq8IxIDgPbmyZVHxznoHfRl6gD6Ju8FLynBvYz9xOMR/4SHDiOGxCYg6/QFdyHLUZUqNaSVf2s1pk3OeqhNXJt3ZmhHIMosCAq6ylwEILbBGaNATEOhQkWkbbsATYAB8gy9h/GWGDfonwRoQ++kXLzTZOnDq5gtWw6LGoQR5O3TXMsh+Bmvpt3A5/7pi3hM8QwwlxJ8DK2QRC1wu4nHIrHKlWs3pEyZcspxx5NMPyGZSJPGXlYCka5dukuxYiUkqGdviYqJU48tSiHj+1+ee16zPtI3iKlJlya9jgF4a+zC/dAm8ajsO3BYqlSuqujr5q2O7DZ4oUDKeT+sSUBCGDwGYeGDVPnNmCGzxmhdv3lbChUsLJkzZlE6FbFeGMG0RwAClFnmZShK1A3n+/o1V288HlCEd4XByRoDjAXELSFQnYlLKlGitByyxXZFRg7T1KjbduzWIHTmdJKKkBKbd8y7ol8ADBETyW8yryEAMOgLxLUyL6CcE/fiLjdu3XZQywaEKwVN6bY28Aya0BtZsgqUG+gpUGkRlFTa0EsvvuyyeCRjG2MldDa7YCRSz4zTKHmh4YMs7yExR3gn8ICiBBKbCLMBwLBlS39NYENyFbx6MESMwLQAyCMm8cIHH2oCASh0UCkRnj1P3gI6xsJ8IOEFge3Ips1bVMnmXhmLmZMJpHYXEH36jpe3rwweEqnXcT+G39ARYX0Y451txN6wuCYUs3btAlSxJrkO8sGHl8TXp7lkzpBZoELTXrgPUrGzNpVdABVKliwrJC0gLpf2NWPmHNVxoMdDV2SMjIwcrjpQu4AOamiZvnDi1Luq50CHnDDJAWyhuOO
5o83AnrALmfGKFCmm4KFh6rzzzmltuySY4f3B1LDrocTT0B/wCroL79MdIOQYZS506KRzKIo9Rg2JLdA3kSVLV2jqd5IIGYPMlM18+9eXX9G09yYpC14YdBxiqumrUKWMpx2KJewU5jR0AcYyd8EDjM6CXkpSBrKTfvpp4tpdzOk8IyAQ4x7lEIeHPDIDhMJAOElVaYKR2QYaSgYXJkQTZAUlhUA0KnLpsuWKWk+Jm2ZZ0gQoYi0x8aEIszI07mJSy5K2jsGTFRcpFxSb4+Dpr1u7Tlg/wAiVB5oNkmH4gWaf+YQnFx8/TV28t2/f0TJB97kHKEUoFFevX1cqAYFtKBUExZJNgmeyrysAdYbJ0fDlQS1xA65Zt06RBegn69at1/UOCCi0W/3mfmgUIHQ0SAYfOjzcPyPcDylo6eBw7lC6CfoELQJZAY3nDxoBHiSzGCAoFh4UOMYIVCEG8+Mn31E0iDLcBYNpwsRJLmmVoRmQUYr1IoznivNw9Zs4DoJl8WRQPxcvfaRZLXgO8mBTJyD0vC8T3Ie1TnYSzp8zZ56m5iRY071+OG9IxEh568gx5fC6B25xH2T2IBOKSRnHNgwQ7gcPjTvS8PU33yoFy/7sGEYkNSC2gAnKXVjwcnJ0jLqOrzvTb4K2oXhwj7QDk6DAnAuqQiIC3L/UPV4X0iKbTGocx/OTEYg6M3El5vyNGzepC50JnAnmfoJCCcUAQ8q8k+SOx0jC/W9XzDkWBZysGMajwzZScc6bt0Db2cZNm2TBgoWyYOEipSqRgthdiK3C8OYccoWblWjxmEFNW7NmrUC/I/5r7rz5VtukHIwdst2B7DIQM2jTHmhTtFUQIDs1gvdOnTI+oECaYECUDLKQeRoDSE+I0ZOckO1l6zZH2nD7MaBkZGFjEKW/g3gawYtDVh9ogfb87Twzyjr0C9ohAYVGuHfKw0sHZ9ik82Scg4sPpZR+4e7ZMucn9wlgwVhB/c+cNTsJvQtlDJTNCOMdbdQ9XSbACO8K7joegytXrun4Aj0BTwWCBzJqcrTg/Qat/bczhSPjFOM0fQmEFiODd0dmFoK5qQ/24+lD6NPwvmk7eBfpS/yR5IE2c+fOXeXBM36QTYg0uZ4ELxRBlXYaBugufY6xwEyw0ISYM1g7x8xFMVOmWmMunHvmEGIoUMppq9wP1DLKpq6YD3gGPE5QzWij6zds0Oc0hgZef6hUtF13+qS5f54ZGhgp1e/e/UwVsfnzFzhifHbvUa/G7U8+1bGfmA/GYsYdVqvmmQytjfJog2SghKqDMPcy3rIGBeMDbQtFjzYHqGSULnMvfFJfe/buVe8u74wYP3s/ZyxjnKHNfvn1N0qZhrZIHySTEdf+9NN72u6ZL41HgkB1+s2Zs444JRZlQ0k8cOiwzi20Wff7gb5Cu0APMAKAQLn0+3O2OZJ2RewE4wWA2ubNW7R+UIBoRwQMUweMDbTLDZs2aV1QLgHjgKacz7ueN2+hZqQEALELwF7kiNF6z7Qb1htyF3QFFD47XRW0mjGc+6av2OUf3/3LAs3Mdgx1YoHw0OBpdW87JNphLqJ/M2YieNegwdD3oD0ZRdOUySfjE22SOZ65HL0EY9wI75uxGQWZNWHsQrl46YlptHus7cfwHa/O+AkTrQxt7vvNb5IhrFi5xsXzwD7ugTlog5NeyDZAC+iBjKXoIfRFxnbmD0PHN+XyiYFO/BLt0s7aoA2SGIk4QtoU4y9jgKHKcy7thfGCcQeQCO8Z4BD3yzunf9iFsYJj6WOM26RRR9CTAMo5xy7opIGdump/t28333mPsXHTPNKq0THpJ/Qj6skuZCADtOTd273d3A99c8fOnTqfGDAKEJ17Y8xgTGNcMHHIjPcwThgnqGd3vcRcl8yxtEHGOPc2CrhKBj6EZzb1Q3jCIzVAzM3c75MbII+0+8qaKNlmsLzf+Sn7Hk0NQA1o1aqty6T1aEp+uFJAs6KiEmkz5mxWanU3QKBVQbNzn5zMOQ/7CQoCGukuKMsjR3rm0Lsfa36TNq9du0AXZdTse5w/6Y9z5i1Ur517YOzjfN+/594wtkD7Jk6KSWJ42cuF6hMxbKR6udzbov24J+07ExGGbtfuQRbHmDoBOAAxc6cHPWnP9zD3u3zFKmutKXMe6zuQ6CNF/nc1AGX5gpM+9L+7quuVMLBB3d2FNNCMk3bBeBk0ZHiStmM/5mG+0//27E0M7DXnrl23waKPmW0P8tmufUcBgHhShPTei5Ys90gNelKe4WHuk7klfOBQNSrtHhH3MlDsx46PUhaMext0P/Zx+/2f7/8jMxJmCymoYYggALGANoRnYIz/zw0QJr/Q8MGa5WTJkqVqVbFSowlMfdwq8Wm9HxCe1KnTKMJvKB9/xrOCXhUsWFRRRviWrE2BUeBujGJVE+hEMDfIrd3z9LD3TZwLC+4pwu7kg5oycDE2btxEg95A/B9EQDnhp8Lhha/9pAlue+rceO6etPt/2PslWxTPaxZBu9/5oLys5QEn92kR4komx0xVb5x5JiYIKIXGo2G2P+2fI0ePlWYtWinKt3TJMlm7doNcvZaUcvO018Of+Xwg9BkyZFQqjKeA+P/VvZFRr3iJ0kLsJR4/qHVk+zFxFeY+AMCIMSCuEW87aPpvFYwuvFzEtLDwnF3wUuFVz54th0vchf0Y9+94haG/pkuXQWPVfs1b7n7+n/Wb+Rx6q6fYxj/rnv7I6xK/SrjBgwiL0gKI/J529iDXedTH4E2Mio51yfiHp7xvvwFWVtj/uQFiHhKXFHx+OPZTpyYGuJr9KZ9/bA2QDpMYEBCfe7a1C/7Yq3ouHY42wW1wx+Fp2l3B5gxcf8RWsOAbAXMgUL9V1q/fKI0be2u6S/cyyEBCykbiH0ywpPsx7r+JIYA737p1G00xDd3mSZQnDWH5rXX8W57zFzcE9Lde+3E4zxPihocHQ/T/owBowJknNgTu+ZPaf5/Ud4fHmfon/olYoD9TiB0loQpxbSQtIWjXXYhbILYiMLCjZgGz01zcj/2131AxiUNksVl3OX32vJBhlHnankHR/Tj7b4wjsg0xf7G2x8Mkr7CX82d9/y1j8591r7/nuiYJx8OU8aTVjaf5hGcwoRg8+59mgDxMxaccm1IDKTWQUgMpNZBSAyk1kFIDKTWQUgMpNfB01ECKAfJ0vMeUp0ipgZQaSKmBlBpIqYGUGkipgZQaSKmBJ6IGHpkBQlD57+HlPxG19YhukuwTZmGoR1RkssWQIpPsGP9fAoyTrYj77CBLBQG4j5N870zF+mffEyn0yMBisgZ5uh+T0tTTvj9iG/dyv+wrf8Q1/4wycWHfc6YifdDrk/HtaQmY//mnn+W77347v/5B6yy54/6PvbOAr+pK/ni7u3WKtrS0UNw1EAKE4BZDAgESHIIEgoZAQnAN7l4guLs7xa24SylQilS38t9tu92d/+c7753LfS8vlLa0ZbeZzwdu3r3n2rnnzBn5zQy80xNR8MxeK8BTm5R9jh744ssvNUPa79Ef8CqK3pFVK4U89wBZr562OAsgbU8LkUHtUb
GQ8AR79sWn5bn/W5/jVysg4LnIZEQef5Pi9b+1M36v5ybF65xkqlE+6WcgtWOnLtHysS0v85O+x3/b9RDs7Jm0+g8g7eLDivB/5PuQQq9xk+Yuedv/yOe5d/++dO4aYxVysz/LzVsfab0C0gE+LpFcgJTCBBzu2rVbU6mSwtReAO5R16Kg5rQZs4S00hD1CEihaE9nzP5Tp05rGu29e/dpukWyjjyKSEk4b/4CTS1J0B9EljRDpFckZSMZcTwRqWdJkf0oQnB9lJGGYnczZsyUTz75RC+D0WDo8FGaivtR1zXHNm7cIouXrnDJ2EM6ZVJukrLSFOMz7T1twbN7UmDoH9I0km70zJmzmk6X9Kvr163X9L6khCUVON+V9LikbKVgV3L05ZdfCrFYnHfo0BFN80gqUFJZGiIzEKlZ97qlKzXHf6vt5ctXtD4A6XTdacuWbVqT5+emRHa/zp/lNwWCySRnirr+lu8Nr2K+kNY9hR72gH0+L1y81KUG0cNWv/9fpHitVrWG1gT6/e/uuCOGPnsdE2pgUKvDnVhvunWL0ZojvzQWg9g7aiGl0MMe+NUKCDnrCR72K1fxDw8ge/haT/dfCBbff//97/KQMB+sdb900iT3kOTjtgtoybV7Gvdfvf6BS7pciguZfP1/9PNOmDBZChYqqsUu/4hnuf7BB1bRKe7PuCFLmqfA5QOHjsirr6S2Ku4+zvNSEKtmrTqSM0dODfbs1LmbFvR6I+ObMnPmu4+8BHV9Itq0VyGXhtR6oOAjxdAI4pw4aYoWEuMYedgp+lm0iJdWrfZUsI52pAsg/ztVr0NDGwrFmyZNniadu0ZroTPmz/yFizVrH8GpVarVSPJtNmzaLNmz55TBQxKSfX5qIFSvEZQkd7w5gUQMzZq3FCpK8xwfOuv1YNQJC28qFMV7FC1bvkpie/V1GdcUVaOAW/0GYVrUkVSn1DVIjvr07qt96X78+Psnlb83b95K2rSJ1GKpFFOL6thFC8WuXL1WlRwK5HkVKy7tO3SU+g3CtRAZSosnwgpLkS2KzlKdvGt0jHB975KlJDo6xqoZQ0rg4SPHytp1SesseLruk9iHkvXCcy9qvQj365EhiQK19EkK/XQPwDdMTZ6fbv1rW/xH+bin4Ndfc2WUzRUr1/yaS/wh58K7WOvsAjZ9Y4q9/iEPZbspa8ErL6WSyZOn2vb+vn9SwNLUT+HOyGWeZDPqkpUu4yesV7+U8JxUrxHosYr6L73mf/t5v1oBoQOohtmxc7RLYTpPHWPXxD0dT9n339EDWCZ7xMbL3XuOSrX/HU/teEqKQGEFelqJaqZx8f1kpbPy/O/9nFTBpYjV4xIVyQcMGPi4zbUdQmUpnzKyZ89e6zyKjaZLky7ZYntY06mmikUfwqPXuGlLLYhnLkIlb9KqGlq0ZJmeY3572rZr10GyZcvpkgac4mTP/e0FmTdvnnoDuI4hCl8VLFhEbnz40NsxJ3GeVp6eOi1pMU9zHtnVqJ47dOgws8vaHjl2XGr4B1tVZqOiOmmVXtPgw5s3pW1klBZfNfvs2337D0r/gUMEuIsh6nwU8/J28SyhwKVLm0FiY3uZZi7bpk1bCIqgvYAiDSiquXipoxIxv2vUCNBiqfxNBjhTKBVPlr9/oHVNMvFQEf7ju/etfe5/NKjfUIYPH2HtRthL82paGTp0uLUPQwf8Zp1bmlKrwW/wB0qUJ4UYgxsV2CkemUJ/jh6g2nZ09x7/dS97+/YdrTXxND84RRZJZfxHEN7sPv0HaeHUx7n/2nUbtHDrLzVWnjl3Qd58I5Ma4B7nfn+GNk9EASF3fK/e/bTACPCiNm0j5UdnKlIWkg4dOqqlkRogMT3ipGPHLlqEBEF26bKVWtGbWiDVqwfIpStXtd/nzl8oQUG1XLRTctV37RYj9erWly5dulk4cAYGBXugqI6dk11gzQdlca5apbq0bdfeqvqLcrRz1x7Zd+CQCjndontotVjOGTV6rJT1q6B1I/htYBTf/fAvaRfZQa2GptIjxw8ePKzPA+QiuntPaR/ZwUrtiEUZIQ+4haFLl69K48bNJCys8SPxmes3bpbadepJk6bNhVz+EJjFpctXCtXlgcKQ1vie85jjnXbL4iXLXapnk/awcuWq0iu+j3kEjREZP2GSClbAZKpWrSG797xnHTd/YDHu3ae/ZEibTipVrKyWZo7Nm79QqK8RE9NTq3Oyj+rn3CcmJla++/4HvQTWFyozkyMad2edOnVdqthu3LRZC9dcuXLNgoKQGhFrLuOsZs060tutWNTJU2cUtlQnpJ7MTpxrHlW/H+5UYjx69+4rzZq1kBLFvaVokaI6DvmO1BdhrBGXA1EhF2gIAi+CBufZGQ73qlU7RNq3j5IfnBXJ7RYmc3Oqd1Pplm8bFFxLxowZZw7pFiEPC3DlytVkx87d1jGqLQN5OnjosFYdbdWytUREtLFgKOSmDwisaRXzWbt+o1CsaMiQBAkPbyzfu6VS7dt3gN6Das8QFslVa9YJ8Clyi1er7q8VjDkG3KVw4aJSMH8BGTBoqI6pb779VosJff7559Yzkia5QVgjTQ2ZMHyk1nAxB6nijsWdFJYPkknvTG2NihWryIkTjkrI5twc2XPqXDO/7Vuqlg8YOMTaxTfNm7eAi5Wasbl6zXpr3CxYuET69Bso7lWMzUVQgHLmzJOk5gzH9x84JHv37jdNXbYtWrbRSt72nRMnT5URo8bYd1l/M5+mTn9XhiUMl7ffyiwU1LRT69ZtdSyYfVjkaofUd4GVUbcmsn2UaeKy7d4jzqUS8/Hj70vaNOlkwYJFLu34MWnSFFVC3J8Bb9LGLdvVYwRvtZPdcMRYDwmpJ/sPPORfpu2CBQslpE5d81PwkOXJnU+rCVs73f4ID2ssM6a7Km7+/kGaftTedP6CRRIX39e+K9m/qVAcEBAsjRo3dakyTNFBKppTrZy1ZcF818rE1HuoVbuubNqyTeJ795E5cx7yEnMz4gvCGzXRderoseO6ZgE7g3bu3iO1a9eVnTsdqVtZC6njYGB7ixYt0TWGdS8svIlMmTpNz9uwaYvOQ1LT22na9Jmq7HXuEm2tC1Q7nzU7UT7/8kvlvbyHub/9XPM30BHg0VWrVndJKXv4yFGtHs38HjBgsMIhOQdeS/8PHfZQATTXIhUs6wLp0E3cFwaTd2clypWrV2XCpClSr14DXQfMOe+9t1eYG3aK69VHyleoJEuS8Y7Rduu2HdIwrJHUrFlbFi50jGPg3lQGp1YT60ONGoFaEdtcm3HPWm2PD3tv7371XgYF1bSMF7QH/smaQuxqu8go6d49xlzGZbtw0VLJnTuvFPcqLjPena3eA7yV8HUqUA8Y5OBJIAKaNW8l1avXkL37HHyDtWXJ0hVy5+492bZth46V/QcPW9fH0NGoUVPhGR2Q0v8IMVzbd+6WL778SgYPGaZ94J5ynnUbIwB1sQxRI2LBoiXy0Z27ei+8qKH1GkjuHLmc/eRYY8aNm2jBHOEzy1eu1lS9cXG9JTIySr765ltzSd126RotlSpX1cr27
PDkHcCbiafz9NlzOtbato0Ue1wHz9Qqoq3KbXwfOyELAjsFosc6Vzeknqx2VvamJkxo/TBhHkCLFi9RuYH5Wb58JQEOaSf6GhnSDm+Hh+PRvPfgU+WxnTp11lOAy8KbsmfNLm3bddCK8BzAmATc1hByADIHY3zRkuUydNgIC779j+++kx49ewlyx+Wr18wpyW4XL10prGMlvX1SPKjOXnoiCgjwAixfKAYoEsHBtSTeuVhMnz5TXk2V2gpEAwJQonhJ4ZwtW3fIW5kyi3eJkrJ0+QqBYZcq5asC8+kzZ6VuSKgye1xXCIHto7qoQM8A4Rq7d+8RLFI5c+SWdKnTypBhI7SQXdmy5RSO4WkkLF22XIKDa0vinLkCw+d+CMNguH1K+UqDBo1kyrSZ0jOut9bHYMGuU6eevPvuLKGkfIXylQSm9MP3P4h/jUBVBiha5ONTWiioBJUp5avWztFjJyiTQug1ebxhelhCwTtDO3bsVPgHEIuxY8cpg795M2khLPqxfPnKOjnaR0ZJn779NZ8yTAhhA2bMtZs2ba7CH9dGSCtQsIhCXIwQAQOi4Ni8ufMltH5Dhc/RdvnKVfpcvmX8ZN2GTTJgwCBVuugbd+I9C+QrqEI4VlUmrVp3h42QseMmaHXP2bMT9V0QSgL8A1UJ5ToTJk7Wtq3btJMjR44pI+3UqaveggJxKFngwMMaNVU8PgdqVA+QvzzzF2W4S5YsVQhIXK/eeg649m7de+p3p7Blrlx5LYtqbFy8PPPMs0KV1YSEkVpRtldcvJ6/avUaVcqiY3rqOTAkijaVLl1WUr2cSlasXitLlizTwodGEUMo7di5m8YCjBw1RgIDa6ryieBup0tXrsnrGV6X19O/JivXrJc5cxIlfdoMlqv5wsXL0qffIBXMBg8eqhb4zZu36iU++eRT6RrdU2uhIBzUq1dfxzeLLsQiy/VY1Khb8Nxfn9MYn23bduqCg9UZYr5s2bZTvQxLlyyT0Abh8uHN27Lnvb3y4vMvSgkvb1mzbqMWTixS2EvnEgtJhw5RUr5cBV1wWPBZFLCKf+UstkUfDBw8TBYvXiJTps6QLFmyWpZi3mvt+k0qgLFIto/q7LHY3507d8WvbHmNuWCMgrlHUeRdDfRIX8L5H5DF4SPHWEYGc4y5ybgIaxiu/WH2m23i3AVqGEkuoL9nzzhpFN7YNH+s7aHDR3Uhci9M2K//IBk9ZnySa/z47x81nmf/gYPCgpU1a3ZhTtgJAwVwMUPfff8vKVzYSxdmsw+LOzzpgTM+xOxncWcsnT591uySkSNGySsvvWLFpDD3zfw/evyE5M9fKAkMYNSYCXL/wadaGJZn+dwZB2Nd1PkHQZhA3vZ4ME5Mmz5DlWBzDjyP4pwIbMkRsKu42F4C9O/KlasqeMJL4al2AibZf+BQj0YRe7vNm7dI9uy5dFyPHDlaqlbzVwgK68nfnv2r5M9bQHbt2Sf9+vaX8uUqWoYcvPjdonsKPGTM2PGSPVsO2brVVcDhPijwwYHBCpXbvHWHdOvWXQKDaul1mD+lSpWxeN22bdu1WN2uPe+p0pkh3WuS5e0sur5ghGGdbNO2vWzesl2mT5+hbekDqH//gVKoUFGZmzhPeTqCIMSaBa9t0CBMYx34Fnj+PBEVh5mD8+Yt0P4oVbqsbNy4SZsy/yhwt2vPfvW2wRdINoHRBd49fuJkNdJ17OgQ2Fj/MBosXbJUlQKMdxA1vHieyMiOwtwICAhSIyDH8I5lz5ZT+Yk2FpG+/QbK6NFjlWew9k6YMNEcsrZ73tsnNWvXU8Vj8KDBUqFCZeXNGLS4V5kyZWX12g26RpXxLWcFDVMYsLiXt5gaCxhK+g0YIsuXr5TobjGSL29+nRPw+SKFisiLzz0vEyZP04KDPj6lZKtz/bYeRESVO/hL7Vp1FJK5bMUqKV68pKD0U2Rt8tQZqkAwBpYvXyGjR42R8MbN5KuvvlZlM/PbWdQ7SowhvJ4+RIklkcaqNevl6NFjWosLqCRKXf8BgyXNq2nUkMg6i3GiYVhjy4DYMzZexxdKGYYevheEUTVHjtxqOBo3YbLEx/eR8eMnSLWq1WTKlKnKVydNnqr9By+CItq0k1dfTa0eb5Qp5JkePeP0GMZi4JYYPVhvUBQqVqgsa9a6xkX9oGtEW70ucgWyWfnyFSXMyVc///wL9V4Sh7Z40RJdX1u0aKX34D+uP2GCow5czx6xkipVGsGjC3362WcyaNAQIf6M4op8e4T3A4eOypAhQyUkXonrAAAgAElEQVRN6nQa60fb9Ru3qEFt08ZNem/qq0BlSpeVZ595VsaMmyirV6+V3DlzS8eoTlpHbPSYceLt7aPjkbV2246deo9RI0fruTdv3pJWEe1UVty4aYsqYjE9YvUYisnGzdvVw4wCReX5RxXTJi6JtREq7lVC2rRuq3//2f97IgoI1oCixYrrwkeHrlm9Vhc5BvHfv/pKKlWupoOIY0w8KiECpUDbr1athtSrG2p9h3cyvyMjRozS3wg73iVLC4LpP/7xTxnlXODxInDeuPETdUEYNWqMCnzbt+/U83r16iMhIQ+vaS7Os2A9hsEa8vOrIK1aReh1GJAwCLt1EEVngzMYkUWxcKEi+jxffPGF2CdSKZ9Syiy4bt++/STzW5ktLXfg4KES09MBe+BdsEbBrCCsHCg2hijOaGAmZh/bufPmW5bqxLnzpH7DRvL1198KQaNFChezLDiHDx/RSYfAgSVo0OBhFgyFYE+v4j6WRY4F4pWXU6ni89lnn0ulipWU2RuMaAnvUiok2p/D/I3ljYBpCMXnrbcyq7Jljg8ePEQQBqCI1m2krK+f/k3bNKnT6kLPjg0bN6qVg7+vXb/u8ODcu6/BWqYS+ciRY6REiZLy2WeOQGIUnlw5c6vVguJVhmlyjdGjx0jRosV1Ebp46ZKUK1dRCEQ2xILnHxBsfqo1sXKV6lr9mD4bN3a8FCvqJXjboMj2HQWPEdSv3wC1nOoPERVu+vUf6AJ94RhjvHVEG6lYoZKOW/aBr0eR+r9//EOaNGkhFII01KF9B7U8s3CiOHTpFiN4UCCsaM1aRFhZ0/AwYLWDsNC+lelty9INtAmL/oULF1XoA/N/+fJlTQ5Bu5YtWgltqlapqgYDg5XmWbE0QoxLhEJD4HN9fcvpc7OvYXgTwZILoZRky5ZDpk+brr/xBg4fMVKfFQGLBWPr1u16zP4f51WsUFEND3j9AhHoylcSu3XQ3v7s2XPaB3asrjnOfd7O9LZkfO0NiWzXXj2t5thPKSDEMSD8Py5h/U8YMVoFEfdzUEDGeAjGRxCZnbjAaj55ynSNj7DH1Fy/fkPxxZ27dJN9+/bLxImTVfi0B2RrscvBCSrsWBcTETwDCJlYog0heGPdM0qrXQG5dfuOziUUckOHjhyTrdsfFlyD506e4mq1Nm1RQBgrnhSQpUuXS9UqePR2yZYtWyVn9pxSsWJlQUhJjlAC8+TOox7gWjXrSJWqNSQ5KBtCL33zKJo1e44MdMLw8CK+kyWbeqvg/b6lfdUYYjyW8FCE
XYi4JOPx+OyLL1RoXLUqKe4fb5pvmbJSp3aInof30at4Sas/MLDY+REeODz08OJOHTtLtqzZ1eDFyU2aNJNyfhX0OvxXG+vvase879y5q3XN+PjeFn/AAIEXbdCgoXoegk2duvXl44/vWdcxfwAjw/JsCMX0tfSvC++3adNmKVrUyxL2aMNzI2hCeFLhGfBTqGvXaF3X+ZtCrX5ly+l+rpM+bXpJTJynv1HasFrj5eEfnlliRCGESAR3Q3jsjaHS7GPLGoFnFyKxAYY3xi3KNzy1QvmKYowKKFV4haFJEyfr+sHfjPka/kGy2AafRIkoUriotu3du4+OT8NT4uL7qPCvB93+69u3vzWmKIjr5VVCwho2sgqqMYZmznQELuMh53scPHREUR5Yx4MCa1pXROk6e/asxtXRFwYJgYAL/0fp4LvMmOGIh8PA+s472TWxA7BT+CTB9hDzO2PGTKpUkjijTkio4H2g3yHaY2Q0hILpHxAkVOGGiMGAT0x1rm/w0hrV/fUY5xYsUFiQCyCMBMxtPPR2op+Z94UKFbHkFsZklsxZ1cjMe+CZNwRfyJ4jl2UcAR2DDGSoa9fullIK9PL2Rx/robt37+mYi7Txa4yu/fr1V089iVuQN+7dfyD1QurJyy++ol4VjD28xzVn8hDGVv78BfWaGJ2Da4ZYmQZPnTmr69m8uY6xPHHyNFXozLMxX1njoOUrVkrr1u2EQPoz586rIRDjb3I0d/5iwcMKbd6yVSpVqmoZ9ZI758+w/4koICdPnhb/AFycDisc2i446S+/+lotW6VL+1q4Yj5Yq9btrN/8bVxsaJxopCYjEVlWyles4gLD4uNR6TNHjpxWJes1a9aIt3cp+f4Hx0LXpUu0NG6cFMcO1MWntJ+cv3DJ+rZor2bSrd+wSXrF97OO8QfeEsMMEhPnJlFs3tu7T4NUWRQ6OwOUaIeSYQgYFhPEULt27WWhEx4xZtykZAUv095sYfZYgrGyhdQNtayatUNCLeUEnHS1av5WoNmKlastBWTqlKkuwjfXZVK1jmittwCiRPVUiMmJJ4nsOe707bff6gTiO0P0Z/ESPhYu3LQHikZWHj9fP4VrsR84Sr68BeSME8u/cNFiCQqubcGcWjRvJa8Q2GyDADAOcMcbwtJerWp1WbNug3pZsIQbwppevkJlS/giGJasPYZQDh2T3+FqRkgJCKxlMc8VK1dJeHgT01xatmqt3hN2rFmzVipUrGIdC60fnmxw4qhRoyXCtvijGBYuVFTGjJ9see/MhbDUFSrsZcVQ4WWBURpCAUORhJhjKPzQ6dNnpGTJ0lZ2I5QXICRYClGWsRZ16tJNLZy4trFAQcDHgFEYmj0nUQOk+Y0FuH5oA3NILc5VqlQXLNAIwbzzkSNH9Tiwv2rVA2T6dIcCglAWHFxTA/WaNWsprVpGqFJpXcz5x0d37kiFCpVUYUWpJOvT9h07BT5BILk74SmrF9pQgDkYwriBJdPQoAGDNDgYuIqhxLnz1brIs3sivDvEWzwuHT32vuDx80RU0B49NqkHhPbtO3SSqVNn6FzCw4RX4IMbrl7Ow0ePafB57z79pGSJklr92H4fFGL1ADgFZnNsypRpCiEBfmYIJTrVK6/KB85Adgw2CAoQi2ChgkVkvs0LA8aduY8gB5SCQHLGiydi7gNJMF5BextgJiW9S0mTpi1UoWjapJnlScGyP278BBk/fqIK+kYJgE8PHjREg+exeJINDQETGBhWSTsB7cWa+1OE92bbjl3SIaqzZHk7swU9atgwXI0InE86UpQdlAzGEl6/D51xPShzvCMeUHfCgAD/MEYJPPllyvjJPiekFgtpfG8HVAzPHYH6WKehsWPGuRgwUIAjIx2eBAQ9PKogCAwhHG/fuUf7g7nxz+9/UGMHHgAD84FPlKtQ2VpPzblsq9cIcJlPJDRgXb565apCd1EUMHAYYgzDLw3hvbAnEdiydZtas/PmzS/BQQ6h+sKly3rN06dP62lLli5Tfm4yyWERb9HKsb4gaKIwPw4hcPJu9FE5v/IKLeK8pqxRMQ/XKAxM8+c7FPxDBw8pWoF2jFPWyZMnH8I8589fKG9mfFMNOPA5DB+G2rTrkCzEsWu37tKpk0Mxo32DsMYKozbnsj167JiMGTtBvy8ePIxAEPx35CgH7AihHsitMYgBN8ZI06RJUxWcaY9HuHadUIXC6QVE9JwB/QcIEMJ6oWEuRhZgViiWUN/+gxXyZc47ceKk1PAPFHsGQNYzI19hxQ8MrmM9K8o9qBMIQ0O5cpUU9sxvPH3G+m+ub7bnz1+Q4KDa8g+nEeSrr79VTxjZUfleMbb4GcYb3iIQCBAKSIItBozsefAP5g7Jc/DiGUJWmTDe4S1h36SJk9RzPO3dRAmpW095TsdOXaV5sxYS5YT9HzxwUBVRwx/v3run8C3G1+Ur16Ry1RoCFA7iuTFIMzagkHoNFD2hP9R7N0CA5kPMi9KlfKVjpy7qgWxQv4HMdCqNpr3Znr9wQcfl0GEJsnjJUoVjv/D8Sy5z3bT9s22fiAKCZs1ANkwYeAaeg2++/T/VwoG1GA30wSefSZduPcTAjJo2b6WeDDqewYZAtchp0YABVasWoIsT+eBjY+OVoQMNIRtJgnPiISRjrUdohlBAcJu6E9aJN998W4Udc4wAKPChEHEkg4a4Yl/B1yLUsVBgrTVWMSbH4MEPvRdYtxj4EAIx7koz6KNjYiXC5nJr0TJC01TSFlwzMSF2gmEYgYH9LNZYKFkwEKaxYAQE1dJTyDASXCtECK6GNm3aojhIg3tftHiZDBjosJbNnj1H0qd/zQWfiasUdy2EpcIIpriDS3j7CJAnd8LCj+VunfOeZ86elyJFvOSgLTCTZ4RRM9HxiOEKhVhUc+fOp65nfjMhiWtBmDXEO+TOlVctDPQhwjQwOEMIpCzCYES7d+/hYnHEpV2xUlX5+uuvVakBk332nAOjzflxcb3UlWpEV+IAagTUFNIVQ6RitVtssHiMGOFwyXIcCAGQMix3eAqAXXii+Ph4ad68pXUIxurnV14QYn19/SwrDw0YdzUCgjWdKb+79+gl22wWaYQchBOsgnZIHFZoLy9vS3nCcwUO+fSZc2qxG+W0XloP4fwjLCzcsnSyi4UisoNDEEfAsyvvpCJFmWY8MtZ4jkOHHDhmvhn4YKxBEPhbvrc72ccyxz759FP1tB0/fsKlaWjdUHknc1aXffwAlgUsDQuVISA1ifNchRngeCwgKEoQ1k9Pmal27d4reH+w1BOAfsNNGTD3QCg0z86cZJwnR4OHJsikKQ5FzN6G2A+sdswrAlmxZvvXCJBeTiHV3pa/8fQSYwJu2U4IzAOHJFiLpTm2Zdt25SF2yBSB3CggkyYnVebwEGTOktVa2PGKArVA4EfQQrjr32+AQoyIC3EnPDfEgGBccCe8Z1gFPRExB8SuIZAQeIqyDBH7hhHITmvWrlXBzK6EczyqUxeNH7O3df/76tVravXHUIV3Aj5hoFR1atcVA91kDALPwiqv2WmqB1jKJZZoYkgQEN0
)

ChemProt RE works well with `ner_chemprot_clinical` to find relationships between the following entities:

- `CHEMICAL`: chemical entity
mention type;
- `GENE-Y`: gene/protein mention type that can be normalized or associated with a biological database identifier;
- `GENE-N`: gene/protein mention type that cannot be normalized to a database identifier.
###Code
ner_tagger = NerDLModel()\
    .pretrained("ner_chemprot_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

ner_converter = NerConverterInternal() \
    .setInputCols(["sentences", "tokens", "ner_tags"]) \
    .setOutputCol("ner_chunk")

chemprot_re_model = RelationExtractionModel()\
    .pretrained("re_chemprot_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

chemprot_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    ner_tagger,
    ner_converter,
    dependency_parser,
    chemprot_re_model
])

empty_data = spark.createDataFrame([[""]]).toDF("text")

chemprot_model = chemprot_pipeline.fit(empty_data)

loaded_lmodel = LightPipeline(chemprot_model)

text='''
In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels (Kir6.2/SUR1, Kir6.2/SUR2A, and Kir6.2/SUR2B) reconstituted in COS-1 cells, and compared them to another meglitinide-related compound, nateglinide. Patch-clamp analysis using inside-out recording configuration showed that mitiglinide inhibits the Kir6.2/SUR1 channel currents in a dose-dependent manner (IC50 value, 100 nM) but does not significantly inhibit either Kir6.2/SUR2A or Kir6.2/SUR2B channel currents even at high doses (more than 10 microM). Nateglinide inhibits Kir6.2/SUR1 and Kir6.2/SUR2B channels at 100 nM, and inhibits Kir6.2/SUR2A channels at high concentrations (1 microM). Binding experiments on mitiglinide, nateglinide, and repaglinide to SUR1 expressed in COS-1 cells revealed that they inhibit the binding of [3H]glibenclamide to SUR1 (IC50 values: mitiglinide, 280 nM; nateglinide, 8 microM; repaglinide, 1.6 microM), suggesting that they all share a glibenclamide binding site. The insulin responses to glucose, mitiglinide, tolbutamide, and glibenclamide in MIN6 cells after chronic mitiglinide, nateglinide, or repaglinide treatment were comparable to those after chronic tolbutamide and glibenclamide treatment. These results indicate that, similar to the sulfonylureas, mitiglinide is highly specific to the Kir6.2/SUR1 complex, i.e., the pancreatic beta-cell K(ATP) channel, and suggest that mitiglinide may be a clinically useful anti-diabetic drug.
'''

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df[rel_df.entity1!=rel_df.entity2]
###Output
_____no_output_____
###Markdown
Train a Relation Extraction Model
###Code
data = spark.read.option("header","true").format("csv").load("i2b2_clinical_relfeatures.csv")

data = data.select( 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel','dataset')

data.show(10)

# only these columns are needed: 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel'
# (the 'dataset' column is optional)

data.groupby('dataset').count().show()

# Annotation structure
annotationType = T.StructType([
    T.StructField('annotatorType', T.StringType(), False),
    T.StructField('begin', T.IntegerType(), False),
    T.StructField('end', T.IntegerType(), False),
    T.StructField('result', T.StringType(), False),
    T.StructField('metadata', T.MapType(T.StringType(), T.StringType()), False),
    T.StructField('embeddings', T.ArrayType(T.FloatType()), False)
])

# UDF to convert the training data into named-entity (chunk) annotations
@F.udf(T.ArrayType(annotationType))
def createTrainAnnotations(begin1, end1, begin2, end2, chunk1, chunk2, label1, label2):
    entity1 = sparknlp.annotation.Annotation("chunk", begin1, end1, chunk1, {'entity': label1.upper(), 'sentence': '0'}, [])
    entity2 = sparknlp.annotation.Annotation("chunk", begin2, end2, chunk2, {'entity': label2.upper(), 'sentence': '0'}, [])
    entity1.annotatorType = "chunk"
    entity2.annotatorType = "chunk"
    return [entity1, entity2]

# list of valid relations
rels = ["TrIP", "TrAP", "TeCP", "TrNAP", "TrCP", "PIP", "TrWP", "TeRP"]

# a query to select the rows with valid relations
valid_rel_query = "(" + " OR ".join(["rel = '{}'".format(rel) for rel in rels]) + ")"

data = data\
    .withColumn("begin1i", F.expr("cast(firstCharEnt1 AS Int)"))\
    .withColumn("end1i", F.expr("cast(lastCharEnt1 AS Int)"))\
    .withColumn("begin2i", F.expr("cast(firstCharEnt2 AS Int)"))\
    .withColumn("end2i", F.expr("cast(lastCharEnt2 AS Int)"))\
    .where("begin1i IS NOT NULL")\
    .where("end1i IS NOT NULL")\
    .where("begin2i IS NOT NULL")\
    .where("end2i IS NOT NULL")\
    .where(valid_rel_query)\
    .withColumn(
        "train_ner_chunks",
        createTrainAnnotations(
            "begin1i", "end1i", "begin2i", "end2i", "chunk1", "chunk2", "label1", "label2"
        ).alias("train_ner_chunks", metadata={'annotatorType': "chunk"}))

train_data = data.where("dataset='train'")
test_data = data.where("dataset='test'")

!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/generic_classifier_graph/RE_in1200D_out20.pb

documenter = sparknlp.DocumentAssembler()\
    .setInputCol("sentence")\
    .setOutputCol("sentences")

tokenizer = sparknlp.annotators.Tokenizer()\
    .setInputCols(["sentences"])\
    .setOutputCol("tokens")

words_embedder = WordEmbeddingsModel()\
    .pretrained("embeddings_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("embeddings")

pos_tagger = PerceptronModel()\
    .pretrained("pos_clinical", "en", "clinical/models") \
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("pos_tags")

dependency_parser = sparknlp.annotators.DependencyParserModel()\
    .pretrained("dependency_conllu", "en")\
    .setInputCols(["sentences", "pos_tags", "tokens"])\
    .setOutputCol("dependencies")

# set training params and upload model graph (see ../Healthcare/8.Generic_Classifier.ipynb)
reApproach = sparknlp_jsl.annotator.RelationExtractionApproach()\
.setInputCols(["embeddings", "pos_tags", "train_ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setLabelColumn("rel")\ .setEpochsNumber(50)\ .setBatchSize(200)\ .setDropout(0.5)\ .setLearningRate(0.001)\ .setModelFile("/content/RE_in1200D_out20.pb")\ .setFixImbalance(True)\ .setFromEntity("begin1i", "end1i", "label1")\ .setToEntity("begin2i", "end2i", "label2")\ .setOutputLogsPath('/content') finisher = sparknlp.Finisher()\ .setInputCols(["relations"])\ .setOutputCols(["relations_out"])\ .setCleanAnnotations(False)\ .setValueSplitSymbol(",")\ .setAnnotationSplitSymbol(",")\ .setOutputAsArray(False) train_pipeline = Pipeline(stages=[ documenter, tokenizer, words_embedder, pos_tagger, dependency_parser, reApproach, finisher ]) %time rel_model = train_pipeline.fit(train_data) rel_model.stages[-2] rel_model.stages[-2].write().overwrite().save('custom_RE_model') result = rel_model.transform(test_data) recall = result\ .groupBy("rel")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("recall"))\ .select( F.col("rel").alias("relation"), F.format_number("recall", 2).alias("recall"))\ .show() performance = result\ .where("relations_out <> ''")\ .groupBy("relations_out")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("precision"))\ .select( F.col("relations_out").alias("relation"), F.format_number("precision", 2).alias("precision"))\ .show() result_df = result.select(F.explode(F.arrays_zip('relations.result', 'relations.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("relation"), F.expr("cols['1']['entity1']").alias("entity1"), F.expr("cols['1']['entity1_begin']").alias("entity1_begin"), F.expr("cols['1']['entity1_end']").alias("entity1_end"), F.expr("cols['1']['chunk1']").alias("chunk1"), F.expr("cols['1']['entity2']").alias("entity2"), F.expr("cols['1']['entity2_begin']").alias("entity2_begin"), F.expr("cols['1']['entity2_end']").alias("entity2_end"), F.expr("cols['1']['chunk2']").alias("chunk2"), F.expr("cols['1']['confidence']").alias("confidence") ) result_df.show(50, truncate=100) ###Output _____no_output_____ ###Markdown Load trained model from disk ###Code import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .load("ner_clinical")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") loaded_re_Model = RelationExtractionModel()\ .load("custom_RE_model")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"]) \ .setOutputCol("relations")\ .setRelationPairs(["problem-test", "problem-treatment"])\ .setPredictionThreshold(0.9)\ .setMaxSyntacticDistance(4) trained_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, loaded_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") 
loaded_re_model = trained_pipeline.fit(empty_data) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ loaded_re_model_light = LightPipeline(loaded_re_model) annotations = loaded_re_model_light.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output re_clinical download started this may take some time. Approximate size to download 6 MB [OK!] ###Markdown ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/10.Clinical_Relation_Extraction.ipynb) Clinical Relation Extraction Model Colab Setup ###Code import json with open('workshop_license_keys_365.json') as f: license_keys = json.load(f) license_keys.keys() import os # Install java ! apt-get update -qq ! 
apt-get install -y openjdk-8-jdk-headless -qq > /dev/null

os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]

! java -version

secret = license_keys['SECRET']

os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID'] = license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']

version = license_keys['PUBLIC_VERSION']
jsl_version = license_keys['JSL_VERSION']

! pip install --ignore-installed -q pyspark==2.4.4

! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret

! pip install --ignore-installed -q spark-nlp==$version

import sparknlp

print(sparknlp.version())

import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl

spark = sparknlp_jsl.start(secret)
###Output
_____no_output_____
###Markdown
Posology Relation Extraction

This is a demonstration of using Spark NLP for extracting posology relations. The following relations are supported:

- DRUG-DOSAGE
- DRUG-FREQUENCY
- DRUG-ADE (Adverse Drug Events)
- DRUG-FORM
- DRUG-ROUTE
- DRUG-DURATION
- DRUG-REASON
- DRUG-STRENGTH

The model has been validated against the posology dataset described in (Magge, Scotch, & Gonzalez-Hernandez, 2018).

| Relation | Recall | Precision | F1 | F1 (Magge, Scotch, & Gonzalez-Hernandez, 2018) |
| --- | --- | --- | --- | --- |
| DRUG-ADE | 0.66 | 1.00 | **0.80** | 0.76 |
| DRUG-DOSAGE | 0.89 | 1.00 | **0.94** | 0.91 |
| DRUG-DURATION | 0.75 | 1.00 | **0.85** | 0.92 |
| DRUG-FORM | 0.88 | 1.00 | **0.94** | 0.95* |
| DRUG-FREQUENCY | 0.79 | 1.00 | **0.88** | 0.90 |
| DRUG-REASON | 0.60 | 1.00 | **0.75** | 0.70 |
| DRUG-ROUTE | 0.79 | 1.00 | **0.88** | 0.95* |
| DRUG-STRENGTH | 0.95 | 1.00 | **0.98** | 0.97 |

*Magge, Scotch, Gonzalez-Hernandez (2018) collapsed DRUG-FORM and DRUG-ROUTE into a single relation.
###Code
import os
import re
import pyspark
import sparknlp
import sparknlp_jsl
import functools
import json
import numpy as np
from scipy import spatial

import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline

from sparknlp_jsl.annotator import *
from sparknlp.annotator import *
from sparknlp.base import *
###Output
_____no_output_____
###Markdown
**Build a pipeline using Spark NLP pretrained models and the relation extraction model optimized for posology**. The precision of the RE model is controlled by `setMaxSyntacticDistance(4)`, which sets the maximum syntactic distance between named entities to 4. A larger value will improve recall at the expense of lower precision. A value of 4 leads to literally perfect precision (i.e. the model doesn't produce any false positives) and reasonably good recall.
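For example, one could trade precision for recall by loading the same pretrained model with a wider syntactic window; the sketch below is illustrative only (the `recall_re_model` name and the value 10 are assumptions, not part of the validated setup above):
###Code
# Minimal sketch: a recall-oriented variant of the posology RE model. It reuses
# the same API calls shown in the next cell; only the maximum syntactic distance
# between candidate entity pairs is relaxed.
recall_re_model = RelationExtractionModel()\
    .pretrained("posology_re", "en", "clinical/models")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(10)  # wider window: more candidate pairs, higher recall, lower precision
###Output
_____no_output_____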
###Code
documenter = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

sentencer = SentenceDetector()\
    .setInputCols(["document"])\
    .setOutputCol("sentence")

tokenizer = sparknlp.annotators.Tokenizer()\
    .setInputCols(["sentence"])\
    .setOutputCol("tokens")

words_embedder = WordEmbeddingsModel()\
    .pretrained("embeddings_clinical", "en", "clinical/models")\
    .setInputCols(["sentence", "tokens"])\
    .setOutputCol("embeddings")

pos_tagger = PerceptronModel()\
    .pretrained("pos_clinical", "en", "clinical/models") \
    .setInputCols(["sentence", "tokens"])\
    .setOutputCol("pos_tags")

ner_tagger = NerDLModel()\
    .pretrained("ner_posology", "en", "clinical/models")\
    .setInputCols("sentence", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

ner_chunker = NerConverter()\
    .setInputCols(["sentence", "tokens", "ner_tags"])\
    .setOutputCol("ner_chunks")

dependency_parser = DependencyParserModel()\
    .pretrained("dependency_conllu", "en")\
    .setInputCols(["sentence", "pos_tags", "tokens"])\
    .setOutputCol("dependencies")

reModel = RelationExtractionModel()\
    .pretrained("posology_re", "en", "clinical/models")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    ner_tagger,
    ner_chunker,
    dependency_parser,
    reModel
])
###Output
embeddings_clinical download started this may take some time.
Approximate size to download 1.6 GB
[OK!]
pos_clinical download started this may take some time.
Approximate size to download 1.7 MB
[OK!]
ner_posology download started this may take some time.
Approximate size to download 13.7 MB
[OK!]
dependency_conllu download started this may take some time.
Approximate size to download 16.6 MB
[OK!]
###Markdown
**Create empty dataframe**
###Code
empty_data = spark.createDataFrame([[""]]).toDF("text")
###Output
_____no_output_____
###Markdown
**Create a light pipeline for annotating free text**
###Code
model = pipeline.fit(empty_data)

lmodel = sparknlp.base.LightPipeline(model)
###Output
_____no_output_____
###Markdown
**Sample free text**
###Code
text = """
The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily.
He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day.
""" results = lmodel.fullAnnotate(text) ###Output _____no_output_____ ###Markdown **Show extracted relations** ###Code for rel in results[0]["relations"]: print("{}({}={} - {}={})".format( rel.result, rel.metadata['entity1'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['chunk2'] )) import pandas as pd def get_relations_df (results): rel_pairs=[] for rel in results[0]['relations']: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df rel_df = get_relations_df (results) rel_df text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . 
""" annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown Clinical RE The set of relations defined in the 2010 i2b2 relation challengeTrIP: A certain treatment has improved or cured a medical problem (eg, ‘infection resolved with antibiotic course’)TrWP: A patient's medical problem has deteriorated or worsened because of or in spite of a treatment being administered (eg, ‘the tumor was growing despite the drain’)TrCP: A treatment caused a medical problem (eg, ‘penicillin causes a rash’)TrAP: A treatment administered for a medical problem (eg, ‘Dexamphetamine for narcolepsy’)TrNAP: The administration of a treatment was avoided because of a medical problem (eg, ‘Ralafen which is contra-indicated because of ulcers’)TeRP: A test has revealed some medical problem (eg, ‘an echocardiogram revealed a pericardial effusion’)TeCP: A test was performed to investigate a medical problem (eg, ‘chest x-ray done to rule out pneumonia’)PIP: Two problems are related to each other (eg, ‘Azotemia presumed secondary to sepsis’) ###Code clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_clinical_large", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) loaded_model = loaded_pipeline.fit(empty_df) loaded_lmodel = LightPipeline(loaded_model) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . 
However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output _____no_output_____ ###Markdown Train a Relation Extraction Model ###Code data = spark.read.option("header","true").format("csv").load("i2b2_clinical_relfeatures.csv") data.show(10) #data = data.select('dataset','sentence','chunk1','begin1','end1','label1','chunk2','begin2','end2','label2','rel') rels = ["TrIP", "TrAP", "TeCP", "TrNAP", "TrCP", "PIP", "TrWP", "TeRP"] valid_rel_query = "(" + " OR ".join(["rel = '{}'".format(rel) for rel in rels]) + ")" data = data\ .where(valid_rel_query)\ .withColumn("begin1i", F.expr("cast(begin1 AS Int)"))\ .withColumn("end1i", F.expr("cast(end1 AS Int)"))\ .withColumn("begin2i", F.expr("cast(begin2 AS Int)"))\ .withColumn("end2i", F.expr("cast(end2 AS Int)")) train_data = data.where("dataset='train'") test_data = data.where("dataset='test'") documenter = sparknlp.DocumentAssembler()\ .setInputCol("sentence")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") tokenizer = sparknlp.annotators.Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens")\ words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") ner_tagger = NerDLModel()\ .pretrained("ner_clinical_large", "en", "clinical/models")\ .setInputCols(["sentences", "tokens", "embeddings"])\ .setOutputCol("ner_tags") ner_converter = NerConverter()\ .setInputCols(["sentences", "tokens", "ner_tags"])\ .setOutputCol("ner_chunks") dependency_parser = sparknlp.annotators.DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["document", "pos_tags", "tokens"])\ .setOutputCol("dependencies") # set training params and upload model graph (see ../Healthcare/8.Generic_Classifier.ipynb) reApproach = sparknlp_jsl.annotator.RelationExtractionApproach()\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setLabelColumn("rel")\ .setEpochsNumber(50)\ .setBatchSize(200)\ .setLearningRate(0.001)\ .setModelFile("/content/RE_in1200D_out20.pb")\ .setFixImbalance(True)\ .setValidationSplit(0.2)\ .setFromEntity("begin1i", "end1i", 
"label1")\ .setToEntity("begin2i", "end2i", "label2") train_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_converter, dependency_parser, reApproach ]) rel_model = train_pipeline.fit(train_data) rel_model.stages[-1] rel_model.stages[-1].write().overwrite().save('models/custom_RE_model') result = rel_model.transform(test_data) result.show() result_df = result.select(F.explode(F.arrays_zip('relations.result', 'relations.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("relation"), F.expr("cols['1']['entity1']").alias("entity1"), F.expr("cols['1']['entity1_begin']").alias("entity1_begin"), F.expr("cols['1']['entity1_end']").alias("entity1_end"), F.expr("cols['1']['chunk1']").alias("chunk1"), F.expr("cols['1']['entity2']").alias("entity2"), F.expr("cols['1']['entity2_begin']").alias("entity2_begin"), F.expr("cols['1']['entity2_end']").alias("entity2_end"), F.expr("cols['1']['chunk2']").alias("chunk2"), F.expr("cols['1']['confidence']").alias("confidence") ) result_df.show(50, truncate=100) ###Output +--------+---------+-------------+-----------+-----------------------------+---------+-------------+-----------+-------------------------------------------------------------------+----------+ |relation| entity1|entity1_begin|entity1_end| chunk1| entity2|entity2_begin|entity2_end| chunk2|confidence| +--------+---------+-------------+-----------+-----------------------------+---------+-------------+-----------+-------------------------------------------------------------------+----------+ | TeRP| TEST| 0| 13| An angiography| PROBLEM| 22| 44| bleeding in two vessels|0.99999714| | TeRP| TEST| 0| 13| An angiography|TREATMENT| 108| 116| embolized| 1.0| | PIP| PROBLEM| 22| 44| bleeding in two vessels|TREATMENT| 108| 116| embolized| 0.99354| | TeRP|TREATMENT| 0| 11| His coumadin| PROBLEM| 44| 58| his acute bleed| 0.999895| | TeRP| TEST| 24| 43| a flex sigmoidoscopy| PROBLEM| 78| 106| old blood in the rectal vault| 0.999998| | TeRP| TEST| 24| 43| a flex sigmoidoscopy| PROBLEM| 115| 139| active source of bleeding| 1.0| | PIP| PROBLEM| 78| 106|old blood in the rectal vault| PROBLEM| 115| 139| active source of bleeding|0.99999917| | TeRP| TEST| 24| 43| a flex sigmoidoscopy| PROBLEM| 78| 106| old blood in the rectal vault| 0.999998| | TeRP| TEST| 24| 43| a flex sigmoidoscopy| PROBLEM| 115| 139| active source of bleeding| 1.0| | PIP| PROBLEM| 78| 106|old blood in the rectal vault| PROBLEM| 115| 139| active source of bleeding|0.99999917| | TeRP| TEST| 50| 62| a colonoscopy| PROBLEM| 84| 91| bleeding|0.99244905| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 71| 83| mucosal signs| 0.9999101| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 88| 114| moderate ulcerative colitis|0.99999976| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 121| 126| polyps| 0.9999995| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | PIP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 88| 114| moderate ulcerative colitis| 0.9999871| | PIP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 121| 126| polyps|0.99984336| | TeRP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | PIP| PROBLEM| 88| 114| moderate ulcerative colitis| PROBLEM| 121| 126| polyps| 0.9999925| | TeRP| PROBLEM| 88| 114| moderate ulcerative colitis| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 
0.9996086| | TeRP| PROBLEM| 121| 126| polyps| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 71| 83| mucosal signs| 0.9999101| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 88| 114| moderate ulcerative colitis|0.99999976| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 121| 126| polyps| 0.9999995| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | PIP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 88| 114| moderate ulcerative colitis| 0.9999871| | PIP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 121| 126| polyps|0.99984336| | TeRP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | PIP| PROBLEM| 88| 114| moderate ulcerative colitis| PROBLEM| 121| 126| polyps| 0.9999925| | TeRP| PROBLEM| 88| 114| moderate ulcerative colitis| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 0.9996086| | TeRP| PROBLEM| 121| 126| polyps| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 71| 83| mucosal signs| 0.9999101| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 88| 114| moderate ulcerative colitis|0.99999976| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 121| 126| polyps| 0.9999995| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | PIP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 88| 114| moderate ulcerative colitis| 0.9999871| | PIP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 121| 126| polyps|0.99984336| | TeRP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | PIP| PROBLEM| 88| 114| moderate ulcerative colitis| PROBLEM| 121| 126| polyps| 0.9999925| | TeRP| PROBLEM| 88| 114| moderate ulcerative colitis| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 0.9996086| | TeRP| PROBLEM| 121| 126| polyps| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 71| 83| mucosal signs| 0.9999101| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 88| 114| moderate ulcerative colitis|0.99999976| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 121| 126| polyps| 0.9999995| | TeRP| TEST| 19| 38| a repeat colonoscopy| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | PIP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 88| 114| moderate ulcerative colitis| 0.9999871| | PIP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 121| 126| polyps|0.99984336| | TeRP| PROBLEM| 71| 83| mucosal signs| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 1.0| | PIP| PROBLEM| 88| 114| moderate ulcerative colitis| PROBLEM| 121| 126| polyps| 0.9999925| | TeRP| PROBLEM| 88| 114| moderate ulcerative colitis| PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 0.9996086| +--------+---------+-------------+-----------+-----------------------------+---------+-------------+-----------+-------------------------------------------------------------------+----------+ only showing top 50 rows ###Markdown Load trained model from disk ###Code loaded_re_Model = 
RelationExtractionModel()\ .load("models/custom_RE_model")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) trained_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, loaded_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("sentence") loaded_re_model = trained_pipeline.fit(empty_data) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ loaded_re_model_light = LightPipeline(loaded_re_model) annotations = loaded_re_model_light.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output _____no_output_____ ###Markdown ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/10.Clinical_Relation_Extraction.ipynb) Clinical Relation Extraction Model Colab Setup ###Code import json with open('workshop_license_keys_365.json') as f: license_keys = json.load(f) license_keys.keys() import os # Install java ! apt-get update -qq ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] ! java -version secret = license_keys['SECRET'] os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE'] os.environ['AWS_ACCESS_KEY_ID'] = license_keys['AWS_ACCESS_KEY_ID'] os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY'] version = license_keys['PUBLIC_VERSION'] jsl_version = license_keys['JSL_VERSION'] ! pip install --ignore-installed -q pyspark==2.4.4 ! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret ! pip install --ignore-installed -q spark-nlp==$version import sparknlp print(sparknlp.version()) import json import os from pyspark.ml import Pipeline from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * import sparknlp_jsl spark = sparknlp_jsl.start(secret) ###Output _____no_output_____ ###Markdown 1. Posology Relation Extraction This is a demonstration of using Spark NLP for extracting posology relations. The following relations are supported: DRUG-DOSAGE, DRUG-FREQUENCY, DRUG-ADE (Adverse Drug Events), DRUG-FORM, DRUG-ROUTE, DRUG-DURATION, DRUG-REASON, DRUG-STRENGTH. The model has been validated against the posology dataset described in (Magge, Scotch, & Gonzalez-Hernandez, 2018).| Relation | Recall | Precision | F1 | F1 (Magge, Scotch, & Gonzalez-Hernandez, 2018) || --- | --- | --- | --- | --- || DRUG-ADE | 0.66 | 1.00 | **0.80** | 0.76 || DRUG-DOSAGE | 0.89 | 1.00 | **0.94** | 0.91 || DRUG-DURATION | 0.75 | 1.00 | **0.85** | 0.92 || DRUG-FORM | 0.88 | 1.00 | **0.94** | 0.95* || DRUG-FREQUENCY | 0.79 | 1.00 | **0.88** | 0.90 || DRUG-REASON | 0.60 | 1.00 | **0.75** | 0.70 || DRUG-ROUTE | 0.79 | 1.00 | **0.88** | 0.95* || DRUG-STRENGTH | 0.95 | 1.00 | **0.98** | 0.97 |*Magge, Scotch, Gonzalez-Hernandez (2018) collapsed DRUG-FORM and DRUG-ROUTE into a single relation. ###Code import os import re import pyspark import sparknlp import sparknlp_jsl import functools import json import numpy as np from scipy import spatial import pyspark.sql.functions as F import pyspark.sql.types as T from pyspark.sql import SparkSession from pyspark.ml import Pipeline from sparknlp_jsl.annotator import * from sparknlp.annotator import * from sparknlp.base import * ###Output _____no_output_____ ###Markdown **Build pipeline using Spark NLP pretrained models and the relation extraction model optimized for posology**. The precision of the RE model is controlled by "setMaxSyntacticDistance(4)", which sets the maximum syntactic distance between named entities to 4.
A larger value will improve recall at the expense of lower precision. A value of 4 leads to literally perfect precision (i.e. the model doesn't produce any false positives) and reasonably good recall. ###Code documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") tokenizer = sparknlp.annotators.Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens") words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") ner_tagger = NerDLModel()\ .pretrained("ner_posology", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_event = NerDLModel()\ .pretrained("ner_events_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_chunker = NerConverter()\ .setInputCols(["sentences", "tokens", "ner_tags"])\ .setOutputCol("ner_chunks") dependency_parser = DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["sentences", "pos_tags", "tokens"])\ .setOutputCol("dependencies") reModel = RelationExtractionModel()\ .pretrained("posology_re")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4) pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_chunker, dependency_parser, reModel ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) ###Output embeddings_clinical download started this may take some time. Approximate size to download 1.6 GB [OK!] pos_clinical download started this may take some time. Approximate size to download 1.7 MB [OK!] ner_posology download started this may take some time. Approximate size to download 13.7 MB [OK!] ner_events_clinical download started this may take some time. Approximate size to download 13.7 MB [OK!] dependency_conllu download started this may take some time. Approximate size to download 16.6 MB [OK!] ###Markdown **Create empty dataframe** ###Code empty_data = spark.createDataFrame([[""]]).toDF("text") ###Output _____no_output_____ ###Markdown **Create a light pipeline for annotating free text** ###Code model = pipeline.fit(empty_data) lmodel = sparknlp.base.LightPipeline(model) text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day. """ results = lmodel.fullAnnotate(text) results[0]['ner_chunks'] ###Output _____no_output_____ ###Markdown **Sample free text** ###Code text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day.
""" results = lmodel.fullAnnotate(text) ###Output _____no_output_____ ###Markdown **Show extracted relations** ###Code for rel in results[0]["relations"]: print("{}({}={} - {}={})".format( rel.result, rel.metadata['entity1'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['chunk2'] )) import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df rel_df = get_relations_df (results) rel_df text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . 
""" annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown 2. Clinical RE **The set of relations defined in the 2010 i2b2 relation challenge**TrIP: A certain treatment has improved or cured a medical problem (eg, ‘infection resolved with antibiotic course’)TrWP: A patient's medical problem has deteriorated or worsened because of or in spite of a treatment being administered (eg, ‘the tumor was growing despite the drain’)TrCP: A treatment caused a medical problem (eg, ‘penicillin causes a rash’)TrAP: A treatment administered for a medical problem (eg, ‘Dexamphetamine for narcolepsy’)TrNAP: The administration of a treatment was avoided because of a medical problem (eg, ‘Ralafen which is contra-indicated because of ulcers’)TeRP: A test has revealed some medical problem (eg, ‘an echocardiogram revealed a pericardial effusion’)TeCP: A test was performed to investigate a medical problem (eg, ‘chest x-ray done to rule out pneumonia’)PIP: Two problems are related to each other (eg, ‘Azotemia presumed secondary to sepsis’) ###Code clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_clinical", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . 
However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output _____no_output_____ ###Markdown 3. Clinical Temporal Events RE Temporal relations, or temporal links (denoted by the TLINK tag), indicate whether and how two EVENTs, two TIMEs, or an EVENT and a TIME relate to each other in the clinical timeline. There are 3 types of relations; below are some examples of relations, with square brackets indicating EVENT and TIME connected by a temporal link:**`BEFORE`**The patient was given stress dose steroids prior to his surgery. ([stress dose steroids] `BEFORE` [his surgery])The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `BEFORE` [admission])His nasogastric tube was discontinued on 05-26-98. ([His nasogastric] `BEFORE` [05-26-98])**`AFTER`**Before admission, he had another serious concussion. ([admission] `AFTER` [another serious concussion])On postoperative day No 1, he was started on Percocet. ([Percocet] `AFTER` [postoperative day No 1])**`OVERLAP`**She denies any fevers or chills. ([fevers] `OVERLAP` [chills])The patient's serum creatinine on discharge date, 2012-05-06, was 1.9. ([discharge date] `OVERLAP` [2012-05-06])His preoperative workup was completed and included a normal white count ([a normal white count] `OVERLAP` [His preoperative workup])The patient had an undocumented history of possible atrial fibrillation prior to admission.
([possible atrial fibrillation] `OVERLAP` [admission])| Relation | Recall | Precision | F1 || --- | --- | --- | --- || OVERLAP | 0.81 | 0.73 | **0.77** || BEFORE | 0.85 | 0.88 | **0.86** || AFTER | 0.38 | 0.46 | **0.42** | This RE model works with the `ner_events_clinical` NER model and expects the following entities as inputs: [`OCCURRENCE`, `DATE`, `DURATION`, `EVIDENTIAL`, `TEST`, `PROBLEM`, `TREATMENT`, `CLINICAL_DEPT`, `FREQUENCY`, `TIME`] ###Code events_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_events_clinical", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_temporal_events_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setPredictionThreshold(0.9) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, events_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) events_ner_tagger.getClasses() loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="She is diagnosed as cancer in 1991. Then she was admitted to Mayo Clinic in May 2000 and discharged in October 2001" annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!="O")] rel_df text ="On 9–28-92, the patient will return for chemotherapy and she will follow up with her primary doctor, for PT and Coumadin dosing on Monday." annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df = rel_df[(rel_df.relation!="O")] rel_df[(rel_df.relation!="O")&(rel_df.entity1!=rel_df.entity2)] text ="""She is admitted to The John Hopkins Hospital 2 days ago with a history of gestational diabetes mellitus diagnosed. She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine, 12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge. """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df[(rel_df.relation!="O")] ###Output _____no_output_____ ###Markdown 4. Human Phenotype - Gene RE https://github.com/lasigeBioTM/PGR Human phenotype-gene relations are fundamental to fully understand the origin of some phenotypic abnormalities and their associated diseases. Biomedical literature is the most comprehensive source of these relations; however, we need Relation Extraction tools to automatically recognize them. We present the Phenotype-Gene Relations (PGR) model, trained on a silver standard corpus of human phenotype and gene annotations and their relations.
It extracts 2 labels: `True` or `False` ###Code pgr_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_human_phenotype_gene_clinical", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") pgr_re_Model = RelationExtractionModel()\ .pretrained("re_human_phenotype_gene_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setRelationPairs(["hp-gene",'gene-hp'])\ .setMaxSyntacticDistance(4) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, pgr_ner_tagger, ner_chunker, dependency_parser, pgr_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text = "She has a retinal degeneration, hearing loss and renal failure, short stature, \ Mutations in the SH3PXD2B gene coding for the Tks4 protein are responsible for the autosomal recessive." annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!=0)] rel_df ###Output _____no_output_____ ###Markdown 5. Drug-Drug Interaction RE In clinical applications, two or more drugs are often used in combination to achieve conducive results, such as a synergistic effect, increasing the therapeutic effect, and reducing or delaying the occurrence of drug resistance. However, there is a potential for harmful drug-drug interactions (DDIs) to occur when two or more drugs are taken at the same time or at certain intervals, which can reduce or invalidate the efficacy of drugs, and increase toxicity or even cause death. Therefore, in order to prevent harmful drug-drug interactions (DDIs), medical staff often spend much time reviewing the relevant drug alert literature and drug knowledge bases.
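A minimal sketch of such a DDI check, reusing the section 1 components (`documenter`, `sentencer`, `tokenizer`, `words_embedder`, `pos_tagger`, `ner_chunker`, `dependency_parser`, `empty_data`) and the `get_relations_df` helper defined earlier: the pretrained model name `re_drug_drug_interaction_clinical` and the sample sentence are assumptions, not taken from this notebook, so verify the model name against the John Snow Labs models hub before running. ###Code
# Sketch only: DRUG entities come from the same ner_posology model used in
# section 1; the DDI relation model name below is an assumption.
ddi_ner_tagger = NerDLModel()\
    .pretrained("ner_posology", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens", "embeddings"])\
    .setOutputCol("ner_tags")

ddi_re_model = RelationExtractionModel()\
    .pretrained("re_drug_drug_interaction_clinical", "en", "clinical/models")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setRelationPairs(["drug-drug"])\
    .setMaxSyntacticDistance(4)

ddi_pipeline = Pipeline(stages=[
    documenter, sentencer, tokenizer, words_embedder, pos_tagger,
    ddi_ner_tagger, ner_chunker, dependency_parser, ddi_re_model])

# Fit on the empty dataframe (as in the earlier sections) and wrap in a
# LightPipeline for free-text annotation.
ddi_lmodel = LightPipeline(ddi_pipeline.fit(empty_data))

ddi_text = "When carbamazepine is withdrawn from the combination therapy, " \
           "aspirin dosage should be adjusted accordingly."
rel_df = get_relations_df(ddi_lmodel.fullAnnotate(ddi_text))
rel_df[rel_df.relation != "O"]
Restricting `setRelationPairs` to `drug-drug` mirrors the `problem-test`/`problem-treatment` restriction used in section 2 and keeps the candidate pair space small.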
**ref**: *Drug-drug interaction extraction via hybrid neural networks on biomedical literature*, https://www-sciencedirect-com.ezproxy.leidenuniv.nl:2443/science/article/pii/S1532046420300605 *(figure omitted: `image.png`, embedded as truncated inline base64 data)*
BAgAABAgQIECAwKgUEAUflYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAY+VI6ycBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjEoBQcBReVh1igABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGioAg4Fg50vpJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNSQBBwVB5WnSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsSIgCDhWjrR+EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCoFBAEHJWHVacIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKwICAKOlSOtnwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUflYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAY+VI6ycBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjEoBQcBReVh1igABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGioAg4Fg50vpJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNSQBBwVB5WnSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsSIgCDhWjrR+EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCoFBAEHJWHVacIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKwICAKOlSOtnwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUelSWZsAAAgAElEQVTlYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAY+VI6ycBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjEoBQcBReVh1igABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGioAg4Fg50vpJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNSQBBwVB5WnSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsSIgCDhWjrR+EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCoFBAEHJWHVacIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKwICAKOlSOtnwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUflYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAE+BIf/DBB+nWW29Ne+yxR9poo43SueeeOwFaYZcECJQVcM6WlVKuVwIvvfRSOuOMM9IWW2xR+5yIMWghMFYE3nrrrTRu3Li07bbb1sb/ww8/PFa6rp8E2grccccdaa+99qqdF6eeemrbslYOjcBDDz2UDjvssLTxxhunQw45ZGh20oe1+l7S/KDE59VFF13k86o5z4h/138DjvhDqAMEeibgetAzShX1UMD3sx5iVqjK9aAClqIECBAgQIAAAQIECEwwAUHANvT/+Mc/0iOPPJIef/zxNqWqrfr+97+f5phjjjTRRBPlf7vvvnu1SkqWvvDCC9Pqq6+ettxyy/Tiiy+W3EoxAiNXwDk7YY/d888/n3bZZZe08sorp3POOWfCNmaE7v2aa65Jyy67bJpsssnyz4j4vBg/fvwI7ZFmEygv8OSTT6a11lorTTPNNHXj/6abbipfiZJjSmCsfO78+Mc/TnPNNVfdebHddttN0GM91v4746ijjkoLLrhg3TFYZZVVJugxGI6dj/XvJa3G+aOPPprWWGONNPXUU9eNCZ9XwzEqh2cf/n+b4XG2FwIjQWA4rwdlPMbK998yFmO1TJXvZ8ZLb0dJv10Petu78rW1+o5cvgYlh0rAsRkqWfUSIECAAAECBEaegCBgi2P29ttvpxlnnDH/P/Zj9odeLAsssMCAH/KGIgj43HPPpSmnnDJvf8wqZSEwmgWcsxP+6P7whz/MrzmTTDJJilCPpZrAscceW/uMmG666XJLQcBqhkqPXIHbb7+9Nv4bA0+CFSP3mA51y8fK587iiy8+4L8ftt9++6HmbVn/WPzvjJjFvfHaNBaCgGP5e0m7cX7DDTf4vGp5hRgdK/z/NqPjOOoFgV4IDNf1oGxbx8r337IeY7Fcle9nxktvR0i/XQ9627tytbX7jlyuBqWGSsCxGSpZ9RIgQIAAAQIERqaAIGCL4xazWQ3lrH1f//rX8/qHIgj4xz/+Ma8/+rHeeuu16Km3CYwOAefshD+OyyyzTN115957753wjRqhLbj//vvrLM0IOEIPpGZ3LVCcZUkQsGvGUb/hWPvc2WqrrfLPhgkZBBzL/51x0EEH5cdgLAQBs4vIWPxeUnac+7zKRsnofPT/24zO46pXBLoRGOrrQdk2jbXvv2VdxmK5Mt/PjJehGRn9cj0Ymt61r7Xsd+T2tVg7FAKOzVCoqpMAAQIECBAgMHIFBAFbHLu4pW4xCDj33HOnDz74oEXp6m8P9X8wvvHGG/ntq6aYYor0hz/8oXojbUFgBAk4Zyf8wTrxxBPz6+ZSSy014Rs0gltQ5v/QrdK9s88+O2299dbp2WefrbKZssMgMFzHZrj2U4asTFsEK8pIjt4y8T02rlm//vWv23ZyrH3uDEcQsMz5OVr/O6PMuOuXIGCZtrY9eSqu7PX3koq7nyDFy45zn1cT5PAM2079/zbDRm1HBPpeYKivBwFQ5nvYWPv+2/cDYwI2sMz3M+NlaA7QcFwPhqblg6+17Hfkwe/pXzWUuS72al/9XE8Zh+E+Nv3spW0ECBAgQIAAAQIpCQI2GQURlJhsssnyQEsWCLzyyiublO7ureH4D8Z33nknxb8EeuaZZ7prpK0IjBAB52z/HKhHH300XXPNNT0NTvdP74avJWX+D92yrXn//ffz2xnGsbH0j8BwHZvh2k8Z2bJtEawoozl6y/zqV7+qfQ9fYYUVOnZyLH3uDHUQsOz5GQdlNP53Rplx1y9BwDJt7XjyVCjQy+8lFXY7wYuWGec+ryb4YRrSBvj/bYaUV+UERpTAUF8PqnwPG0vff0fUIBnmxpb9fma89P7ADPX1oPct7m2NZb4j92KPVa6Lv
dhfv9ZRxWG4jk2/WmkXAQIE/j975wFuT030fxUQkK6ISC8iKEWKgFIFC6BUKS/SBSl/ehVFehF8QUClCNKLdKSIgKI0AUFFuggoRUGlSdVXUPf/fFZnnZOzJe2ce+79TZ7n3uzZzSaTbzKTyWQ2MQQMAUPAEDAE/ouAOQL+F4vq6vjjj+9zAsQZcPPNN6/SpF5M6hPGVPzsfUNAI2A8q9Gw64mAgK9B16eul112WTWmmSOgD2LDSzOsthlWOT7I+dJijhU+aE7cNIsuuqi3I+DERaG/ZoN2BPTlz37KJsYdn343Ko6APrTmbJWceklOukYhLxuvRqEVBkeD2W0Gh63lbAiMNwQGLQ8mdT1svPWHUaDX9LOxa4VBy4Oxq9lolWxy8d/tYTiMVr80agwBQ8AQMAQMAUPAEBgvCJgjYE1LLbnkkuXiI5O6DTfcsHKgwMj/yiuv1LzRfevZZ58tbrjhhuKmm24qHn300WKDDTao8j3ggAO6M4hI8cYbbxQ333xz8f3vf7/461//2pkDXwzde++9xXXXXVfcc8893nX9v//7v+LnP/958YMf/KD4/e9/31mOJTAEciNgPBvGs+D/hz/8obj++uuL2267rUBW5ApPPvlkKXOQIW3hj3/8Y1k2NNx9993F888/35a88xlfR1IX5N1f/vKX2vSk+elPf9qahhdjaUvB9M0336zk7y9+8YvijjvuqMYIHNH/+c9/1tap6ya7ZS677LJVXiGOgNB03333FT/84Q+Lxx57rAC/3OFf//pX8cQTTxQ/+9nPimuvvbYsz2e8aqKDHXCh9ze/+U00ZrF5hOIV2zYvv/xy8cADD1Q6Bf0OHJtCbDnkl9Kn6+gJoaXJseKFF14odx2lndGtQsIg9ZW//e1vxUMPPVTqULfeemvxyCOPFPSJrpCLpmeeeaaU6U3y76WXXirlH3ISWn3CWPHn2WefXcksnx0BfccdqTM8zi7fyEN2xxhUQN+/8cYbC3i2LYTwWYgj4KBlReg8I1RGtmFGXnfddVfZhk19vu39ume+/a7JEZBx+sEHHyzHMuZFbXJZlx/DZ7606nJCr8FY5oWxeklI34a+WDkaO4fVmDTxq08/rxuv0GXoB8yrGbNDQsy4EIudD12hsqQtz0HwrpQXw0vyro4nst2GeobypcbG5zqnrG/iSx862tL45BvDh1JmrEzKiR2yi/kV8rsuwC/Ml5iLMxeLDYPQP4WWlDaQPFJlToo8CJWdIfMk6heq/+bsX7HzZWmXkNiHX8kvVra9+OKLxZ133lnyAuN1iM0jtI3b6u1bT9oxRj8L7S9CaywPpeAqZXfFucb9rnLkeYo8kDyI0dk4vQknTuqgA7bIa665ppzn6Pv6+umnny5tmvTbrjDIdvDRkZvo85UhoXJRlxcrE3QeXXyZQwb42CZCcQhtm5zjQ6zM0LjbtSFgCBgChoAhYAgYAoZAPgTMEdDB8v77768WH3FsYwImRwMTn3baac4bzT+Z1K2//vrFXHPN1ZOHzo9r7Qj4oQ99qHjnO99Z+8eisoTPfe5zfWmWWGKJctH54osvLjbZZJNihhlmqMptW+hkkXLppZcupphiiio9dE055ZTlLogsuteFK664olh88cX7jlFeaKGFivPPP7/uFbtnCGRHwHj2LRXfdvEsRqCtttqqmGWWWap34PVpppmmdHqOdcZDNh166KGFOGSS55ZbbtnX1hi5zjzzzGLeeeftKZ/0/C288MLlgkTfiw03WHTH8W+bbbYp3v3ud1d5aoOYpNl6662LmWeeuUrDwogOsbSlYAptOBTgqDfVVFNVtAkeOiZtaPjSl75UTD311D35Tj/99D1jx5FHHtmXLWMfNLnv0r923HHH0lGy76XAGxg4d95552L22WfvoY86v/Wtby3HsDonL5zNTzjhhOITn/hE8a53vavYaKONSiebww47rHj/+9/fk9fcc89dOoe6pOXIQ+cZg1dM21xwwQXFxz/+8b4xF8ze+973Fpdeeqkmq7yOKSelT/cRoG6E0qIdK+BXcF5ttdWKySabrGrnySefvOR/FlzbwiD1ld/97nelrqTpEt6dbrrpin333beWtFCaMGSfeOKJPX0f/ezLX/5yT99/+9vfXqy33nrVxxw4u6277ro9Oh763t5779238CCEjhV/sqiFHiz4EdPGrl6MPPQdd6ROr776arH77rv3jX+U8YEPfKC46KKLJKl3DB04Vn7xi18s84BOdHDK2njjjat6vO1tb+tzBIrlMx9HwEHKCgz7ofOMGBlJOXxAxTgx33zzlX3gl7/8Zen4t+aaaxbTTjtthS9tSDocBmJCSL8jf9cRkIWZr3zlK33jGfO/Nuf7GD4LpTUUjxx6SUzfjpWjIXPYEH7lY5aQfq7HK+bgiy22WAHfa1nGvTPOOKO1SULHBTKLxa6VkP88jJElw+RdqUMML8m7Ek90u00MXwo2vnGMrA/hyxCH2pR8Y/hQMAqRSfIOcQx28BofNeyyyy494ySOfauvvno5vxcZxNz7wgsvLIvEaeGII47om4/PP//8pTO7povrYeqfUnZIGwxC5sTIA6GdOEZ2+s6TQvVf6InpX7nnyxof9zqFX2NlG7Yf2sm1H8Az2IzQKxmz9R90Sohp45R68m6M3cinv+TkoVRcBd+uOMe431WGPI+VB8wPTj/99GKdddYpbZVLLbVUmSUfv2q7JPYsnDSPPvroYrnllittYfTDNdZYQ0go46eeeqpM89GPfrRKg6yvC4NsB/qLj46cQ4b4ykWNQahMiOXLGBmg6QyxTfji4Ns2mo6Y8SGnzNC02LUhYAgYAoaAIWAIGAKGwGAQMEdAB9d99tmnNNrPOuus5ZeAfA3ItRjRVlhhBeeN+p/nnHNOj/GNRSwWBbfbbrti5ZVXLp3sJE/tCIjBYZlllqnKIw3OMSwqMNmVcPjhh5cOAZIHk8pdd9213JFP7um4zhGQL4RwoJF0OFVsu+22pYEBBz+5z0RTB3Y4+MIXvlA9xwGByeunPvWpngV6HDMsGAKDRsB4tptnaQMWJuaYY46Sb3G0WmmllUq5grOU8DpyCiNUaNByRPKqcwREJshzHN8+/elPF5tttlnpxCT32bHPN+AEI+/pWDsCslOQfibXriNgDG0pmGIYxJlN6GERmTbByYPdaHW7kEYbnn3xwTFzpplmqgyF5INjEvfk76tf/WqVHU4UjCP0D9LiaIFD0W677VZ88IMfrGjlPjsWxgZ2ppBxFSdUxg/KXWSRRaoyKB88dDjkkEN6npOGP+oi126MQ7x2Zs+Rh9CUgldo2+gxl/EZh0xwAz9dZ74s1yG0nJQ+rcutuw6lRTtWuE4/us5c03/qwqD1Ffh4ttlmq9qARdTPf/7zxSqrrFLxEXyuQwxNRx11VJWfrjuOvfq3vv7Yxz5WLgTre+71cccdp0krr8eSP3HGg5912+MIKPJKYuSh77hDpdiNW3+Us+KKKxbIfBwDRd4RszATEtB/XUxPOumkAr3Zvc/OtxJS+KzLEXDQsoJFHbdu/K6bZ8TKSJwUZpxx
xr5ytMN/HQ16PiVY+8Qh/Y78tCMgdOj+6tKFQz1OFG6I5bNQWt1y237n0Eti+naMHI2Zw4bwK7LAbUt+1/VzMHX7AHKLj25Evui8DjzwwL5miBkXyCQGu77CG27EyJJh8y6kx/KSrvZEtttQzxi+1Ph0XcfKevIN4Us9jnbRFJNvLB9CS4xM4r1Y7NhNio8PtGzhuu6epOFDEfSetnnTe97znp5do4epf4JHaBsMQubEygPpkzGyk3d950kh+m9s/8o5XxZc2uIYfiW/FNnGR6LCG9jAjj322NJBtk3XBE9CbBvH1jNFP+vqL7l5KAXXtj6in+UY93V+bdex8uDkk0/u+yAEOwF6vPvx4Nprr13aMqQ/Suw6AmKTk2cSNzkCDrIdfOaCuWSIr1yUNoyRCTF8GSsDhM5Q24QvDj5tIzTEjg+5ZYbQY7EhYAgYAoaAIWAIGAKGwOAQMEdAhS1Of7Kgy04pEriWiRYxW4O3hVNPPbVKz64r7ODiBhw9JE934YqdbXDKk+d1C7XkhzGUNCyWYTQjoMxzXJ67k2HdwoWenLFAytEgEnAOkfKJmVBIYHcbefbZz3622nWG5+zOJc+Y4HZhJXlabAjEIGA868ezHDUgO4TiUMMxZRJef/310uFK+LbOgU/SNsUchYfc0XLNzQcZIrujIBu0wyGLGbJLYYgjIF8iUi5/7FYnddCOgDqNdprSjoAxtKVgSn0xOAu97L7nOgrw5bE8J45xBJT20otCbbsTadnOIpHeeYOvijfffPOKJsYovjCPCYwbUjccyHW45JJLqmek0e102WWXlU6JOOTL+8Q4KR588MHlTrR8dY0DlH7OIpaEHHlIXjnw8mkbjmfW9bn99tuFhAL+1R8PNBmDfcpJ6dMVQR4XPrSQjXaswJlm0003LXeD5Ovr/fbbr+c5/I9R0g26jQahr/Bxh7QNH1Lo8PWvf7185joCxtB05ZVX1vZ9nGfRIc8777zi+OOP79uRDFnL7mk8YycYdg9kIVhoZhcMN4wlfwot3/zmNysam44G9hl3yO+5557r2TGWL+p10HiwWBYS9thjj7JdZPwSXIlxutbOzeLAkMpnbY6Aw5AVIfMM3ddDxhTkGotd/OFQJbiKHGBexUIafV87CXMdcpyb29Y+/Y53XEdAPtg66KCDyjHolFNOKXA0FZqJ4Ts3xPKZ5ONLq6TvinPoJbF9O0aOxsxhQ/gV3cN3Pg22erz6zne+U+1OybjEMc7IA90nLr/88p4m0bwSMlbFYNdTcMOPWFkyFrybyksT3W4Ty5cNXaP2tu6/IbKezEL4UsbRWiKcmzH56nqE8CFFx8gk3tNlhmCH42HTOMlHdt/61rdK3VDP3UQGzTnnnAUfUiKbkFfMQ+UZ8Xe/+90KzWHqny4ePm2QW+akyoNY2VkBXhQ9zpx183Zf/dfFM6R/5Zwv67o1Xcfwa4psO/fcc6s+j+Of/mjQbUPsW8zp0Luwh7jPQ+blMfVM1c+6+ktOHkrBtalv1N1PHffr8qy7lyIP0CGR0Tj5afkqpzBxihI8yTPSPPzww6XeiT1B0ruOgJJGzwXrbD+DbgefuWBuGeJjP4qVCaF8mSID6GcptokuHHzaRvp6rP6RU2YILRYbAoaAIWAIGAKGgCFgCAwWAXMEVPhyjIZMulDuJeAMIfeJ999/f3nUF7/yyiuVQwtpOQazLmiHGdcRkPTsDChlsshfF9gRiDQcB+IGJgDyPrHrCEj99OLnzTff3JOFdgRkYU0W2Jl8ynt8OUV93cCXblI2k1QLhsCgEDCe/S+yTTxLii222KLiyWOOOea/L/3nShuLcBx5+umn+9L43MDBQnjfdQRk0VOeseDgBo4n53mII6DOQy/CakdAnUacISlHO5jF0JaCKY5pggWOOHXHmg7bERBHcJHt0OYuUoMji04coSm0s8ttTNBf2ruOgJSh2+mss87qK0I7YXBUrhtwmtRl0FZuSM0jF15dxjzoxulFMCfWCw48/9rXvlY9n2eeedyqlr99yknp07WFNtz0oYVXNU/feuutfblxjInGhd0BdBiGvsJRj0KDu5ucfJChHQFTaerqt8hPoYe47kMSnGZ1GhaYdNC8M1b8GeLk1DbuUC99RC87NbpBy726nbrc9HW/P/zhD/dgygK74MpCCHo+xxQRUvlML/6wwKTDsGQFZXbNM3LJyC45gLOD7s8ckxobfPtdFx+yUPyRj3ykogtnXDek8pkvrW65Tb9z6CWxfTtUjsbOYaXuIfza1c8lT91P63RQaNZOrXq3/ZRxIRQ7obcrziFLNCZ1Y3gu3k3hpYlut6GdY/myq4/I81yyPoQvpWyf2DffFD6MlUm5sOviNb0DPY4n7sdl6Cs42stYinOAG7rGvRz6Z0obQG8XDl0yJ4c8yCE7fedJXfpvjv7V1e4+c263L7X99uVX8kiRbehl0t/rbPF6bHU/EsrRxiH1zKGfgVdXfyFNKg+l4NrWL9xnKeO+m1fT7xzygLz1Rgn0OTZgEKd2HMKYlzNXlKB5znUElDTM76X/1jkCDqsdfHRkXZ9Yux319pGLKTKBMnz5MlUGpNgmfHCgLl1tk2N8oJxUmUEeFgwBQ8AQMAQMAUPAEDAEBo+AOQIqjDl+lwnVEkssoe7++1JPCpi8uQY0eQFDgkzKcJhoStflCHjXXXdV+ZCfu/MSOwCKs8T9998vxVdxl+Kvt4rnWGE33HbbbeUuYWxBztfCErbaaquKrr322ktu98TaiVEvdPQksh+GQAYEjGf/C2ITz7IoLkeTEYtT73/fLIpnn322SoO8wcEyJrQZGE877bRKdnAssOu0wwICRrDnn38+pugeI0TdIiyZisykjtoRMJS2FEz54lzToXdd0BUftiOg3jGCY6HYSbEuaGMei9ruToZ177j3rrrqqtLoyQ5KOKG6QRsvKc8NmoYmAynHs8pYXLejWGoeufDyMebBn0ceeWS54xSY6R16wQZekrpijKsLXeWk9Om68truddEi72rDog9P0690GIa+Isetgz96ndb54HVkmnaASKWpq9+yG5rGl0VPN7iLtU899VRPklHgzxAnp7Zxh7oJbxC7fYSK77TTTqWDM/oEOwnEBD1HqFtAlzxz8FmbI+AwZIXUpWuekUtGdsmBJ554oqeN63bPEZq7Yt9+18WHlKMX6ZhLuSGVz3xpdcut+51DL0np26FyNHYOK3X35VfSd/VzybOrn5JOy3/k0WOPPVa+ru+Hzq1DsRN6u+IcsqQLk1y8m8JLE91uk8KXXX1EnueS9SF8KWX7xL75pvBhrEzKhV0Xr+nxiB3d64J2FqQ+buga93LonyltAL1dOHTJnBzyIIfs1Hp8m07Tpv+CR47+1dXulNM153b7UttvX35NlW2zzjprpTuef/75fSRpXZsPO3TI0ca+9cyhnwntXf2FdKk8lIKr0OkTp4z7PvmTJoc8IB/tCAhvd31w7cNzWqbXOQIOqx18dGSf+vjIkC65mCoTaCtfvkyRAam2iS4cpH93tU2O8YGyUmWG0GuxIWAIGAKGgCFgCBg
ChsBgETBHwP/gyxdfosSuttpqxdVXX93zpx33MN7fcMMNtS2jj+g77LDDatNwU+fHJLMusF28LGCy+K8Dznk8W2655fTt6rpL8deTnLojq6qMnIvFF1+8ooldvfhC0v1jwUvoxqHEgiEwCASMZ/1QveKKKyp+5CgKl1/lt/As8cknn+yXuZOqzcCIcUaXwc4DODdxtECOIPKbMnychrQjYChtKZjqXczYiarpGMNhOwJytKG0D8a4psDXo5KOmGNacwUc3s8444xiwQUXrMpgtzs3+BgU9Ze6HBXshtQ8cuHla8xz6ef3a6+9Vuoiu+66a4UXfFUXuspJ6dN15bXd66JF3vXh6QUWWKCqO8eX6TAMfUV/eQ4/LL/88gUfcjSFVJp8+u3KK69cYVLnCMgOpJqH9THtTXRzf5j8GeLk1Dbu6MUXHOHRjQcRtE5d59wsZebgM7046e4IKOW4cU5ZIXl3zTNyyUgfOaB3BrnpppuExODYt9/58CELy8JndR+YNRHny2e+tDaVo+/n0EtS+naoHNX8FjKHlTrr99v4lfRd/Vzy9OmnHBMnfYL4uuuuK19PGRdCsRN6Y+MQWeKDSS7erauPDy9NdLtNCl/WYVp3L5esD+HLOjqa7vnmm8KHuowQmZQLuy5eu/HGGyvZ0+QICN0in+rmgT7jXqr+mdIGtH8XDqRpkzk55YHbH0Nkp+88qU3/pfwc/cun3bvm3C4Wbb81L7WNz6mybfbZZ6/6OzuyuUE7ydTZEdz0/A5pY9965tDPhNau/kK6VB4aBK5Cv0/sM+775EOaXPJAz0U5Oakr+PBclyPgsNrBR0f2qY+PDOmSi6kygXbx5cumNvSRAbo/xNgmunAQ2rraJsf4QFmpMkPotdgQMAQMAUPAEDAEDAFDYLAImCPgf/DF+UCMXz7xZpttVtsy008/fZXPhRdeWJuGmz6OgPqoz/e97309ea244oplOXXHJpKwTfFnxxp9/Me3v/3tnrybfrjv+eA0zTTTNGVn9w2BJASMZ/3g03LEh2dJc/TRR/tl7qTqMjBqhyWhBYM8xqy643Gd7Ft/+hgh9E582hGQjENoS8H02GOPrcYInL2bwjAdAdn9Tx+P6Tqeaxo5PkrajhgsUgK7pe2www7lMSk6X7mOdQTEmVXyqDPg+xglm/LIiZevMU8w5nhRHEA4WkW3mdQ11hEwpU8Lbb6xb519eJqjtaXu2hFwWPrKAw88ULz3ve+taBBaPvOZzxQcFadDDpp8+i39XeiocwSEDnlO3OYIOFb8GeLk1Dbu6H7N7giDCr4LB5oe3QZt1+547OsIOChZIRi2zTNyykgfOcBHR4LhqDgCstuv0NTlCEusG6AAACAASURBVBjDZyE8Im3WFOfQS1L6dooc9Z3D6rr78ivvtPVznadPP2WBWvoEMR/1pY4LIdhpekOuY2WJDya5eFfqE8pLE9luAyYpfCmYtsU5ZX0IX7bR5D7zyTeFD913fWVSTuy6eI1xUWRPkyOg3v0q1hEwRf90cRR622LXztiFA32jTebklAeUFSs7fedJbfpvrv7lM+9omi+7vOjz24dfySdVtm266aYVT7g7/v3rX//q6SdNH+2ntLFvPXPoZ4J7W3+RNKk8lAtXoccnDh33ffIkTS55oB2/huUIOKx28NGRc8mQLrmYKhNoc1++1H0oVM5rOmNsE104CG1tbZNrfKCsVJkh9FpsCBgChoAhYAgYAoaAITBYBMwR8D/4yhes7GbHV4F1f/qrMBRe94jNP/7xj5VBAaMVX982BR9HQLYNf9vb3lblKY4r4iAy44wzFhwRXBfaFH/3WIxzzjmnLou+eywWa2McX10988wzrX+xx6z1FW43DAEHAeNZB5CGn/qoBY7q7uJZnvM1Y0zwMTAef/zxPYY1kSnzzjtv33HBITT4GCHaHAEpy5e2FEy33377So7Sh5uCyHnBhwWS2NBlMHr88ccrmijvG9/4RmtRk002WZV+7733bk3b9JCv/BdZZJEqHxZmdt9993LcXGeddar7o+gImBOvrrYR/OBLjswSJ36+IMYZ8NRTTy13FZJ+EusImNKnhUbf2LfOPjzd5Ag4TH2F/rDSSitVfVbaAv1tl112qY4LzkGTjzE9ZSFW2nCs+TPEyalt3NHHzM0333xSveyx78JBDj7rcgQctKwQ8NrmGTllpI8caFvYF3p9Yt9+58OHPo6AKXzmS6tPvXPoJal921eOxs5hNQ6+/Mo7bf1c5+nTTzniT8YHYvpIjnHBFztNr891qizxwSQX78bw0kS329DGqXzZ1U9yyvoQvuyiSz/3yTeFD2NlUk7sunhtPDgCprSBtHcXDqRrkjk55UGq7PSdJ7Xpv7n6l4++MxaOgKmyDecs5tKMxdg1+C1B14e2eOyxx+RRFae2sY9corAc+pkQ3dZfJE0KD5FHKq5Ch08cM+775EuanPJgLBwBh9UOPjpyLhnSJRdTZQLt7suXpI2VAam2iS4cpI+3tU2u8YGyUmWG0GuxIWAIGAKGgCFgCBgChsBgETBHwKIoUITFEHDJJZc0In7NNdf0GPC/853v9KTF6U0b+DkWqin4OALyLgv9kicLygQcL7i38847N2XfunDBkaqSJ3HbEca6gJdeeqnnve9973v6sV0bAkNDwHi2+dhxtxFwrhJ+/9CHPuQ+zvrbx8BIgc8991zxxS9+sZh22mkr2qARw8Zvf/vbKJp8jBBdjoC+tKVgyu530h4ca9oUhukIiOOn0ETctiOge6woxr2QwFf27Kgr5fG19VFHHVWw06CEDTfcsHo+io6AOfHyMebdfvvtxcwzz1xhgqPX3XffLXAVDz74YPUs1hEwpU9XhHhe+NSZrHx4uskRcCz0FY5vYlcJ6dsSgy0hB00+xvQUR8BR4c8QJ6e2cWfPPfes2oMdNFMcqtu6t+/CQQ4+a3MEHIasEBzaFhhyykgfOdC0sC+0+sa+/c6HD9scAXPwmS+tPnXPoZfk6NvQ2iVHY+ewGgdffuWdtn6u8/Tpp+wSK+MCMeN4jnFB6OjCTtL5xDlkiQ8mqbybwksT3W5DO+fiy6Y+k1PWh/BlEz11933yTeHDWJmUE7suXhsPjoApbSDt3oUD6ZpkTi55kEN2+s6T2vTfXP3LR9/RjnPMQVKCD7+Sfw7ZdtJJJ1VjMh9vLbXUUsViiy1W3WPufcstt/RVJ0cb+9Yzh34mFWjrL5ImhYckj1hc5f2uOGXc78pbnueSB+Q3Fo6AlDvodqAMHx05lwzpkos5ZIIvX6bIgFTbRBcO0ofb2ibX+EBZOWSG0GyxIWAIGAKGgCFgCBgChsDgEDBHwKIoHeEwyM8000zlZKYJ7n/84x89R8Atv/zyfUm1IvzlL3+577nc8HUE5NggWTSYZZZZitdff73gKE3u3X///ZJdX9ym+JOYY0Ek3/XWW6/v/aYbeuLBJMaCITAWCOC8ajzrh7xeLMYRAie8QQUfA6Mu+89//nPxuc99rpJFtOmRRx6pk3hfa9mLcaYu+DgCyntttKVgimObyF7a49VXX5Uie+
JhOgJSsIwr0Lb11lv30KJ/4Kgp9BPjIB8SLrzwwup9vsC/+eab+14fdUdACM6Flx5T6xYbKIsjpAXz1VZbrcAArkMOR8CUPq1p8bn2qTP5aJ6+8847a7NucgQksS5nmPrKZZddVqCvSZvh8PzGG2+U9KfS5GNMT3EEHBX+1P1x1VVXrW17udk27nA0srQD8b333iuvZY19Fw50vWLH4zZHwGHICgGua56RS0b6yIGmhX2h1TfW7dPW73z4sM0RMAef+dLqU/cceommJ7Zva1rb5GjsHFby9+VX0nf1c8nTp5/yAZ3IIz5AZE5PSB0XhAaJ27CTNF1xDlnig0kq76bykqZxItptcvNlXb/JJetD+LKOjqZ7vvmm8GGsTMqFne7HdfryeHAEpP1S2oD3u3AgTZvM0e/HyoMcslPj0DQ3pC5t+i/Pc/QvH31nLBwBc8m2n//85wUfyjI2TzHFFMVcc81VfoiPXtRko8nRxr5yKYd+Rl8gdPUX0mgeqJMlpGnjobKgoihicJV3u+LUcb8rf3musYiVB+SV4gj4qU99SsjpiY877rhKn1x99dV7nukfg2wHyvHRkXPJkC65mEMm+PJligxItU104SDt39U2OcYHytJ8kiIzhG6LDQFDwBAwBAwBQ8AQMAQGg4A5AhZFwa5MTP754q4r7LPPPtWki3ceeeSRnlfEkMAzFoKbju71dQTka51pppmmKlO2POeIz7bQpfgzYYRG/vgC8je/+U1bdtUzvcDMV5KUY8EQGDYCxrP+PKuN//D717/+9YE1V5uB8dBDDy1wnr7tttt6ysepaa211qrkETIuJmgnv6bdSnUaOWqdskJpS8FULwTTHsccc0xtdQflCNjkuIdDuIwJs846a4Hje13g2GBJx0L273//+7pkjfd23HHH6n2MbXVhPDgC5sJLG/Pq2ubZZ5+t8AJ3jMluCHUErCsnpU+79HT97qqzvO9jWGxzBBy0vvKHP/yhlGnbbLONkFzFv/jFL3rajSPPCKk0+RjTdRkYm93ArnjCw8RCG+lGhT+1MX+ZZZZxq9Dzu23cQc7rurIb6SCC78JBDj5rcgQclqwQ/LrmGblkpI8c8FmUFLrbYt9+58OHbY6AOfjMl9a2+sqzHHpJbN+OkaOxc1ipry+/kr6rn0uePv104403ruQRTv0StMwOmVvHYCdltsW5ZIkPJqm8m8pLE91uE8uXbf3DfZZL1ofwpUtD22/ffGP5kLJjZVIu7Lp4TfcDnBbrwgEHHFDJp7q5uM+4pzEM1T+hSb8fIgulPl04kK5N5qTKg1yy03ee1Kb/Utcc/cun3cfCEVD3afT8WPvW7373u2LGGWcs+/4DDzwgXakxztXGvnIph34mlenqL6RL5SEpKxRXec8nTh33fcogTao8kHJCHQG1kx801AWdps0RcJDtAF0+OnIuGdIlF3PIBB++TJUBqbaJLhykv3S1TY7xgbJyyQz5aFbot9gQMAQMAUPAEDAEDAFDIC8Ck7wjIDtHySJh0y5SGnK94M57X/nKV/TjQk/KeM5xvu7uPbywwQYbVOVieGsLW2yxRZVWaD3zzDPbXumclHF8kORF/IlPfKLxq8eXX365qgO7DOj3cJ6sqx/E4SDSVbfWSthDQ6AGAePZfzvw+vIsEC666KIV3/L1X9OuSPDyeeedV9xwww01yHffajMwbrrppiUNLF67AUc8kSsHH3yw+9jr9yKLLFLl0bSjXZMjYAxtsZhilNG7hXHNuOIGjNGCCXHKkZbaYFS3MEPZN954Y095V155pUtSScPiiy9epdtyyy370nTdYEyUemF456hhN3zyk5+s0mA8dEMOg2JqHrnw6mqb559/vsDhUjCrw+OOO+6onmOMqwtd5fBObJ+uK6/tng8tvO9jWGxzBBy0vvLoo4+WuFMfN2DMnGqqqcrn7I4l/TyVJp9+qxdS6/i9zRFwVPhTOzkhI9vkX9u4Q7vgSCj8wy6kV199tdtc1W+OposJPgsHkm8qnzU5Ag5TVlCXrgWGXDLSRw60LewL7j6xb7/z4cM2R8AcfOZLq0+9c+klMX07Ro7GzmEFixB+7ernkmdXP/3xj39cyaEpp5yyoN4SYseFGOykzLY4lyzpwgQaUnk3lZcmut0GjGP4sq1/uM9yyfoQvnRpaPvtm28sH1J2rEzKhV0Xr2kHiVF2BExpA9qhCwfStMmcVHmQS3b6zpO69N8c/ctH3xkLR0DaMods22uvvaqx+YILLiiwM+Lo0xRytbGvXMqln1Gfrv5CmlQeEtxCcZX3fOLUcd+nDNKkygMpJ9QR8NJLL636JPNWnPncoGlrcwQcZDtAk4+OnEuG+MjFVJngw5c5ZECKbcIHB5+2yTE+UE6qzHjooYeKlVZaqZh88smLz3zmMwVrjxYMAUPAEDAEDAFDwBAwBPIjMMk7Am6//fblRGvuuef2RneppZaqJmdzzDFHdeQbGaC4TjfddNVzFh/XWWedcuGRXfdwrlh//fV7FvaZoLUFnHJkEZMY54mmnQYlnxdffLHnHb3gQBocflC4db5MnK699triL3/5S7kbFLsd4pSD88xPf/rTMmsWY5deeume9zjaE0cEFrr5w4CCoYFJAQvhFgyBnAgYz/7bERDe9eFZsEfuaGcijkE/99xzi6effrpsGnYVwfi57LLLlrx91FFHRTXZrrvuWskGnOt0EGe7L3zhC/p26eAhOzxSJxZLYwJyVeQZRjNkF4YFjlCnrhytIc+Jb7311qqYGNpSMD3wwAN7aEGms6j/q1/9qjxK5eijjy5mm222njRNx9NUlWi5mG+++aq8Fl544VLGS3LkvQTGKsGInRjcHW932mmn6jnjwp/+9Cd51Tt2v2zXX/CzUKV31YCWI444oi9v2lboXHnllfuecwMHKEmzwgor9KXJkUcOvHzaRn+RDq8I7hgi4VVtEMTBoM4536eclD7dB3DLDR9aMCxTF2lD+kZdmHfeeas0GM51GLS+Ik4Y0CiyVMpH5gjt6FoSUmny6bfauMyRUm5AlghtxBz3LWFU+POcc87pofHUU08VEguc9XQfbxt3eOn6668vGBOkzuyCfdBBBxU4W9MeyMDrrruuQJbgYB8TNI+ecsoprVmk8hljqNSFXTF00HQMUlZQZtc8gzSpMtKVAyxa1AV9LGPshwzk69vvNB/ieFsXzj777Kqd+FBBhxx85kurLrftOodeEtO3Y+Ro7BxW6q/5pItfffo5+TYthPH+fvvtVzmGw7ssiuoQOy7EYKfLbbvWGMXIkmHxbiovTXS7DW0cw5dtfaPuWaqsJ0/d57r4so6Gpnu++cbyIeWmyKRU7Hx4TTsqTj/99D22S8FNO4xsvvnmcruK9bjXNPdK0T8pKKUNfHCgjDZ9IYc80P0tRnZCo888iXRd+i9pUvuXT7t3zbmhwzdo/LrkQA7Zht4vOrWOsYNgW+KjDveUBE1jbBvrPLrqmUM/A/+u/pKDh6SdY3CVd7vi1HG/K395nkMekJeml7Uj5FxbwHap+yL2XuY2jz/+eGknRafUDs0f//jHG
7MbZDtQqI+OnEuG+MjFVJngy5c6XYwMSLFN+ODg2zap40MOmeFuevLVr361sT/bA0PAEDAEDAFDwBAwBAyBeAQmWUfAs846q8fIwiLhcsstV2y11VaNaHIkIgs++qheJmnsVsJRvxJQ7GUnGD2Jk2t2h5FrYn6zsHfzzTdLFj0xk8U555yzemfnnXfuea5/4BjAV3JLLrlklZ4yPvKRjxQ4cegjD1j81F9NCU04DE0xxRQ97+vdEp977rniAx/4QM9z3qXObr3XXHNNTZ5dGwLRCBjPFqXDQgzPAvoJJ5zQx7PwrRyHIvyP4ROH4JBw/vnnF+zCp+UGcpKjheToW3G2o5wVV1yx3C0UZwztWLzddtuFFNuTVu8UKXXRsd4xkPvgiEx86qmniljaYjHFaLLGGmvUtofQPO200/Y85wvVfffdt6fOvj/WXnvtnrzYFZI2mH322XscX3AwZxwUGnAwow3Z2ZUxRO7jpPiTn/zEt/iedDjx6EUY8sQoKveI9fjCMcX0Cwx7GFK5nnrqqStaeJ8ddi+88MKyHMY4dszSYyZfmNLGOBPlyEMqlAMvn7Y57LDDeurLODv//PNXzk0syEnbENO3dtttt/IrbaHVpxzSxvZpKccnbqNl1VVXLWnX/ZA6YfRE9xHHVRadtPMvaTDE0vb33XdfRcYg9RXthIEcZYdkjN0stIguxIchfASiQwxNPv0W58N11123py8gR6BLMNl///17ZC64ffSjHy2xRRaONX8KTnfffXdPPaCTj3DQbXEQ5eMUn3FH8jvttNP68iNPVx/HYTwk0N58DINzIfnxh9xETrV95BPDZ4cffng5R9FzEPoXY6847w9DVoTMM2JlJO/BR64cmGeeeXrkAE7zehED/BnrZWwPaUvSdvU7HGvrxqCNNtqoGoMee+yxUg5pJ2XmVaQRx9wUPpM6ddEqH3BJ+q44l14S2rdj5WjMHDaEX0P6OdhqR8APfvCDpaxCnxa5QLzgggs26k4x40Isdl19geexsoR2GSbv5uCliW63oT1D+dKnj+g0sbKePEL4UpfZdR2TbwwfCh0xMol3Y7HzGSfZ2QzdWc+JkEV8oCLjEfNn7JBaH0J3ZP7nO2/KoX8KjqFt4IMDefvqC6nyIFZ2MgZLaJsn4dgTov/G9i+feUfXnFvq4xPH8Cv5psg2xnnsIXqcrrtm93m9K1tKG8fUM1U/6+ovuXkoFleffkKaHOO+b1kp8oCNEpC/Cy20UE8fg4e53/bh0mqrrdbzju6X2LWwecg97K+bbbZZKa91vQbZDj46cm4Z0iUXpe4xMiGUL1NkgNAZa5vowsGnbYSG2PEhp8zADiV9mfjII48U8iw2BAwBQ8AQMAQMAUPAEMiIwCTrCKi3xteKJ04HTUHvwqHf4ZrFKR2Y2C2xxBI9u3AxSWORkWMgOVLRzePEE0/UWfRcf/nLX67Sy4JuT4L//OA4Xjdf/ZtdT3RgZxiO5eTrYJ2Oa5wtWOD8+c9/rl8pr3kPJx591Ka8j/GQxei77rqr7z27YQjEImA8+2/kYnmWt1kYZnc0vTsgfMtvDErszvbaa68FN9E222zTJz9EHuBARsAAL85e8kxivmo9/vjjq+Mzgwn4zwvIUL0YS/7kjUGBozrr5BXHJKfQFospRt1tt922zxETJzt2i2E3PsFHYhz4YsI999xTMLZJPhK/733v6zMaghMGNH18saRnnNhkk00KDEwpgbppAyb5k/c+++xTHsdz5pln9tG6xx57lM+FFjeW46BxYnWfye/11lsvSx667ql4+bYNBkqph8TLL798wXEzBHY1lvsS8yW7BN9ySB/bp6WsrriNFr2rhNRDxzirEdwdjXWaSy65pIeEQekrf/7zn0vnNO0EpunAUNukr4XSBG/ovPW19H0+ZNH39bXslljH15KOdiGMJX/qhsOBUWiTGCdA+JgdGH3GHZ3fbbfdVu76J3npGAcddmWlXUICztQ6H33NB0ZtIZTP9A4AuhyuWWSSMGhZETrPiJGR7g4Tbn1FDsguxu5zfjO2x4S2fqePZXbLFD7ESd59Jr/ZkVdCLJ/J+8RttLq7lOr3mq5z6SUhfTtVjobMYUP4NbSf6515aW/0apxvPvnJT5Yf6H3nO9/pcc6va4PQcSEFu7ry3XsxsuSJJ55o7P/gMgjezcFLE91uQ9uG8KXbF3x+x8h68g3hSx86JE1svqF8KOUR826ITJJ3Y7DzGSfb+FF2qj3ppJMaeRZ73jD1T8EjpA18cCDfEH0hVR7EyM6QOVuo/hvTv3zavWvOLe3pE8fyK3nHyDZ2mpdxmznAeeedVzpYYt/meErXNk4aHWLbOLaeKfpZV3/JyUOpuGqM265zjPtt+etnsfKgy6aBM1lTePLJJ/ucVNErWUtifqOPBpY5BvNjCYNuBx8dObcMabMf6ZMDwCBUJsTwZawMkDYijrFNdOHg0zaahpjxIafMOOOMMyodhA9r6fsWDAFDwBAwBAwBQ8AQMATyIzDJOgLmh7I+RxZi2KmI46z4cmZUw9/+9rfyCM3LLrus3HmJHS26tq2nLhyFwrFyV111VbnTEu/pI9tGtb5GlyHQhMBE51nq/corr5THeXPcCQYIfg8jIFPYHYvjf/k6maOKHn744WQHQE37Cy+8UMpb6saXuBg3fEIqbbGYvvnmm+VusIwT+ohOH5pD0kAfuGPoBveuspDjHEFCWhyrMLjmlO0YtO+8887yyB2czRmDxnNIwcu3bTDssTPi1VdfXXAdGnzLkXxj+7S83xaH0tKWl+8z2mgQ+grGUD58uOKKK8oj1pE77MLiEwZFk0/ZbWlGhT/5cAYnxosvvrgcq3Lo0c8880w1RrATjl58bcNkUM8GwWfDlhU+2KTISJ/8c6YZRL+roy8Hnw2C1lx6SUjfTpGjsXPYujYZhXuh40IKdl31TZUlXfnnep6Dl6DF5oDpLTKeZH1bbUP5UOcVK5MmCnYai5TrlDZIKVfeTZEHqbJzEPOkid6/fHUObE+yAyYfjNZ94Mi46n784e7wntrG0s9C4lz6WUiZvmlz4epbXq5x37e8FHngW4ZOh22SXTeZB7PWQn/zCcNuBx+acqUJlYu+MiGWvlwyINQ2EYqDT/3Gcnygz15++eXF66+/7kOqpTEEDAFDwBAwBAwBQ8AQiEDAHAEjQLNXDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUNgtBHgFArZRY3TGZrC3//+92K66aar0v76179uSmr3i6I83cNwHfuuYP177NvAKDAEDAFDwBAwBAwBQ8AQMARGDQFzBBy1FjF6DAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgGYGtt966cu7bbrvtGvNjh6w55pijTDvPPPM0prMH/0bAcB2NnmDtMBrtYFQYAoaAIWAIGAKGgCFgCBgCo4SAOQKOUmsYLYaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhkAWBk08+uXIEnGaaaYrHH3+8Nt9v
fetbVTp2WbPQjoDh2o7PsJ5aOwwLaSvHEDAEDAFDwBAwBAwBQ8AQGD8ImCPg+Gkro9QQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBDwRwPHvHe94R+XkN9dccxXXX3998eabb5Y5PPvss8UXv/jFYrLJJivT7Lnnnp45T9rJDNfRaH9rh9FoB6PCEDAEDAFDwBAwBAwBQ8AQGCUEzBFwlFrDaDEEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMgWwI3HLLLcV0001XOQO+5S1vKaaaaqpilllmqe7NPffcxemnn15wRLAFPwQMVz+cBp3K2mHQCFv+hoAhYAgYAoaAIWAIGAKGwPhCwBwBx1d7GbWGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCAwFNPPVUcdNBBxUorrVTMN998xWyzzVasvPLKxQ477FCcccYZxd///veA3CypIGC4ChJjG1s7jC3+VrohYAgYAoaAIWAIGAKGgCEwSgiYI+AotYbRYggYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgEImCOgIGAWXJDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDYJQQMEfAUWoNo8UQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQCETAHAEDAbPkhoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChsAoIWCOgKPUGkaLIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCIgDkCBgJmyQ0BQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAyBUULAHAFHqTWMFkPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgEAFzBAwEzJIbAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAqOEgDkCjlJrGC2GgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGQCAC5ggYCJglNwQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMARGCQFzBBxQa7z++uvFj370o2L//fcvdtppp+KSSy4puOeGv/zlL8VVV11V7LXXXsUee+xRXH755cWbb77pJrPfhoAhMAQEHn30i8dgkQAAIABJREFU0eK0004rtt122+JrX/tacf/99/eV+q9//au49957i29+85vFNttsUxx33HHFI4880pfObhgCEw0B+v4vfvGL4qCDDio233zz4tJLLx3JKv7tb38rfvCDHxS77LJLSedvf/vbkaTTiDIEDAFDYCIgYDJ3IrRiWB3Giz4QVitLPREReOyxx8q52hZbbFEcc8wxE7GKVqchIvCrX/2qOOSQQ8r5xbnnnjvEknuLevHFF4vvfve7pc2CORky2cLYIjAqfWNsUWgv/aWXXiouvPDCqt/W2cfbc2h++tprr5V2vF133bXYcccdS/549dVXm18YgyfGt2MAuhVpCBgChoAhYAhkQsDG8UxAWjZjioDZR8YUfivcEBgzBMwRcADQX3TRRcXb3/724i1veUvP3+67795T2lFHHVW87W1v60nDOzgFWjAEDIHhIfDyyy8XK620Uh8vTjPNNMXTTz9dEfL73/++WHDBBfvSTTfddMUzzzxTpRvPF9///veLtdZaq9h+++2LF154YTxXJTvtkzI2OLTPNttsPX3/gAMOyI5xSobw57rrrlvAt3r8vfPOO1OytXcNAUPAEGhF4Lnnniu+9KUvFauvvnr54U9r4gn00GTuBGrMgKqMB30goDojkXRS1i8H1QAnnnhi8YEPfKBHH1xjjTUGVdxQ8p0U+smo1nGfffYp5pxzzp7+hMPRsMMtt9xSrLzyysXkk0/eQ8s///nPPlImVd2kD4gB3wjpG8Nqk2GV4wvtXXfdVay66qrFFFNM0dNvWVDPEa688spipplm6skbW8Caa65ZZn/22WcXq622WrHvvvsWfLwy7GB8mxfxUR0n8tbScjMEDIHxjsCojcXjHc+xpD9kHB9LOgdR9qD7sY3pg2i1+jwnon2kvqaT7t1B8+uki+zEqPkk7QjIV+Gbbrpp1N/Pfvaz2h5wzjnnFJNNNlnx1re+tdh7772L2WefvTJIzDrrrNU7Bx98cHl/6qmnLhcPZ5hhhirdJz/5ySqdXRgChsB/ERgEz7Ir5zLLLFPy31JLLVV+4a+diL797W+XBDz++OPFvPPOW6bDkPjpT3+64lnS33bbbf8ldJxePfvss8WUU05Z1YudES38G4FJHZuFFlqobwFs1BwB77777pJGd6HOHAGNiw0BQ2CQCOy5557VuMkHPjjITQrBZO6k0Mr9dRwP+kA/1aN7Z1LXLwfVMuyS5uqD49kRcFLoJ6Ncx6WXXrqvP+22226D6r6N+Z566qklHXyEqO0VdY6Ak6pu0gjegB6E9I1htcmwyvGFlJ36kcfvete7evotdrjU8L3vfa/6uJ56zzHHHFUZs8wyS3mSh+aVQw89NLXI4PeNb4Mha3xhlMeJRqLtgSFgCEySCIzaWDxJNkKmSoeM45mKHJlsBtmPbUwfbjNPNPvIcNEbH6UNkl/HBwJGZRsCk7QjIF8IaqNAyDVHGriBLxpx7COfz3/+8+Vj7m2wwQalYyDHBBPuu+++qtwjjjiivPfkk0+WO5Lxde/pp59e3rN/hoAh0ItAbp4ld3bxgWdx4JUjRDEoYqjEkRfeJHz2s58t080888wFOwgSTjrppJLnF1tssYLjSMZ7+MlPflLJJjDZcMMNx3uVstFv2Pwbyv/5n/+p+sioOQLqxn7HO95R0WmOgBoZuzYEDIHcCLg7Cj/44IO5ixj5/EzmjnwTZSdwvOgD2SueOUPTLzMD6mT3v//7v5U+OJ4dASeFfjIe6vj//t//q/rTWDgCSvf+9a9/XdHBnL3OEdB0E0FrOLFP3xhWmwyrnFBk//znP/f021RHQHa9wDYHD2yyySYlOdjf0U/4MH+//fYrzjzzzJ4yWSAbq2B8m478eBgn0mtpORgChsBEQGBUx+KJgO1Y1cFnHB8r2gZV7iD7sY3
pg2q19nwnin2kvZaT5tNB8uukiejEqvUk7Qj497//vTz2E0c9jAf8sRvWww8/XPvH14OSrs4R8Nhjj62eM5jp8Mc//rH6yZGbks9TTz1V3ceAh3FkLMPrr79e7LjjjqXBZCzpsLINgToEcvPs//3f/xXvfve7S35ksNQBw6QcHcLuPnIEz9Zbb62TFXzB8o9//KPn3jB/5ORZ8pJjtDje/Oqrrx5mVUa6LMPm380zXhb+zSllpNlpaMRdfPHFpU4z1rrV0CpsBfUhMIw+cMYZZ1R6/XLLLddHw6Rww2TupNDKvXUcL/pAL9Wj98v0y8G2yUQxdE8K/WQ81NHH2WuwPfrfufssRJpuMoyW+G8ZPn1jWG0yrHL+W3u/q9yOgFq+X3PNNT1EiP39mWeeKd7znveUevq0005b/PKXv+xJN8wfxrfpaI+HcSK9lpbDRERgGDaJiYjbeK7TqI7F4xnTsabdZxwPoXE8yIVB9mPfMZ105qvg17N8sNL683j+UNIPkYmTykdeDJJfJw6Sk25NJmlHQGn2I488slrAm2qqqeR2X/zoo49W6eocAdkVTBz8+DqxLrz55pvFNNNMU6abfvrp65KM6b1TTjmlpG3VVVcdUzqscEOgDYFcPHv55ZdXPLvTTjs1FqnLw+F3lEJunsXZEkfmP/3pT6NUzZGgxbApyi/sZZyzHQFHolsaEQ0I4KAtxwLecsstDans9kRGYJh94PHHHy/oZ//6178mMqSNdTNHwEZoJuwDcwTM17SmX+bD0s1pIhm6J4V+Mup19HH2cvvgIH77LkRO6rrJILBvytO3bwyrTYZVThMedfdzOwLKB6zYJvQH9m7Zr776avHDH/6wOtXDfT6s38a3eZAe9XEiTy0tl4mEwDBtEhMJt4lQl1EciycCrmNVB99x3Ie+8SQXBtmPfcb03OuePu0zXtP4YDWR7CPjtZ1C6Q6RF4Pk11C6Lf1oIWCOgEVRaCefNkdAFvf4ihBDQ50jIO/y7G1ve1vt8Rw0PceMihPF/PPPP1q9oSiKRRddtKTPHAFHrmmMIIVALp7V+Rx00EGqhN5LjvoWvj333HN7H47xL+PZMW6ASaz48bLwb04pk1jHrKnuZZddVsltcwSsAWgSuGV9YHiNbDJ3eFiPSknjRR8YFbyMjrFBwAzdY4P7RC3V19lr0PXPuRA5aFonlfxHpW+MMt45HQHfeOON8vhfsdFx0seoB+PbUW8ho88QGAwCZpMYDK6WqyEwbARyjuMmF/xbz9Y982Jl9hF/PEclpcmLUWmJ8U2HOQIGOALS1Pfee29x5513Fi+88EJPy3M8qBghpphiip5n+sett95apVtooYX0o9ZrDBs///nPix/84AcFx5SGBHb2+vGPf1zuVIJXcFM4++yzK9rMEbAJJbs/CghoB742511obeJZnrG1tPDtEUcc0Vi1j3/841W6OifguhdTeJYvYqD7uuuuK+65557ilVdeqSuiGATPYlS9+eabi+9///vFX//619py5SZHrtx2223F9ddfX9x9993F888/L4+SYpyun3jiieJnP/tZce211xb33XdfJy1SIMc5P/TQQyV2yNtHHnmkYCfWphCSPgQbynvxxRfL8QJ8HnjggaQjpJHjfMn+m9/8ptHRvKmOf/jDH8o2oq2oQ0hgbLvhhhuKm266qWBX3A022KDihfG4IyBjN05hYEndQkIKT+tyUtpS51N3ncI7dfnV3Yvl+5R+KHTAy3fddVfZhhzf3hRYaFp22WWrvhriCDjW7ewr/92656LbzVf/jqWNdkOOw3ePPfZYkiwcdB94+eWXS3ktco9+67PLHx/6MG4yZocE3/roPGN5UOfhcw0Wt99+ezmHePrpp1tfaXIEjJW5se1QRyRj14033ui9EwzHyDFuN8mYl156qfjpT39a6j/oEL4hR51853QuTexUz1wQvaqpXu477u9R0gdixtEc+Asm6FJg+Ytf/EJu9cTIDPQ1+hH6bFcI1S/JL7YvdNHiPs9dV51/yrjlOx41Gbr/+c9/Fg8++GA5z8DG4iPnoT2nnhUq/337ScjcRreHvh6UHNRl1F371rHu3Ri5QD6+fYm0Ps5efLHPnAudoEnekoZxpC2NriN9RWwDyJ077rij0nGxZdCf68IwdRPKh070PPQ9MAida9XVwb2XkwfdvN3fufsG+Q+rTULLiRlTQsfVnI6A2MTFjvfWt77Vbbqe3+wIiH3rRz/6kbeslwxi5Qrvjxe+zWmzEtxC45BxK3ScyFm/0HEbHAY9b8uh40MnbcDJMDi6+OpEde0c0pZ17+e4NwwdBvnnM09OsUvR33LZUAahz49lW+fWBVJkfVufDR2LJa8YWSPvxozn8i6xr+0kxr6b0m6xsjSGTsEjdhyX95viUZELTfS590P7cYj+3DWmD2Ld061f6u9RGYd9scptHxH8UniNPHz0kNC5h9DmxsydmbPKejZ9NiSE1BXZdc0115RrWU1lYG/HLoDPkRtC5cUg+dWlzX6PLwTMEdDDERBFhQXB+++/v691ERTnnXdesfHGG/cYItZZZ52i7m+ZZZap0rG7oE5z8skn9+V/xRVXFIsvvngx+eSTV+9h8MCJ8Pzzz+9LLzcwduy+++7FLLPM0vMe73KEwkUXXSRJS6Pihz70oZ50lPfOd76z56/JwFhlZBeGwJAQ6HIEbONZSISXDz/88GKuueaq+j18oflRX3OMtxgaP/zhD/ekcw38sTwLXSxUL7300gXOxFIe8ZRTTllsvvnmlQMyCwE5eZbJzcUXX1xssskmxQwzzFCVXec4DLZnnnlmMe+881bpNK0LL7xwufgZ0xVQjnbeeedi9tln78sbAy/0NS0o/O53vysxmmyyyfrenW666Yp99923hyTf9CHYUAD4XHDBBcX73//+Pjpmnnnmsn5nnHFGof+QrRizTzjhhOITn/hE8a53vavYaKONSqeFww47rC+vueeeu1xc6amQ8wOD51ZbbdU3BnA0/YYbbtjqtInxb/311+/hD93Gch3jCEi/dccW+Y3BQgfGPp4tsMACPQtcKP70EXlP4gMPPLB6XTulsFiPE/1qq61W6P7BOLfNNtuUC4DVizUXoTyduy1rSOq7lcI7fZnV3Ijl+9B+CL+hbyEH5ptvvrKNf/nLX5aOf2uuuWa1K7P0QdLhSKDDl770pWLqqafu4T9kuPQTYsYQN4x1O/vK/1S63fd9fsfSBt/hkOm2B2Majvj0WzeMVR9AbuP07+rb9LX3vve9xaWXXuqSWjqaH3roocWSSy5Z9bctt9yyJ12O+pBhLA/2EFPzA4MD84/VV1+9YIxi7MHJYN111y13OBdeYwxmBzjGzrqQS+bGtANjKA4HX/ziF8s5DjyOTsNcSM/P2LEdp3yMNieeeGLPeIvD4pe//OWe8fbtb397sd5661UfY+BQDC5aR+N67733bl0wi6mTxjhkTqffY6zcfvvta8fyRRZZpLj88st18trrGH2gbZzn4wwJ9Dktl7n+4Ac/WD7OOY7G4A/f4ji5yy679IxFGAqhG11KeA
N9WD4SAnM+LHJ1ZE4BwOlMB8rw1b3lvdi+IO/XxcOoq1tu6Hir3w8dj1xDN4bUr3zlK31zDeaEbR8NxOhZOeR/SD/xnduA57DloG5D99q3jjnlAjSE9iXeaXIEZBzCeM+84t3vfnclH7QxX9JsvfXW5XgrMoR5Sl0gPf0XPUpOHpF33Ji0EpCzw9JNpEzahrrr8REa0R2WW2654lvf+lbP3LfJiVryq4tjeLAuH597OfsG5Q2rTXzK0fWPHVNixlXKRf7qvuva0TRtTdfY37FFrbXWWj15adudvv7IRz7So9sjb3TILVfGC9/G2qw0djmufcct33FCaIqtH+Wk2iKgYVDzNqlfrI4PD55++umlPZuxaqmlliqz5ONr5oHCn8wHQ4NvW+aYJwxLh0mdJ8fapWJtKIOeu0if8G1rSZ8zjtUFcsv6tjr5jMW5ZI3QETKeM06F2E6kDOJQ+668G9tusbI0lk7ojR3Hpa5d8bDkAh8lIuddewu/WRshYOOse47PgE8/duvqqz/7jOm+656c2lZXB+4xN5GAo3tdOpznYsIojcO+WMl8MZd9BNxCeS1WD4mZe/DhHX0S/xjWFWl/ZB/1n2OOOSqdB92H39/5zndau0JIXXHGO/roo8t5MPNhylhjjTV68n/qqafKNB/96EerXc6xNergKy8Gya+aHrse3wiYI6CHIyAKIww755xz9rW2PJMJU0q8ww47VPmzE9cXvvCFSiixCMlk7FOf+lSPIwNOIm5g5ybt4LTiiisWpEPwifAhZrJHYICfaaaZCr2Ix0Io9/SfDBhuefbbEBg2Al2OgMKXdTwLrSiDKbyq3xXnpRSe5QsYTROOYNtuu22pnOAMJeWhHBBy86zgJeVIXOcIiCyR5yxKfPrTny4222yz0lFC7rPDQWhgQWDWWWct82aBFXm36667FixWS77EK620Ul/WKJKzzTZblY4FV45zXmWVVSqZh4OdhJD0IdiQP4s7Qi+OTMcee2y5KKwXheS5xDjUybWOkb/6t77GYdPdmVbqh6IrSi2yHsxwosSBUPKANhRTN5xzzjk9C9ykw5Fiu+22K1ZeeeXSKVXyiHEExHFLO8STF9u8b7rppgW7LElg1zcphxhlXQITLtLjMMAz6sjY+N3vfleS9IxnON3rvNxr+lldiOHpQw45pLas2Laso8u9l8I7bl5Nv2P4PrQf0q4zzjhjH35tvENbuv0QpyzwFn2HNDgDa33mq1/9alXVsW7nUPkvhMfQLe/6xrG04ZwJX0kbwIM4VO22224V39Iu3GdXGwlj1Qe0vs2Yi5MiY5B29oFejEc66HGb5/xpR8Bc9aHMGB7UtNZdf+1rX6vaSOgnxmFO/9bXOGFjsHCDnkPEytzYdmCOpGnk+qSTTirQmdz7OHZJv9TP9Acf+j7XH/vYx0rnL/e+/n3ccce5kJS/Y+skmYXO6eQ9dqjQzmjoUvAfjoy6X9PPm+Z3sfpA3TjPRyLoISwCSMCopXfcZmGQuXDOcTQGf75Exkio25frunuSBqdR5tltY/173vOe8itnqX+ofhnbF6S8unhYdZWyU8at2PFIG7ppLy2rpP0kxmmdxWU3xOhZueS/bz8JmdscddRRQ5WDLp7ub5865pQLsX0JupscAZmTST/SsXYExOlJP5PrOkdA2pO5q6Sh3zKfo3yc8vWcjjRajg9TNwETnJz1uE/53/72t8vxRuh3Y+R/SIjhwZD8Je0g+gZ5D6tNusqRehLHjikx46qUS7/WfSHGEbBJXuh8265ZfJaQU66Q53ji2xiblfvxneAYG4eMW03tXmevhJ6Y+vHBcQ5bBOUPYt4mOMfq+Hz45c7xsJtiW9Yfy8I/a6+9thTnFYe0Zeo8YVg6TI55cqhdKtaGMkx9PqStvTpPQKJYXSC3rO8iuWsszjVHEDpCx/MQ2wknP0kIte/Ke7HtxvsxsjSWTspLGcelvl3xsOTCb3/729LxSK+VId9ZJ91///1LMnF8x/FIxgbsRNi+WNPr6se6nqH6s8+Y7rvuSf/n4xDWUUX/wzaD3QuboATWmJhDSf9njeAzn/lM6SgmaXzjURuHfbGS+WIO+whYhfJarB4SM/doGheljzTF7vqS9InQurIe4pbhOgJin3XTuI6AvvJikPwqGFg8/hEwR0APR0AGRhgTxwo38EUigwm78wnz4kTHvbo/dpuQdPPMM09PGn10ELtXSbrPfvaz1W4UlM/XxvKMCRvbNktgO1q9aO4a2RgM5V2EhA7f/OY3q2d2NLBGxq5HDYEuR8A2nqUufMEGf6LgCj9gWK/jWe7pryPZ6UOnE2NiLM9Cj56Y4sTLlzsScFgRGolRmCXk4lkMDhzhw1bFuizXsEbZMkFA9mhHMpQs2YE0xhEQOSdl49SlwyWXXFI9I427YIKTmryLA6UOX//618tn2hEwJL0vNpTJl0hCB3JYO+pxpIM8I2YCAm20IV9hoQCusMIKPWlwdDv44IPL8YUvd3V/JQ+MYG7AMVV2dWRRhuOlJbz++uvlIpLQoR1WSHPqqadW5bOjAzsmuQG65f0mBdl9x/3NQrDeIQxnAzfsueeeVTmUx05PbpD+j2OhG/RCL2XhOMiOi3xFtN9++/UsBLM7GcYYN8Tw9GWXXZatLV16mn6n8E5Tnvp+DN/H9EP6J3zAn96VTdqP/sgEl36nFxy55ksvN2injbZdfsa6nWPlfwzdLkZdv3PQhmMMu7BJ4KtedrgVOYLBhq/XCGPRB1zZjFFHAvRox2V3Us7iN2OnlotaruaqTwwPSh3a4iuvvLJPXoljNcYhdiNgnMJZW9qLGGOEG1Jlbko77LHHHmU9RAfRtGLg0x8U4JxfN96SBtnCLu/HH398325h6DzsSsozdn9jPqfnVOwC7IaUOpFX7JwO+asdG9F5dMCgDy6CE86RbkjVBzAG6/agP9UFdnyFDvqPOATkGkdj8Yf2prGID1/Y2Yp+ouWYYIlhfZ999in4spwvivUR9aTRHyyE6JexfaEOc31vWHWVMlPGrdjxyDV045R60EEHlbr1KaecUvDBpLQfMbzthhg9K5f89+0nIXObOtlP3QchB10s63771DGXXKD82L7Eu02OgNgD0Af4Y14hfUo7Auo02iHbndcyp+ZDLMkDOeI6qLJbqzwnloUdaByWbkJZyBBOVRBa2IlQhw022KB6hpxHp+CP47hDQgwPhuQvaQfRN8h7WG3SVY7UM3ZMiR1XpVwW2KWvEMu4L8994lD7O+OuLlNsd5SVU66MJ76NtVkxh8sZQsYtn3FCaIut32uvvdao/2FL8rVFDGreRv1SdHzGJ/RbnPw0T8hOrpw6JR+0hDoChrQl9UiZJwxLh6krJ3aeHGOXCrGhDFOfD21r4csccawukFPW+9SjayzONUeAlpjxPMR2Io6AMfZdwSq23WJkaQqdqeO41Nc3HrRcEDrQvfSaJqeQaV0I2xbrfNi72FVUQlc/lnTEofpzyJgu6z6MW22+Ct/4xjeqsQ35ib5YF7DpkVfdnL8uvXtvVMdh6PTFKod9JIbXYvSQ2LlH0
7jIx7k46eHHw1ozc3v6vtaLOL1Ch5i6Pvzww6VdgDVqydt1BJQ02r7grjkIHV3yYpD8KjRYPP4RMEdAxxEQ5md3BP0nRzzWOQJKF9CCiYlUU9AOLUy06gKCQBaX8FR/5ZVX+pLx5ZYIEgSGBH0EFrthuUHy5V19jCLpfAcMN0/7bQgMGwHtCBjLs9CsJyQ4+DUFFo2E3+T4L502hWeRHZovb775Zp11oR0BcXjRDku5eRZlXOpJ7DoCcoycPGdRwg1MKHge4wioHZhdR0AUOHFuI/+zzjqrp+jFFlusokt2OpUE4tyoHQFD05NXFzakkQkFNNY5yelyXUds3tfKOLvkuIFFHo3TFlts4SYpuCdtdMwxx/Q914ZReIfjLgiMM3rRniN36oJ2eKmrY907dffYSVLo5AhjHTAyy46GkmbBBRfUScprFGSe1x3zqp1Sbr311r53cXCRvIn5SlGHFJ4mnxxtqelpu9Z9IpR32vKVZzF8H9sPpcyu9sMRQ7cfzt1u6JookX6s2zlW/qfS7WJV9zuWNhzZ9ZhWd/woMl0vHKO7umFYfYBFHd2XtCMgNLEbgDznA566wEc3kkY7Auq0KfWJ4UFddte1lld1hgf0DtdZBiOODl3165K5Odrhwx/+cNUOtAcOWRh0CSwAMGZxnANB17luvEWPkTYlrtvxD0d9nUbKKgsoinLBUD8P7Vuxczr6oJS7xBJL1DpK48gmaVjUwqlEQi59APyljKY5L0cXk4adW9zQ1UZdOlGOPtXVr/WOXRictTMO9aFP6A8fcIRzg49+GdsX3LLafg+6rinjVux4RH27+hE6J0dHSl9Fl3dDqp7Vha2PTtPVT/Qcw2cu5INNDjnoYtn2u6uOPjR3yYWUvkT52lCPQ0Vd0O2tHQF1Wj2vdR0B+dBL+iN2yLqFrDZHQCln0LoJ5TB/Elo5KYDFbR20/kI6FnVjQioP+pQ5jL4xjDahrl3lxI4pqeNqDkdAaUvaS/pem/39scceq9KRXi9+S15d40SXXCGf8cS3qTYrwS01jhm3fMaJHPXTcrzOltQ2bmu5l9tmmqrj02Z6Ywl4gg/RxdkHpyLmN8ybQkJMWw56npBLh9HyIXae7GOXGpYNJXXuEtPWIX2pLW2qLqDbsm7+7yPr2+hzn3WNxaRPkTW8Hzue826I7STFvhvbbjGyNIXOXOO42w+afg9TLlx88cU9utBXvvKVkiych+Qje+yedaGrH6fozz5juu+6Jxtx6I+x6tZumYewDobjo96Mqa7eTfdGdRyGXl+sumShj30khddC9JDUuQe4aDnrzrt5Dn/ozSiWW265nuZPqavG2nUElEKwNcucpk7PIJ2PvCDdIPlV6LV4/CJgjoCOI6AwXl08LEdAnCKk/L322qu2d7GduqSR40I5qkvuEV911VV97+60007l4ivHM+HRrIPvgKHfsWtDYCwQ0I6Aus+71208C925HAFjeRYa9LEVSy21VB+cHInKUUDswMMOIzrk5tkuJfy0006rZAzGftd5CmMLRqPnn39ek+l1jbzC6MQuHTiruUEb9FCkdNBOYziq6YVYJgPQpA14oekpqwsb0sjRxvRDvi5xg148YtHRDT4KIkceSz93v4bCGUqOPCTWTqNSFkeeSBryuf7668tHOEhIvjjoaAzlXeJcjoBamXf5lN3boIVjr+SrZH6ziCyBL7ZlkqcdGOS5VvR9FuHc8TKFp6EhtS2lHj5xCu/45B/K9yn9UOjpaj8m7dJfiet2/POZKI11O8fK/1S6Bee2OJY2vUsWX9vVLbhRruYRJt3ubjfD6gPIRHQKxh7+3EVqvcgNTXWha7LNOyn1CeXBOhrb7um2aDJOsPu47AgMz7HLjw5d9SOtdnxwZW6OdtDG7DqHK01vV53ZZVTBDacfAAAgAElEQVTLEBb83OAuMLlHJqfUKXZOx4K3HuPr6KYe6DR6fNUfleXSBxiztZzWehg0vPrqq9XOhHW7Q3W1EXm06UQp+Etbd/VrbbRjd9O6oBfc6hweu/TL2L5QR0vbvUHXNWXcih2PqK9PP9LtWLfjaaqe1YWtj07T1U9i5jZd2OSQg219zn3WVUff9myTCyl9ifL1XG4QjoDMW/VYqXcR1XgNwhGwbr7U1Tc5Aljk/AILLKBJLK+Zi8pzYr1TfV/ilhupPNiSdfVoGH1j0PqiVKatnJQxJXVcHQ+OgE16cJtcGW98m2qzkn6WGseMWz7jRI76pYzbg5q35dDxaTO9AM9cRz4MTmnPmLYc9Dwhlw7TpSuBW9c8Wc8p6+xW5DEsG4rWeWPmLjFtndK39LupuoBPW7bJek2Lz3XbWCzvp8ialPGc8n1tJ6n23dh2C5WlKXTmHMelbbviYcoFaNFrOTjD8aEuH2uin6+//vqN5Hb14xT92WdMD1n31I6xdc6+8iEvRwLHhFEeh6mPL1Y+slCPFa59JIXXoDNED0mde1Bel5wljXb2gyfkVKPUuoZiPWhHwBR+BScL4xsBcwR0HAFxLOBrKP3HkYQIARTepuD7RaLPjoCLL754ZTDjCzJ2jnL/EMLQxB8LrQQtSFkEYkANCb4DRkieltYQGAQC2hEwlmehK5cjYCzPQoOefIVuTZ2bZ7uUcBQgkTvE7HJCW7hf/+dsc46MPOOMMwp2hJOy3S+VXIVt+eWXL49vbqIjND35dGFDmtlnn72ikQmGG7Rxh2N+3eCjIGoHOjcPtq8WjPgy3h035LekIT755JNLMvTxl4cddphLWvVbTx5TdgRkt0lNh3bm23HHHctn5K/bSre71LVplyEfRZ8FK6GBoz90SOFp8kltS01L7LUP7/jkHcr30jZgG9oPhR6f9tNflt50003yahX7GFbGup1j5X8q3RVILRextOkddDGkNgW+ehf+I+bYbh2G1Qd0mfoaZ+MbbrihPDZA6GTMqwtdxjHeSalPKA/W0dh2z0de8b7eyRWHcR186tcmc3Ve+jqkHXSfrfugQOfrU+eVV1656qN1DnXs0iR9g/jJJ5/URTRe+9Qpdk6n55ldNH3uc5+r6Ne77ubUB1ZYYYWqDMZzHTiWFRrZTbou+LRRm05Ulyf3fPCXd7v69Y033ljVr2kxDd1e+kmdTOzSL2P7gtTBNx50XVPGLc3boXMln37ExzvSRuyi6Rt89awubCmvS6fp6idaX6YuXXMhyvTBZlBysA7jrjr60twmF1L6EuUP2hFQ757L7so4MtSFYTkCUnZb39THyPORoLszrrsbW52+Xlc/33u+POiT3zD6xqD1RalnWzmDGFN8x9Xx7AjYJlfGG9+m2qykn6XGMeOWzziRo34p4/ag5m05dHzaTMsATprKEWLaknIHPU/IocP46ErUpW2e7GOXGpYNJXXuEtvWOfpZUx6+uoBPW7bJ+qbym+63jcXyToqs0bwcswas9Z4220kO+67UV8dd7RYqS1PozDmO6zq2XQ9TLkAHG3bgPyBzXolZU6k7iVBo7+rHuh+FztN9xvSQdU/st1IveOK3v/2tVKOcU4kjM7wTE0Z5HKY+vlj5yMI2+0gKr0Gn
ll2xeojv3IPyfOTsXXfdVfUd+tBFF11UdpHUuvpgrZ0uB+0ImMKvJSD2b1wjYI6AjiMgBiw34BSIEBiGIyC7MOkjhGQAa4unmWaakmR9hChfwYUG3wEjNF9LbwjkRkA7AsbyLDTlcARM4Vn3Xb6qDwm5edZHCd911117lCNkEwsEKC51RxeF1EfSsmvMDjvsUB5TUSf7tEMY7/ClBseou2n5ygcnbTeEpud9H2w23XTTigZ3xz+21taTrjonOh8FEcc9qafrCKjHAEnTFR999NElPNNPP32Vb90W6oJhLkdA8tPGLnGyYMFLjihmket73/teRZfenvsLX/hCeZ9JaV3wUfQ5bkvw0Y6ALl9KmrZYxmGhJbUtJZ/QOJR3fPMP4fuUfij0+LSf5qe6hcUuw8pYt7Nbvq/8d99r65fyzO2fgnNT7JbhSxu7/+ljgdlhrymwWCz0EdNvdBhGH9Dlcc2xsYyrfD2q6yF0DtoRsK1Ph/CgW6+u3z7yijwOPPDAqs1oHx182qtJ5up8uI5tB23UaDNmU4ZPnRljpe1ljNK0wifynLjNETC0TlqOhszpDjnkkIomdnBsciKhHuyaKPTzUQ16CiGnPnDWWWdVZcA/L730UgWhfNR2+umnV/f0hU8btelEOq9Q/OXdrn7N2CMYNjkC6h0WYxwBY/uC1ME3HmRd3TFFMGuLZdxy3/Udj6TePv2IXdeEli5HwBg9qwtbaG2T/zzvmofEzG18sMkpB6VNmuKuOvKeD81NciG1L1H+oB0Bjz322KovNn3sBB3DdARs65vsgiS8Q+zu+HfSSSdVz9Gr/vrXvzY1v/f9GB7synxYfaNrMRU6c8iLtnJyjSkx4+p4dgRskiu02Xjj21SbVRc/+T6PGbd8xokc9Uvlw0HM23Lo+LRNjgV4t41j2pI8Bj1PyKHD+Ogd1KVtntxllxqmDSV17hLb1m6fSf0dowv4tGWbrA+luW0slrxSZE3qeO5rO9HlaJ2v7VrWGaSeEoe2W4gsTaEz5zgude2KhykXhBbt1ET7cTKL3pRB0um4rR+n6s8+Y3rIuif2rHnnnbeae2jHRHampM7zzDNP4wlYut5116M8DkOvL1Y+srDNPpLCa9AZq4fEzD0oz0fOMg6zU6bINVl3Tq2rD9bDcgRM5dc6nrB74wsBcwT0cATk6CJ2vlhllVUaWzfXjoAsIonQIWaQfuaZZ1r/5IhffeTOfPPN10hr0wPfAaPpfbtvCAwLgS5HQB+ehdYcjoApPOset3POOecEQZibZ32UcAg8/vjjexaKRWahcLvHBYdUiIX7RRZZpJKBLDzsvvvuBV8trrPOOtV9Uch03uwwxxHKQovELITvsssufYp+aHofbFBm5Ug+FEh+S9AGBSZ87JDgBh8FUefjOgLqYww4Mr5r7OA5X9H88Y9/7MENvJtCTkdA2W2XtqJ9CT/84Q9LWmQhlgUjUdppSxYQmNzNNttsZbqf/exntaTKO+Rdd9QVLzU5paTwtBCT2paSj2+cwju+ZfjyfWw/1HT4tF/bwiR5dRlWxrqdY+V/Dro11nXXsbS5O31+4xvfqMu+uqcn2nvvvXd1n4th9AEpEFmIDi0f4iDHcQZkpxsWtWU8GUtHQGj15UGpl2/sI6/IS+sc7Papncx82qtJ5gqdqe3ga8ymPJ8651g8iq1T7Jxuyy23rPorR/+2BXb/lb5Nn0d3zq0PsGO0dixklwUCRwFT9owzzti4q7RPG7XpRJQTi7/g1tWvUxfTKKdLv4ztC1IH33iQdU0Zt2LHI6m3Tz9qM3RLPil6Vhe2lNGl03T1E/IIndv4YJNDDgqGXbFPHX1obpILqX0J+gftCLj99ttXcpmdjJrCqDgCQp/eRXbVVVct55bc5yinRRddtKpP01HKTXV076fwoJuX+3tYfaNtMVVoyiEv2spJHVNSxtWJ6gg43vg21WYlfTVHHDpu+YwTOeqXgw9zz9tSdXxpL/AR/T92Jx7JS8ehbcm7g54n5NBhfPQO6tI2T+6ySw3ThpJj7hLT1rqvpFyn6AI+bdmkQ8bQ3DYWS34psiZ1PPe1neSw76a0m68sTaEz5zgubdsVD1MuaFrWWmutagzABnTzzTfrx33Xbf04VX/2GdO1bGWe0RW0jYsNQ3DwIrBRCGMfjl2xYdTHYV+sfGRhm30khdfAPlQPSZl7UJ6PnCWd5kmcPgmpdfXBeliOgKn8WgJi/8Y1AuYI6OEI6NPCuRwB2SlBJmXE7IbkG/bcc8/qXb64xdM3JPgOGCF5WlpDYBAIdDkC+paZwxEwhWfZflvze9uRrHV1ys2zPkq40PHcc88VOHJNO+20PXVAcdLbb0v6thjHrs0226zKh0Xjo446qud4oQ033LB6XucIKPmzpTu78WlcucahsC74pvfFRu98gOPaUkstVSy22GIVPTPPPHNxyy231JHi5ZjQZpigjlLvD33oQ7Vl1N3EmVzeI2YL8KaQ0xGQCaeUO8MMM5ROJVtvvXV5T39BuN5661XpTjvttNLZlPdwBpQdjFx6fRT9JqeUFJ4WOnyU/ba2lHy64py801UWz334PrYf6vJ92q9r0VxP4up4bqzbOVb+56BbY113HUsbjsXC08RtOwK6x6rCMzoMow9Q3u23314gl4VuFgzYCVzCgw8+WD0ba0dAaPLhQaHdN/aRV+R1zDHHVFjMOeecPdn7tFeTzCWjHO3ga8ymPJ86py4epdQpdk6HQ630ZRxttbNmT4MVRbHffvtVadl9jfFkEPqANqxzPCxBdvVtcwzxaaO2cTQFf8Gqq1/nWEzr0i9j+4LUwTceZF1Txq3Y8Ujq7dOP2gzdOfSsLmyhtUun6eonUl9i37mNDzapclDT1XXtU0cfmpvkQmpfgv5BOwKyI77I8AUWWKARslFyBEQv+eAHP1jRzUkBODHi6C114Sj6tvGosaJFUY5NuewETeUMq2+0LaYKbTnkRVs5KWNK6rg6UR0BxyPfptispK/mjH3HLZ9xArpS65eDD6Ej57wtVceX9gpdgJf3fGPftpT8BjlPyKHD+Ogd1KVtntxllxqmDSXH3EXaLrSt5b2YOIc+7tOWTTpkDM1tY7HklyJrUsZzyve1naTYd3O0G7T6yNIUOnOO49K2XfEw5YLQ8vTTT1cnMImOzoYefBDaFNr6car+7DOmh657/uEPf+jZ2Y0NlvgokfU5fCXQRWPDqI/Dvlj5yMI2+0gKr4F9iB6SOvegPB85S7rpppuumrvKqSWpdfXBeliOgKn8Gss39t7oIGCOgBGOgNdff31xyimnFL/73e+qlszlCEiGWhlAsfMNHFslAznxvffe6/tqmc53wAjK1BIbAgNAINQRsI5nISuHIyD5xPIs73KUmPAtDk8hITfP+ijhLn0o0Rj3pQ7EtE9I4ChaeZ+F67ovknwdAaXcyy67rGeCg8PiG2+8IY/74q70Idiw0w2OeNSJXZPmmmuucncpHBjbJlg+CmKbYUL3ByY4TJZ9g1aM9fbp7vs5HQH5MksvEmGUYgcjJmhM3iSwU6b0D75eO/jgg8v
fLMY1BV2f0B0ByTOFp3k/tS2b6uXeHwTvuGXU/W7j+5R+KGX5tF/XorluwzpHQMrSaUL0LaEztZ1j5X8q3UJ/WxxLGwvAwq849jYFHMYlHfE111zTk3RYfYCj94SO1VZbrc+5eNQcAQWkNh6UNL6xTz8mrx133LHC6lOf+lRP9j7t1eYImKMdfI3ZEO5T59TFo5Q6xc7p2HFP+jPxU0891dNO+sfmm29epV166aWrR7otc+gDsvuf0PXjH/+4wPGQ321H0fi0UZtOlIK/gKGxqNMlciymdemXsX1B6uAbD7quKeNW7HhE3X36UZuhO4ee1YUtdHbpNF39pK6du+Y2PtikysE6upru+dTRh+Y2uZDSl6A71BGQxYu6wAdQIhP17ubMFeU+c7mmeeMoOQJSP+iURQx2F+GjPhy/OaLzjjvuqIPA+14OHvQpbBh9o20xVWjMIS/aykkZU1LHVXRX6d/Ef/nLX6TawbGv/Z1TGHSZsjOMLjBVroxXvo21WWnscl93jVs+44TQlFK/HHwodBDnmLfl0PGhJWQBXtch9LqrLSW/Qc4TcugwPvKBurTNk7UO3GSXGpYNJcfcRdpOYt+2lvQxcQ5dwKct23TIULrbxmLJK0XWpIznlO9rO0mx7+ZoN8GKuE2WptCZcxzX9LZdD1MuQAf6zworrFDqRGyqoE9n2XbbbRtJ7erHKfqzz5iu29VnR0AqIrv/of9xfcABB5T13njjjRvr6fNg1MdhX6x8ZGGbfUSXE7r2CM4hekjq3IPyfOQsO9nr+YLM4VPrqrF2bejS52QOTfmrr7663O6JfeQFLwySX3sIsh/jEgFzBIxwBMSoBXNecsklVaP7GiJ4RwQLwqwu6MkKu5QwMPoEjIiSNzFfzoYELdw44sOCITCqCIQ6AtbxLHXL5QgYy7PQwCAvfIsD1G9+8xtv2HPzbJcSfuihhxbLL798cdttt/XQyFdeentxtk4OCdpowmS0LjQ5AuIwBk3bbLNN32scUyzYEvMVUGh6ybQLG0lHjJO4OLg98MAD+lHrtVYQ11hjjdq0bYYJbdShvl//+tdr86i7KY6LvDfLLLMUHMlbF3I6ApL/RhttVLURjgiUDz/p8OKLLxaTTz55+YwduWTXCRx8m4KPot/mlJLC09CU2pZN9XLvp/COm1fT71C+T+mHQoNP+3UtmuuJkutkJuWMdTvHyv9UuqX+bXEsbXoHz1lnnbVxBxiODYbf+WPR+Pe//30POcPoA+5kH4OEG0bBETCUB906dP32kVfs4MjRGtJme+21V0+2Pu3VJHNztYOvMRvCfeqs+QxDuxvYeV3wIEbHkJBap9g53T333NNDkxzFK3RJjCGYsV7oZ4c+CYPQB3SeMp6vssoqUmRt7NNGTTpRKv5CUFe/1uMdRvC6IIZnsK7Tj7v0y9i+UEdL271B11XzU4h9A5pjxyPe9elHbYbuHHpWF7bQ2aXTtPWT2LmNDza63ULlYFt/q3vWVkdJ70Nzk1wgj5S+xPs+joDaya/pdA+dRjsCkl7kMjE7DNWFUXMEhMZdd921pH399devIzn6Xg4e9Cl8GH2ja3EGOnPIi7ZyYseUHOPqRHUEHK98G2uzEn5q+8hW0jTFseOWzzghZabUL4UPBzVvy6Hjg03IArxg2RbHtqXOc1DzhBw6jI/e0TVP9rFLDcuGkjJ3SWnrFHlBX8mhC/i0ZZsOqfusz3XbWCzvp8ia2PFcyva1neg+g24ass6Q0m6hsjSFzpzjuODbFQ9TLkCL7CDJWhXjIyd86TlHk828qx+n6M8+Y3rMuqfbnuL0WLfpSFc76eejOg4Ljb5Y+cjCNvtICq9Bq68ekmPuQXk+clY7VtNfONGCkFpX7eSHrlMXdBr4qS74yAveGyS/1tFl98YXAuYIGOgIyMIJ3s4MloNyBORrGj0Ys0UxjjZ1gYVTFhgk4MAn7yK4rr76annUF4tQkwd6wGBxKPRoYcnHYkNg0AiEOAI28Sw05nIETOFZttMXniX+xCc+0fj1/8svv9wjC3LzbJcSzlf90Ei5bmCSJvVgx7aQsMsuu1TvMinBkOKGT37yk1UalFYJjz76aHkfpcgNGBummmqq8jlym3xD00ueXdhIOmIcJASLCy64oDzyEAW2K/go412GiUUXXbQqm69Km3aGZUw577zzihtuuKEkSyue0E6b1I07G2ywQZW/Hnu66tb0/Oyzz67yE8xOPfXUvuQf//jHe9KxgNZmTPJR9JucUig8had5P0db9oFQcyOFd2qyq70Vw/ex/VAI8Gm/rkVzPVGqW7ymrLFu51j5n0q34NwWx9J244039vDqlVde2VcM+qU46MP3W265ZV+aYfSB559/vnRCFNmjxxYhiJ1s5Dk01YWuyTbvpNQnhgfr6Gy65yOvvv3tb1c4sMMuCwE6+NSvSebmagdfYzZ0+9Q5ZfEoR51i53QcySh9dqmllqodyy+//PIqDfqR/ghlEPqA1leFtosvvlh3ob5rnzZq0oly4A9BXf1aGwYH5QgIHbF9oQ/UlhuDrmvKuBU7HlFdn37UZujOoWd1YQudXTpN2zwkdm7jg02KHGzpbrWP2uooL/jQ3CQXyCOlL/G+jyPgIossUsnXpp2RmxwBwUA7aXPNBwlu4EMzkaXEdXa7QesmmibsirLIxseB1113Xbnja92cXr/nc52DB33KGUbfGFabdJUTM6bkGFcnqiPgeOXbWJsV8ucjH/lIefLFpz/96aidHWPHLZ9xQvg9tn68nzJuD3LelqrjUzffBXjBsSuObUud76DmCTl0GB+9o2ue7GOXGpYNJWXuEtvWBx10UIH9gI9E0bljQg5dwKct23TIULq7xmLyS5E1vB8znks9QmwnsfbdlHaLkaWxdOYcxwXfrniYcuHSSy8t5w18gH3VVVeVpFFn2WSBuQQf/b7wwgt9ZHf14xT92WdM1+ODr68C68B6bk39Fl544b66xdwYxXFY6uGLlY8sbLOPUF4sr/Gurx6SY+5BeV1y9m9/+1sxzzzzVHNrjgPWIaWuwnv0QebKOOG6QdtffRwBm9a3yHeQ/OrSbb/HHwLmCFgU1RaxMCVOI21Bb1uuHQHvvvvuSmCw00GdMY58tdMDQqbO0YJ3ZWckaOKPozdZjMSYxh9blMLcCDNNMzskiRGO99hhDKWbyTr5cvwChjkGLhyOdNDHL/KudsbAuFdHq37frg2BYSGgd9fQ/b+u/CaeJe3aa69d8e0hhxxS93rZ7+ebb74q3RlnnNGXLoVn4auVVlqpyh/eQ8m49tprS379xz/+UTzyyCPlcagsGvz0pz+tys/Ns+y8Rvnyx0RfB5mI6Z1jeE79F1hggeo9jn0LCe7XOvoLMwwV+gsjaDviiCOq7MUYwf2nn366us/FueeeW9EExoTQ9JJhFzaSjhjZKhjqmB1Q2CUBhZp2dYMcecs7yOi6oL9SYVt3N+B0w+ROyuWoXXAQbHDewDlx2WWXLdMcddRRZRY4mU433XTVe7y/zjrrlM7kOAiQL7TrvN0doVxafH7jIMk4JfTikADWbj
jhhBOqNKTdZJNN3CTVbyaTU045ZZWePlQX5p133ioNyrkOKTxNPjnaUtPTdJ3CO015uvdj+D62H1K2234YReuCPoJAHFp1Oi23mfTr46fkeqzbOVb+p9KtcWq6jqWN/JAdwtPIPcYwHXbaaafqOePan/70J/14qH1A70DAOCa0YHRAPmoDHXKlTheWXXCoM/zihtQ+HcODLg1tv7sMQTiNy0dQ1NHdocitX4zMzdEOOo9TTjmlrcpeMlob1jkuxg0chSj9nJjjrnXQ9MT0rdg5HXNE+QgCunbbbTdNVumcoeUn80kdBqEPMK5rmlgIanPmh57UcTQVf7df141F2vDNUZh1ddIL0RzH7AYf/TK2L7hlNf0eRl1Txq2U8Uj3IxaE64K2z+DEpUOqnuWDLeVpnqzTadr6SezcRmPTNO9IlYMay67rtjrKuz40t82VUvoSNDAHFrnPLid1gfmSpMEuB80PPfRQcf/995dzMo4FkufEt956a082Bx54YM9zPpJjYedXv/pVgV3j6KOPLjjSS+dRd4TwoHUTTTT2CU2PXFN/HNLZdaRu4UPn0XSdyoNN+br3h9E3htUmXeXEjimp4yp2COkbxD4fSrrtJL997e8PP/xwT5ks9rkhVa6Q33jk21ib1RZbbNGD6bHHHutC2vk7dtzyGSek8Nj6pY7bg5y3per4YKNl6hxzzNG4diU4dsWxbanzpV0HMU/IocOkzpOpp49dinTDsKGkzF1i2pr5sZb72KeZE4QG3W/JL2TNQMrKIeslL5+4ayxOlTXQEDue864e07tsJ7H23ZR2i5GlsXSCR65x3KdvkGZYcgFbLHYK+Ma1+9x11109PgR1R+d29eMU/dlnTI9d93R3PGxznvJtM9KN4jgs9PtipWVhjH2E8lJ4TcuFLj1Ey6kYmyq0tjkC4vwqR2bDI8yxX3nlFYG0jFPqyvxfj4Gs92PrefzxxwvWzvfbb78ep1U2QakLvvJikPxaR5fdG18ITNKOgF/96lcLjiXSDgMwJwbgpj+UVmFgHAH5QpcFTQxccp94tdVWK3beeedCjvq45ZZbyi+IWRDV6TAE8oUECrUOzz33XPGBD3ygJy3vMTnSEyTurbnmmvrV4rTTTut7j3R6EY/f7tnk2pjCc/6o15JLLllipB2Qegq0H4bAkBDIwbOQyqQRA5bmJb4W2XbbbUtjvVQHJRleFn4gZqFm++23L3BM0iGFZ3FK0V8YSHk4XU0xxRQ95aN0SsjFszg8IIfgdSmbmC99kW9yvK1MxHi24oorlk7UOBprx+XttttOyPOOcTTWC2DkjzIo94i1PGQBmXJQxrQxgoUSdlBFqUX5kfbFwU12uwlN74uNVJb0YKNxrLtmZyRZEEEJpj4ce6vTsvvehRdeWGZNG7ALxZxzzlmlwemcNtFO2yR2neYkT/CRa2LGIxxOJWBEEMx0Orl2xxB+syCWur26OCVSDs65dYGFA+2EWLeTEAvwODwst9xyPfVEYWY8FucvJoB6kY5ymVCA73333VcVH8PTuduyIqbhIoV3GrLsux3L96H9kOOo4Vu3/fhoQrcfC6DaSEr7obMhq5566qmKfu3oTRp2yIQ3Z5999p4PIca6nWPlfwzdFTieF7G00Za6HXGm40hMnPgZV2gP/phk/+QnP6moGYs+cNhhh1X0QBMycP75568MYvqLT55zbDtyBsPt+eefX7Dbjx6np5lmmrKuOMvlqk8sD1bAdlzoBQ6MhDgrHX744cXee+9dfPSjH61kL07bfB3JF7aEnDI3pR0Y8/lgSjuV0+cYV12HdR8ZjfP8uuuu29Mv2MUA/ULGiP33379H96FvgBWySuRQSp2kyWLndOy+pj8Mgzb0NXbf1HNZ+i/91A2D0Adw4Acn/sCvKfi0kY9OFIs/cq9rLMJ5gbbWOhn14qMTcRpFX0dH0roT/QhZiN4Wql/G9oUmnLnvI6Ny1FVoSBm3QscjTkWo06032mijSrd+7LHHSt1PfxiCrkkaacdYPQs9tasfgUuXTuPTT0LnNj48lksOStu3xT519KHZRy5AR2hf4h3GxK222qpgjBc5xvwSGSofVUkd4X1JUxejs+r72AC0Dot+ga6h07jXyBJ9j11d9t1335KEYekmUl9i2kfTU3ctTpH6PZ/rWB7EThAaBtU3htUmXeVoPCXbILgAACAASURBVGLGlNhxlQ/a6eN6zk8fYeGNsZSj1nyDr/0duUjebpk4h2H34sOfnHJlvPFtjM1K2kg7V9GOTfYbSV8Xh45bPuOELiemfj46EWV0jduDnrfF6vjwITyx0EIL9chreIL7dR8haEybrkPbsimfnPOEnDpM7DxZ19PXLkUfHJQNJYc+H9PW2vFQdAPWMUJDii6QU9b70N01FueSNUJL6HgeYjuRMohD7bu8k9JusbI0hk5oTR3HNVY+14OWC3feeWeBYx/2b+E9frPrpdSXtU13QwgckZjbdPVjXcdQ/TlkTI9d92QNUOrN/I2+mCuM2jgs9erCCvtGDvuIlBfKazF6SOzcA36WoB0B0WGxEXPSIDYfvRM/64LwTV0IravOw/UrkH5JzLou5co91hY222yzvnXeLnkxSH7VdbHr8Y3AJO0IqI84FIYLiXEEZKGk7R2+MCfwlVxburodtPiyl0UbfXSI5IHxj0UqvPfrwm233VbuKCXpdbzggguWXxXXfTnMIpdOyzWOkuutt161o1RdeXbPEBgGAjl4FjpZEHX7ufxmMUiCfDUjz3SMQ4kbUniWdzlet65MHMRY5GYXADfk4FmOGNd1c6/ZRZSA0Uuc89w0OFIef/zxtcf6ujTX/eYrJa38kD9Y7LPPPuXX4meeeWYfjXvssUfBETM4LGsHAE0bypIs3FNuaHpfbMibXZBk9yjkLLsooYwhxz/zmc/0tS1pCNRR06yv5UgpHPb0fX2NfHYDjtsY17XzHO/wG5xxhn3ttdfc10oD4BJLLNHzHooojuMY3nEi0GVzfeKJJ/blE3JDHyt90UUXNb4qRmfGpLrx6/XXX++jTdMqzhnuDpw6jd7pF0JCeXoQbdkIyH8exPJOV77yPIXvQ/qh+zWgbheupf3cBR2dTh+FzaISTsP6Odfve9/7+iZVY93OsfI/lG5p05A4ljacxJgs64m1tAWyHWM/BiAdxqoP6C8ihcbll1++PLIA+uaee+6+fsSuadtss03ffXkfh8dc9UnhQY1v07Ve4BD6dcwYgMM/cwsdcsvc2HbQxk1NN9c4HujgI6Nx9nDzkd+ye2xdv5Y0ekE7tk6a5tg5HTtQuQYjaEQPQP+Quuiy9DULgjn1gWuuuabElTYRea7Lk2ufNvLViWLwf+KJJxrbH/ygvS0NTj6Ek046qTEf5vAh+qVgE9sX5H039pFROeqqy00Zt0LGI59+hCO68K0b6+ODYvQsfYSsm7f0I3Dp0ml8+kno3MYHm5xyULd/3bVPHX1o9pUL0BDSl0ivv8J32xOnCjcwN9KLDrzDXJlFB/SjOvue1mFZvOADRfcjLj6gYMymT7p08MELYVi6CWVBpzhvM
LbgYIUTLvof9Nd96MjO9KEhhgexE8SEQfSNYbVJVzkuHjFjSsy4yk5Dbn/Vv+XDR5e+ut++9nd0GF2Ge41DS265Ml74NtZmJe3BR5U4H8smB9iWQkPouOUzTggNsfXz0Ykoo2vcHvS8DRpidHy9W67LD/xmsT0mhLZlUxk55wk5dZjYebKuZ4hdalA2lBz6fExb43zD2ok+hjb2eOBYXSC3rNdtW3fdNRbnkjW67JDxPMR2osvgOsS+K+/GtluKLI2hE3pTxnGpr288aLmgj3bVMn+ttdYqScQhVX84qtNgy+zqx249Q/TnkDGdcmLXPWXTEuYkucMojcO6bm1YtdknZO3R1z4iZYbwWqweEjP3wF4vwZ2T677ONXNonAPhibYQUledz5NPPtm3YQzzZtZZmf/ro4GFNnedt0teDJJfdV3senwjMEk7Ao6XpmObXbbTvuqqq8qvFvl6nXs+4Zlnnik4yggFgK+TtSBseh9nDxaH+JIdZbJLEDblY/cNgUkVgRSe5bgStg7mCxO+ZIffu7bOHybPQgtf1uC8jJMbX/hx5ApHlqcGJl18fYG8wumx7uiWpjKYTOMYfcUVV5RH3/KlSdtxM6Hpm8qV+2Agu76w0OM6uJCOMl3FW3YqlHxyx2xpDRZgijx3t7huKo+jhDHKM37YGFCUY27sONyEcc77KbzjQ0cq38f2Qx/a2tJQLrIKp1xklXt0p/tuiux284r5HSP/KWcYdKfQxrb34I+jLYZAXx02BkP3Hd8+gDGKMZcFbK5HLaTyYFt99AIHuxCwyyvzAByzcaRnQWJYYdTbIQaHXHWKmdNBL0ZZdCqcL2hbn7mgrmcufYBdMjFs4Zw4zJAL/2HS3FVWbF/oyneYz1PGrdjxKKV+g9azUmiTd3PPbSTfiRwPsi9x1JDY4f4/e+cBtkdR7m8gVAMo0ns7CBEUDggIalBEypGuoHQOEEqoRinCgYBIsf1REVCEIEUMVUpAURApihQREaQTilISQk+AUOZ//facZ5l3vy2z5S3f+95zXd83++7OzjxzT595dkZjsSptqXbgVb2tMVlRH7Yb6bSrd0RolhLJ73//+2inf1vY0MeNVUyny2A780aV+LfznbJtSj+2q03y7eVy2+SclT52VbnWR5ZVTdPtVpPxqxonvdfOcZsvV90+vu9X3eu6admtcUJRvJsaJ4fOSZg86id3cw7F5Eizq6S16kVTOvJPhEjzP+9ep/sCebL04rOy7XnVOJSd362abnXr0rJyGo9OteP9VC8Yu3b1n8uue0oO+2Chyi6kFp8iu5faYZO1LCt7r45dtayFhlln7OErAmqTGa1NaI1IHw1Jwb2sqRJX1WU6vUDz7NLvUXzKmrL1RYj/7SqvIWHjpvMEUATsPHNChAAEIACBPiOg3RBtgSPvayMpTPpbrz/wwAN9RoLoQAACEIDAcCLgL3DoOEIMBJomoIkv21HLdpluOgz8gwAEIACBzhFQvW474WsMrMXnLKOdEG2c/JWvfCXLGfchAIE2E2hyzmqDDTaIyrUdS95m0YO8bzJ+QQHiqBECvTxOYJzcSBJHH0aoH6DTXXSMKAYCEIBAOwnYqWLa3RAz2AR8RcCso38HmxCxHxQCKAIOSkoTTwhAAAIQaBsBbaNtCxx77713Zjj6snOppZaK3C633HKZ7ngAAQhAAAIQ6AQBFjg6QXmwwzj//POjfo+OiezkjqCDTZ3YQwACEGgfAR2TbmNf2XmKgGeeeWbs9he/+EX7hMJnCEAgl0BTc1Z23LN220k7CSNXiDY+bCp+bRQRr1MI9PI4gXFySoKVvPXcc8+5NdZYI+oHHHfccSXfxjkEIACBcgS0o+Mqq6wS1Tk6lQYz2ARQBBzs9Cf27xNAEfB9FlxBAAIQgAAEKhE444wz4gWOkSNHRsc4pHl06qmnxu70xTIGAhCAAAQg0E0CLHB0k37/hf3666+7Nddc022xxRbulltucVOnTnVLLLFE1PfRURgYCEAAAhDoDwKLLbZYPK4dO3ZsaqR0jNHyyy8fuZOtIwUxEIBAdwjUnbOaNGmS23jjjaPyvOCCC0a7fHUnJumh1o1fuq/cbZLAcBsnME6unvpq7w899FC3yCKLRHXGDjvs4HRCDgYCEIBAkwTOPfdct/TSS7uTTz7Zvfzyy+6EE06I6pzVV1/dacdZzGATQBFwsNOf2L9PAEXA91lwBQEIQAACEKhEYPLkyc7vXC6zzDLuuuuuc/oSSWbKlCnusMMOcyNGjIgGJOPGjasUDi9BAAIQgAAEmiIwbdq0eEFPO/qsvPLK7vHHH3fvvPNOU0Hgz4ARuOCCC6J+ju0UpY8jdL3RRhsNGAmiCwEIQKC/CUj5z+p62fvuu697/vnno0hrDHzjjTe6UaNGRW6kCPDII4/0NxBiB4EeJ1B3zmrbbbd1888/v9MJGPKr10zd+PVafPpRnuE0TmCcXC8H3nvvvVH7v/baazsd04mBAAQg0A4C//Ef/xGPR2zuabbZZnM33XRTO4LDz2FCQGPRa6+91ikv2HhVG7K88sorwyQGiAmBZgmgCNgsT3yDAAQgAIEBJXDzzTe7+eabL+5gqqM599xzx19A6veyyy7rzj77bI7GG9A8QrQhAAEI9AqBI488sqW9sskR2VJaf/DBB3tFVOQYRgQuuuiiIflKfR8dC4WBAAQgAIH+ITBz5ky33XbbDanzF110UTfXXHNF97X4stNOO0UfGfRPzIkJBIYvgTpzVq+99ppTue9lUyd+vRyvfpFtuIwTGCc3k+OkTImBAAQg0E4C9tGRP5950kkntTNI/O5xAjNmzIg3YvHzhV1rbIqBwKARQBFw0FKc+EIAAhCAQNsIPPXUU278+PFu9OjRboUVVoiOw9tggw2iHRImTJjAUQhtI4/HEIAABCBQhsDll1/u9ttvv8y/Z599tox3uIVAREBf3n7jG9+Idpf8yEc+EvV/WAQic0AAAhDoTwLaQfjKK69022+/vVtjjTWclABXXXVV9+Uvf9kdffTR7r777uvPiBMrCAxjAv0+Z9Xv8RvGWS86MWU4jBMYJw/nXIbsEIDAIBG49dZbo1NOFl98cfe5z33Oqf7GDDYBHUOfN9d91llnDTYgYj+QBFAEHMhkJ9IQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0C8EUATsl5QkHhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwkARQBBzIZCfSEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEINAvBFAE7JeUJB4QgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMJAEUAQcyGQn0hCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCDQLwRQBOyXlCQeEIAABCAAAQhAAAIQgAAE
Z2U/1beRW8KZ9y+trXvlaso3U//OEPl1cpnivQUm6Wp2YvqkpLLLFEsY6ClHyKsVQ+KdqJL0/TY3/rZO2zD6RU0JQFdKZwien3TftQfu8Nb3hDUTfLLbdcpjHBkj1OcW7n8zjxxBNtE7VL3++sfWnZNnbEtqfQOq4tuHsjVV/vcrzQ5nX7TG+mQCL9yEWBzzrWHnjggZkPglPwb1XqUwd9z9Wrtl/3WtOxW58p9z8F31QlfwyL+RFQSB/S9v1YpdmxqpIP2F5++eUHVolt2wOZVTzpU3exZfImfY/zoX02ZkyNLaeODfr7tNwvv/e97+Vj7l//+tfsN7/5Td5GFHyvvmcpptyWR9uy3Ge6Hsv9GKV99DPwKy8/y5re198tTUn90VspuLBu9mTlE9sOm8ri34ttB8pL53u2b5pRWvXu0x577FG8r7Zg36X4dUIfD2usDOmDsXXk21jT301+vT5tMaavhjhY3bXVTYq/w1K013JQ7uGHH267wBIBBBBAAAEEEEAAAQQQQAABBBBAAIFJJTCtAwGPPPLI7N3vfnem2wHaF9Ra6tf/+gK/7vaQvoYPO+ywgc8q+GHJJZcsAlH8r5uV9wYbbJAHmOgCxBe+8IVs++23H7gdkIIldthhhzyI0F+g0gUBzcCkLzj1RarNBqj1q2alKgc22P4pH3uspb6Q10UZEgKTQWDU+6wMLaBO/UtBV5rZTIFoPohql112GcM9nn329NNPz8cVBTpb/9dtyHT7saOOOiovS99yj9mR0gsKFNtzzz2zGTNmFNtWGRSEp9frghFiPGbNmjWwLdtnBYLZYy3XXXfdgdJq9g4fRKjbsqnuNDuJAqrtsxqj6y7Qli/I2Wf8UtvQWG5Jn1GbKB+LNEPeWWedla+mQEbNHrvYYosV5dAtbVVvCs5S8LlmBXnLW95SvK9trrrqqpkusurz/phi5VFwYvmf31cLBFQhHn/88WzZZZcdyF/56HhkxyTLd8MNN7Tdy5cK0CyvY+tqWa4bPVcw7dVXXz2QT9cnFvCubWy00UaVH1dwvA9CPOecc8aspxkCFRy6+uqrD+y/AmLUji2ATIGaCmr0+6YAYNWdD7rsY5minYzZsZYXbrrppoF90X6pvaidKSBSAQ8pXWL6fcuuFG+rHfj60czJGqtf85rXFMHBMed26tsKkvVBqJqFUH28KcC1z9hhO9WnPdln2+rYB+jYZ5qWMX297/FC5VlvvfUG6tXXscZKH4ivY6FuHe8DW5VHnzrw42rXc/UmR73XduzWcUh/E5THpde//vUD45LOnXwwjmwUdKdjw/33399WjDHvt/UhBXlXHdN0u3Y7pt111135uOiDpjUOa50vfelLxTZj2naRSc2DvnXXp0wpx+8ufbbrmJqynDou+tn9rE+qnv35qF5XEJBPXcvtP9v0ONWxXIHwPrBW+6BgLXtNS3++pNviqk8oyLEqKWjMn9/pvKIt9WmHbXna+ynbgcYpP0bpeKi/P/T3kc5RrV0suuii2Q9+8AMrQqflsMfK0D7Yp466HgdTtMW+fTXEoa1ufEX3/TssZXvV337WJrWcOXOmLyKPEUAAAQQQQAABBBBAAAEEEEAAAQQQmDQC0zoQ0Ae++C/87LEC70KS//WxfXaNNdbIdEsXpcUXX3zgC0Wto1lT/Iwa9jlbKmjm0UcfzS+4+4vJ9r6WuhDnAwvKZdXFW80Y5QMc9Dk91wVRzV7w3HPPlT/GcwRGVmDU+6zgdMHdLgT6/qrHmpHo2GOPrbyFmD47Xn12xx13HDMGWdl0ES623HkGNf/5Gexsm36pgJu6FONx7bXX5rPq+W3Z42WWWSa/dfOzzz47ZtO6JVQ5yEGf07ipz5Vnd7MMFNytmeRsXQXlKRhCF7N33nnnyovhmplVab/99qutHwUdKilg28pfXm666ab57XjLr/vnukWxbtnrXwt5/PDDD+fbt/9kpuBIzdpS/vz888+fbbLJJtmNN95oqw8sFfS58sorDxyTFAyggMzbbrstD7gs5xkSkD+wkdITfzvus88+u/Tu/39qt5RWcFtVu3j++efH7K8vqwXTlGdA8uuce+65/3+DWZZvp4tlinYyUIDAJzoX8vuhx3JSu1MwT/k9/7yPS0y/D9mlm2++OVNQhi+nHi+11FIDgWF9z+0UUFjO254rCLicYsYOn1dM32yqY5tN2m+r7XHfvh5zvLjvvvsGZjyVucZtjS+33HLLwK2BrT7UhpVi6iD2XL3Jsu3YXZ4B1vbLltb/fEC0vWdL2XRNbX0oZKxSwI+VobzUTJ0+xbRtn0/5cUzddS1TiEnbcd6Xv0uf7TKmpi6nnHQcftnLXjamvvXjBwVN//SnP/W7VjzuUu7iQy0PUh7LNbOdDzBWO9Z+ylAzEuvWvuW2/YlPfKK2hAcccECxftPf9z6Dru3Qf7bpcep2oABMnQ/rVtZlE5np/Fk/aOmbJmKsDO2DXeuoz3EwRVvs21fbHNrqplznff4OS9leZ8+eXbRRBffr3IKEAAIIIIAAAggggAACCCCAAAIIIIDAZBSY1oGAKStMARb6lb8CP/Q4ZdKFPgVWXHTRRfktffVLcV1gCE3PPPNMfvvGM844I1NwjJ6TEJjuAuPZZzWzh2bq1C0JNRPCpZdemt1+++21AYDlupioPhtb7vJ+pHoe4/HQQw9luk2ixj/NOKMg7JCkC2K6OK1gPc1K1/Y5fyupuuDG//7v/85njLGLoLoAPlmTbiV29913Z9/+9rfzWbQUEGa3iW3bJwUWaVYo1UvdzIpteUyl92Msh+WgQE0FwWq2RJ1HDKPeYvp9m4vy1vh82mmn5eOz2nJVGs/jhG0v9djRtz2NRx0Pu6/rGKbZT9VWNTaFno+nqIPYc3VrD5NlGdqHUu5P37bdVoaYuhuvMrWVWe937bPjOaa2lfdPf/pTfgtvzfilv5d1zqD+GpImstxt5VMQ8Q033JCfY+qcUfs5EWki22GX/VU5dct2/V2kH0cogE2vTdbUpQ+Odx2laot9+moXh9C67vp3WGi+Ievpb/YLLrggU+AwCQEEEEAAAQQQQAABBBBAAAEEEEAAgckqQCDgZK05yo0AAgggMBICupitW51ZgJ+CD+uSbjFl633wgx+sW43XEUBgGggwdkx8JVMHE18HlAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCCdAIGA6SzJCQEEEEBgGgro9osW3KdlUyDgSSedVKz7jW98YxpqscsIIGACjB0mMXFL6mDi7NkyAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAegECAdObkiMCCCCAwDQTWHjhhYsAv913371y73V7uyWWWCJfT0vdjpCEAALTW4CxY+LrnzqY+DqgBAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAmkECARM40guCCCAAALTWEDBf35WwF133TV79NFHc5G//OUv2VVXXZUtu+yy+ToLLbRQduedd05jLXYdAQRMgLHDJCZuSR1MnD1bRgABBBBAAAEEEEAAAQQQQAABB
BBAAAEEEEAAAQQQSCtAIGBaT3JDAAEEEJiGAi+88EK2xRZbDAQDKjDw1a9+dTbPPPPkr88999zZ1ltvnd1zzz3TUIhdRgCBKgHGjiqV4b5GHQzXm60hgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiMnwCBgONnS84IIIAAAtNI4K9//Wt28cUXZ1tuuWW20kor5UGAyy23XLb55ptnBx10UDZnzpxppMGuIoBAqABjR6jU+K1HHYyfLTkjgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggMT4BAwOFZsyUEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEkgsQCJiclAwRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGJ4AgYDDs2ZLCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCQXIBAwOSkZIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDA8AQIBh2fNlhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBILkAgYHJSMkQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBgeAIEAg7Pmi0hgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkFyAQMDkpGSIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAwPAECAQcnjVbQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCC5AIGAyUnJEAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHhCRAIODxrtoQAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAcgECAZOTkiECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACwxMgEHB41mwJAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgeQCBAImJyVDBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBIYnQCDg8KzZEgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALJBQgETE5KhggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggMT4BAwOFZsyUEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEkgsQCJiclAwRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGJ4AgYDDs2ZLCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCQXIBAwOSkZIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDA8AQIBh2fNlhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBILkAgYHJSMkQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBgeAIEAg7Pmi0hgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkFyAQMDkpGSIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAwPAECAQcnjVbQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCC5wMgEAs4111wZ/zCgDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0gZg2kDzKbhJkSCAgAYgEYNIGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0gSnTBiZB3F7yIhIISAeeMh04JgqYzxJFThugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuYGm0geZTdJMhwZAIBJ4EVRUQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBg5AQIBBy5KqFACCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIQLEAgYbsWaCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIycAIGAI1clFAgBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBcAECAcOtWBMBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBkRMgEHDkqoQCIYAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIBAuQCBguBVrIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDByAgQCjlyVUCAEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEwgUIBAy3Yk0EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEERk6AQMCRqxIKhAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEC4AIGA4VasiQACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggMDICRAIOHJVQoEQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQCBcgEDDcijURQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGDkBAgFHrkooEAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALhAgQChluxJgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIjJ0Ag
4MhVCQVCAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIFyAQMBwK9ZEAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAYOQECAQcuSqhQAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiECxAIGG7FmggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiMnACBgCNXJRQIAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgXABAgHDrVgTAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZETIBBw5KqEAiGAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAQLkAgYLgVayKAAAIIIIAAAggggAACCCCA8guSBAAAAhRJREFUAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAwcgIEAo5clVAgBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBMIFCAQMt2JNBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBEZOgEDAkasSCoQAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAuACBgOFWrIkAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIDAyAkQCDhyVUKBEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAgXIBAw3Io1EUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEBg5AQIBR65KKBACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC4QIEAoZbsSYCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACIydAIODIVQkFQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBc4P8BSRjfgJoJns8AAAAASUVORK5CYII=) ###Code ner_tagger = NerDLModel()\ .pretrained("ner_posology", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_converter = NerConverterInternal() \ .setInputCols(["sentences", "tokens", "ner_tags"]) \ .setOutputCol("ner_chunk") ddi_re_model = RelationExtractionModel()\ .pretrained("re_drug_drug_interaction_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\ .setOutputCol("relations")\ .setRelationPairs(["drug-drug"])\ .setMaxSyntacticDistance(4)\ ddi_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_converter, dependency_parser, ddi_re_model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") ddi_model = ddi_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(ddi_model) loaded_lmodel = LightPipeline(ddi_model) text='When carbamazepine is withdrawn from the combination therapy, aripiprazole dose should then be reduced. \ If additional adrenergic drugs are to be administered by any route, \ they should be used with caution because the pharmacologically predictable sympathetic effects of Metformin may be potentiated' annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df annotations[0]['ner_chunk'] ###Output _____no_output_____ ###Markdown 6. Chemical–Protein Interactions (ChemProt RE) Accurately detecting the interactions between chemicals and proteins is a crucial task that plays a key role in precision medicine, drug discovery and basic clinical research. Currently, PubMed contains >28 million articles, and its annual growth rate is more than a million articles each year. A large amount of valuable chemical–protein interactions (CPIs) are hidden in the biomedical literature. 
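###Markdown
A quick sanity check on the extractor is to rank the candidate drug pairs by model confidence. This is a minimal post-processing sketch rather than part of the original pipeline; it assumes the `get_relations_df` helper used above returns a pandas DataFrame with `relation`, `chunk1`, `chunk2` and `confidence` columns (adjust the names if your version of the helper differs).
###Code
# Rank the extracted drug pairs by the relation model's confidence score.
ddi_hits = rel_df.sort_values('confidence', ascending=False)
ddi_hits[['relation', 'chunk1', 'chunk2', 'confidence']]
###Output
_____no_output_____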
###Markdown
6. Chemical–Protein Interactions (ChemProt RE)
Accurately detecting the interactions between chemicals and proteins is a crucial task that plays a key role in precision medicine, drug discovery and basic clinical research. Currently, PubMed contains >28 million articles, and its annual growth rate is more than a million articles each year. A large amount of valuable chemical–protein interactions (CPIs) are hidden in the biomedical literature.
There is an increasing interest in CPI extraction from the biomedical literature. Since manually extracting biomedical relations such as protein–protein interactions (PPI) and drug–drug interactions (DDI) is costly and time-consuming, computational methods have been proposed for automatic biomedical relation extraction. To date, most studies on biomedical relation extraction have focused on PPIs and DDIs, and only a few attempts have been made to extract CPIs. The BioCreative VI ChemProt shared task released the ChemProt dataset, the first challenge dedicated to CPI extraction. Computational CPI extraction is generally approached as a classification task: decide whether a specified semantic relation holds between a chemical entity and a protein entity within a sentence or document. The ChemProt corpus is a manually annotated CPI dataset, which has greatly promoted the development of CPI extraction approaches.

ref: https://academic.oup.com/database/article/doi/10.1093/database/baz054/5498050

| Relation | Recall | Precision | F1 | F1 (Zhang, Yijia, et al., 2019) |
| --- | --- | --- | --- | --- |
| CPR:3 | 0.47 | 0.59 | **0.52** | 0.594 |
| CPR:4 | 0.72 | 0.81 | **0.77** | 0.718 |
| CPR:5 | 0.43 | 0.88 | **0.58** | 0.657 |
| CPR:6 | 0.59 | 0.89 | **0.71** | 0.725 |
| CPR:9 | 0.62 | 0.84 | **0.71** | 0.501 |
| avg. | | | **0.66** | 0.64 |

The averages are unweighted macro-averages over the five relation types: (0.52 + 0.77 + 0.58 + 0.71 + 0.71) / 5 ≈ 0.66 for this model, and (0.594 + 0.718 + 0.657 + 0.725 + 0.501) / 5 ≈ 0.64 for the baseline.

Here are the relation types:

![image.png](data:image/png;base64,…)
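###Markdown
The ChemProt extractor follows the same recipe as the DDI pipeline above: swap in a chemical/protein NER model and a ChemProt relation extraction model, reusing the document, sentence, token, embedding, POS and dependency stages already defined. The sketch below assumes pretrained models named `ner_chemprot_clinical` and `re_chemprot_clinical`; check the models hub for the exact identifiers before running it.
###Code
# A minimal ChemProt RE sketch mirroring the DDI cell above (model names are assumptions).
chemprot_ner = NerDLModel()\
    .pretrained("ner_chemprot_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

chemprot_re = RelationExtractionModel()\
    .pretrained("re_chemprot_clinical", "en", "clinical/models")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)
# As with the DDI model, .setRelationPairs(...) can restrict which
# entity-type pairs are scored, e.g. chemical-protein chunks only.

chemprot_pipeline = Pipeline(stages=[
    documenter, sentencer, tokenizer, words_embedder, pos_tagger,
    chemprot_ner, ner_converter, dependency_parser, chemprot_re
])
###Output
_____no_output_____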
IQgAAEIACBRgnggGoUJ8IgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAARwQKEDEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACjRLAAdUoToRBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjggEIHIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEGiWAA6pRnAiDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhDAAYUOQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEINEoAB1SjOIuFveUtb0m22GKLZNttt+UPBugAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOoAPoADqADgy5Dmy33XbJ61//+uSLX/xicQcwMSAAgZQADqgURTc7L3rRi5JXvOIVySabbMIfDNABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHRhyHXjJS17SqyM5oQgQgEB5AjigyrNqJOarX/3q5DOf+Uzy3HPP8QcDdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAHhlwH1q1bl7z4xS9OvvnNbzbSR4wQCIwXAjigOq7pf/zHf0y23nrrjlMlOQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEBiEwP/+7//2RkBpKj4CBCBQngAOqPKsGomJA6oRjAiBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEINAJARxQnWAmkY2QAA6ojisVB1THwEkOAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEANAjigasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAjUI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOqx8HVMfASQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQA0COKBqwOPScU0AB1TH1Y8DqmPgJLdRE3jhhReS9evXJ88999xGXU4KBwEIDB8B7M/w1UlRjlRnzzzzTKIXx2EI6NAw1AJ5KEsAfS1LingQgAAEIAABCGysBHBAbaw1S7naJoADqm3CRv4wOKAeffTRZPHixckdd9yR/O1vf0ueffZZk0t+QmA4CajzY7PNNks23XTT5KUvfWnyohe9KNlkk02SD3/4w8OZ4SHO1dq1a5Pbb789WbJkSfLQQw/hxBviuuoqa0888UTv3rB69Wr0IQAd+xOAMgKH3v/+9/fuGS972cvSe8YWW2wxJjlHh8YEO4kOSMDXV7/9fPSjHx1QIpdBAAIQgAAEIACB0SaAA2q064/cjx0BHFAdsx8LB5QcTGeffXby+c9/PlH66rC3f+973/uSXXbZJbn44os7JkJyTROQY+F1r3tdrb9bb7216Ww1Ik83+3322SdR54evw9/73vcakb8xC3n++eeTWbNm9ezAa1/72gw/sfx//+//9bj+/ve/T2z9F+nUVVdd1Yfum9/8ZlQH//CHP2TiF8kvo89+nh988MFo2n/84x8zaef9uOeee5I3velNfbLe+ta3Zi4rk/83v/nNyXvf+97kc5/7XDJhwoTkgQceyMjwf5SR95a3vCVRJ/oXvvCFRDyvv/76RJ2FVcK6deuSww8/PPnRj36UvOc970k756UPcvDusMMOyYIFCwpFKg9l6qgoju5BwxxGzf4ceeSRyY477tj7O+CAA4YZbWt50+hYtbcPfvCDGZv3wx/+sLU08wSPmg7llaWJc+hoExTbk6H3h9122y2RE9d/5vrpT39aK1HqvRY+LoYABCAAAQhAYAwJ4IAaQ/gkPdIEcEB1XH1dO6Bmz56dvPGNb8y8OL74xS9O3v3udycveclLMsf1cvmlL32pYyIk1zQBjWLYfffdo85GvxMhtK9RRY899ljT2WpU3ooVKzK6e8ghhzQqf2MTdvXVV/dGjvn1LWdAzA5Mnjw5gyCmU3JayaET0peTTz65r9NK6X/1q19NFi1aVEq+n9+8fauzGuWpTrOXv/zlGT1x6WcSz/mx7bbb9l3/L//yL4nK5ocYn7w8yw7/7ne/88Wk+4PIU1rbb7998uSTT6Zy8nZuuummnkPMz6OcbRphqHp1x7V/6KGHRkU9/fTTicri4tfZHnzwwdF0hunEqNift73tbWm9fPvb3x4mhJ3n5c4770xZSEePP/74zvPgJzgqOuTnuY19dLQNqs3L1IwJvm0/6aSTaiVCvdfCx8UQgAAEIAABCIwhARxQYwifpEeaAA6ojquvSweURor4HYkf+MAHkvPOOy95/PHHe6XWOgi/+tWvMi+Ve+21V8dESK4tAjvttFOmbi+66KJEo2D8P30dvmrVqp5TwHUubL755m1lqTG5tjNx3rx5jcne2ASdccYZGWezRs1ce+21aTE12kajlVz9a3vFFVek5/2dnXfeORPvz3/+s3+6b//SSy/NxJdzI2+9rqZ19sc//nEmfZVNeSgTbN4dn5tvvjl6uUabuHjannXWWb0pTuUU0gitU089NbGjz4466qjS8s4888ze+jUauaRpE1VPX/va1zJpfv/734/K0wnxl4PRdxppVJY6GF2QQ/Eb3/hGKlcOPjkxQ+G6665L46nMSl+M5JiSrZE9cUw+9alPpfZHTsLp06en5xTnhhtuCCUxdMfGyv6sWbMmue+++3o2uwiKptd13LXNcyIWydoYzi9btizDY/ny5WNarLHSoTEttEl8POholTZr8LT6s2q+7rrrrkz7ufvuuwfO33io94HhcCEEIAABCEAAAkNPAAfU0FcRGRxSAjigOq6YrhxQM2bMyLwsfuxjH0vU4WeDOiPVIe06qv7617/aKPweUQK2M1/TksXCNddck+qAnAzDHk4//fQ0v//wD/+QrF+/ftizPCb5O+ywwzLTqm255ZZBOyDb4KbnlLNBTqlQ0PR8zlZoq07dvDBnzpxM/KJpwJrW2V//+teZ9JXnV7ziFXlZ7p2TPr397W/vu1b88oJ10IXanBwIGn3mOL7mNa+JirTy5Cy2QdPubbfddqk8yfWdSTb+pEmTMnH/9Kc/2Si937o3aLSXy6emMAuFqVOnpnE0LZOcTi5oJJf/EYR1WCqNV73qVb3rNSJXjrVRCGNlfzSFo+pDbTR0P/fZnXvuuWm96
Jo8x6l/3ca67/PQSL+xDmOlQ2Ndbj99v042Vh2t0mZ9Nm3vV82XPmRx9wJNJ1snjId6r8OHayEAAQhAAAIQGG4COKCGu37I3fASwAHVcd104YCSM8Hv9Pv4xz+eqCMwFrR+iOvUCk2lFbuO48NNQKMaXIeBXbPG5vzhhx9O49rpxWzcYfjtOxa22mqrYcjS0OVBU6z5dkAjIB955JFoPv/93/+9pwPqmIqFbbbZJtUTrUFUFI499tg0vnRxwcL89YSa1lmte6d03/Wud2XykcdBZdp7770z8V07+stf/pJbZD//cmDFgkYJOZna6ovwUPDl/eu//msoSu+YRif58k488cRg3BtvvDGjE5qyz3cY2YsOPPDAjNylS5faKOnoOTnV7P3Dd2wrfxqBa4PuTzqnjyRGJYyF/dEIOlfHZdreU0891RstJYen/vLqeVS418mnHK2On0ZGjnUYCx0a6zLb9Dd2Ha3aZi2ftn4Pkq/f/OY3jbWfjb3e26o35EIAAhCAAAQgMBwEcEANRz2Qi9EjgAOq4zpr2wElR5P/db06oJcsWZJbSo162nrrrZOxWpQ7N3OcHJiArwc/+MEPcuWoc1LTY33iE59I6kytkptIgyc/9KEPpZ0hGpVDyBLQwuFyOLkOV40uueeee7KRzC8tCi47sO+++5ozG376OlXGXvznf/5nmgeNPNKIl7zgy6+rs9LpV77ylb307VSj6oCLBa3N8tKXvrR3nXVcXXzxxbHLesf9/Od1ck+bNi3lojryp0T0E/Dl7bDDDv6pzL6dXmzPPffMnNcPTQP4jne8I01XTgx1BOaFyy67LI2vfJ5yyil90V//+tf34oTWb9KIN6eD2oYcbV/84hd7cWLrYfUlOAQHxsL++A6LomkWhwDR0GVB9zaniyeccMKY528sdGjMCz3OMjCsbXaQfGn07zC1n3GmShQXAhCAAAQgAIEhIoADaogqg6yMFAEcUB1XV9sOKNvhpw7gpoIMrf5sUMemnBarV68u/ZX1ypUrCzs/bTpN/1ZZQh3iWiMr1FHadPptyrMLnGsqtmEL6vzWOhy2E1z880aoSN/8kT1aZ4eQJaD131xnkbbqcKobrE4dccQRhSI1bZvLh0Yj5QUrv67O+musaETQq1/96jQveY4kNxJMTig3OtSVQSMFY8HmP29tpwkTJqR5kezbb7+9T6yVl8fj8ssvz8g7/vjj++TtsssumThyOBYFrcnkyq6tptuzYcGCBYkcVVrzyYZtt902vT42aue4445L/ud//ieRnKqhqXuSnJW6f2mdEzuKy+apjv0Z1OZpSkU5kV1dlF3PSfc3OaOrhkHvz3n31AceeKD084HLr+TJcR7SLRenzFZTamqqVsfPfmShqTLz7jlFaVTRH8mqo0PDUjeOiepGDnA7yl66rnXqikJZHW1at1y+6ubfybHbQduslTNofVs57vcg+VJd+s9cdv00tZ/Q9LAuzdC2Sr03YQNCedCxuvXf1D2o6XqOlTfveF4bG/V3orxycw4CEIAABCBQlYDumXqv0DT4BAhAoDwBHFDlWTUSs00HlF4StZ6I62TRiAN1qg0a1Impr9s1isAtWK/868VRf/oa3s0j79JUh20oqCNit912S/S1u+sIlkxNu6RrYqO01DH5tre9LfP3zne+M7n33ntDySSazsnFt52wZ599dvKGN7whUx6tcaKgdVRmzpyZbLbZZim/f/7nf07yOpGDGRiSg2eeeWZaDtXN/PnzK+dMo2EcS7cNdShpBJI777YHHXRQMD11JMgZ8uEPfzjVKeVPa3Jo2jOtM6A60honmkIuFOy0XppeirCBgOyA2qlrky972ctq2QEn2epUrH5cfNvJOnHiRHcquLXyB9FZX7CcTo7BvHnzks033zz9HXO+XHLJJWkcrZX07W9/O/2dN6We0rX5X7RokZ+dzL5ds0lOVxusvDwHzX777ZfmU2VeuHBhRpzqwo3qcu2tzLppmrLPMdRWNrxKeO1rX5te/73vfa/KpcG4Td6TdA/TdKNf+MIXMs4dOXq+8Y1vJDEnZVX7U8fmqdNPerzpppumHF19OFvrtvr4ROVx9zjnsJItzVsTTKAHvT+fdNJJvWcE2RvXQa12pqB7qnTYf0aQLZKdzwvqzP7FL37RW5vSL4McmHp+0GhG3curBF+P3fo16njWFJO6zzumerZQ/ZYJg+qPZFfRoWGqG8dFzrrf/va3vbp19S6GGrGpaUpnzZqVaNpfHbvyyivdZb1tWR1tQ7dcRurk38mIbau2WStn0Pq2cuzvOvny9fWNb3xjT7SeM/Shiz9KV+8fp512mk2697tsvStyGzbAz1Sd+m/qHjRoPfNO5Nck+xCAAAQgAIGxIYADamy4k+roE8AB1XEdtumA0lfRriNF21133bVW6WbPnp1oGixf5ic/+cneiJWvfe1rmeMuzqmnntqX5kUXXZR5SVUH0Je+9KVEHUruOr3UhhwJGt3ir4Oi+OqU0suvDXppdfK01SLffpg7d27y85//PO0oUxyVQ1+Iq3PUv9bfV0fIqAV1Drky6MvvZ555pq8IP/rRj3pr46jTPRQ0FZvv0JS80NRl6mR83/vel6aneN/5znf6RKrT0E2Jpjjq0P/oRz8a7FxVp1aojiV0//33T9NSZyshS8Bf3F6c69oBJ93XKTkz9OCVF/xOK+UjpmdOhi9/UJ11srTdeeede3ridOkrX/lKqjehtZzWrVvXc6Qqr1pvSfrn63/elHpKz89/3nSDjz76aPJP//RPaV7k9A4FX554x0ayqG377e9Vr3pVorL4wepE3mgq/zq7WLycHGWDHcE1Y8aMspdG4zV1T1IHpBxPqmv96UOLL3/5y8nLX/7y9Jj0JuT0q2J/6to81aXLY9FWTn+1uZ/85Cc9B76L7/Q/BrXO/Vn1ofuInFwuPU0PKJ3Mu6eec845wezoHi2b7mRpmjq1D/cBjDuu+3iVoOkh3bXiow9zdO9xx/ytni9uueWWXPF19EeCy+rQMNWNA6KPRHzHsj7i+frXv55x5Pk85fjwQ1kdbVq3XB7q5t/JiW2rtllfTp369uWE9uvky9dXTb17//33Z6b49etb9ibk8C5b723ZAMekbv03cQ+qU8+8E7maZAsBCEAAAhAYOwI4oMaOPSmPNgEcUB3XX5sOqE9/+tOZDpWbb765kdJtscUWqVxNHaVRS3rhlDPhq1/9asahoymM/PDHP/4xvVZfP/tfR6qTSuuauJdXfSltO04lS/F8x4WcYKFw6aWXprIk006z467xOx71BfQ222zTu07ro2jtFHWy+tP16KtoTbEzSsHvXNtqq636sq6XcMddHQOxoOmxXDxtr7jiimBUjR7z42ldMT/4U8Kpg+/YY49NmepLcnUo+tdrvYFYUGeXi/utb30rFm3cHndr6jhGtk0OCsbXKY2OKwp+p5Xykjd9nWT58uvorMuXG/HkHDz/9V//lepNaD2lvfbaKz1/4YUX9uyHY6htbNSUS8/Pf950g/76G5KrthAKvjytXxMLcjD6+Qyt0yQ77eLI
WVBUFy4t+1GD7EHZYEdwaRRaU6HOPem2225LHY1ioi/K3X1HzkF/9GBoxFdZ+9OEzdMow6uuuiozMvcjH/lI75iO+39+nar9uPoWq1ho4v4s2f/2b/+WpqepOeXMU/q6p2rEk0Y0ufxoG1s/zl+jTCMmXFi7dm3PseZkxNqMi2+3uk+4azWNpEtH6x4ecsghvVFV7ry2eWts1dUf5a2MDg1b3Sjf6vx2nORomD59evohgp6RNBrandfWjTC39aHfZXW0Kd1qOv+hMunYoG22qfpuOl+SpxGhrl71jKyPyPRbdaP1DP3zOp73oUJRvbu2KTlN2gCVo0n9HfQe1EQ9804U03KOQwACEIAABLohgAOqG86ksvERwAHVcZ225YDSuhXqEHAviXpB1BQ4TQT3sinZblqVz3zmM+mc/86JJCeRn6Z1CIWcF/o61s93bNotfwqi2FReejF25dcXurGg6d9cPJVD+3JqaZoqF/bYY480js5rjYNRCepM9b8YlwNNI9P0d8wxx/Q6Ax1zxdNURLEg54Vjpa3fIeBfI7ku3nve856MHmgdGZeeOr/VCWCD1pBx12v7m9/8xkbp/ZZ++aNH1PlB2EBA6wg41uJYNG3chivz96xOfeADH+g5a9XmYn/vf//70zpVp1JesPLr6KzS0ZR2blSGG7k0ZcqUND/WoSNntZuiTp1pCr6TVizzphy0+Q/ZKOmu2olfP1ojS9faYOXJSWLDmjVrkv/+7//OyAs51uSY8NOU/Ssb/vznP6fMxCA2LV1InvKma/Qnp3OZKf9CckLHBr0naQSnP8pJjhEb3Bpgyrc61P1Q1v40afOU/rve9a6UpdpbUVD7dOxj60A2eX/2p67V6GalLSe1vy7Qu9/97jRPoY9ItK6My7Obxs8vpz/CObRmmh/X7vvT7Gm0tdKRQ9p/XvFHSOo5LRTq6o9kltGhYasb5VtTiuojIldHsmU2yMnszmub58gro6OS34RutZF/W3b7u0qbbbK+bT7s7yr50rVWX919cqeddko/ItIzpGy8q/uYzZG8vHpv0wY0rb+D3IOarGfeiaxm8xsCEIAABCDQHQEcUN2xJqWNiwAOqI7rsy0H1HnnnZe+/OklUF/ZNxE0DZV7qXRbjYDyO5Y01ZKm17ruuuvSJOUQ07o+7podd9wxPWd39JWsi3f00Ufb072p+dx5bfXVdyj4X/lvu+22oSi9Y3YqElseRVIHop/m1VdfHZU3bCeuv/76TN79cth9fcGaFzQ6ye9Y0IixUPBHMPmj3OTU0xolLl11WoSCna5LoydCwTrEfJ0LxR9vx+xUa3ICNBGq6JSra3/rnECxvFSRX6SzSkM2wqXvppvTdGjumGyTH9yIBHWuSRcV/JFKOp435aDNvzqaXJAt1DoodoSqOrDs9FTuGitPjiW1CZVFzihNcemP1JEjWVOw+R3qTpbWrHDl1nb33Xd3pwq3jou7Xg7OssGNlNW1ZeqsrNxB70mS7zsZNEpH9s0Gf4SsXcuujP1p2uZpujjHX9uie5G+jvc/QAh9NNDk/VnOUt/BqTzKwWQ/bPjud7+blkOMbTjhhBPS8+qkDgVNC6vpM0P1FoqvY/beovzpPmTbih3tF1qXra7+KD9FOjSMdaPRTfoowOnh9ttvH8StaZRdHG3FNBTK6Kiua0q3ms5/qEz+sSpttsn69vMQ2q+SL3f90qVLM3WqetXznm0//lpvsWl/i+q9LRvQdP0Pcg9qspFfnB0AACAASURBVJ5tO+OdyGkrWwhAAAIQgEA3BHBAdcOZVDY+AjigOq7TthxQv/zlLzMviXYKNFdMvYjpS/TYn4vntrYjVPlftWqVOx3d/uEPf0jzo5EIofWd3MVab8V1Wuyyyy7ucLr1p3JSx5pe/kLB/yIw5ijRgvAuLW011V5odJN96Q6N2gnlYRiOqdPUL6NGE2kkg/70Rb8/Aiw0ssKWwZ8SJeTQ0LRV7stofV3rdw4efvjhaV7EWmsHhMK1116bxlPeVU+hoM5UV7ai0Vuh6zf2Y/6oE3GK2YGqHKxOaVottbHYnz/dnfJRNH2dlV9XZ9VR7fRkwcIFveJq646pw9w5lLQegzs+efLkFI3fTuQ8ygs2/9J1jcJ0X4s7+W6rDjx/yjQr28pz19mt7LEcZUuWLLEi0t++Ldb1amtlgtac8kcLWaddngzdX3zH9e9+97u86JXODXpP0lSjPr/QOiU33nhjOnJOce2ad2XsT9M2z1+HS0ytY8fC07pVfjlDuuHrRN37c6g+Qvbbd+xJ/22wowNOPPFEG2Wg33Yko0Zxh54hbDw7hW8T+qMCFOnQsNeNbGeo7ahs+iDE173QGmqKV0ZHFa8N3Woi/8pbXqjSZpus77w86VyVfDlZvr6qbvU8GLJBWkfP1X1odJzkFdV7WzbAl9tE/Yf0sui9qMl65p3IaSdbCEAAAhCAwNgQwAE1NtxJdfQJ4IDquA7bckB97nOfS1/+9BLoj1Dyi2hfJt0Lo7bqNPWdB7rOdoRqfYcy4S1veUuan89+9rPRS/SFq5sqS3n47W9/2xdXTimXTzt1lousziIXR9s5c+a4U5mt/+KmeJpqLxQ03ZYvr2ynbUhW18e+/e1vp3nX1EM2+FMZXXDBBfZ032+/41ALy9vgL/CukXh+8Nex+cEPfuCfyuxrZIfjrY6MWNh5553TeFWmEovJ29iOf/zjH0/5iGeoo2iQMvs6lbeuh5NtO3OL1qPz5Tehs1/72td6HOQAkiNFQQ4fp2Pa3nvvvb0v7N20ovrC300Tpw5qfxSJRl3kBT//smdy3MhZoM4uTRmpr8M14kZtRc7touDLe81rXtMbtaQ8qA3JTrtyaFSJ/RLdyv7mN7+Zxld+HA8bz/6WDXXpaKtRV2WDHDn+tU068Ae9J/ltw67RpS/ztb6V7s8u3xpFa9mWsT9N2zzfqaw1i4qC1jRyZdBoX1sGXd/k/dnWR6zz2e+gdqMS/bJoSknfYStdlexQ/v3rivZ/9atfpTzERaMRQ0Gjrx03ba2Tqgn9UbpFOjSMdePsqbhoFFgsaPSzY+jbXhu/jI7qmqZ0q+n82/LY31XabJP1bfNhf1fJl7vW11fVbejZ+pFHHknrXXFmz57tLs9si+q9LRvQdP1bvSzzXtRkPfNOlFErfkAAAhCAAAQ6J4ADqnPkJLiREMAB1XFFtuWA8tdXUKdTLKjTVQuru04Ct9WaC/60Ue56vyNUI5Wsg8rF87eLFy/OyM8b/aC1HFwetJ0xY4YvqrfvryUTmz7Kdyyp48p2Hjmh/rRa6hiWAywU/C8mla8HHnggFG0oj7k1LpRvdT6HguvEzhuF4a7TdEWujuxIEE1T5NbXsJ2j+vLeXaetpoeLBX/dFelcLEhPncymppmMpTWKxzXqxvGR46Kp4OtU3roeLj2/nWl0nBtt5M7brS+/rs6qw9qtE2Yd1v6IHn2t768b569RZ7/kP//8822WM7/9/Oc5WjMX5fzw5cnJ5Ac7EmP69On+6b59f7H0KqOYJkyYkOqSdGrmzJl9smMHZMedHmqr+05
TYZB7kr3PiKkcDhrJpzW/nL64PGsNk9C9rsj+tGHzPvShD6UsQ2uLWa76SMCVI+QsaPr+7NfHq1/96kTOPBvslFGx9dT0rODy7rayB5q+atDg6/9WW20VFSNdcGlaJ3hT+qPE83RoGOtGo5b9j4Q0wi8WNNWyY2htr39NkY66uE3oVhv5d/mLbcu22abrO5Yfd7xsvlx8bX19tc94Lp7/vKzn79D0lYpbpt6btgFt1L+vl2Xei5quZ96JnOaxhQAEIAABCIwNARxQY8OdVEefAA6ojuuwLQfUK17xivTFXwvbFwV/fSZ1GMSmVHnDG96Qyi07jZL9OnDu3LnR7PjrsigfF154YSaupnfzOz9iXy/760tsueWWGRn+D/8F/Cc/+Yl/KrO///77p+WWs0ZTF45C0Mu26wDSdurUqcFsaxRGmZEYunifffZJZWqUiB/+9Kc/9c6p08FOV+V3JKgOY84uOQHdFH7Ks0aJhMLatWszuqBRNoQNBOR4UT24+peuNxGsTukr5qLgT1+39dZb50a38uvqrD99pp1i0u9M22uvvdIRF+pQ8sO0adNSjuIZW6tJ19j8h0Z3+LKL9q28kFPeH/Gq0VV5wR/VU3YtJo2S8p1gklFlNJ3f0Zg3ojEv37Fzg9yT/A8UXPuwW32hrntcbF25MvanaZsnx4vfpmP3P5+V1hZzZZOO29Dk/Vmy/fr4xS9+YZPr/fZHROpZJeTccxdOmTIlU2aVRfedvDborrVbjQT3+anssbDddtul3LSGlR+a0B/JK9KhYawb37mguli+fLmPJt1XneqjB6d71vamEZMkKdJRF7cJ3Woj/y5/oW2VNtt0fYfy445VyZd/jf/8HRvp43/UJodvLJSt9yZtQBv17+tlmfeiJuuZd6KYdnEcAhCAAAQg0B0BHFDdsSaljYsADqiO67MNB5Q6Bt1Lv7b6orso6Etld02so9p+tRxb6NamZdej0rQasaAOMpcPvejaL+X9l0d1JIVGNumY78CIjYyxnVGzZs2KZSv50pe+lOZrlKZ6sw69JqYO9KfVUeehC1o7y63zEups0kLUrm7tF+VOhrYaXeLiaavpu0JB6xn58ewaHaFrxtMxawc0qqyJYHVq3rx5uWLVHv1O36Lp66z8ujp76qmnpnqiNS/84I+0c7qkUVF2hOP222+fyiiactDmf/78+X6SlfetvFB7sA4yjdAIhSeffDIth8prHW2ha3TMdrj/7Gc/i0UNHvenGxLLpsKg9ySNHHL1rU7QSZMmJYceemhvVObll1+eiF/RVG9l7E/TNk95c/nW/TE2ta7jKz128bW97LLL3Kl02+T92dZHzEEmx5TLl+6tRUFy/DUddW3eiJqYPE0F5tLVNnbP0Acm/jOR1rDzQxP6I3lFOjSMdeOvaab7fayd2JGZsWk3y+ioWDWlW03n39eL0H6VNttkfYfy4h+rki93ndXXmBPYn54yNI225JWtd5d2Uzag6fq3elnmvajJeuadyGkIWwhAAAIQgMDYEcABNXbsSXm0CeCA6rj+2nBAqQj+FEJ2yiZbRH1B53fKaHHcUFAHgotXpvPLyfj617+eXlc0DZg/Pc4222zjRKRb/8vKmKPMX4NI+Y2NjPE7o1Se2LQ+6uTzHVqaqmlUgr/Qccxh55flzjvv7HXCXn311f7hzL4cAk4PtFWntjrstLaXfmsKkpBj8Ic//GF6Xazu9NX0+973vjSeRpu5dXgymUiSxHdWaqo5Qj+B173udSnLIjvQf3X4iK9TefXjrq46fZ0vvwmdVeex01frWPLXwHBxQiOu/PVqiqYc9PMvPqEpyBybMltfnjp8Q+3BLoIe+zJdncWvfOUrUx4//elPC7Ogaz7ykY+k1ygPcjaXDeqkdGy1DfEtK8vGG/Se5K+D9a1vfcuKLfW7jP1p2uZpylnHUnayKJxzzjlpfF2ntVlsaPL+XLY+3vGOd6T5mjx5ss1S8Lc6ev2pplQe256DF3oH/dG7eSMF7ZpldorAJvRH2SrSoWGsm9///vdp3cWc8bIZGjXmdFXbmLOvjI6KVVO61XT+PfUK7lZps03WdzAz3sEq+XKX+fqqtRJDQc9+/nqJ+oAiFMrWu39tEzag6fovq5d+OZqsZ96JfLLsQwACEIAABMaGAA6oseFOqqNPAAdUx3XYlgPqC1/4QvryXzTllXUonHfeeUEKmlrCdSiU6fxyQpxjQtfqy8hYsOsqXHLJJX1RtYaFy8PPf/7zvvOaDsZ3FuV1fPidUXKaxIK/ULLqSw6XUQla48LxKhq5JSeSpmtUfK15Egv2i091LDlGcuTpy+dQ+PznP5/mRfoZCv6UVcpHnr58+ctfTuU1NbonlKdRPua3lyI7oHKqg0nOvFDbchx8ncpbQ8XF1/Q5Tge1XbVqlTsV3Prym9BZp9OaQs4GuzbRZpttlmi6OT9oFKaf/6IpB/38l+HjpxXa9+XFpsyzo8zy6s+3x6E1gWwejjnmmEz5//jHP9ooub/tiMa6I9r8xAa9J/md47Fp4vx0Qvtl7E/TNs93fPz4xz8OZStzzHdevutd78qccz98fcizt2Xuz2XqY+XKlRl9it0vXP78rb2/n3jiif7pwn191OLaspyDsbDDDjuk8T72sY/1RWtCfyS0SIeGsW6+853vpGxiH5Icf/zxaRzxltM7NlKqjI6KVVO61XT++5TDHKjSZpusb5ONvp9V8uUu9vU19iGGRiq5NqZtbJRU2Xp3abttXRvQdP2X0UuXd7dtsp79Z7zQfd/yUp3EnMHj4Z3I1QFbCEAAAhCAQJMEcEA1SRNZ44kADqiOa7stB5Rbi0cvG5ruLG+NBX+EQN4LozpidF5///Ef/1GalOK66z7zmc9Er1OHmounrytD6yypE83F+fOf/5yRpSnH/Kk/FC9vxJXfGaWOwlCQTOXFpRmbTiR07VgfU97dlHjK/29+85vcLJ188slpOfVVZyxIl/wp1Y499th07Zw8Pv5CzRpRYcPChQtTOY63OilCQXlQ23HxtEaXDVo757jjjkv/7HSONv7G+NtOAZZnB7Rml/ty+Ve/+lUQh9UpjSAqCv40d1orIS9Y+XV11l/nYtttt+1L+oILLkh1SLoU6gz316tRnLw17Gz+89pDX2YCB6y8PN4f+MAH0rK85z3vCUj7+yH7BXjIzrqL77nnnsRfT1AjHmIjRd01diuHlWunZUa02evzfg96T/rBD36Q5ilvBJTWqdMUcVYPy9gf5btJmyd5zpkqnvb+pxcfOZAfeuihFJkclo59bI3DJu/PZerDb08hfbj44osTPQuE2qIK5nfeajqtskF67o/+22OPPYKXqsPW2UGx05SzNtTVH8kro0PDVjfK984775zqVGgUjEZR+9MXimHs+Uryyuio4jWhW23kXzLzQpU222R95+VJ56rkS/HVfvxnrunTpweT8EdWhfTDXZRX723ZAKXdtP6W0UtXZrdtsp55J3JU2UIAAhCAAATGjgAOqLFjT8qjTQAHVMf115YDaubMmWkngToALrzwwmDJNJpFa564Tiqt1R
EK69atyzgzjjrqqFC04LEDDzwwla/1NkKd4FdccUUaR3mJjTLwF/v1O5Q1AsB1TPkvtl/84heDebKdUW9/+9uD8Xbaaac0X+rUffzxx4PxhvGg/RI1NhWh8i498DuNVq9enVskjRhzOuO27373uxN1mMeC3/Etx5imfnRBX2SqbtXx53eka+2ZUFi8eHEmfemPDeogcXnT9qKLLrJRNvrfdv2gGAPp9ZZbbpny0rpJoWB1Sl+65wU9jPmdvvqCOi9Y+XV1VuV1OqB1fmyQ082dj42I2HHHHdM4iuvrrZVn86/RQ3VCFXkatejKou2KFSuCSZ9++umZeOrsCwVd70+V9qpXvSq54447QlFzj22++eZpem9729ty41Y5WeeeJOeDY6VyhZxqCxYsSHS/UryXvvSlGZ5l7I/K0qTNkzyN4nP59u9/GrXnRjS4Nmb5aI2rUGjq/mzTiz0j+Os/vfOd7+zLkht9FHOC+x+ZLFmypO/62AHprmOnbWgEmabL1KhLF2+77bYLjtypqz/KYxkdGra6Ub79URLi5NsZTc3ppiv1P9zRhxChYHUmpqM23qC61XT+Q2Wyx6q02abq2+Yh9LtKvnS91VdNrRsK/ujA2CgpW5+23tuyAcpvm/ob00vLqcl65p3I0uU3BCAAAQhAoHsCOKC6Z06KGwcBHFAd12NbDig5edR54jpS1JF22223ZUqnF0jXwebixV4Y7Rojdk2EjGDzQyNR/GnxDjvssEwMLWzsOz/01Xjsq3z/a0N9mS9HlUbguLWjNB2FP0+930nnJ2pfplV+vzNL66z4U2uoE33p0qW+iKHfdy/xrm5D025IT+S80cgGF0/TkBUFjWRz8bXVWjeLFi3KvUxOUP8adZRqAeVp06alX9ZqCj6tzeHiaYRKKKjOXRxt58+f3xfNn2pFcR588MG+OBv7AbUjjfBwrNTeb7nllkyxxU4OWBdH27vuuisTx/2wOlW0Hpp1gMWmXozJr6uzvg3UWgk2qMNZIy30F3IsyTHnf/UtNqF1dJxcy+ekk05ypwbaVpF37rnnZupQI1tDQc4K3yn0pje9KfHtuXRGslxHssos+zdnzpyQuNxjdi2dvJFZuYICJ+vck9Rp7o8O/ehHP9r7SENllD36xje+0bNpsbKXsT/KcpM2T/L8jyt0Tz3ooIMSOYHdfVHHxEXhhhtuyOjDKaec0jtu/zV1fy5bH75TMzTln/u4QU6/efPmpdnVvUplUJ3oL2/a3PQib0ejYd212mqdTH99No0c80dF6z6o9R9Doa7+SGYZHRq2ulG+ZSt9jtJJOco18sWtOThhwoTEnwY6Nq1zWR1tSreazn9IN+yxKm22qfq2eQj9rpIvXe/ra2jkouLoQzDfrsbuQUX13pYNUB6b1N+yemn5N1nPzvarTfJOZEnzGwIQgAAEINANARxQ3XAmlY2PAA6ojuu0LQeUiqHRKP5LpkaX6Oteda750zb4nQmxkUfq6HLx5Gyw66QUYTv44IPT6yVH0xrJwaMRSv50bhptkDeKZuLEiRk5Lk/abr/99okcR/66G+rE0heP1nnkv0w7Geo82WWXXZKf/exnvc4td1xf7edNu1VU9q7Py9Hiz6/vyrHpppv2piTUtIT6Uwecz97F+/Wvf12YZTFy8bW1X7CGBKhj23cu+dc7GRp55R/XNDH6qlT16gfNde/H01e3Gi3jOy7Vse7i5E0F48vdGPf1tfEnPvGJlIXqXKPMPve5z/VGnTlGbqsRIXa9jphOadSb2lIoyDklPXNytVXaao/WGRiTP6jOat0JTZvmp60vhTVll9/pHMq3O6ZOMq2b5cvQvuyMHCt+iOVfa+VpCq+q68YNIk+OMa3B5udX7dTvxHd51mgQ18mn+KoXOfHVAe87o3Xu05/+dG+EpLu2zFbtUM43Pw3JUv7UUX3zzTeXEZMbp+49yV+83Wfm7yv/WvvIhjL2R9c0afMkb++9987Ur59XtTV1rrrg81E8tdUTTjghqItN3J/99GLPCHb9Jz2TaGSiPghRkN77ZdK+7hn6MMa3JVqnzi+rK3Pe1taZZMuxKkeJRo/pfujS1v1E00/mhTr6I7k2P6F7mOINS934LPxOb8fMbcVFNtYfWf/AAw/4l6f7vs7o+piO+vEG1a00UTOdn8u321bJvy8ztl+lzUpGE/Udy4t/vGq+/Gc+PT+EwuzZs9M2JJ56ltBIdI2w94Nfn7be27QBLg9t6G9ML12adttUPfNOZMnyGwIQgAAEINA9ARxQ3TMnxY2DAA6ojuuxTQeUiqI1LDTnuesM8B0O6pTXy8vll1+efrWoaYdCwZ+zfKuttgpFKTym0S52pIVePvXFpEYqhKZRs0L1RfLXv/713kuuK4scRxo546b200uv60xwWzs1hv8y/f73vz/RYtourttqihJ9yZvnELP5G4bfdgFwV56y21mzZhUWY88990x5yaFpHRYxAeqA1GgDPy9yMukLagU7FaOLZ79ED3UgyNnodEAjV/zOeK3ZMZ6DnBOaos2O5nF81ZbkLFG7CE17mKdTchLZILvjZIe21qGbJz90vT1mdVaOJhtHv1VOfaVdJvhf8FtZdk2govxL76uEQeVpbTWbV9n/UFizZk3PUe137Ls2I06ysxqB6NpUSEbsmByANh/+77prYyndJu5JJ554YjrSy91PlE+N0pFjNfahRZH98bk0ZfMkU/ciOWOUR5dfbTV9pL6q94OdOtLxj62FV/f+XKY+lIbLh7/Vc4iC9EZrVfmjpPx40lU9K1RtT5It+yZZciDLCW4drdJ9TQmokVJl72eD6o/yU0WHhqFuehX0f/80WkyOafF0eih2Gjmp4I8yiU3rrHhldbQJ3fq/rPc2TeXflxnbr9JmnYy69e3k5G2r5st/btM0mqFgnSqu7dqpdPPqvU0b4PLcVP2X0UuXZmjbRD3zThQiyzEIQAACEIBAtwRwQHXLm9Q2HgI4oDquy7YdUK44GkGiUUD6mltfxNsvUvVFur487CKoM1zTfmmEgab6Ktsh7OdNnW36alrT5g3SQep3cMl5paBOc3G49tpre/t+euxnCUh/rrrqqt7f2rVrsydL/Fq1alVvqih1NhC6IyA7oHU6NNWYnECyB2pLo+Zk7Y7Yxp+SHpg1pZjssaZnlH5o1Nx4Chp5qfuJpqm1zu6mODRp85TfO++8M1m4cGEyiP3NK1MT9+c8+WXPqR40Bac+ipFDo+69Qs83/gc2cjKJo55F9ExUdZSiX44u9EfpjWXdyD5ohNIll1ySFl1MpYe6h/hOO3/6Yo30GoYw1vkfpM12Ud+D5Kur+mzSBox1/ecxa6KeeSfKI8w5CEAAAhCAQLsEcEC1yxfpGy8BHFAd121XDqiOizXUyenrR/dlprYLFoZHfQ11IcgcBCAAAQhAAAIQaJmARmpohLGel/TxTl6w6+bZ0al517Z1btTz3xaX8SKX+s+vad6J8vlwFgIQgAAEIFBEAAdUESHOQyBMAAdUmEtrR3FAtYY2KthfGF7T/5VdEyYqkBMQgAAEIAABCEBgIySg9
eHcRzt6fsoL/jRsmnJ5kBHqefIHOTfq+R+kzFyzgQD1v4FFaI93ohAVjkEAAhCAAATKE8ABVZ4VMSHgE8AB5dPoYB8HVAeQTRJ/+ctf0s4Urf1EgAAEIAABCEAAAhDIEtDUh875pG3elI8aSfHmN785jW/X3sxK7ubXqOe/G0obbyrUf3Hd8k5UzIgYEIAABCAAgTwCOKDy6HAOAnECOKDibFo5gwOqFay5Qj/5yU+mHSTf+ta3cuNyEgIQgAAEIAABCIxHAs8++2yi51TnhJo9e3YQgzr63/ve96bx9HHPMKwlN+r5D8LmYGkC1H8xKt6JihkRAwIQgAAEIJBHAAdUHh3OQSBOAAdUnE0rZ3BAtYI1KlTT7bm1DNSh8ra3vS3RQtoECEAAAhCAAAQgAIEsgR122CF1LL3uda9LjjjiiOTOO+9M7rvvvkTr6+yxxx7JW9/61jTO5ptvnjzyyCNZIWP4a9TzP4boNoqkqf94NfJOFGfDGQhAAAIQgEBZAjigypIiHgSyBHBAZXm0/gsHVOuIMwnMmzcv7SRxX/S+6EUvSrRuAQECEIAABCAAAQhAYAOBp59+Otlpp536np3cM5Tb6nl21113HSrnk0ox6vnfUBPsDUKA+o9T450ozoYzEIAABCAAgbIEcECVJUU8CGQJ4IDK8mj9Fw6o1hFnEnjiiSeS4447ru9PX/MSIAABCEAAAhCAAAT6CSxbtizRuk6/+93vkp/+9KfJ9ttvn/zyl79MDjzwwGTmzJm560P1S+v+yKjnv3tiG1eK1H9/ffJO1M+EIxCAAAQgAIGqBHBAVSVGfAj8nQAOqI41AQdUx8BJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBADQI4oGrA49JxTQAHVMfVjwOqY+AkBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgBgEcUDXgcem4JoADquPqxwHVMXCSgwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgwAOqBrwuHRcE8AB1XH1v/71r08+9rGPJXJE8QcDdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAHhlsHNt10015f7ne/+92Oe5NJDgKjTQAHVMf1J+fTNttsk3zqU5/iDwboADqADqAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA4MuQ589rOfTT74wQ8mv/jFLzruTSY5CIw2ARxQHdefvmbYeuutO06V5CAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGAQAkzBNwg1roFAkuCA6lgLcEB1DJzkIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjUIIADqgY8Lh3XBHBAdVz9OKA6Bk5yEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAARqEMABVQMel45rAjigOq5+HFAdAyc5CEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAI1COCAqgGPS8c1ARxQHVc/DqiOgZMcBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEaBHBA1YDHpeOaAA6ojqsfB1THwEkOAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEANAjigasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAjUI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAHnryBgAAIABJREFUAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOq38sHFBPPfVUsnLlymTx4sXJokWLetu77747eeSRR5IXXnihj8Bzzz2X6Jqiv3Xr1iXPP/983/WhA2Vlrl+/PpinkMwmjz344IPJbbfdNiZpN1GOGF/VUV7QzTNWzzpHGB0CMR2w9Vul3RaVXvZDNuWWW27p/a1Zs6bokt75snmtag8U35Z30N8h21iqcCUiyW6uXbs2WbFiRc8233XXXYlskPJfJpTlFyv7008/XSYZ4kAAAhCAAAQgAAEIQAACEIAABCDwfwRwQKEKEBiMAA6owbgNfFVXDih1UN50003J9OnTk1133TX6t9deeyXHH3984jsq5KzKu8Y/t9tuuyUHH3xwcs455yRLliyJcqkqc+rUqcnMmTNbdwqp3BdffHEyYcKEXpnvuOOOaBmG+cTy5cuDdbbnnnsmjz/+eDTrCxYuCF6nOl64cGH0Ok4MH4GqbaxMuy0qpRzZvj2QHSgTqua1jD149NFH03bs52nQfd8mlilTmThy/Fx99dXJPvvsk3Lbfffd033Z01NOOSUR17xQhV+o/Erz2WefzUuCcxCAAAQgAAEIQAACEIAABCAAAQh4BHBAeTDYhUAFAjigKsBqImoXDqiHH344mTJlStqpqQ5IdTiqw3nvvffOHHedk/fee29aPH01f8EFFwzUmXvUUUcloa/r68iUE+2xxx5L89fEjkY33HzzzZmOYLEYVQeUnEznnntuog5sV6duK0deLEhXTjvttL5rzjjjjETnCKNDoE4bi7XbotKfddZZGd2ZPHlyqVGRdfIaswcafeV0volt0w6oe+65J5HD38+b2qtstRzF/nHty5kXG2Fah59LRzIIEIAABCAAAQhAAAIQgAAEIAABCJQjgAOqHCdiQcASwAFlibT8u20H1N/+9reMU2XixInJ7NmzE386NTmb9t1330yHp++Acgj0pb7rrNT27LPP7snR6Co5hDR9lBwV1ulx0kknRaeyszLPPPPM5Jlnnul9jb969ereNF7nnXdeJl2lffjhh2fK4PI4yFaMjjzyyL40lM7SpUsHETk011x66aXBcqmu8oLqzNW16pQwugRsG2ui3YZoaLq4SZMmpXrj9CdkS0LX65jNax17YB1QBx54YLJs2bLetHZyOF977bWZvN566609OyVHjBzPcp65MmjbpANKo0Nli518fQigqQudE0j504hVf2SU4mo0VN5UgJbfrFmzevF1jftTPcn5NWPGjDR9yXZpx+qG4xCAAAQgAAEIQAACEIAABCAAAQhsIIADagML9iBQhQAOqCq0GojbpgNKUyrtv//+aSejHEOxET2arsrvEA11GtvptdS5GQrqOHUdq26r0UWhYGXGpuy64YYb+mSqA7lO0MgsjeyyDjOXZ201ld0oB9W3Xx63r85nOQ5j4corr0yvmzt3biwax0eAgG1jTbTbULFj0zdedtlloejBYzavdeyBdUDJ6eKH6667LtVx
tQut+eaH22+/PXO+KQfUk08+mRn5JLt73333+Umn+5paz7fLyqc4x4LlF6trXS8+vmwcUDGqHIcABCAAAQhAAAIQgAAEIAABCPQTwAHVz4QjEChDAAdUGUoNxmnTAXX55ZdnOlA1GiYv+CONQg4ou85IXufmySefnEn7/PPPDyZtZcY6nHWxnd5LoyMGDY888khmhMO0adN6I8M0sso5abRVh+4oh9haUCqbOuBj4Zprrkk55HV4x67n+PAQsG2siXYbKp2m7ZNeWYeupvosG2xe69gD3wF13HHH9WWhyAGlC5R3Zw+ackBpakwnU9tbbrmlL2/+AZtPjVbVKKZQsPzy6lrXazpOlxccUCGiHIMABCAAAQhAAAIQgAAEIAABCIQJ4IAKc+EoBIoI4IAqItTw+bYcUE888URvnSfXuagv3fXlfV546KGHkqOPPrr3p+nvbNBUdU6etnJYxcK8efMycTXFXShYmXkdzvPnz8/IrNKxbdPW6LD99tuvNwJAzhY3GmjOnDmZNEbdAeWPiPA701V/WmdGehIK/vRksdFroes4NnwEbBtrot3aUvrOHjmKNVWcbyt0vkywea1jD/w8aQSlDdaxY0dAKf5f//rXtBxNOKDk+PYddFoDytkemz/3W84muyZUrE1afnl1Lfn3339/cuqpp/b+Yk4tlw+2EIAABCAAAQhAAAIQgAAEIAABCGwggANqAwv2IFCFAA6oKrQaiNuWA2rhwoVpx6k6gtUpXDdU6dy0I2+0/kooWJl5Hc6aQsvv1NZIgDpBaatD2A/XX399Jo2NyQGl9WHsui8aAREKOKBCVEbzmG1jeU6Jsu3WkrjiiivSdqN106yzOG+0nS/L5rWOPdAUm3Iu6y/kaC3jgJJ9cDKKHEV+OWL7dipRrcdVJthRU7F2a/nl1XWZdIkDAQhAAAIQgAAEIAABCEAAAhCAQJgADqgwF45CoIgADqgiQg2fb8sBdfrpp6cdwnLaNDGNWpXOzVtvvTWTfmgKLKG0MvM6nO16RrFRVc8//3yiNa3cX5Uv+zdmB9Ts2bN7Uwr6Tjztr1ixok+rcUD1IRnZA7aN5TklyrZbH4ba2wEHHNBr7/vss0+i34899lim/WtkZZlg89qEPYilW8YBFbt20OMnnnhihsudd95ZSpT9oECcX3jhhb5rLb9YXcshp78mnGp9meAABCAAAQhAAAIQgAAEIAABCEBgHBDAATUOKpkitkIAB1QrWONC23JAaWon39Gg6fXqhrKdm0rnkksuyaQfW3/KyszrcJYDxS9TrHNVnbp+vKJ1UHwuG7MDStOJKZxxxhkZPhoVJaeBH3BA+TRGe9+2sVi7USnLtlufyLJly1J9uuiii9JThxxySHpc086VWWPI5rUJe5BmyOyMhQNq0qRJKRPZqLJ2edWqVZnrdK1GeNlg+YXq+uGHH05laQo+AgQgAAEIQAACEIAABCAAAQhAAALVCeCAqs6MKyAgAjigOtaDNhxQMoC+A0b71sEwSDHLdG5Krr7MP/zwwzN5WLRoUTBJKzPW4azOVk3j58qlDu0HH3wwKFNpuXjannnmmcF4oYMbswPq8ssv7xVZo1P22GOPDKO5c+dmcOi3YxhbbyZzAT+GloBtYyGnhDJfpd36hVX7crriOzT8afl0vowe2bw2YQ/8vPr7XTugtO6c4+S2ZZxyyrNGKrlr3NZOH6p4ll+orjWloJPh15fPhn0IQAACEIAABCAAAQhAAAIQgAAE8gnggMrnw1kIxAjggIqRael4Gw6otWvXph2M6mjUAvZNhDKdmzK+p512WiZ9jYTQ8VCwMkMdzuvWrUvs1FUXXnhhSFzvGA6oDWi0hpXrbL7sssvSE3PmzEmPOx3x18nxHXFlHAepYHaGjoBtYyGnRNV26wqptjlx4sSeLk2dOtUd7m3vu+++jI5pWtCiYPPahD2Ipdm1A0pTgrq2qK2c6FU+DHCcnYyQ88jyO/XUU5OVK1f2/hRfa1D5ckIyYrw4DgEIQAACEIAABCAAAQhAAAIQgMAGAjigNrBgDwJVCOCAqkKrgbhtOKDU4eg6KbXV+ixNhFDnpr7g1zRSWp9JX9bbkU8aaaMpn2LBypw2bVqyfPnynrz58+f3pgTzpxPcfffdEzlS8jpucUBtoO07oPxpEDWi4qCDDsroycyZM9MLb7zxxvQcDqgUy0ju2DYmp0TddutAqI06W+OmeHTn1EYnT56cnpctKFpzyOa1CXvg8mO3XTugrF0Wtyph3333TVnqWk19aIPl5+omtsUBZQnyGwIQgAAEIAABCEAAAhCAAAQgUI4ADqhynIgFAUsAB5Ql0vLvNhxQ6lT0OxzbckD5aYT25YySAyQvVOkw1ciq0LRTVr6m5pMzzP3JOVY2+CN/VKai/JeVO1bxYg4o5cdfu8fV34oVK3pZ9R0LOKDGqvaaSbdKG5MelGm3LmdHHnlkamtC6xmdddZZ6XnJXrp0qbs0uK2S17L2IJhQkiRdO6DsiDA506sE64C65557+i63/LTmlOy//uz1qg8cUH0IOQABCEAAAhCAAAQgAAEIQAACEChFAAdUKUxEgkAfARxQfUjaPdCGA8pO9aQRRE0E27mpDkx1cE6YMKH3pzWajjnmmOTss8/ujWIqk6aVuc8++yTq1J4xY0ZPttJwf+oYzxtNVSa9ojgbswPqkksu6Su+nS5R3DVyZcGCBSl3HFB92EbqgG1jTbRbAZDDybXNQw89NMjktttuS+Mo7gUXXBCM5w7avLZpD7p2QK1evTrDQjw0hWGZoJFjmrLP8dY2ZAstPzvd4uOPP55oVJmTgwOqDH3iQAACEIAABCAAAQhAAAIQgAAE+gnggOpnwhEIlCGAA6oMpQbjtOGAsp2VchC98MILtXNd1Lk5SAJWpr/my/r16/vWftpvv/0Sf62iQdLMu2ZjdkBdfPHFfUXXemFyIroOaW3F4KabbkqP4YDqwzZSB2wbs06JQQujqTB9vZED2v5p2j0/zv77759ri2xe27QHXTug7Np84rJmzZpS+DXy0+eofU2jaIPlF6rrW265JZWFA8oS5DcEIAABCEAAAhCAAAQgAAEIQKAcARxQ5TgRCwKWAA4oS6Tl3204oJRljRzwOyzLTF1XVNQynZtFMux5K9PvcFZcOaHkdPLLcsYZZ1gxjf3emB1QF110UZDT1VdfneG79957JzfccEN6DAdUENvIHLRtLOSUqFoYjZKz7dJvo3n7yk8s2Ly2aQ+6dkA9++yzfaOYNC1fmeBPpSm2mr4v9FGB5Req66effjo59thje3+haRPL5Ic4EIAABCAAAQhAAAIQgAAEIACB8U4AB9R41wDKPygBHFCDkhvwurYcUJquzu8EXrx48YA53HBZmc7NDbHL7VmZtsNZUvwv9l2Z2lqbaTw6oDRiburUqRl9mTJlSvobB1Q5XR7WWLaNhZwSVfN+1113pfpxyCGHJEoj9jdr1qw0rtrvVVddFU3O5rVNe9C1A0qFnj59eoaFHL1lgtqgs33aHn/88cHLLL8m6jqYEAchAAEIQAACEIA
ABCAAAQhAAALjnAAOqHGuABR/YAI4oAZGN9iFbTmgNErI77C89NJLCzOoL+qffPLJ6LokbXRuWpmhDmfl67DDDsuURw620AiAwkIWRBiPDighWbJkSYavrzs4oAqUZshP2zbWhFPCty9z5szJJWBH78TWi5IQm9c27cFYOKC0Pp7ftrTeXZlgpzucO3du8DLLr4m6DibEQQhAAAIQgAAEIAABCEAAAhCAwDgngANqnCsAxR+YAA6ogdENdmFbDqhly5ZlOjr33XffRCNd8sLy5ct710ycODG4NkkbnZtWZqjDWXleunRppjzqxL311lvzijPQufHqgBKsU045pY+xOOOAGkiVhuYi28bqOiU0hZumgHOOlKJ1jGR37FpQWg8pFGxe27QHY+GACjl6YywcH013qLWzHG9tY8wtv7p17fLAFgIQgAAEIAABCEAAAhCAAAQgAIEsARxQWR78gkBZAjigypJqKF5bDihlT1Nj+Z2WmsouL5x22mlp/Hvvvbcv6gMPPJCel9yzzjqrL07VA1bmzJkzoyKOOeaYTPoHHnhgonVVbFCHrtY7cn/q9C0bND2Yz6zKtWXT6DKe77g7//zzc5PWOmFyPvrl1z4OqFxsQ3/StrG67VYjnpyOHHTQQaXKf9JJJ6XX6Fo5f0LB5rUJexBKR8euueaaTJ660HM5kw444IBMukWjUxctWpSJH3PKqUyWX926jrHjOAQgAAEIQAACEIAABCAAAQhAYLwTwAE13jWA8g9KAAfUoOQGvK5NB5RdO2mvvfZK5GQIBftlfsgB5UZIuc7nE044ISSq0jEr8+STT45ef//992c6YpWPyy+/vC++7bA988wz++LEDiiuK5+2samuYtcP2/HbbrstLU9eZ77L95VXXpnGdxy66Jh36bNtnoBtY3XarRy+/micsrLkZHH6pG1sGj6b1ybsQYyozdO1114bi9rocb9NOibz5s0LpqGRTrLbLp72n3rqqWBcHbT8ytZPVCAnIAABCEAAAhCAAAQgAAEIQAACEAgSwAEVxMJBCBQSwAFViKjZCG06oDT1lTpwXeeltvvss09v6jo3HZ86MzXt3IQJEzLxQg4o22Gr0TKPP/54LSBW5uTJk5NnnnkmKtMfpaXyKN933HFHJv4gDiiVQx3Qu+22W4aDpi68/fbbo+tiZRIewh/nnntuWh6NGNPNMS/o/JQpU9JrxBgHVB6x4T9n21iddqtRdL492XPPPYOjEC0Va4ck46abbrLREpvXJuxBXyJJ0msHGr3ll0XrMTm7GLqmyWMXXnhhJm3ZHTl/NYWe1rZTO1S7U/ldHidNmtSbijQvH5af6rpoir88eZyDAAQgAAEIQAACEIAABCAAAQhAIEwAB1SYC0chUEQAB1QRoYbPt+mAUlbVmRka1SLHjTqPXeem3foOqCeeeCLRl/Q2jn7LQXPZZZdVppInc+rUqdH1nTSCSx2xfl5UFjmmnnzyyV4+yjqgVq5c2ZsOy8rzZfv7Gn0g58z69esrl7frCx599NHk6KOPznBSWQ4++OA+h53Nmxx6frlxQFlCo/E7r41VbbdyVJ944okZvXA6st9++yU33HBDEMpdd92VHHXUUcHr1G7lIFXIy2tde2AzptFG06ZNC+ZJ05bKfnQRtIadP7rJ8ZTTKOQIl72KhTx+e++9dyKHl+4FBAhAAAIQgAAEIAABCEAAAhCAAASaIYADqhmOSBl/BHBAdVznbTugXHE0xd7pp5+eqDPXdXTarTpDjz322N4oBH9kk5wZtkPUv7bsOjAuL9oWydSX/LHgr0Hj8qH8uS/9yzqg7rnnnigLJze0XbduXSxrQ3NcIylCedcxrXNVFJyzQVzvvvvuouicH0ICRW2sSrtdtWpVVJ+kU7IboTB79uzc62RzFIryWsce2HxppFOsbej4GWecYS9p7bfKffbZZyczZszojeZUe/Md4jo+f/78wlFmRfxUrq5Gd7UGC8EQgAAEIAABCEAAAhCAAAQgAIEhIoADaogqg6yMFAEcUB1XV1cOKL9YcqBohNOKFSt6Uz5pVJFGOPCFvE9pfO/rJqpRFXnrzYxvQpQeAs0SkINo9erVvXX6Hn744cLpMptNHWkQgAAEIAABCEAAAhCAAAQgAAEIVCGAA6oKLeJCYAMBHFAbWHSyNxYOqE4KRiIQgAAENiICctxrNJKmG6zyJ2cSAQIQgAAEIAABCEAAAhCAAAQgAIGNiwAOqI2rPilNdwRwQHXHupcSDqiOgZMcBCAAgQEILF26NHfqvti0fl2tKTVAkbgEAhCAAAQgAAEIQAACEIAABCAAgQEJ4IAaEByXjXsCOKA6VgEcUB0DJzkIQAACAxBYtmwZDqgBuHEJBCAAAQhAAAIQgAAEIAABCEBgYySAA2pjrFXK1AUBHFBdUPbSwAHlwWAXAhCAwJAS0Bp5a9asqfy3fv36IS0R2YIABCAAAQhAAAIQgAAEIAABCEBgUAI4oAYlx3XjnQAOqI41AAdUx8BJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBADQI4oGrA49JxTQAHVMfVjwOqY+AkBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgBgEcUDXgcem4JoADquPqxwHVMXCSgwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgwAOqBrwuHRcE8AB1XH144DqGDjJQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCoQQAHVA14XDquCeCA6rj6cUB1DJzkIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjUIIADqgY8Lh3XBHBAdVz9W265ZbL99tsnixYt4g8G6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAODLkO3HTTTclmm22W7Ljjjh33JpMcBEabAA6ojutPI6C22GKLZJNNNuEPBugAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOoAPoADqADgy5DrzkJS/p1dF2223XcW8yyUFgtAnggOq4/r7yla8k3//+95OJEyfyBwN0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAeGXAcmTZqUfP7zn09++9vfdtybTHIQGG0COKA6rj/WgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBgDWgasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAj
UI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOqx8HVMfASQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQA0COKBqwOPScU0AB1TH1Y8DqmPgJAcBCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAoAYBHFA14HHpuCaAA6rj6scB1TFwkoMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIMADqga8Lh0XBPAAdVx9eOA6hg4yUEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEEAB1QNeFw6rgnggOq4+nFAdQyc5CAAAQhAYFwTeOGFF5L777+/9/e3v/1tXLOg8BCAAATqEMCe1qHHtRCAAAQgAAEIjDoBHFCjXoPkf6wI4IDqmPxYOKCeeuqpZOXKlcnixYuTRYsW9bZ333138sgjjyR6kbThueeeS3RN0d+6deuS559/3l4e/F1W5vr164N5CgqteVDle+CBB5Lbb7+9x+Xee+9Nnn766ZpSx+byGF/VUV7QzTNWzzpHGB0CMR2w9TtIGysr26bl/5aMskHt0NmsW2+9NbnrrrsStc+HH3441+ZI3/007X5Ve+e3nyIG4tpkUF5lr2+55Zbe35o1a0qJVz5suQf9HeJlM6F7wNq1a5MVK1b06kx19eCDDyZVeBSxdfkfRHeVX9myXXfdNf2LOaGK9Efny97zLKeiMvq8iuLW4VFWtksjtJWMskHXu7Zc5vmja/3NK0dZVlX1sqzcEHt3rEodqIzj3aaOij1VXdW1qWX1q6reurZS1p4W5aOMzfPvwS59f1v1GbYtG1+lrMp/UXzXzqvWUVm5Tn5oKxlVgmRg47PEuq6HYbTvZfTctwFZgtV/jYqNr2vfy3B17bqq/ahOnSsgMH4J6Pljk002SbbbbrvxC4GSQ2AAAjigBoBW55KuHFB6+L3pppuS6dOnp51ufgec299rr72S448/PvFf8vQi4c4XbXfbbbfk4IMPTs4555xkyZIlUTRVZU6dOjWZOXNmcttttzXmkNLDqTpJzz333GTvvfeOlvGII45I5KAbpbB8+fJgefbcc8/k8ccfjxZlwcIFwetU7wsXLoxex4nhI9BmG6siO2Yz5s2blwtNNkvOlkMOOSSqk5I9adKk5Mgjj0xuuOGGPnnHHXdc7rUhB7PsTCzPp512WppGEQPZwCaDbJCfrzLyH3300WTChAmZ63wZVff9+4Itm1heffXVyT777JOmt/vuu6f7ujeccsoppWxpEVs/35Jb9f5QpsNUH2RIt/y0Yvu6f5xwwgnJFVdc0et4s2xCv4vKeNZZZ6WXFcX181WVRxXZfjr+fpm2PMjzR5f6m8LO2anCqko9VJHrc/f3i+pAxcKmbqjcYbenymlTNrWKflXRW0ezjD1V3KJ8+Davi2fYNm18lbKWYeO39Sp1VJQPX25sv6xtwcb3f8Dp2kgX9TDs9r2Mnpd5rnVMi7bDbuObsu9luPptu4r9KGLMeQhAYAMBHFAbWLAHgSoEcEBVodVA3C4cUBolMGXKlExHmjoG5SiKOV40usAFfTlzwQUXDNSRedRRRwVHEdWRKSfaY4895rI30FajnA488MAMEz2gyUET63RUZ/ioBDmZ5FjTg6b/4Kl9dbDHgnRFnez2mjPOOKM32iR2HceHj0CbbayObKdb11xzTRSaOp5ln1xcbeVImTZtWrL//vtnjrs4vnPICZbDOuR0Vzu/5JJLeh2yLq7b3nzzzb10nFy3nTFjRnLHHXe4aL1RRTG7KIeYOtCaDOqcc3nRdvLkyYWjbzRKyr+m7n7MAXXPPfck+njBly/bo/uOWPvHta9OhryRQ3X0q8z9oUyH6aB5ULlnzZrV04+8+s+TL/1ZunRpenleXMvW/i7iUUe2SyuvLdd5/uhKf1PQBTt1WOXVQx25ZepAxcKmZit3mO2pctqkTa2jX3l664iWsaeKm5cPa/O6eIbNy49rV6FtGRufJ9uWtYhNKA/+sbw6ysuHLyNvP8++K+/Y+L+Pph7LehgF+16k500/Nw+zjW/SvhdxzWvbOpent5JNgAAEyhHAAVWOE7EgYAnggLJEWv7dtgNKUwv5X6RPnDgxmT17dm8KIlc0OZv23XffTCeh74By8fR1u/8gc/bZZ/fk6KsrOYQ0mkiOCuv0OOmkk6KjlqzMM888M3nmmWeSZ599Nlm9enVvBMR5552XSVd5OPzwwzNlcHksu7300kszMtVR6jr81DF63333JeolSgoaAAAgAElEQVRw9surjtS6jq+y+Wsqni2nK4/qKi+ozlxc1SlhdAm02casbHW8a2Sh/+fsgxw+Tqe0jXVqrFq1Ktlvv/3SuHKWX3755T274GpBXw5aR2nIAaX4iiu756ddNJrvySefzDii5eyJTQ1iGRxwwAG5zhVXhipbpR1yjIfstC/XduDL6b5s2bJeWVRH1157bYaLpjfUcXVaydmmcvvcQg4ojXT1+eqjBk2tJhkKkqcvo/37kGRqNJTO5QXLtqn7Q9kOU+VNeuoz0H1POq0/N9XghRde2Pcxhzio3EXBllEOVskOBRu3KR5Ky8puoi3Xff7oQn9DnIuOWVZN1YOV20QdqCzY1GyNDrM9VU7bsqlWv5rS2yr2VOWz+cizeV08w7Zp46uUNcSmqTqy+WjKtmDjN0zlq+eEonfTNuph1Ox7SM+bfm4eZhvfln0PcW3Kfkg2AQIQyCeAAyqfD2chECOAAypGpqXjbTqg5MTxRwvIMeR/we8XSV9P+Z2IoY5NO5xdLzChoE43v8NO+xpVEApWZmz4vabXsjLVeTpo8F9q1VGoqThs0DE7Qqyo49rKGOvfqm/LTb/lXIt1cirPV155ZXrd3Llzx7oYpF+DQJttzMqO2QSX/ZNPPjnVq5ADSg9vdrSmRjGFgpwXco46/Y45oHStncZP9q4oHHbYYansE088MRrdMsjLR1RIwYnY1JiXXXZZ7pW2A19fXfrhuuuuS8sojpa1Roo6vtpaB5Qcdf7IJ91D5LwPBU1B499jJE/lyguWbVP3hyodpjYPMR0XG+mJz0v3XK0rmBesfDnmYsHGbYqH0rOyY+V0eStqy008f7Stv64sVbeWVVP1YOXWrQOVC5vaX7vDak+V0zZtqtWvpvS2ij1VGW0+8mxeF8+wNj+xdjeIjbey88oaYtNUHdl8xMroWkuRfVc8bHzSm/rZv+drP+/dtOl6GEX7HtLzpp+bh9XGt2nfQ1ybsh/OLrCFAATiBGSPWQMqzoczEIgRwAEVI9PS8TYdUBo14D8Yy+mSF/yRRiEHlJ3DOu8Fxn95UR7OP//8YNJWZuxhSRfb4fT6smfQ4LO56qqromJURp+hRnGMUojNo68yqfM5FvwvQos6iWMyOD4cBNpsY1Z2nk0Qjfvvvz9tTyEHlNpilfamUZIuft4LrKb2cPG0daNz8mro6KOPTq85/fTTo1EtgzwbFhVScEJTmSrfdnSppinMC34HvtbDsqHIAaX4/lSI1gGlaT59r
kXTlNr0NPI2NrJMaVdhW+X+UKXD1OYhT8c1ela64jM59NBDc0fEWfkqRyzYuHm6VoVHiHVeORW/qC3791jxGOT5o239jXEuOt5WPVi5detA5cCm9tfmsNpT5bRNm2r1qyn7UcWeqow2H3k2r4tnWJufvHZX1cZb2XllDbFpqo5sPvLKqHwU2XfFwcaLQrV306brYRTtu5hZDnl6/nfK1f4Pq41v075X5Vr1GbFaDRAbAuOPAA6o8VfnlLgZAjigmuFYWkpbDqgnnngi8ReA15fn+vImLzz00EOJOl31p45dGzTVgt+xJodVLGjRWj+uOoBDwcrMewidP39+RmZR52soPXdMnZ4aqaU/TdEVC9dff30mzTpOr1gabR73v7bzO5JVN5pSUHoSCv7UXLHRa6HrODZ8BNpsY1Z2nk1wZDSF2amnnposXrzYHept5RTyR8jIfsX0079Q6zDJZmn6uFgYxAF17LHHpm0/bxrKQRjE8hk67nfC64VRX1D7tlXnY8G/VqNIbbAOITsCSvH/+te/pun5DiiNEPUdYhoJlTeqUrJkd+2aUHn2xbJt6v5QpcPU5qFIx6XHduSs7iOxYOXnlbFK3Kr3Syu7qJwqT6wtN/X80ab+xuqjzHHLKq/OqtSDlVunDlQObGp/bfo6NUz2VDlt26Za/WpKb6vYU5WzSj66eIa1+Slqd1VsvJWdx7wqm7G0Ldj4DbZlrOphVO17SM+L2twG2sV7w2rj27bvIa559qaK3hZTJwYEIIADCh2AwGAEcEANxm3gq9pyQGmqOL+TsuiruzIFsC9SeQ+M9qtFrT0SClZm3sOSpo/yy6Sv59sO1pGmtT5GKfgv75p73K5rNXPmzGBxcEAFsYzkwTbbmJWdZxOK4Gn0jN++Y7pZJCd0fpQdUFdccUXKRevU2ZfGvJGMcq5rpJn+Qs68Mg4ovTQ7Gb6DyU6LKmdEmWC/AM2rZ6tfTd0fqnSY2jyU0XHx8nVZDqnYeldWfl4Zq8Ster+0ssuUM1bfTT1/tKm/sbyXOW5Z5dVZlXqwcuvUgcqBTe2vzWG1p8pp2zbV6ldTelvFnqqcVfLRxTOszU+ZdlfWxlvZecyrshlL24KN32BbxqoeRtW+h/S8TJvbQDx/b1htfNv2PcQ1z95U0dt84pyFAAREAAcUegCBwQjggBqM28BXteWAstMANTGNmn2Ryntg1GgEvwMuNP2ToFmZeQ9Ldi742KgqTZGhNV7cX94UT0UVp/z45bjxxhuLLhmq8/7L++zZs/vm3lfZVqxY0ZdnHFB9SEb2QBttzMGwsmM2Qc6Pxx57rLdmgLvWbtuwWS6NUXVAyZZpcWa103322ac3lZs4+jZJo78GDWUcUDHZdr2jO++8MxY1c9x2XKlcTThnyt4flJkqHaZlddwv5IMPPpipI9WXjoWClZ93D6wStwoP5cvKHta27DOso7++nKr7llVenVWpByu3Th2oTNjUbM0Osz1VTtu2qVa/mtLbKvZU5aySjy6eYW1+Yu3O16ayNt7KzmNelc3Galt8ztj4Z30cmf1Rte8hPS/T5jKFj/wYZhvftn0Pcc2zN1XsRwQ3hyEAAY8ADigPBrsQqEAAB1QFWE1EbcsB5S8Mr84vTa9XN9gXqbwHRq2V5HeSxtZ/sDLzHpbkQPFlxtJXR6gfr2iu8xgXOa4mTZqUytKUYJryYJSC//KuqbQUNJ2Yz0ejovTQ7gccUD6N0d5vo405IlZ2qE1q2jY3HeiiRYvcpX1bO21ZEzbLJTKqDqhly5albfWiiy5yxUkOOeSQ9LimwRvULtXp3PFto+xJ2fpatWpVmndnh2LToFr9auL+IIhVOkxtHkI6nlaMtzNlypRMOeV4CwUrP6+MVeKWvV+6PFnZoXKWbcttPH+4fPrbOvrry6m6b1nl1VmVerBy69SByoRNzdbsMNtT5bRtm2r1qym9rWJPVc4q+ejiGdbmJ9Tuspr0919lbLyVnce8KpuxtC3Y+A0aMVb1MKr2PaTnZdvcBurhvWG28W3b9xDXPHtTRW/DtDkKAQj4BHBA+TTYh0B5AjigyrNqJGYbDij7MqhOPutgGCTz9kUq9sCor9kPP/zwTOdbrOPZyow9LKmDUtP4uQ5LdbrGvihXWi6etoOu22SdaBrWP2rBf3nXgsEKGkGxxx57ZBjNnTs3UzT9dgzz1mjJXMSPoSTQRhtzBbWyQzbBd2bG7IC1WWrfTdgsl89RdUDJdrl2qEXBXfCnGNH5QdvooB34zz77bJovl7+yTjBN4+eucVtN8xcKVr+auD8oHatvSicWbB5COh661n7tOmfOnFC0Sp2xNi9N8VDGrOxQOQdpy6rjJtuyD3FQ/fVlDLJvWTVVD1buoHWgMlkdx6YmvWdBZ3OGyZ6qvrqwqVa/mtJbq2tKJy+UzYdkdPEMa/MTaneh8pSx8VZ2jLmTXzZ+lXciybZyQ2UsY98ly9Y3Nr7cu2mT9WDrYJTse1kOilc18MycXas7Zm+q2o+q9UB8CIxHArLLm2yySbLddtuNx+JTZggMTAAH1MDoBruwDQfU2rVrMx18WvS9iVDmBUbG97TTTsukr6/1dTwUrMzQw5K+urYvenlrMTXhgNLcyHqgd50VKoO//kmoLMN4zH95v+yyy9IsqjPUlU1b6Yi/Rsz111+fnh+0cztNjJ0xJdBGG3MFsrKlSxrt6P5OOeWUTDuKOaCszdLXtU2GUXRAye5NnDix1w6nTp2awXHfffel7VPMNRXLIGHQDnxNb+rbj6qdH65cTobfGeyXw+pXE/cHybedN0onFmweQh13oWu1JpYrn7buAwAb18oPldFdUyZu1ftlTLby7NqxtoO25aaeP1w+/e2g+uvLGGS/rXqwcgetA5UJm5qt2WG2p8ppFzbV6lfI1gxiP6rYU5W1TD5c7XXxDGvz06SNt7JDzF1Zy7IZpI5sPpq0Ldj4XdN7fd67aah+B62HUbbvIQ5l25zfVuz+MNv4Lux7iGvI3gxiPyxrfkMAAv0E9CyEA6qfC0cgUEQAB1QRoYbPt+GAWrlyZfowrIdbrSHSRLAvMKeeempv6idNvaS5hLUorx35pJE2Dz/8cDR5K3PatGnJ8uXLe/Lmz5+faBSSP9WDpvKSIyXvi+q6Dih9ye+PttIaJXp4HMXgv7yrE9EFOdMOOuigjJ7MnDnTnU601pV0R384oFIsI7nTRhtzIKxspzOxbcwBZW2W2l+TYRQdULJ/jqObPtMxkf2bPHlyel52dhAH+aAd+La+lM8qYd99903zrms1bUooWP1q4v6gdKp0mNo8lO0oufjiizNl9KdQ9Mtq5Yc6DFx8G7cpHpJvZTvdi23LtuWmnj8cA387qP76MgbZt6yaqgcrN8beHY/Vgcpk2+h4t6nDbE9D9aU6rhLK2FSrX03pbRV7qjLZfOTZvC6eYW1+mrTxVnZeWUNsmqojmw9nQ2LbKrYFG79rb5rponfTUP3G+LvjsXoYZfse4lC2zenaWBhmG2/rS/VbJZSx75Jn23lT9qNKXokLgfFKAAfUeK15yl2XAA6ougQr
Xt+GA0pfk7uHV22bejmwDzZ+GqF9OaP08pgXqsjUyKrYVE1+GpqaT84w9yfnWNmgTtyjjjoq5ac5mzXaYFRD7OVd5fHnynb1t2LFil5R/Qd5HFCjWvt/z3cbbcwRsbI1UtAfNSEntT+SMPYyXdVmaYRi7C/kLB5FB5Sf59D6SmeddVZqp9R+ly5d6qql9HbQDnw7AksfBlQJ9mVadRkKVr+cnQpty94flE6VDlObh7IdJfoS2s+n7kehYOXndVDauL58u1+Fh/JlZXfVlkNMyh4bVH/Lyo/Fs6wse/93lXqwcgetA+Ubm5qtvWG2p8ppFzbV6pevp3a/it5Wsacqq81Hns3r4hnW5qdJG29l55U1xMbWi/+7Sh3ZfHRpW7ItsdovbHx43dRRtu8hPS/b5vK0Z5htfBf2PcTVtxd2v4r9yOPOOQhA4O8EcEChCRAYjAAOqMG4DXxVGw4oO9S7qems7AuMHmbkoJkwYULvT1/YHnPMMYmmHtIopjLBytRoIz1Ezpgxo29BZjm08kZTlUkvL47WrtJUVu4hTeVasmRJ3iVDf85/eddoMhv0AOrKq624a3TFggUL0uM4oCy10frdZhuzskMvkWXWFag6nYjv1PL1V/v+SD9XU/6LqeKUWa/o2GOPTdvAGWec4UT1bcsw6Luo4IAcTq5chx56aDD2bbfdlsZR3AsuuCAYL+/goJ07q1evzqSt9DWtR5kgJ7+tv5hdt2ybuj9U6TC1eQjpeKjcGlHq6lDbW2+9NRStUmeszUtTPJQxKztUzjJtua3njxC8QfU3JKvKMcuqqXqwcgetA5UFm7qhRofdniqnXdhUq19N6W0Ve6qy2nzkOWW6eIa1+Qm1uw3atGGvjI23svPKGmLTVB3ZfITKWMa+K4/Y+EmZe3uVd9Om6mGU7XtIz0P6uKGlFe8Nu43vwr6HuDZlP4prgBgQgAAOKHQAAoMRwAE1GLeBr2rDAWU7+ORIkXOlbijz4Fw1DSvTfzlbv35939pP++23X2atoqrp5cWXg8Z1GKqDNNZhmCdj2M75L++aEsoGvcTIiejKra3Wf7rpppvSYzigLLXR+t1mG7OyQy+RckxohIx0KzYCytostb88myUHqUZ2+nqrfTmaNALShkEcUHKmO/ldO6A0lYtLW1s59+2fpt3z4+y///65zCwT/R60A992figfa9asCSXRd0yjWP18az/mELT61dT9oUqHqc1DSMf7CpkkyQknnJApp75YDgUr3y+jjZ8Xt+790soOlXOQttzU84dlod+D6m9IVpVjlpVfZ3XqwcodtA5UFmzqhhoddnuqnHZhU61+NaW3VeypypqXjw219ve9Lp5hbX5C7c7mS7/L2Hgr22cekpkXf1htCzZ+16Tsu6mt35CuDXKfHaVnZul9GQ6h9hE7Nuw2vgv7HuLq25s69iPGneMQgMAGAjigNrBgDwJVCOCAqkKrgbhtOKCULX314nfylZm6rqg4TT8wKj0r039Y0nk9MOnB3i9LXmdwURli5+fNm5dJ44YbbohFHanj/st7bA2Sq6++OlP2vffeO1H5HXMcUCNV5X2ZbbONWdmhl2llSC/UcjKoUzQW7LRsRQ4NX7edruolLxQGcUD5U3Hm2ZyyDEL5Ch3TCERr81z5irbKS5UwaAf+s88+2zeKqexUpbbe5JyMORst26buD1U6TG0eYjruc1d5ZEf9+nryySf9KOm+lW/LmEZs+X5p8xErZ5m23Mbzh8/B7Q+qv+76QbeWla2zQZ9brNw6daCyYVOT3ojuYbenqqsubKrVr6b0too9VVmL8uG3S/9+0dYzrM1PrN35+Spr461sy9yXWYbNsNgWbPxg76ZWH2K6VuY+O6r2PaTnMQ62fYR+88y8gYrVL2tvBrUfG1JgDwIQiBHAARUjw3EI5BPAAZXPp/GzbTmgNCWA3/m1ePHi2nm3DzZ1HhhdZqxM+7CkeLfcckumLCqXXkqbClo7RV/wOV6hKbyaSqtrOWVe3uUUmDp1alp+cZgyZUr6GwdU17XWbHpttjEru45NsDbr9ttvzwWhDqCJEyemeqoOkVg4/vjj03jS7yeeeCIWNT3u5+f8889Pj9udqgzkiLvqqqt6Tt6Q4+Wuu+5K86p1GiQ/9jdr1qw0rsoluVVCnQ786dOnZ9Iu67SXPXG2VlvVTSxYtk3dH6p0mNo8lNFxe41GmcaCjRsqo7u2TNxB75dWdplyunzZrd92VMdNPH/YNPS7jv6G5JU9ZlmF6myQerBy69SBymLrYTza1FGxp6qvtm2q1a+m9LaKPVU5y+TDtcUunmFtfsq0O3tNzMbbeCHmrqxl2QyjbcHG79p7ril6N7X6UEbXfP3w90fVvof0vIhD3nPzqNj4tu17iGvI3gxiP3y9Yx8CEAgTwAEV5sJRCBQRwAFVRKjh8205oPTFvt/JV8apos5QfaWtL69CockHZyffygw9LClfhx12WKY8evAOdd46uWW3jz32WOZLdXFrQm7Z9NuOV+blXXnQWle+vvj7OKDarqV25bfZxqzsopdIlVTrDKidz58/P1Nwa7NCU0ZmLjBf+MtpGgvnnntuRr8feOCBWNT0uKa8c+1g9uzZ6XG7U5WBpiF0cjUvvA0+hzlz5tjTmd9++5bM2HpRmYu8H3U68LXWnyuHthplVibYqVLmzp0bvcyyber+UKXD1OahjI5fccUVGTYXXnhhrTK6i21emuIh+VZ2mXKWbctNPH84Bv62jv76cqruW1ZN1YOVW6cOVCbflqiNjkeb6jMYZnuq+mrbplr9akpvq9hTlbNMPlyb9O9xsRFQilvnGdbmp0y7K2vjrewQc1fWsmwGeSey+ShTxph9Vz79diXbgo3/uwOq6N20yXqwdTAq9j2k50X6mPfc7HMYZhvftn0PcQ3Zm0Hsh2QTIACBfAI4oPL5cBYCMQI4oGJkWjrelgNq2bJlmQ4wDdXPm/5KxVu+fHnvGo0qCE1/NciDcxE2KzP0sCQZGqXkd3Zqv+4aTRq270+zdcQRR0QZPf7444lGG5T9yr+o3F2dL/vyrvyccsopfYzFGQdUV7XVTjpttjEru+glUjZozz337OmZnEJ+cPbHtfPJkydH26O7zp+CZNq0ae5w31YOJCdX26KRAE8//XRmVOTChQv7ZLoDVRn4o5a0mLcflK5bL0v5DNlhP7542rWgYtMQ+te5/Tod+KEOv6K0ZXO1VpVfF3lltGybuj9U6TC1eSjScX3U4K+rp/28EXdWfqyMqrOycQe5X1rZReXMa8ttPH84nfW3dfTXl1N137KK1VnVerBy69SByjTebeoo2VPVV9s21epXU3pbxZ6qnGXzobhdPMPa/BS1uyo23sqOMVdZFcrGH2vbgo3/e32NZT2Mqn0P6XlRm4s9N4+SjW/bvoe4xuxNVb39u7bzHwIQyCOAAyqPDucgECeAAyrOppUzbTmglFlN3+R39GnYdV447bTT0vj33ntvX1SNGvDlnXXWWX1xqh6wMmfOnBkVccwxx2TS1wgFzZtvgzpB9aWk+9NDXygsWLAgladOwry
OUPe1ozrPRyn4D5l504ipTFonzJ/SzNU1DqhRqvH+vLbRxlwqVnbRS+Qdd9yRtrlLLrnEielt9VXejBkz0vPSvwULF2Ti+D/smhl5Dig75URRW/DXQFM+Hn74YT/pzH4VBuq099dO0LQiftDXm67dHXTQQf6p6P5JJ52UXqNr1SlfNlxzzTWZa6u0dTmTDjjggMz1RV9B+1+xKq+xl2OXf8u2ifuDZFfpMF25cmWmjHk6rvo9+eSTM/E14isvVCljlbj/n70zAd5jyP9/dhNXKVQUilSSQspRWVEhKXets9yUqwpBOTchbuv47brWWldECIIVLHEfiXVEXFk5xBmxscK6IkgsQUhEJAr9r/f49+j5PD0zPc90z3ee7/Puqu93nmeenk93vz6fPqY/0z2u/aXOm5SdVU5ck1WX8bvv8YfOp3ksY7+mnKKfJStfdinlltVBu7eprdSewgZDt6nSvnzZbZH2FOUsko8qxrAh2/giZS3KpkgbL/NRtm1BXtnGg4JSHaWHVm3fwayIPWaNm1upjQ/dvtu4+mrjI0PnPxIggUwCdEBl4uGPJJBKgA6oVDRhfgjpgJKTrpdffnnkZLCVRD6ZY3NAyaet7rrrLpuoQuekTEzepYW5c+cmJvYwgTlx4sSG6HKSc+zYsQ1x0EmYT+Lfcccd0aoIrIww/7Dy4YknnohXQ2DCtZXCrFmzYmZZA1FdpkmTJsXx9UR4kUlpLYfH+hAIUcd06aRsPKWYFnDzhVWG2q5gazKgvunfcYRj+IsvvpDRou+yfctyQMFZZdZ3vPMtTS6cQpCl85HXzhVhMH369Fgu5KMd0kHmMS9dfR2cPjqvOBbZhk9eiy13igSzfdF5QBltAQ5+9EE6Hj5LB5y8TrL10T8gjSITpuYT+Mh7mo2jLP/4xz/i8iEutoXEE7pZQZYRK1HTgozriwfSk7LTyom4LnVZ1s+y4w8bk7L2a5Ppck6y8qUHKbesDlCWdm1TW7E9hb5CtqnSvnzZbZH2FGWU+chq80weocawIdv4ImW1sfGlI5kPH20L23hoTCnXe1ObfsvqoRXb96Ic0sbNrdjGm+2ZHgu3wpg5MnT+IwESyCSAsVCXLl3UwQcfnBmPP5IACSQJ0AGV5BH8W0gHlO1pbDx9j63r8BsCJs1efvnl2MGiB0Q2B5Sc7MFqGWxNVyZImdh2a9myZakizVVayCsmkvEkthlcHFDyyWld7rwjVmi0UjDffYMVY+aEt60c+B2TpiYHOqBspFrnXIg6pksvZWOCCNuNmX9YkfjJJ58oWXdtzg60S3JFzxVXXBGthNK2i8lv2CTOm3aa5YBCfuWNOrbvwwSKlou058yZk7B/tHHIe1aQDMaMGRM5t+Dgwt/8+fPVRx99pOBwM7fXQ9tlBqzKMsuD1Za4wc4LmKAyr8Pnf//733mXReXGKivzWrzHSfcNuQL+fwS838iUcdFFF0VlxVZCeEIXfKEvtO06HhyLeLI9L0i2PvoHpIk86bzgiLymBb36Vcd/8MEHoxVx0C3sBf0N3kEgV4/eeeeduQ42pCnLiD566dKl1uzIuL542PLhoy5L2ywz/pBAoEMf9ivlunwPpQcpt6wOUJZ2bVNbtT2FzkK1qdK+fLUfRdpTlE/mI6vNq2IMG7KNL1JWGxtfOpL58NW2sI2H1lTD+NZ2b2rTb1k9tGL7buPQzLi5Vdv4UO27jauv9gOyGUiABLIJYCxEB1Q2I/5KAjYCdEDZqAQ8F9IBhWxjAtC2qgWDY/0uFj2xZh5NBxQmk/E0vvm7/oxJ3Lwthmz4smRee+21qe93wjZxmLzU6eOIsmBye/HixVFSeQ4oDNjNJ/FNWXmfMUHbCgHvlpFP46Nso0aNanDYyfKYWyvhGjqgJKHW+B6yjmXJzqtD+ndsgWkLaLPQpuh4+gjHBiaqUN/1OfOY54CCXEzC2K7HTZo8D4dClpOkLAM40BDwEMDdd99tLdNVV12V+t659957L/EOO5MFyiLfsWWyxhOX5iov81psq4M2tEjAQw22NhUMoTdTPvoMbHmUFbLYlu0fkK7LhCkerkC/IvNvlsX2GU5GrMyFozQrFCljkbhmmi79ZZZsW/ls57Lqctnxh1ke/dm3/Wq5eccsVmXsMkuujbftXJoOUKZ2alM7Q3sKnflsU7Psq4zd6vri0p4ibpF8VDGGDdnGFylrUTaaO455bXxWPmztiO1cXtvCNr5j9dAq7XuendtsT57DuLkztPE+2/c8rj7aeLPN4WcSIAE7ATqg7Fx4lgTyCNABlUfI8++hHVA6u9hiD09uYyAiB3T6OyYQsRUdJmnNlU24EcyahHN9V4nOC455MpGHtGDu+azzjvxhpQWCiwMqy/mmZdqOmChuhSBfZGyWZfLkyblF0BPi4IrtSRhaj0DIOpYn27S3tM+4AcsK2AoTDgCs3IMMs87CwXL//fcrOGH0/vtwrroE1A08bWk6sk3ZcHLBAYYXjWeFsgxQLoTPPvsstU1GudEm28KUKVMyr0N7nhbgSE/TC85jRU/RAB7//Oc/o/d4QT9oO0zGWD362muvORwVjgYAACAASURBVK3qymNbpn9AuVwmTG3b6pjM4GiC3WBbVvDE+6xg05gccQlFylgkrkw7r7/Mk22WOe1zXl0uM/6Q5cH3EPZrS0eey2PVrF3myU3jbp7P0wHK0g5tamdpT6Ev2IWPNjXPvpq1W10/XNpTXZ6sewkzH1WMYUO28UWZF42v2eOY1cbnyTXbkLTPLm0L2/iO10Pd23eXNiDNBvV5jJs7SxuPuumjfXfharatZtuR137IuPxOAiSQToAOqHQ2/IUEsgjQAZVFJ8BvVTmgzKx///33CiucsH0QbvLwBB0mzfAUFQMJgAA6UTw56TqZSmokEJIA3qODmzVMGMExZK4uwXZl2OIONlskoL2DTLSDaAOxVV7a1mdF5DLuL9t+ff755xHXL7/8srBuQjN0nTANnY92lM/xRz20zja1HnpwzQVW7te1Ta1je8oxrKtl+Y/HNt4/06IS2b4XJdax8evcvncsGaZOAq1DAOMObsHXOvpiTutDgA6oinXREQ6oiovI5EiABEiABEigNgTqOGFaGzidPCOYHMVKvFdeeaXQHxypDCRAAo0E2J42MuGZjiHA9r1juDNVEiABEmh3AnRAtbsFsPzNEqADqllyTV5HB1ST4HgZCZAACZAACTRBgBOmTUDrJJfgvW56O58ix6LvReskuFgMEsglwPY0FxEjVESA7XtFoJkMCZAACZBAggAdUAkc/EICzgTogHJG5SciHVB+OFIKCZAACZAACbgQ4ISpC6XOGeeDDz6gA6pzqpal6iACbE87CDyTbSDA9r0BCU+QAAmQAAlUQIAOqAogM4lOSYAOqIrVSgdUxcCZHAmQAAmQQFsT4IRp+6of73776quvCv/x/XDtazMseTYBtqfZfPhrdQTYvlfHmimRAAmQAAn8SoAOqF9Z8BMJFCFAB1QRWh7i0gHlASJFkAAJkAAJkIAjAU6YOoJiNBIgARLIIcD2NAcQfyYBEiABEi
ABEujUBOiA6tTqZeECEqADKiBcm2g6oGxUeI4ESIAESIAEwhDghGkYrpRKAiTQfgTYnrafzlliEiABEiABEiCBXwnQAfUrC34igSIE6IAqQstDXDqgPECkCBIgARIgARJwJPDTTz+pGTNmxH/fffed45WMRgIkQAIkYBJge2rS4GcSIAESIAESIIF2I0AHVLtpnOX1RYAOKF8kHeXQAeUIitFIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAYE6ICqgRKYhZYkQAdUxWqjA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP377ruvOuaYY9Tf//53/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBtoARvYfffd1dlnn13xbDKTI4HWJkAHVMX6wwqogQMHqi5duvCPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1toFu3bpGODj744Ipnk5kcCbQ2ATqgKtbfzjvvrI488kg1adIk/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuouQ1MnDhRbbPNNuq0006reDaZyZFAaxOgA6pi/fEdUBUDZ3IkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkUIIA3wFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiTQQgQWL16s8LdkyZIWyjWzSgJhCbBehOVL6SRAAiRAAiRAAiRAAvkE6IDKZ8QYJGAjQAeUjUrAcx3lgFq2bJmaPXu2euutt6LjggUL1M8//xywpK0hGhN87777rvrf//7XGhlmLkmABGpJAG3shx9+qP773/+qefPmqUWLFrVVG/vOO++oN998M/pD2esevvnmm0hPaPt/+umnWmUX44QuXbqo3/72t2r+/PlN5y2UTjiecFdJKB2458A9Zp3rBErhq164E2HMOhNgO1Rn7dQzbxyncZxWT8tkrkiABFqNAB1QraYx5rcuBOiAqlgTVTqgMBE6dOhQ1atXL/Wb3/wmmtDCpJb+Q1523313deGFF6offvghJoEJuTXXXDP3b7311lObb765OvbYY9Xo0aPVp59+GsuQH4rKPOKII9QNN9wQTehKWWW+w/GGvO6xxx6qZ8+eCS6rrbaa2nXXXdVVV12lfvzxxzLJVHrtwIEDU3W1ww47WPMCBmk6vvXWW63X8GS9CZx++umpOl177bWtmUfdT7ODCy64IHFN0Tqc1y4UlefaJrjKTSs3zk+YMCFR9rQv77//vjr11FNV7969I2eBblv1cYUVVlDbb7+9+uMf/6ief/75BjEhdZbHYfjw4Q35afbE3LlzE20p2lCX8N1330XssnTh+hva87wwefJktf/++6tVV1016ge7desWHbt27ar69u2rRowYob766qtMMXlcdX51/+hqt2aieqIddoT0mgnN6iQtrWbGE6H0m6eDyy+/PC7GbrvtltrGQVePPvpoHFd/wDhh/fXXT73uuuuu01Ezj83oIBSztIz6qBOQnaeTKupFXh5Mu0CeYdM6X+Zxu+22U5hYSAvPPfec9TrImDRpUnwZ7AjtgClbfl533XXVpptuqvbaay/1l7/8RT322GNq4cKFsYysD0XLmxdf561o2+UqV8u3HV373Dq1Q1m6SfutI/t8zV3rN2+MhjK46lbLdO3vXOXqPNuOrjbDcdov1tgK/RFy6qNPcrWvonabVq95ngRIoP0I0AHVfjpnif0QoAPKD0dnKVU4oDCBceSRRyYmBDERCmfR6quvHjug9CQpjh988EFcBlx/7rnnKlxjxsn7vPzyy0c30LanyZuViSfATzrppISDLM5ogQ+zZs1Shx12mNITjros66yzjlpjjTUayglH1Oeff14ghY6LevHFFytMoOoy6SMmNm677TZrxmbOnKl23nnnhms22mgjNeP1GdZreLLeBJ555hm19dZbN+i0e/fuCjZiC+PHj1ebbbZZwzXbbLNNdBNoXtNsHU5rF5qVl9cmNCtX1xsc7777brPoDZ/xFO2QIUMSbexyyy2n+vfvH0042hz+mHiSIaTOsjhgsvPVV1+V2Wn6OxxOJr9tt93WSRZWXJjXlfmMtj0tYBURHIGmfNhRv379YmeU/g32et9996WJUllctQzbMc9uzQR9OKCa1YmZD3xGeZsdT4TSb5YO8FDNyy+/HBfjlltusfbx0Accw1i1KAPk4zdTD1qnaBunT58uL7F+b0YHoZjJDPqsE5CdpRPNznb0WS+y8iDtAnn++uuv1ZlnntkwLkQ+r7/+eoks/o4VroMHD060J2jz8cAXftMB+fnzn/9ceCyNMek999yTu4K2aHmz4tt0o8/l6ahZuVo+jnl9LtKoWzuk9Vzk2FF9vslafk4bo6Fczeq2DjbDcVrSMuvcHyGnPvukUHabJMpvJEAC7UyADqh21j7LXoYAHVBl6DVxbWgHFG6osepF32AgvZtuuinxNCe2SerTp08cB3FNB5Qu1p133pmIc95556mlS5dGsvB0ESYwccONGw2dHo7Dhg3TIhqOUuY555wTvevi+++/j7bCGzdunPrTn/7UIBOrDMoEPLFs5nGfffZJTDzNmTNH7bfffok4mLBslXDvvfcm8o6y3nXXXZnZf++99xLXoLx850gmstr/iPopHaqY8MgKeK+GeQ3aBnNFpLxW1uGy7YKU56tNkHLhhINz3PzDSkc4mtFmme1D1mQYtpfDZKaOv+KKK6qrr746ahs1q2+//Vade
OKJcRzEtTmgED+0ziSHLbfcMndiU5fD5YitXDfccMNEWTEZ+9lnn+VeLifb4RB66aWXoj4BenrggQcScqdOnRrpD9f961//ilZPaT2kOaDQtpv5w4pgrDJAX6kDvuO8loXjqFGj9M/Wo+Tqy26RmOn4QF9bNJTRiZlW2fFEaP1KHcCZbnsABg9VyIc00pzyZvnHjh2bsAk4+DFWcQnN6iA0M+Q9VJ2AbKmTjqgXMg9pdqH1eO211yb0jPqPOphX90444YT4urPPPluLazhiLGa2LVhdjMlxTF7AYQWH6fnnnx+tlDLjYSyPh6fyQtHyyvi+dCTl+upz694O5elH/l51n192jIb8S93W3WY4TktaXZ37I+Q0VJ8Uym6TdPmNBEigHQnQAdWOWmeZfRCgA8oHxQIyQjugzIlRTMi98MIL1tx9+eWXidVQNgcU3l1g3gz/7W9/s8rCBLcZDw6ptFU0Uia2HLEFOKKkYytvIt0mR58zHVBYCWZztOCm0JyoxCQqOLVKwNOhph4uueSSzKxPmzYtjo/tLVqprJkFa/Mf4Vw17QBPAuYF0/mKlT1ZQdbhsu2ClOerTZBy0/Kpy2o6jLIcUAceeGDMF20UtgtJC1hFoXWR5oDCtSF1Jjlk5SOtHFnn4TDSZTSPY8aMybos+s2cbIeDANsrmeHBBx9MyJbbGJptmM0BBQejucJvlVVWaUhDp4d3JOJ3XQa0/6+//rr+ueEoufqyWyRU1gFVRidmQcuOJ0LrV+rglFNOMbOf+HzMMcfEuoWOb7/99sTvti9wDmh7wDFtXGO7tlkdhGYWsk6Ag9RJR9QLmYcsu0Ce4QAy9aw/H3300TbVxuewuk7HhbMyLcj8pPVFGJciTS0TR2wNnbctqJSfV14Z35eOpNy0cmpOrn1u3dshXZ4ixyr7/DQ91OHeLZTNcJyWtMa69kfIZcg+SdqXr7YuSZffSIAE2pEAHVDtqHWW2QcBOqB8UCwgI6QD6pFHHkncuGZtIYIsX3rppXF8mwMK+0WbN8JpNzGQZd5I4hqsCLAFKTNtMIhr8dSemT7eV9NsMB1QDz/8cKoY5NtM86GHHkqNW7cfPvnkE4WtNHT+sd1ilvPBnJCDw4+hcxA4+OCDYxuALbiEww8/PL4GTpOsIOtw2XZByvPVJki5WflEebEVl647aQ6oJ598Mo6DuNdcc00WqugJejhGEDfL8RNSZ5JDmXbUVljd9mM7UziyNcODDjrIFj1xzpxsR3skQ54DCvHxjh+kaXNAmRPEiINVU1lBprfjjjumRpdcfdktEizrgCqjE11gH+OJ0PqVOsDWwWnh6aefjm0TtoCV1nkBDk9tz2nvU0yT0awOQjMLWSfAQuqkI+qFzEOWXSDPWGWk9SyP0ult6hvb5On4sK+0IPOT1xehr9ByccRKq6wg5eeVV8b3pSMpN6+cLn1uK7RDWbpJ+63KPj9LD7qd0vZW9b1bCJvhOK3R6rSei47TQvdHyGnIPknal6+2rpEwz5AACbQbATqg2k3jLK8vAm6zk75So5xoYmmXXXbxTgJPEOGdP/omAk9NZjkfkAFsPYV3HSE/n376aUOesIWSlodj1k0MtjAy49omE5GAlJk1GHz88ccTMvfdd9+GPLqe+PjjjxWcSfjD1idpwZxQQHmwfL+VArZ2MfWQ5oSEw1GvMMPTpQydh8Chhx4a2wB07BLM1XNZWwlBlqzDZdsFKc9XmyDlZuVTM8K2MocccoiaMmWKPhUfsbWXuXXpqquuGm0VF0dI+YB00cZiO7m0EFJnzXBIy6c8jz5GO0uwhRTe16fbHzjD8/ogc3LD9mCAdAjZJoNHjx4dpSkdUNgqDTrS+Vl//fVl9hu+42aiR48e8TW49pVXXmmIhxOSqy+7hWzNFOnnbQMmM1dWJ5DnazwRUr9FdYAVztguU9sDVsblBdi0jp/1XjApp4wOQjILXSeK6qToGM+1XhSpm8gzVl5qPcsVSJtuumliC2tT1/fff398XZZzW+Ynry/Ctri9e/eOZSNvabsZFGVeNH4RHRUtJ/KS1ee2Sjtk2oTr57r0+R197+bbZjhOa7TAuvZHyGnoPknal89xWiNpniEBEmgnAnRAtZO2WVafBOiA8knTQRZuoEM4oHBzqm+gccTqobJBDtyybprNG3ikv91221mTlzKzBoPm05GQOWDAAKtMnydPPvnkBEe5JZTPtELIwl755qRr9+7dFd5HI8OgQYOicsJB0WpllGXh9ySBkBMbSEnW4bLtgpTnq02QcrPymSRo/4Z33pltbN5T5nYp9rMhdeabg1kC8x1Nb7zxhho/fnyC0YQJE8zoDZ/hFBgxYkT0h3c2yODigMKLqyFj5MiRicsnTpyYyAuc8y7hr3/9a+K6yy67zHqZ5OrLbpGY60S7LWNldQKZvsYTIfWLfBbRAeKb9Qx1Ocu5h3dmaCfASiutlOtMNXVRRgchmYWuE0V1UnSM51ovitqFOX7F+zTxYJbZ1qe9Dy6UAwoc5Xujfv/735smlvhctLxF4hfRkZRbts9tlXYooQzHL2Zb5PtBoSJ6MG0fNl/1vVuRvLqg5TitkVJd+yPkNHSfJO3L5zitkTTPkAAJtBMBOqDaSdssq08CdED5pOkgK5QDCoMq84Y5azsQh2xGUeTALetm8rXXXkuk72MFFCY0zTJhZULIgAkn8+l3rCKzvdA8ZB58yJYv1L788ssTYs2bejx9ytC5CISc2AAp3+2ClJd1g1ikTZBys9ovFwuQjgkfbaxON6TOfHPQecZRv5tjk002iU7jyf3lllsubrex7UuZ4OKASpOPlXxm/zF9+vS0qInzcqu2tIlfydWX3SIzrhPtiYz//y8+dBJiPGHLaxn9Ql4RHSC+XOGMSf60YL4zI28LNCnDhw6kTP29DLPQdQJ5LKKTIu05ZLvWiyJ5gFxzEv6OO+5Qb731VrxCHG0I0rU5K0M6oL744guF99DpNgxOCtvDREWZF41fREeSe9k+t1XaITAtGurS53f0vZtvm+E4rdES69ofIaeh+yRpXz7HaY2keYYESKCdCNAB1U7aZll9EqADyidNB1mhHFADBw6Mb1Rxw5r30mKHrDZMJGTdTN58882J9PHOJVsoMhjEljf65hvHtPRxA4WtdfRf2hPrtvyY5/A+F50ebvafeuop8+eW+YwtDXr27BmXBTan7QEOtS233DL6bY011lDY7oehcxEIObEBUrIOp9VLxHVpF6S8rBtE1zbBNZ9wOvft21etvPLKyra9m2kZW2yxRVynfLWxWn5InUm+WfrS+XE5fvTRR/EE6fDhw+NL9t5775gT3glVxolfZrK9X79+cT6grwULFsR5zPqArWl1P4AjthK0BcnVl90iLdeJdpkvXzoJMZ6QecX3MvrF9UV0gPhyUh8rgdMC3oWn7WDatGlp0RrO+9JBg+D/f6IMs9B1AlksopMi7Tlku9aLInmAXNMBhfeRIMiJUdtDVSEdUMgDVqNoG8TRtjUs4hUtb5H4RXQk5dr6miJ9bqu0Q5HBFPxXlz7fZYxW1MY60mY4
TksaYp37I+Q0dJ8k2ySf47QkaX4jARJoNwJ0QLWbxlleXwTogPJF0lFOKAfUKqusEt+orr766o65yY4mB262m0ktATfnPm+UMWm51157xTIxCWh7TxXSnzx5chwPeSi6qmfhwoXqjDPOiGXA+YT3irRykNu36O3CsL2M1tOYMWNauYjMewqBkBMbSNJ3uyDlpd0gFmkTXPOJCT1dH9COZAVza0s4b32GkDqTfLPa8SJluuKKK2J22KZJBzmhhS1xmg1lJtvNyWq06UUcYWZ/CvuwvctKcvVlt2Bl5t228iKNpy+dmOX3NZ6w5bmMfiHPVQdm2ttvv31st+CMG0gZcA7lhu7x7jBMmrsGXzpIS68MM9OuQtQJ5NlVJ0Xbc8g2859VL1zzoBmbDqi///3v0Wk8nAMHuu4fcJQPKeCdovp3n++A0vnCyjstH0dspWULRcvrGr+ojqRcW19TpM9tlXbIppO8c3Xp8zvy3g2MfNsMx2lJy6tzf4Scmm16iD5J2pfPcVqSNL+RAAm0GwE6oNpN4yyvLwJ0QPki6SgHgy3f74D64YcfEjepG2+8sWNusqPJgZvtZhIScMOOgaO+Ud5tt92il5jbpEuZaYNBc9AMuVdeeaVNXHSuqAMK25g8+eSTCmnvtNNO0RPuOu877rijwpYjrR7w8mbzyTI48N577714i8Gtttqq0IRsq/Nop/yHnNgAR1mHy7YLUp6PNsGWT2ylhifW8YcndIcNGxZPMqP+ZzmgQrWx2i5D6kzyTdOXzovLERPyffr0idp8uUWdfGfIRRdd5CLSGqfZyfZly5bF/RF0W9RhCKeD7hNwnDt3bkP+JFdfdouEzEmZrIl2M1O+dBLa1s08N6tfLcNVBzo+jldffXVCt9KpgDjmeynkFramLPnZlw6kXPN7s8yqqBPIp6tOiozxdPld64VrHrRc0wF144036tMNWzb2798/4bAcN25cbEshHFDnn39+LB/tEJz7tlC0vK7xi+pIyq1zn2tybLZOmTKKfq5Dn9/R925g1ko2UwedFbGzOvdHKEcVfZK0L5/jtCK6YFwSIIHOR4AOqM6nU5aoGgJ0QFXDOU4lhAMKE1TmZNm2224bp1fmgxy46YlLPBX58ccfR5M0p5xySiJtPDGK69KClDl06NAo/pw5cxTe0YEnSnHTqsuDbfUwYZQVijqgHn744Vi+TkcfsVKoMzigwGvChAmJcprvE3jllVeykPK3FiYQ8iYZWGQdLtsuSHk+2gRbPnUdTztmOaBCtbHazELqTPLV+tJpN3PEpL3mqLesMuVgW0P9OxzhzYZmJwalvnr37l0oCwMGDIjzj3LMmjWr4XrJ1ZfdIiHXiXYzU750Itn5Gk+YedWfm9Wvvl7qIG1yScfH0XQ2QLcXXnih+XP0eciQIbH+MTZxDb50kJVes8ykXkPUCeRb6qQj6oXMQ55dmDYxatSoGD8mcM0Vc7CXG264If790Ucfje0khAPK3BYaaY8YMSJO2/xQtLwyvi8dSbnIc9ZfR/a5Jr9m65Qpo+jnjujz63bvBmatZDMdobOidmXGr3N/hHxW0SdJ+/LV1pmc+ZkESKA9CdAB1Z56Z6nLE6ADqjzDQhJCOKAwQWLe5GHfeB9BDtyQxlprrZVYMWSmi5Vdr7/+embSNpmmDPPzSSedFN2cZApUKnKG4UZd/2Xd1ELWpEmTFFYAbbLJJtaywFFz3nnnqaVLl+YlXfvfd9hhh4RtgO8f/vCH2uebGWyeQMibZOTKVofLtAs2eWY7YH52bRNs+cRE4gUXXBD94cnyww8/PFH/s9oN7KNv5iOvjcW2ntjCyfZna1dC6kzy9eGAOv744yMeWPmK9+rIgPbT5GVu0SfjZn1vdmJQrsJab731spJp+G3rrbdO5P+DDz5oiCO5muWVn4vYLRJqxgHlSyehxhMNADvgHVA6D+YKt80220yfjo54h6Lmv+uuuyZ+y/viSwdZ6dS5TiDfdagXMg9FHFDy/aUzZ85MrPCHbUA+wvjx4+N2IoQDCg4nsy3B1sq2ULS8Mr6ZhvxcpO2Scsv0ua3UDtl0kneuyj4fOi0zRkNZpG6lnZjfO8pmOE5LWl2d+yPktNXHaUna/EYCJNBuBOiAajeNs7y+CNAB5Yuko5wQDii8n8Ic/Jd54twshu2GY4UVVlBdu3ZV3bp1i7Zg2mOPPdTJJ5+sXFfUSJnrrLNOtOIJT5zrSR9dlrPOOkstWbLEzJL3z3jCFU9hXXLJJQqrrXTaOOJJqVYPL730UqJMK620Ujx50uplY/7tBEJObCBFWYdRV8q0C1KerzZByrU5XvBODV3nsxxQaId0PBzz2ti11147Ed+81nzCXmswpM5cOOh8uBwXL14ct5X77bef9RLzyVuU3Vw1YL0g5WSzk+3z589P8Id9FglSfwsWLGi4XHL1ZbdIyOwL0T/lBZ86CTWesJWhWf1qWVIHeY4GfR3eE2nWyXnz5umflLmqBe9MdA0+dZCVZrPMqqgTyLfUSUfUC5mHPLswV0CNHDmyAf/pp5+esJdjjz02ioOtnLUdhXBAyS34pk6d2pA3G/O88ko+vnQk5Zbpc1upHbIqJedklX0+bLTMGM1mY3W0GY7TfjW6uvdHyGkVfZJsk3zZ7a+k+YkESKBdCdAB1a6aZ7nLEqADqizBgteHcEAhC+aLV3v16lUwV/bocuBmu5m0X5l+Vso0b5Rx84Abe31DjyNWVeF9RlUEPOUOB42Z/jPPPFNF0kHTMCdT991336BpUXjHEwg5sYHSyTpctl2Q8ny1CVKuLZ8YPHbv3j2q81kOKJR7tdVWi9uGnj17Zioa7wYxt6HTxY8QrQAAIABJREFUbcpee+2lbFt6hdSZC4fMwogf77nnnpjDuuuuqw466KCGv3322SeOg7IXXUmik2x2sh2rWDRzfcSEpkvACjVzu1Ks8sKDCjJIrr7sFukUdUD51kmI8YTkh+/N6lfLytKBjmM7TpkyJWEfY8aMiaMNGjQo+g0PpGASzzX41kFaus0yq6JOIM9ZOik7xnOtF1l5sHE1HVC2LZ/hgNb9hG5PXnjhBfXss8/GdhTCAXXiiSfG8pEuVnjYQtHyZsUvoyMpt2yf2yrtkE0needaqc9HWaRuffV3Um5Zm+E47RfLq3t/hFxW0SdJ+/Jlt3n1m7+TAAl0fgJ0QHV+HbOEYQjQARWGa6rUUA6ozTffPL5RxYQZBnZlgxy42W4MiqYhZZqDQcjCC9CxNZ6+yccRW+tVFW6//fZE2thKotUDnvjSPOmAanVt5ue/mYmNww47LLaRs88+OzMRWYfLtgtSnq82QcpNy+eiRYsUJhgxkMwKso3FRF1WePvttxOODKwaRVq2EFJnrhxs+bKdw0MBuj1xPaJPsq0issk3zzU72Q4Z5mQ18on3FrqE2bNnJ8rXo0cP62WSqy+7lXl3WQHlWyfS1n2MJ2wQy+gX8vJ0YEsT5zDOMO0D9Q8BW2cuv/zykf6xfVGR4FsHaWmXYWaWOUSdQJ7zdFJmjGfmP6te5OVBsjUdUFdddZX8OfoOJ6X
Z3m2xxRbRds76XAgHFNLQ8tGGpvVRRcubF79ZHUm5vvvcurZDVoPJOdlKfT6KInXrq7+Tcn3bDMdp2e9h0+0Ljs2M08r0R7Ars01HHlppnJZTxfkzCZBAJydAB1QnVzCLF4wAHVDB0NoFY7CFiQrf4YADDohvVDGIy3sXk0v6rjcGLrJ0HClT3sQg3nPPPZcoC55Ezpps0LJ9HPE+E3NAPnDgQB9iO1QGHVAdir/yxPGOL23DWMlhW70hM4VVLPqayy67TP6c+C7rcNqEQeKijC9Snq82Qcotm0/Zxs6YMSOjVL/81Lt375hr//79U+OH1FlRDph8fOyxx9TEiRMb8mvu2b/bbrup//znP6l/F154YVx22BYmKoqGMpMbyJ+2aRyxWsElyNUxerstea3k6stukY45KZPX94XQibR1H+MJyQ/fy+gX17vowJYuzh111FGxfYA37B5b7mmbwTaSriGEDtLSLsMsdJ1Anl100uwYz7VeuOTB5OvigPrpp5/UNttsE9sH7OSUU06Jv/t2QMFhb67E7NOnj5nlxOei5XWJ34yOpFzffW5d26GEMhy/1KnPd8my1K2v/k7K9W0zHKeFHaeV6Y9gd6H7JGlfvuzWpc4wDgmQQOcmQAdU59YvSxeOAB1Q4dhaJYdyQOF9SXriBMfbbrvNmn6Rk3LgVvbGAGlLmbbBIOJhpY5ZHmxFUlUwt3DwtZ1hVXm3pUMHlI1K5z138cUXJ+pO3gQ2SOy4447xNXfccUcmHFmHy7YLUp6vNkHKdcnnpEmTovcVTZs2rYEBVoaZbdItt9zSEEeeMFeS/P73v5c/x99D6qwoB7zPT5cTDnkzXHrppfFvee/HMSd1Ie+II44wRTl9LjO5ccEFF8R5RfonnHCCU5pyFezYsWOt10muvuwWiblOtCNuCJ2EGE/YIJbRL+S56sCW9kMPPZSwDzic9t577+jceuut5+S413JD6EDLlscyzELXCeTVVSfNjPFc64VrHjRbs61KWwGFuJjMNp1Cup3E0bcDStrnlVdeqbPbcCxaXtf4RXUk5Zbtc1ulHWpQiMOJOvX5DtkNVq992wzHaWHGBGk2UqY/gszQfZK0L5/jtDQmPE8CJNAeBOiAag89s5T+CdAB5Z9ppsRQDig8iW7eDO+www6Z+cCPn3zyiVpjjTUUntL/8ssvG+LLgZvLzWSDEHFCykwbDM6cOTNRHmwNgC2tmg1YBaL/smQgDl7Wq1luv/32WdFb4jc6oFpCTd4yKSfQX3311UzZeLJ7rbXWim0+771nsg6XbRekPF9tgpTrks9NN9004nDRRRc1MHvzzTdjRmgftt1224Y48sSWW24ZX7PTTjvJn+PvIXVWlAPegaLbP/O9SbATvPMJv7lu1WKuAEPft2zZsrjMLh/KTG7MeH1GXA7keZVVVnFK33wiF9smpm0dKLn6sltwcZ1oD6WTEOMJm77L6BfyXHVgSxt6hR1rW8eKBP09bxWoKS+UDsw0zM9lmIWuE8inq06aGeO51gvXPGiurg4oxD/55JNjm9G2g6NPBxTayQ033DBOB+P0rPeRFS2va/yiOpJyy/a5rdIOaTsqcqxTn++Sb6lbX/2dlFvWZjhOq3acVqY/gt2F7pOkffmyW5c6wzgkQAKdmwAdUJ1bvyxdOAJ0QIVja5UcygGFxPCCe/OGeNasWdY86JOYZNHxP/jgA306Ps6bNy/+HfGwpVLZIGWed955qSLxxLzOH454sb0tfPPNNwr78+s/3IDIsN9++0Wy8FJjc1JVxoNTzkzzjDPOkFFa7vvqq68el2mPPfZoufwzw8UIyK1zbr755kwBkydPju1jueWWU3LVi7xY1uGy7YKU56NNQJ6l3LyJjXfeeSfmYHsRPWSi/pjtQ96WQGbdy3JAhdRZUQ7bbbddVEZMxpvbNz799NNx2V23kcWqI5PXU089Jc0p87v5Im3IAaciYcCAAYn001YzaZnmRDTSy7JtydWX3SIvrhPtIXXiezyhGZvHsvqVOjj//PNN8bmfd91114R9aFvFlnquIaQObHkoyyxknUB+pU46ol7IPOTZBcbKWvfDhg2zYY/PffXVVwrjSB1fH7McUDI/eX3RFVdckZB/ww03xOnbPkj5eeWV8X3pSMrNK6dLn9sK7ZBNJ3nnquzzs/qxvHzq36Vu62wzHKf98t6nKsZpZfsj2FfIPimU3ep6wSMJkED7EqADqn11z5KXI0AHVDl+ha8O6YDCDbC+GcYR2z+lvbR37ty5iUkumwNKTsbhyc+yQco89dRTU0UiT127dk2U6dFHH22Ib06go9znnHNOQxztgMLvI0eObPhdnzj99NMT6eVNMOvr6nrE09n6qW6UHS+1ZujcBH788UfVr1+/2I6xuunbb7+1Fhr2ceCBB8ZxBw8ebI1nnpR1uGy7IOX5aBOQXyk3bzLspJNOijnceuutZpHjz1gdhnqk/373u9+lPpn+1ltvxfEQP8sBFVJnRThgtZwuG/oqM5iT9a46HzVqVCwPcvHenSLhxhtvTFw/bty4IpcrbKtmbpm10korRU/c2oTgZeXmlok9e/bMfFhBcvVlt8ibqwMqpE58jydszMvqV+oA/XeRIO0TNuo6aafTCakDnYZ5LMssZJ1APqVOOqJeyDzk2cWLL74YtzPYEiovoH/Q7aQ+Zjmg3n333UT8tL4I/TGcBVomjptttlnqOF7ns2h5ZXxfOpJy08qp8+3S57ZCO6TLU+RYZZ/v2l9n5V/qts42w3HaL2NUV73LfrDIOK1sfwSbC9knhbLbrLrC30iABNqDAB1Q7aFnltI/ATqg/DPNlBjSAYWn1YcMGZK4ecXkyOzZsxN5glOlR48eiXg2B5T5Qm7cCG+wwQYKN01lgpSJlzpnBaxAMm/G1157bSWfTi7qgIJDBvvrm0/3oxMZMWJEwllz2mmnZWWtJX6bMmVKgh/K7vJOoJYoHDOZSkDegGNbMUyCmQFOqYMPPji2j5VXXlnhieS8IOtw2XZByvPRJqAMUm7aZBgcD9dff33MAe3NAw88YMWANuO4445LxMWKITibzADWAwcOTMTLckDh2lA6c+EAW3j22Wej7Vh1ewsHjA7goc/jCAenS5CTqdjSzrZCNU3WQQcdlEi3mXcBYjWB6YTq3r17VFbz4QzYvfkeNLwHMG/rSsnVl92ChYsDKrROfI8nbDouq1+pA9TFIuH9999P2BdsG090u4bQOrDloywzyAxVJyBb6qTqemHLQ55dmJOoe+65pw174hzGwXiYx2wTsxxQcJybcc2+CCv40f5cd911qn///ol4hx9+eOoDDmaGJPO88sr4vnQk5ZrlNPNbtM/1eV9j5kN/9lGntKwix6r6/LJjNJRJ6rbuNsNxWjXjNF91J1SfFMpui9RzxiUBEuicBOiA6px6ZanCE6ADKjzjRAohHVA6oZtuuinhSMEqIixxx7YE0vGkb4pNB9TSpUujF5tjEk7/ro94Onj8+PE6KedjlsxBgwYp7PVuC3g3FSYMdfo4ogxYxQSZCEUdUFoWVi8cdthh0dZ+cG
zp8zhiohMdS6uGhQsXRozMLcB0+TbaaCN1yy23lHYmtiqbdsn38OHDFbbU03qH8xE2j60s0R6YK+PQLr322muZaLLqcDPtQpa8Mm1Cmly82w31wfzDe4E0H/M4YcKETBZof0x++AynzP777x9NTJq/abl5Digk6FNnaRyQHzhk9J/OnzzCVvAuEmxJJVeiIu4BBxygpk2bZuX03nvvqXPPPdd6Hdrvu+++23qdPvnSSy+pI4880qqboUOHqjfeeENHdTpOnDhRrbnmmgl5yy+/fLQqDZO+pr769OmjkP+0kMW1jN2a6WU5oKrWSdnxhFku/bmsfpvVgU7fPPbt2ze2ixVXXNFpwr9qHSC/ZZmZZcZnn3UC8prVicsYT+c9q140kwc43rHiyZSLtu3YY4+NVnLpdG3HV155JbYbXGNzQMHJjb7CNpaGMx5/st3Fd/RV1157beIhKVseijIvGl+nmaejNLk++9w6tkOaT5ljVX1+M2M0lCtNt7DTMv1dmlyfNsNxWrhxmu/+CLbms09Ks6+ydlumrvNaEiCBzkWADqjOpU+WpjoCdEBVxzpKCTe6Rbd4aSaLmBg85phjFCZXzKe/zZtdOHb23ntvhZdywmGhA242067B9VtttZWO6nzMk3nppZemyrrvvvsabtIxYahfDu/igMKqn7vuuityONkmA1AuTEjiiVPIa/Ugt/8y9Y7PmPjAAJ2hcxOAYxlPKJoTbOZkO1a54MlD1M+8kFeHi7YLefKabRPy5Mq6YPs+derUPBzRhCO2KoFDF+2lyRWOP7S/L7/8sjr00EOj9gurUV2CL52V5YAnnD///POGttfkdfTRR1uLZK4oMOPrzxtvvLH1On3ykEMOyUz3zDPP1FGdjygLtg3CygXtmNU6g/5QXqxogXMhK+RxbdZuzTTN+ipXrHaETsqMJ8xy6c9l9VtGBzoP+oj35Wi7xEoLl9AROijLzFYuX3UCssvoJG+Mp/OeVS+ayQNW02vdyyNWJOWF448/Pr4ebb0Mc+bMiX+X8vEdY06sPF533XWjFZhYdf/YY48lxuNSpvm9KPOi8c20snSUJ9dWdnnOpc+tWztk8inzuao+v+gYDWXK022z/V2eXGkftu8uNgPHMMdp/sdpIfoj2JuvPinPvpq12zL1nNeSAAl0LgJ0QHUufbI01RGgA6o61lFKVTmgzGItXrxYTZ8+Xb3wwgvRSiPcFGvnjRmvnT6DCbbfwQ0ttlr66KOP6JBpJwNos7JiOy289w1tAF64ji3ivvvuuzajEK64ixYtipjOmDFDffbZZwrv8tABDg20MXmODR1fH6kzTcL/ETcNeDfA22+/HW3pWlQ3/nOUlJg30Z6MXe23zjaewIQXHjrBn3T2VUu2Y1Ore50AnbrVCzzEg/Z+/vz5Hau8Nky9s7VDUCH7/LCGzHFaWL6+pbdCn+S7zJRHAiTQOgTogGodXTGn9SJAB1TF+ugIB1TFRWRyJEACJEACJEACTRKo20R7k8XgZSTglQDrhVecFNYkAUw6vfjii9EDbHiIzfXv66+/bjJFXkYCJEACJEACJFAnAnRA1UkbzEsrEaADqmJt0QFVMXAmRwIkQAIkQAItRIAT7S2kLGa1MgKsF5WhZkIZBGbOnJm5taJtuzicmzRpUoZU/kQCJEACJEACJNAqBOiAahVNMZ91I0AHVMUaoQOqYuBMjgRIgARIgARaiAAn2ltIWcxqZQRYLypDzYQyCNABlQGHP5EACZAACZBAGxCgA6oNlMwiBiFAB1QQrOlC6YBKZ8NfSIAESIAESKDdCXCivd0tgOW3EWC9sFHhuaoJ4B2Ps2fPLvy3ZMmSqrPK9EiABEiABEiABAIQoAMqAFSKbAsCdEBVrGY6oCoGzuRIgARIgARIoIUIcKK9hZTFrFZGgPWiMtRMiARIgARIgARIgARIIIUAHVApYHiaBHII0AGVA8j3z3RA+SZKeSRAAiRAAiTQeQhwor3z6JIl8UeA9cIfS0oiARIgARIgARIgARJojgAdUM1x41UkQAdUxTZAB1TFwJkcCZAACZAACbQQgXvvvVeNGTMm+vvuu+9aKOfMKgmEI8B6EY4tJZMACZAACZAACZAACbgRoAPKjRNjkYAkQAeUJBL4Ox1QgQFTPAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAl4JEAHlEeYFNVWBOiAqljddEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvUPGDBA7bnnnmrzzTfnHxnQBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtIGa28CWW26pNt54Y3XCCSdUPJvM5EigtQnQAVWx/jbYYAO10047qV69evGPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1tYP3111frrLOOOu644yqeTWZyJNDaBOiAqlh/3IKvYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEOAWfCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRQawKLFy9W+FuyZEmt88nMkUAzBGjfzVDjNSRAAiRAAiRAAiRQPwJ0QNVPJ8xRaxCgA6piPXWUA2rZsmVq9uzZ6q233oqOCxYsUD///HPFpa93cmD0/fffR3/1zilzRwL1JPDNN9+oefPmqf/973/qp59+qmcmmStvBNBmfvjhh
+q///1vpPdFixa1Vb/yzjvvqDfffDP6Q9nrHupcPzE26tKli/rtb3+r5s+fX3eUzF8NCdC+a6gUZslKgPdkViy5J+tcx3MzzwiFCXCMyTFmYaPhBSRQEQE6oCoCzWQ6HQE6oCpWaZUOKEwKDh06VPXq1Uv95je/iSZ3MMGj/5CX3XffXV144YXqhx9+iElg8njNNdfM/VtvvfXU5ptvro499lg1evRo9emnn8Yy5IeiMo844gh1ww03RJObUlaI72+//bbq1q1bzObrr78OkUwQmQMHDkzV1Q477GBNE/pK0/Gtt95qvYYn600ATmXUyTS94vy6666rNt10U7XXXnupv/zlL+qxxx5TCxcuLFWwyZMnq/3331+tuuqqUf3R9ahr166qb9++asSIEeqrr75KTWO33XZLzfP666+feh1+QPujy3vWWWelxr3lllvieDq+Ph599NHRdWgv9TnzuN122ykMMtPCc889Z70OMiZNmpS4LFQ76CrXLJf8PGHChERe0768//776tRTT1W9e/eOnAW6P9HHFVZYQW2//fbqj3/8o3r++ecbxJx++umpvNZee+2G+DiBPkrmV3+/4IIL4mvyOAwfPjyOW/bD3LlzE/3qVVddlSvyu+++i7jpvJc59uzZMzc9RChbPyEjj6suhx4TNNN/awcU7Ajp2UJePi6//PL4sjrUZ5u9oU3QvGzHZ599Ni5DkfLiorz4Znp9+vRRAwYMUIcddpi6/vrrM8daeXI1d9p3rLqGD61o3yHHFK42pUHmxde2XbQNcpWr5duOrn1nM/dkHVGnNHPbMaRN2NLDOR99WLuMM13tuc71hGPMX2pCM2NMXFl1m+Gjfoay27Q2hedJwAcBOqB8UKSMdiRAB1TFWq/CAYXBx5FHHpmYHMOkICZrV1999djJoicMcfzggw9iErj+3HPPVbjGjJP3efnll48mtW0rH5qViaehTzrppISDLM6opw8//vij2nrrrRNlbSUH1MUXX6ww2S/1A2fDbbfdZqU0c+ZMtfPOOzdcs9FGG6kZr8+wXsOT9SaAOvbnP/+5cL1dY4011D333FN45QpWKcDRYNod6mu/fv1iZ5T+DW3DfffdZwUIh6c5OaevwUSPnti0XYjJHB0Xx4033tgWLTr373//W+2yyy6J+LhmtdVWUw899FAUB3X+zDPPTDiitXxM0KYFrPgaPHhwQjYc/nD+4zczhGoHm5Wry4fj3XffbWa14TOeRB0yZEiiX1luueVU//79I8en7SEHOJtkeOaZZxraW6TfvXt3hbbMFsaPH68222yzBGNcs80220STU/qaLA5wur766qs6aukjHE4mv2233TZXJp7eNq8p8xmO3qzgq34ijSyuWWUo0n+bbQAmI2whKx94mObll1+OL+vo+rznnnta7Q2TSnDQgY3JDvUH47aPP/44LkOR8uKirPhmWrbPyA/aP4yHZMiSa3KnfUtyv35vVfsONaZwtSlNMCu+zZ71ubw2qFm5Wj6OeX0n0mj2nqzKOqVZZx1RllA2IdP12Ye1yzizWXuuQz3hGDNZA5oZY0JCVW2Gz/oZym6TRPmNBPwSoAPKL09Kax8CdEBVrOvQDihMumDVi745Qno33XRT4gl+bBmEp191HBxNB5RGcueddybinHfeeWrp0qWRLEwQYTIPk6xyImXYsGFaRMNRyjznnHOi9z5g67t3331XjRs3Tv3pT39qkIkn7kOFq6++OlFO8GglBxS43HvvvQ1luOuuuzKRvffee4lr4Ezg+zcykbXEj9C7WbexQgQ3dhgowSGCSdrzzz8/egrfjId2Y9asWU5lnDNnjtpwww3jdLDKEqupzHqD7zhvpjFq1CirfNR7Mx4+T5kyxRpXn4Qsec2XX36pf7YezTxjwnf69OkN8a699toGuWhH0ybFtYATTjghvu7ss8/Wp63HUO2glAuHDh4IMP8wwfz5558rtNMmv6xJNGwvh4lmHX/FFVdUaDfRH+jw7bffqhNPPDGOg7g2BxTi4zo4PrU8HOGYygp4h4p5Dfowc+Wuea3ksOWWWxZ2sJry5GdsX2vaEvIPe/rss89k1MR3OTEAZ+1LL70U9YHQ0QMPPJBgMnXq1Eh3uO5f//pXtHpKM8tyQPmun7oQkqvP/ttlgj4tH3BO2h58QfyOqM9bbLFFrr2dcsopCV2j/UgLkntWeSFDxv+///u/aFIK9Q59AdqyadOmqeOPP77h4ZWslaRSrswH7TtNgyrxkEVeX5LH2UylCvsOOaYoUlabbftqg2Q+fPWdZe/JqqpTpk25fA5pE0g/RB/WTuNMac91ryccYyZrXbNjTEipos0IUT+R91B2m6TLbyTghwAdUH44Ukr7EaADqmKdh3ZAmZOEmKB64YUXrCXERK25GsrmgML7LfRkF45/+9vfrLIwcWjGg0MqbRWNlIltwGwBNwrSsZU3QWmTk3cOqyjwFL+Zf3w2J9LzZNTldzxhaZbjkksuycwaJqB0fKw2yZu8zxTGH2tDQNaxtHoLZyO2n9M2gCNWBGVtl4dCwoFhrkZZZZVVoncA2QDgvXP4XaeBSfrXX3+9ISpkYks3HQ9HcyVAwwVKKawwMOPjc95WOGYdwdZTtgAHnZSL73qrPts1OIdt/vR1Y8eOTYsWnZc68tUOSrlputeZMx1GWQ6oAw88MC4b2mVsuZEWsPWe5pDmgMK1++yzTxwP8fEEZF7Yb7/94muwGistSA5Z+UiTkXUeTiNdRvM4ZsyYrMsSEwNYtYr+xwwPPvhgQq7cwtBss9McUCHqp86j5OrLbiG/iANK5gPOnLTQEfX5jDPOSMtOfB5thGk7999/f/yb/FCkvLhWxs9qB+REMuwKDmpbkHIld3Pii/adJNiZ7DvNnpoZU+TZVJJio237aoNkPtLKqPPj2neWvSerok7pMhU5uvJqxiZC9WHtNM6U+ql7PeEYM1n7mh1jQkroNiNU/UTeQ9ltki6/kYAfAnRA+eFIKe1HgA6oinUe0gH1yCOPJCY0sraNQrEvvfTSOL7NAYWtYswJkqwbMvNmDNfg6XhbkDLTBsW4FiuuzPTxHhCfAYMobJtkpqE/t6ID6pNPPlHY6kyXAQ7GrEntThTiAAAgAElEQVTdY445Jo4Lhx9D5yAg61hWvUWJMTmvbQbHrCfxEd90tCA+VmZkBTmpvuOOO1qj48l7Mx94t1JagF3bHMfm+4Bs1+J9TjqNiRMn2qJEq8B0HHmUDgFTALYx1PGffvpp86eGz1JHvtpBKTdP9x9++GGc5zQH1JNPPhnHQfmuueaahvKYJ/B0PyaxETfL8XPwwQcn5Joy0j4ffvjh8TVwdKUFycF336H7u3XWWSexkvCggw5Ky1J03pwYQPsrg6wrNnvDuyzANs0BFap+Iq+Sqy+7hewiE/QyH9gyOC1gVaeul/Jo46vlhKrPWj6c5WZ+sGI0LRQpL2TI+HntgBwHpbXpUq7kTvtO02Dnsu88eyoypsizKUlUxvfVBkm5eWV06Tt93JNVUackY5fvRXkVsYmQfVi7jDOlfupcTzjGbKxx
zY4xISl0mxGyfoay20bCPEMC5QnQAVWeISW0JwE6oCrWeygHFJwpeOePntDASoYs5wOKjadcd9111+jdKJ9++mkDCWwnpOXhmHVDhskTM65tcg0JSJlZg+LHH388IXPfffdtyGOZE+bWJRdddFEirVZ0QIEFJuBNPaQ5IeFw1CvM8IQmQ+chIOtYVr1FqbG1mVx9lLZyEltlrrrqqrGNrb/++rngMEDr0aNHfA3s85VXXmm4Dg4h03Yvu+yyhjj6xFNPPZWIq69Lc27huoULF8Y2D8cB2kxbMN8tJVeIbbrppontTM3rsYJB5yNtAlfHlzry1Q5KuXm6R36wNcshhxxi3fIQ25qZ27VC97CXvIB08c4tbCmXFg499NCYF9oil2CuYMva5rAZDi7pIw76Ve0swVaWeEeh1jseAMjqd82JgYcffrghSRcH1OjRo6P0bA6okPUTmZVcfdktZGumYJm3RVmRfHREfXapd3BSa7vB8YknnmiwB32iSHltesrLz2mnnZbIS9pqrLx80L61xhqP7WTfRcYUeTYlSRaJX+QeQsrNqzPIV1bf6eueLHSdknxdvxfl5WoTofuwdhlnSv346qul3LL1hGPMxhpXZowJaSHbjND1U9qXL7ttpMwzJFCeAB1Q5RlSQnsSoAOqYr2HckBhwticzMDqobJBDgSyBprmJA/ygZUGtiBlZg0uzCcMIXPAgAE2kU2dw/uP9GohPIkvt+lpVQcU8m06CLp3767wbhYZBg0aFNkLJn7lNlAyLr+3FgFZx7LqrS6Z3Ibp97//vf4pcZQ373krjvTFf/3rXxPtk825hJsuc1UTVnqkBWxxhTYBjnbzXSq43nwvkXn9+PHj4zxg4igtmG0Z3q0GJ73Ztqa9xyqUA6pIO9iM7tM44Dze82eWXa54yLo277dWdUCZ72l64403lGlXYJW1DSRsc8SIEdEf3nkgg4sDCi9+hoyRI0fKy1XI+onEpH357L+rmKCvqj67tLl1ckCdfPLJiXpuezeei/5p3w1VMj7RbvbtOqYo0qa42GAMXCnVkX2nr3uy0HXK5FXks9SbS5vnYhOh+7B2GWdK/fjqq6VcF71n2RXHmI10yowxIS1kmxG6fkr78mW3jZR5hgTKE6ADqjxDSmhPAnRAVaz3UA4odNLmRGHeFlAuxZYDgayB5muvvZZI38cKKEzumWXCU/o+Ap5M3H777SPZ0Me8efMUtuIx02pVBxT4mCu7UKbLL788gc28Mc6aiE9cxC8tQ6BIvdWF+uKLLxTez6TrAByTNsclVp3oODimTVRqufooJ1vTHFzm+xLgTMLTdraAlVdIHyuUsJLEzJNtdRVkYGtBHS8tDuKZDqg77rhDvfXWW/HKKVyPNsO2QiOUA6pIO9iM7m189TnpOPTRr2jZreqA0ja6ySabREXBk92m4xRbpzQbXBxQWbJD109pX1mTA0XsFmWqYoK+qvqcNVbS+pNtYkeugEJ7rNvGLCd+Ef3rcppH2neXiLOt/zA5FeHcEf2Vi327jimKlBWMisQv0gZJuS5lNHUmP4e4J5Np4HvZOmWT6XKuGV4uNhG6D0PZdB+ONq+zjjOlfnz11VJu2XrCMWZjbdP2GWKMidTKtBmh66e0L19220iZZ0igPAE6oMozpIT2JEAHVMV6D+WAGjhwYDyBgEH1V199VbpkciCQNdC8+eabE+lfd9111vSlzKzBxX333ZeQmZY+nF8rrrhi/GdbXWFmBisY9GQL8o0wefLk+Bx+a2UHFCbte/bsGZcHNqftAdsdbLnlltFva6yxRrRU32TDz61PQNaxtHojS2q+Hwl1YMqUKTKK6tevX2xXiLNgwYKGOLYT2O5T1zkcsfrQFvDuODOe7R0tWL2o48DpI8tra3vgdMY70XBdr1691M8//2xLPjpnTuhhr3MEedNlc7CHckC5toPIp2Rh0z3K3rdvX7XyyitHjveogCn/tthii5g12Ol2JCV6odOt6ID66KOPYkft8OHD4/LuvffeMac111xToZ1tJpSZGEB6oeuntC8f/bfmVIUDqqr6bKt3upz6WBcHlHyiGU79tFBE/zYZtO+wDqg62Tf07zKmKGpTReJ3ZN8Z4p4sRJ2yyXQ5J/Xg0ua52EToPgx5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy/TDoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFug/lgFpllVXiCTBMtPoIciBgG2jqdDAhqyeFcbRNXiOulJk2uMAE3l577RXLxIS17T1VkCmdR1mrevDuI7313jbbbBO/BwbvbDHz38oOKDCRW13orbOwBZEu55gxY7T6eOxEBGQdy6q3ZrHNFUKwEdv7e8xJYqySKjLRbrZRkG97V86M12fE9ok4l156qZnF6PNNN90UxcGKLWxHhrDxxhvH12FLTRlefPHF+Pc///nP8ufEd9MB9fe//z36DXuqw7Gg6w6O0jn20EMPxb/7egdUkXYQGXXRPdpmXQ60nVnB3M4TDmufoRUdUFdccUXMDts76SAfwMC2Ms2EMhMDSC90/ZT25aP/1pzMvIdaIVJVfXZpc+vggEI7hffh6fZg8803t7bLWkeu+tfx5ZH2HdYBVSf7hu5dxhRFbco1fkf3neZ4x9c9maxP+F62TtlkupyTenBp8yA3zybMfiDEGBN5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy7QZoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFukfnjZez+ww//PBDPIGAiQRMxvoIciBgG2giHdz04kZBT2Lg3S1YcWALUmba4MIchEHulVdeaRMXnXN1QOGGdMcdd4zyify++eabscxnnnkmzj/Sa3UHFPibTyrB6YaVIz169IjKudVWWxVyHsSg+KH2BGQdS6u3siDnn39+og7o1YE63rJlyxK/F3VI6G3zdDsxd+5cLTo+wm5Np8dOO+0U/6Y/7LvvvlE+tt56a31KnXrqqXHebJM9eCeeThcrJrOC6YC68cYb46j33HNPLAOy+vfvrzD41GHcuHHx774cUEXaQeRD6h5ba2FlFv7wNPiwYcPilWAoQ5YDKlS/onm1mgMKT/X26dMn0rHcQlK+a+Siiy7SxSx0LDMxUEX9lPblo//WgMyJjVAOqKrqM9otrMTO+ttzzz3j9gJ1MeQWfEhLtwO33nqrghN+5513jtOHM3/w4MEKK1Wzgqv+02TQvsM6oKqyb19jCthJUZtyjd+Z+06zfpWpU6acop+lHnzYRBV9GMrZDuNMqR9ffbWUyzHmL226q/1n1bMqxphIv9k2o4r6Ke3Ll91mcedvJNAsATqgmiXH69qdAB1QFVtACAcUJmv05CqO2267rZdSyYGAHmDBkfPxxx9HLzw/5ZRTEmljlQCuSwtS5tChQ6P4c+bMid4ng1UE5vsIsLUetkvICq4OKL1yAoww2W4G6YDCiodWDxMmTEjoxnzHT9Y7cFq93O2ef1nHdL3N43LNNdck7GXEiBGJS2Q707t378TveV8GDBiQkD9r1izrJYcddlgcr1u
3bokn8rFqSq9gNFdHjR07Nr4G9fv9999PyN5www2j39ddd93M7fdwkemAwnadOuDmUL87Tre3N9xwg/5ZPfroo3EeijqgfLSDyIjUvc5n2jHLASX17atf0cBazQFlvidQb3Wly4IjtjXUnOH8byY0OzGAtKS+QtRPaV++7Bb5r8IBVVV91nZQ5BjSAZWXjyOOOMLpoRup/7TJoTTbp32HdUBVZd++xhSwk6I2JeP7aoOk3Lw605F9p1m/ytQpU07Rz5KXD5uoog/T5ezs40ypn3asJxxjamtPHpttM6qon6HsNkmA30jADwE6oPxwpJT2I0AHVMU6D+GAgvPGvFnCvus+ghwIII211lorngA208RnrOx6/fXXM5O2yZRy9PeTTjop05mlE4IzDJPn+s92Y4gn1FdYYYWIE1ZiyO2/0hxQeOLnyy+/zJ201nmp23GHHXZI2AbY/uEPf6hbNpkfjwRkHXOdGIDDSdc9HLGNoxnkKo/11lvP/Dn3M1YsmfKxHaYt3H777Yl4zz33XBzNrKfTp0+Pz2N7TlM2VivpYDqULrjgAn069WjGl++TmjlzZmK1J9pz8EYYP358nIeiDigz7/KzazuIPEjdw2GGMuMPTvfDDz880X7b2koNBnvRm3nJ61cWLlwYvVMOznv5t3TpUi02Prba5MDxxx8f8cDqWbxMXQZzlR24mVv0ybhp35udGIC8KuqntC/TPuTnInaL/FfhgKqqPuMl4no8knYcMmRIon6FdEDhoR7dDpxxxhnqqKOOUhtssEEifTzsY9t21bRVqf8qHVC0b1MTv37uiP7K15gCpShqUzK+bHfM70XaICm3TN8Z6p7sV63/+qlMn/GrlOKfJC8fNlFFHdcl7ezjTKkfs17Izx1VTzjG1Nb4y7GKMSZSarbNqKJ+hrLbJGl+IwE/BOiA8sORUtqPAB1QFes8hAMKzhRzQNns09cShW0gACdO165dFVYmYDuiPfbYQ5188snKdUWNlIn3D2ByBKsjzAkolOess85SS5Yskdkq/B0rtuAc04zw0m0ZzIltxNMroPDuJHz/5z//KS9pie8vvfRSXG6UY6WVVoonzFuiAMxkYQKyjrlODMgt+KZOnZpIG+9b0nUIR7QFRcLaa6+duH7BggXWy+VNjrmd2dlnnx3JWG211Rq2+TQnVNEm6WA61vIc5LjGnNAbOXKkFhMfTz/99EQ5jj322Oi3J598Mj5f1AHlqx100T0mmbUesxxQaHt1PBzz+hWpX/Na88l8DbKVHFCLFy9WmKBHmfbbbz9dhMTRXCGFeObquETEjC/NTgxAZBX1U9qXL7tF/s3+H0/aZgWZjyxHSEfUZ5c2tw7vgJo0aZJaeeWVE/UcW/SlhSLcbTJo32FXQFXVX7nYN/SfN6ZAnKI2JeP7aoOkXFsZXfvOUPdkvuuUTZ7rORdeNllZNlFFH6bz1NnHmVI/dawnHGNqa1SqqjEmUmy2H66ifoay219J8xMJ+CNAB5Q/lpTUXgTogKpY3yEcUCiC+d6UXr16eSmVHAjYbsiKJiRlmhNHGIxiMtecuITjKO19Uq5pjx49OpaJ97b8+9//bvgzt+dD+phMxgSi3u7rkUcecU2udvHMiWG8P4ehcxOQdcy13p544olxPUEdwNOJZvj+++8TvyOOXEloxjc/YwWMuQUkVpFgS7u0YL4vylx5o7fSO+aYYxouxVOcuu3YZJNN4t+xdRzOw2HuEswJa9v2n3Ccde/ePU4Lsl944QX17LPPxueKOqB8tYMuuseAWec/ywEFVnD0aaY9e/bMxId3YJnb0Onr9tprL4UnwmVoJQeU+f4vbON40EEHNfzts88+MSuUfdddd5VFzv3e7MQABFdRP6V9+bJb5L8KB1RV9dmlza2DAwrc33nnHYX3+en6Cj1goskWsvRviy/P0b7DOqDqZN/Qfd6YAnGK2lRW/DL3EFKurQ4X6TtD3JPJ+oTvZeqUTZ7rORdeNllZNlFFH2bmqTOPM6V+fPXVUm7ZesIx5i8WWdUYE6k122ZUUT+lffmyW7Pe8zMJ+CJAB5QvkpTTbgTogKpY46EcUJtvvnk8gYDJXQwUygY5ELANNIumIWWagwvIwovvMXmsJ0NwxBY2ZYK5MsKUW+SzuQ1Ymbx0xLV48k2XlQ6ojtBAtWnKOuZab7fYYovYTtCGYGAlgzlJDJvC9pcuYfbs2bFsXNejR4/My8x3yyEv3377rTJl4AZKBvPpZKQBRxFWUmjHl7mSSl5rfjcdUFdddZX5U/x5zJgxifKAHVYT6HpWxgGFRJptB111v2jRooiPTcdxIZVSsl/BBF9WePvtt2PeYIGVskjLFppxQJnvbcBquLTgyiHtenneXEGrdZx3hN2mrfKT8vX3ZicG9PWh66fk6rP/NvMeagVUVfXZpc2tiwMKtiO3A8RDO7aQp3/bNeY52ndYB1Sd7Bt6dxlTFLWpvPh17Tt93JOZdUl/LluntJyiR6kHlzYPaeTZhNkPoI8NNcZEXjrzOFPqx1dfLeWm6Z1jzGI1qqoxJnJVps0IXT+lffmy22LaYGwScCNAB5QbJ8YiAUmADihJJPB3dN4YaPgOBxxwQDz5iUG7y1ZTeXmQA4G0gWaeHPN3KVMOLhAXzh5zcg9bH+VNSJlpyM/gDSdU1p+5Sghp4/02Zvw33nhDim2Z73RAtYyqvGRU1jGXeouJcu2ogf2nrRbabbfdEnUTq35cwpQpUxLX6W3r0q7FikOzDcAWmXolI/JpewfPJ598krgGE7z/+Mc/4nOuddjFAYVtPbfZZptYNvJqTmaUdUCBSzPtYDO6T9MBzst+ZcaMGVnRo9969+4dc8GK07SAd9FpHUOnWSvitAysPNLXXHbZZfp0w7EoB0xaPvbYY8q2Pau5VQ/s/z//+U/q34UXXhjnD/nEjX6RUGZiAOmErp+Sq8/+25zYyOvvXfKhuXdEfXZpc+vkgDLbSdgt3hNlC0W4266nfXeMA8p3f+Vi365jiqI25RK/jn2nj3uyEHXKJtPlnNSDL5sI3YeZZevM40ypH199tZTroneTufzMMWby/Z2hx5jgX6YfDl0/pX35sltpd/xOAj4I0AHlgyJltCMBOqAq1nooBxTel6Qn5nC87bbbSpdMDgTKDjSRISnTNrhAPKzUMcuDbRtChrR3QIVMsyrZdEBVRboe6cg65lJvH3rooUR9u/LKK62FwYvszXp5wgknWOPJk/KFz2PHjpVREt/l5BXeG6BvVOH4SQtwHOv8YcUT3teD7xtttFHaJQ3nXSascRGcMabTTqeLow8HFNIo2g42o3us3MJ2o9OmTWtgod+5pct2yy23NMSRJ8xVU3i/X1q4+OKLY11Bfp7TAXJ23HHH+Jo77rgjTXRDP5NXB/AOQ11G6dy89NJL49/uvffe1DTxg2k7kHfEEUdkxpc/lpkYgKzQ9VPal8/+uyMdUGDnsz7n2RvSq5MDasKECbGNw24HDx4sTTP67qp/68UlJ74gk/bdSNZsc9JWQOGqqu3bdUxR1KZc43d03xninqxR++Umk23yXM
9JPbi0eS42EbqOm+XrzONMqR9ffbWU66J3jjFV9J7sOowxYf9lxpmh66e0L192a9Z7fiYBXwTogPJFknLajQAdUBVrPJQDCk9l68ENjjvssENuybBiAHv/44n1L7/8siG+HAi4DDQbhIgTUmba4GLmzJmJ8mA7I2zvFCrQARWKLOVWTUDWsbx6u2zZMqXfrYS2A20CXohrCzNen5Gol6ussorC9XnBfGoO27Lhxj8vbLXVVnFa2JZTv48ta+ULHGK6HRwwYIBabrnlou95DMy8uE7o4ZqTTz45Tk+ni6MvB1TRdrCo7lGGTTfdNCqDbYvCN998M1E+vE8rL2y55ZbxNTvttFNqdOmUfPXVV1Pj4gc8xb/WWmvFstFmp4WiHPDuFK0/871mSBPvfMJvrlvqmSvA0N+71A9djjITA5ARun5Krj777452QIGfr/rs0t7UyQGFBw60/eMI57AtuOrfdi3O0b47ZgWU1kdV9l1kTFHUplzjd3TfGeKeTOvRPJatU6asIp+lHvLaPFebCN2HyTJ21nGm1I+vvlrKzdM7eHOMqVRdxpjQR5k2I3T9lPbly25lved3EvBBgA4oHxQpox0J0AFVsdZDOaBQDLzs3ZxEmDVrVmbpMJGr43/wwQcNcefNmxf/jnjYXqhskDLPO++8VJF4elznD0e85N0WvvnmG4V3sug/TJoWDfIJ4K+++qqoiNrGX3311WOOe+yxR23zyYz5ISDrWN4N4hVXXBHbB+oZVsNkBTh2zHqZt5rJdOjgOtd2BA4RMx39+bXXXkvNHlao6HjmMa8tNAUirr522LBh5k8Nn9FOmC8b19flOaCkjny0g8iclJun+3feeScuq+0F9pCJNkOXC8e8rYTM9ibLASW3Sbr55psb+JonJk+eHOcDjkW5UsmMW5TDdtttF8mGk8ncCtB0ErhunWs6QcHrqaeeMrOW+dl8ETWuBaOiIWT9lFx92S3KWMQBJfOBFZJpoSPqs82ZK/P35JNPxvYMXT/++OMySvy9SHlxkYyf1w6YW1siL1ipYAtSbhZ32/W0bzcHVBHOHWHfefZUZExRpKw22/bVBsl85JXRpe/0fU8Wqk7Z5OadK8qriE2E7MNkuTrrOFPqp871hGPMX/qFKsaYsP+y/XDI+hnKbmW953cS8EGADigfFCmjHQnQAVWx1kM6oDDpickD/YetkNJefDt37tzEhI/NASUnjvH0ZNkgZZ566qmpIpGnrl27xuVBuR599NGG+ObEJOKcc845DXHyTjzwwAOJdLA6rDMEPMGPSVVtE3gBMEPnJvDuu+/G+obe0yZSYBvynTWbbbZZapuhqT3//POJredWWmmlaOWF/t08LlmyRJlbsvXs2VOZK0zMuPKzfG8UytK9e3f1448/yqjx9zlz5iTKjmt+97vfxb+7fHjxxRdjGdhuIi/ceuutcXxdz/IcUCHaQeRTyk3TvS7TSSedFOcd5bAFuToUPNNWyL311luxPLDIckBBj/369YvjY3XTt99+a8tCtPrpwAMPjOOmbRGmLy7CASuvtN7QP5th1113jX9z7f9GjRoVXwO5Rx11lCky8/ONN96YuHbcuHGZ8W0/hqyfkquP/luXoYgDSubj9NNP12Iajh1Rn7F1ZV6A417bHY54KjktFCkvZMj4We0Atm0z84F6uHTpUmtWpNws7jYBtG83B1QRzlXYd8gxRZGy2mzbVxsk85FVZ5APl77T9z1ZqDplk5t3LqRNhOzDZLk66zhT2nOd6wnHmL/0C1WMMWH/ZfvhkPUzlN3Kes/vJOCDAB1QPihSRjsSoAOqYq2HdEDhye0hQ4YkJhMwgTZ79uxEKfEEe48ePRLxbA4ouZpggw02yJz8TSSS8kXKzHqfC0TgZdjm5Mjaa6+t8FJ4M/hwQMl3nTzxxBNmEi37Wd5cwRnl8q6Vli0wM64waW3WGXMiBasF8eTuddddp/r375+Id/jhh6c6FiRWPM1qvv8IjqFnn3024bxCOuY7e1ZbbTWVt82amQ6c53rbPV2eY4891oxi/WxugYbrsrbsswkwb8723HNPW5TEOThS4NjVecQxzwEVoh1EpqRcU/dmpuEYvP766xN5hhPeFtCvHHfccYm4WDEEZ5MZMCE1cODARLwsBxSulRMP2KoRcswAp9TBBx8cy1155ZUjGzbjyM8uHCAXNmvaCxykOsiHEuAscwnSqYstJ11X5cqVKM2++zBU/ZRcffTfmmkRB5TMB+wxLXREfUa7lxfkO2LOPffc1EuKlBdCZHxsY2Ou7EPbOn369MQkOtotjA/SVj/Z5GZxtxWG9u3mgJL6y+JchX2HHFMUKavNBn21QTIfvvpOn/dkIeuUTXbWuZA2gXRD9WGyTJ11nCntue71hGPMLtEDWdI+bd/LjDEhz0c/HKp+hrJbG0eeI4GyBOiAKkuQ17crATqgKtZ8SAeULspNN92UWPWCVURYMo1l7tLxpCdNTQcUnn7Fi9cxYax/10csER8/frxOyvmYJXPQoEEK+6XbAt5NhcltnT6OKMPIkSPjp3TLOKCw3/Bpp52mMElopoFtpDBBi86lFcPChQsjRuZ2WLp8G220kbrllltKOxNbkUtnzjNupFEvbPUW9i1tXNvDCiusoK699trEBKULp4kTJ6o111wzUW/gMILTAc4tc+Vdnz591HvvveciNhFn7733TsjPmhzVF2J1jC4bjniiziXAIYEVT+ZEOK6H0ytPxiuvvJJIM80BFaodTJML3aINMP/w3i6Tj/6MbUizAmzL1Ck+wymz//77Rw448zctM88BhfSGDx8ev6sL10EOVllhy1X0W6Zc6CZrC8Y0DpALh6n+0/mTR6SLd1Vg60W5+hZxDzjgADVt2jQrJtg3nAi269Bn3X333dbrcPKll15SRx55pFUvQ4cOVW+88UbqtWk/+KyfWVzL9t86/2a9S3tIokg+Oro+Ywth29af8+fPj1ZpSztB+4yJpc8//1wjicY4aWMxG/csPrBftAfSqa/rAJzoeG+OLWTJteVDyqB9u20xWYRzFfYdckxRpKywp6LxtQ3m3UOkyfXZd5a9J9NlMY8h6pQpP+1zSJuQafrsw6Rs83tnGmem2TPa+ay2ug71hGPMcGNM2LvvNsNn/Qxlt2Y952cS8E2ADijfRCmvXQjQAVWxpqtwQKFImCQ75phjVN++fRMrFfRkA45w7GDgjadj4bDQAQNRc3WDeQ0+46WtRUOeTEyypIX77ruvYWIOk5ILFiyILinjgMIT8LJ8+jvSwE1+Kwa5FZYukz5isgsDPobOQ8C2/ZzWN46YeMTqkXXXXTdamQTH62OPPZao+0VpYLIUW3tg8hLv5UE62mGANgRPXWIlCSb1mwlYqaXLAHloR/LCXUk6DbQAACAASURBVHfdFV+Dlx+7Bqys1GnJI/KRF44//vj4+pdfftkaPVQ7mCdXlsf2ferUqdY8myfhWMOWcnBiQx9a15AH/aPPQdkPPfTQiAVW4LoEPACBpzJNJ4QpGyuT8MRlnv7LcoC9wqZtfPS5o48+2lokcyWCjmseN
954Y+t1OHnIIYdkpnnmmWemXpv1g6/6mce1TP+t82/qPs0BVSQfdajPV155pS5efMT2NaZdyM+TJk2K4xYpLy7Ki6/TAmuM41BfsUL0n//8Z+YDKXlys/SPfNG+3RxQRThXYd8hxxRFyupi21k2mHUPkZcPXWeyji59Z5l7srhBMD6EqlNGEtaPIW3ClqCvPswmW5/rTOPMPHuuez3hGLOLCjHGhK2HaDN81c9QdqvrOI8kEIIAHVAhqFJmOxCgA6piLVflgDKLhfd1YLuVF154IVpphBsI7bwx4/EzCZAACTRLAAMxrBR6++23o20ym3U6menjaVu8lBZ/aZPSZnx8hmNVX1NlO4d0saISKxzaISxatCjaLm/GjBlRufFOMR2ge2y3WNQGsE0Y3k+IvgqrR7Adn+s7w3TaPNoJhKif9pSaO+vigGpOcnNXtVt9bo5Sfa6ifRfTBe27GC+fsXlP1hzNUHWc48zm9BH6Ko4xQxP2Kz9U/fSbS0ojAX8EYPN4OAXbxDOQAAm4E6ADyp2Vl5gd4YDyknEKIQESIAESIAESIIEABOrmgApQRIpsYwK07zZWfo2KjgmzF198MdolA6uyXP++/vrrGpWCWSEBEiABEiCBjiVAB1TH8mfqrUuADqiKdUcHVMXAmRwJkAAJkAAJkECtCXCCvtbqYeZKEqB9lwTIy70QwHvesrYTTPvN3BrUS0YohARIgARIgARamAAdUC2sPGa9QwnQAVUxfjqgKgbO5EiABEiABEiABGpNgBP0tVYPM1eSAO27JEBe7oUAHVBeMFIICZAACZBAmxOgA6rNDYDFb5oAHVBNo2vuQjqgmuPGq0iABEiABEiABDonAU7Qd069slS/EKB90xLqQADvapw9e3bhvyVLltQh+8wDCZAACZAACdSCAB1QtVADM9GCBOiAqlhpdEBVDJzJkQAJkAAJkAAJ1JoAJ+hrrR5mriQB2ndJgLycBEiABEiABEiABGpCgA6omiiC2Wg5AnRAVawyOqAqBs7kSIAESIAESIAEak2AE/S1Vg8zV5IA7bskQF5OAiRAAiRAAiRAAjUhQAdUTRTBbLQcATqgKlYZHVAVA2dyJEACJEACJEACtSZw7733qjFjxkR/3333Xa3zysyRQFECtO+ixBifBEiABEiABEiABOpJgA6oeuqFuao/ATqgKtYRHVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP29evVS++67r1qwYAH/yIA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdTcBr744gvVvXt3ddRRR1U8m8zkSKC1CdABVbH+sAJq4MCBqkuXLvwjA9oAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2UHMb6NatW6Sjgw8+uOLZZCZHAq1NgA6oivW3xx57qEGDBqk//OEP/CMD2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZQcxsYPHiw2mmnndSZZ55Z8WwykyOB1iZAB1TF+uM7oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBvgOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbO5EiABEiABEiABEiABEiABEiABEiABEiABEiABEigBAE6oErA46VtTYAOqIrVTwdUxcCZHAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmUIEAHVAl4vLStCdABVbH66YCqGDiTIwESIAESIAESIAESIAESIAESIAESIAESIAESIIESBOiAKgGPl7Y1ATqgKlY/HVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbeZHI//PCD+u6775q8mpeRAAmQQGsSWLx4scLfkiVLWrMAzDUJkAAJtCgBtr8tqjhmmwRIgARIgARIoG0I0AHVNqpmQT0ToAPKM9A8cR3lgFq2bJmaPXu2euutt6LjggUL1M8//5yX3bb8Hc6nXXbZRXXp0kW98cYbbcmAhe6cBN555x315ptvRn+LFi3yWki0MR9++KH673//q+bNm6cg30cb880330Ty/ve//6mffvrJa57rIgxtDnTz3nvvKZTz+++/77CsoY9C2/fb3/5WzZ8/v6l80M6awhb8ojrZma/ChrI1jpncNRRKB+45cI9Z9/6E7S/7eXdrZkwS6DgCdW9LO44MUyYBEmgHAnRAtYOWWcYQBOiACkE1Q2aVDihMBA8dOlT16tVL/eY3v4kmFTGxqP+Ql913311deOGFChNTOmACdM0118z9W2+99dTmm2+ujj32WDV69Gj16aefahENx6IyjzjiCHXDDTdEE9oNwpo8cdFFF+WWCeVebrnlYkat4IAaOHBgarl22GEHKy3oK03Ht956q/UanmxtAnPnzk20A1dddVXpAr3//vvq1FNPVb17944cFrpt0ccVVlhBbb/99uqPf/yjev75553Tmzx5stp///3VqquuGtXFbt26RceuXbuqvn37qhEjRqivvvoqUx7aP5uNb7fddgqDxrTw3HPPWa+DrEmTJsWX5bVpl19+eRzX9uHhhx9Wp5xyitpqq60SbQ7Yob3+3e9+p4477jg1ceLE+HKsygRrW7mKnuvZs2cs1/ygJ0CRD5SxaKCd/dJ30s5+4ZBmZ0Xtyhbft601M2YKWSdd25jddtsts0149NFHG/DhIaT1118/9brrrruu4RrbiWZ0EJKZLY8++hPIzdOHboP12LiZcSzb3y6K/Xz4ft5WT+S5PHuXY5yOHnMNHz5cFiEas+l6aTs+++yziWuKlDkvrplenz591IABA9Rhhx2mrr/++sx72zy5JvdWbEvzyqe5lWlHE0rlFxIgARIIQIAOqABQKbItCNABVbGaq3BAYUB65JFHJiabMREMZ9Hqq68eO1f0JDGOH3zwQUwC15977rkK15hx8j4vv/zy
6i9/+Yt1lUKzMvEU/kknnZRwkMUZLfjhzDPPLFQelPejjz4qmEr10S+++OLohl3qZ91111W33XabNUMzZ85UO++8cwOPjTbaSM14fYb1Gp5sbQJwOJk2su222zZdIKwOGDJkSKKNgeO2f//+CjeNNof36aefnpseVtzAYWXmE21Av379YmeU/g3tzX333Zcq8+uvv1ao89p5pa/DERMAaQGrtwYPHpzIA8oDZz5+0yGrTYNj/+WXX9ZRE0esbgI7Mz/du3ePHE62vKIMOuCJU/O6Mp+Rli2UnQClnf36kAftrEtU/2x25uOcL1srM2YKWSdd25hbbrlFrbHGGg1tA9pOOP+xMlUGyMZvZn3X7ck222yjpk+fLi+xfm9GByGZmZn02Z9AbpY+NDvbscg41tQHJmqLhmb0kZYG+/lsnbdyP5+mc/N8lr3byt6RY64999xTvfrqq2b2o89wkMMRjDpo1k2M6XCf/PHHHyeuKVLmrLhmWrbPyA/Gdj/++GMifXzJkiu5t2JbmlU+Gyt9rkg72gCVJ0iABEjAMwE6oDwDpbi2IUAHVMWqDu2Awg0AVr3oARvSu+mmmxJP/GMLLjyNpePgaDqgNJI777wzEee8885TS5cujWThxhiDfUzKyoH9sGHDtIiGo5R5zjnnRO8bwaTsu+++q8aNG6f+9Kc/NcjEKouyoRkHFHi2Qrj33nsTuoJO77rrrsysY7sv0wYw8c/3vmQia9kfsRXehhtumNA3bsA/++yzwmXC1nq4Cda2s+KKK6qrr746ahu0sG+//VadeOKJcRzEzXNAzZkzJ5FHrNx87LHHlFkH8R3nddo4jho1SidrPV577bWJ+LgG7WLe5N4JJ5wQX3f22WdbZeOkbNM222wzqxMecTFZsMUWW8RyscoJTw3rgMkIrPw029QsBxQccy+99FLUhmJ7wgceeCCWjXJOnTo1ygvS/de//hWtntLsQjigaGe/Op9oZ7+wSLMzbfPNHn3ZWtkxk5wADFEnXdoYPDiClSO6fuOIh1PywtixYxPXbL311s5bgDargyqYhepPwFPqw+c4towDqll92GyE/XySitR5q/fzydJlfytSdkiqesyFMRVsPytgtbnZNmJ8lxWKlFnG/b//+79orIf7ZThxMdacNm2aOv744xva6LPOOis1G1KuzeZauS2V5fPZjqZC5Q8kQAIk4IkAHVCeQFJM2xGgA6pilYd2QJkTw5j4eeGFF6wl/PLLLxOroWwOKOzrbw7Y//a3v1llPfPMM4l4mDxNW0UjZWLFlC3AEWVOwiIfSKdMMB1Q2GYOW2Dl/bXSO2fwNJ+pr0suuSQTF26IdHxseQCbYOicBOCk0Lo2j2PGjClc4AMPPDCWhTqK7Y3SAp6w1+llOaDgeMHNtY67yiqrJBwzpny8yw6/67hwpL3++utmlMRnrETScc3j0UcfnYgnv2BVgY6PSdq0INs0THSkBTjqtEysesI2WLYAJ5SOl+aAwmSz6byCnAcffDC+DtfLbQ/NOp/mGCgzAUo7SzqgoAPamX2lnc3ui5zzZWtlx0zmBGCoOunaxhxzzDGJ+n/77bfnIsWqTt3W4Jg2drMJalYHoZmF7E/AQerD5ziW7e8vlsZ+/pe+JFSbYqvPaeekvWeNcSCj6jHXGWeckZb1+Lx0tN9///3xb7YPRcos46bdKyMdPBhotrcYh33++ee2LDS0MzburdyWSm4+21ErUJ4kARIgAY8E6IDyCJOi2ooAHVAVqzukA+qRRx5JDGyztv9BsS+99NI4vs0Bha0LzIFy1qBarnbARKstSJlpA05cixVXZvp4V1WZYDqg8lY/lEmno6795JNPFLYl08yw3SK2OkgL5mQVHH4MnZeArp/rrLNO9M4PbSMHHXRQoUI/+eSTsX1BxjXXXJN5PeoZbrARN8sBZTp7EBerdbKCdLTsuOOOqdFnzZqVyLMuO47SQWMKueeee+Lrnn76afOnxGfZpmH70rSAFU86fWwLkxawgkxPRKY5oFB/ZZBcbOXD+2KQhxAOKNpZowOKdhbGAeXD1nyMmcwJwFB10rWNQTul2xccsZo8L6CN0NekvTMyTUazOgjNLGR/AhZSHz7Hsbrdh06KjlOb1YfUL/t5SaRR563ezzeWMP2MtPesskNK1WOurPqnSzVhwoS4nUPdwmr6rFCkzDJu1r0y0sT217rNxTFtvCvl2ri3clsqy5elR9/zAVm6528kQAIk4EKADigXSoxDAo0E6IBqZBL0TCgHFJ74xDt/9KB2tdVWy3Q+oJB46mrXXXdVu+yyi/r0008byo3tubQ8HLMG1RjMm3FtEzFIQMrMGnA+/vjjCZn77rtvQx6LnOjsDiiwuOCCCxLM0pyQcDjqFWZ4Apyh8xKAE1JPap1//vnRO9V0XYXDMstJaVLBakBz685VV1012vrNjGP7jHYDbQy2h7MFbL8JWTpP66+/vi1a4hwGfT169IivwbWvvPJKIo7+glVCWjZWo+jPOG666aaJ7Un1NTjiCVkdN22CAPFc2zTw0844yM2bHMbqBUwoPvTQQ3G2zMkGrN6UwcUBNXr06Khcvh1QtDPamWmPeXZmxi362Yet+RozVVEnXdsYbPmELVF1u4VVpXkBfYKOn/VOPSmnjA5CMgvdn4CDqz4Qt+g4VvfV0EkRB1QZfZi6ZT9v0vj1s6vOW6Wf/7Vk+Z9cy64lVT3myro31XmSzvknnnhC/2Q9FimzjJuXn9NOOy1uc1HP01ZjSbm2++VWbktdyqeVU7Qd1dfxSAIkQAKhCNABFYos5XZ2AnRAVazhUA4obLWnJxFwxNNCZYMcHGYNqs0bDqS/3XbbWZOXMm0Dan0hXpxtlmnAgAH6p6aO7eCAwvsszMl8bPOF1RQyDBo0KGILJ5TcxkvG5ffWJmC+F+iNN95Q48ePT9QrPBnqEvDON7M+2p7GdJEj40ycODEhF05Ul/DXv/41cd1ll11mvcxsm/CuNDjdzXKkvUPKtwMKE4RmushH0YAJ5hEjRkR/eEeHDC4OqPnz50fXjxw5Ul4efW92ApR29qsDinamVJ6dWY3P8aQPW/M1ZqqiThYZNx166KGJdibLiYH3pvTu3TuKv9JKKzk/jAA1ldFBSGah+xOUvYg+io5j2f4mGwH282H6+STl7G9F7B2Sqh5zZd2b6pLVyQF18sknJ9ro6dOn62wmji7cW7ktdSmfBlK0HdXX8UgCJEACoQjQARWKLOV2dgJ0QFWs4VAOKDhyzMnNrC2jXIssB4dZg/zXXnstkb6PFVCYLDfLdMghh/w/9s4FeK/h/v/8SzG/wdTQaY12qqatUTpKxn1Uq+NSasrP/JT+jKKtS11+qmW0aN0vcYuQuF/ifgmKlJIgQdzjmiARQuIWCRJBYnD+837SPdmzz57bc/ac7/N8n9fOfL/nPOfsfnb3tZ/dPWc/Z3eLJt3rrx8MUMq4uwHwqaeemuBhD7xp01fc4CZg9jhZd911WxldsGBBtOyyy8Z1S7Nsijh3IChEG6N4jzjiiDgtqu9pL+NuGt0BhS233NL10vp
tD4ZceeWV0ZQpU+LZf4pPbbJvkDa0AUqJWW+99eK8yvh7//33e9Pc6cUiBqg82Z0OgKJnSwxQ6FmellW7H0LX6nhm8uUqRJ10n8WyPtyxlw5V+6Y9R9KcvYfTAQcckObNez1EGXgFF9jLLi2crtfdnyiOMuVR9jmW9jdZuvTzSR76FaJNaZeafqWMvktK089cWe+mJleuHg3kDCg9q5p3Wz2Ly4jkc2W5+2RU0ZW629Iy+SvbjvpYcA0CEIBASAIYoELSRFY/EcAA1XBp12WAGjJkSPxAqwfbuXPnVs6Z+3CY9ZB/0UUXJeIfPny4N35XZtZAipaDMQ/pOqbFL+OXlp0xf2kzIfrFAKUlaNZYY42YnXTO6IOWB9l4441b91ZdddVIyzfgBi+B119/PVp66aVb5X3GGWfEGd1xxx1j/VhttdUi6UWe22ijjeIwodoYxWkbZST3/fffz0tK676WELXbBy0n6HP2YIj2BpFzX6x9BvM6DFCHHnpoIs2qm2lLB/ryknetymCDkd3JACh6lhx0Q8+MNoU/htK1Op6ZfLkNUSfLPDe99957cZuv9lGzndPc4YcfHrdHDz/8cJq3tuuhyqBN8H8uVGFWd3+iJJYpj6LPsYYF7a8hsfhIP5/koV9V6ke7tPwrZfRd0pp+5kp7N7Rz1i0GKHeGppaFTnNlufvkVNGVutvSMvkr2476WHANAhCAQEgCGKBC0kRWPxHAANVwaevlUvuhhHYrrrhiPJCwyiqrBBHvPhxmPeRrANceDJ4wYYI3Da7MNAOUBsR32GGHWKYGl337VCmS8ePHx/6UhrRZPa4BSoYaTevXjItJT09qzYIoMhDvzViXXdRXz3Z5mOXStDSUuT5q1KguSzXJCU3gtNNOi8tbum6cazDW8np5zl7aUcbLUM4ecNOsoDJ10G73pNda5s519mDIhRde2Lotw6sMb6Yu6PjQQw8lgmrvJXM/xB5QEq6BW7EzcnVUnrUfVFHDWyKRzo8qgw1GlF0evplhxp99RM+Sg27oma0dYc9D6ZrddoR6ZvLlNESdLPrcZOL/yU9+Ercxqs96SXadrinfaoO0756W4yvqQpVBWnxVmNntVx39idJctDzKPMcaFnb6aX8XU7HrqvSVfv6muH6Lh/vsYnQp1LGovpv4mn7myno3NWnqBgOUniO/+c1vxmW3wQYbeHXZpLksdxPOPnZzW1o0f520ozYDziEAAQjUQQADVB1UkdkPBDBANVzKerkMbYD67LPP4gdavYysvfbaQXLlPhymPeRrsE0v+opbf9tuu22kDb59zpWZZoCyBzgkc+jQoT5xrWudGKC0BIK9DJlJu/ZDOPnkkyMx7WUn/vbXazLgTZs2LVp99dVbZbTJJpuUGujvZRb9mnYNKK611lqt8naXp3PXUz/uuOMyMdXVxixatChuN1QHyxq2NHBq6q6Os2bNasuHPRgycuTI+L67VNX666+fGKi95ZZbYtmhDFCK/LHHHvO2PeobzjzzzMwBiTjxKSdVBhuMyLIDoOjZYnLomdGg+o6hdK2u9syX8xB1suhzk4n/7LPPjtsutYu+AWr7S3x3mV4jx3cMVQY+2eZap8ya6E+UxqLlUeY51uSd9teQWHKkn1/CQmed1o+klOK/iuq7kdh0X/izn/0s0soXWX+/+MUvEm1inUvwKS7NoNffpZdeGv31r3+Ntt566zh+rUrwhz/8IdLsvixXlrtPVqe60kRbWjR/nbSjPhZcgwAEIBCSAAaokDSR1U8EMEA1XNp1GKD0laQ9CLv55psHyZX7cGgMUPoa6Y033og0gHHIIYck4tasAoVLc67MAw88sOV/xowZrZlImnVgr4+tZfU0mJLlihqgNAvI5pR1vuuuu6auy52Vlm66d9dddyXya5ZiU75DLvvVTXkmLUsIaNDR6LhZEmzJ3ShaZ5114vsyVma5utoYV64MwGXchhtuGOdBeZ08eXJbcHsw5Pzzz4/vayDVnimg8CNGjIjv33777bHskAYoRaD6Z4yDpozMUUbiO+64I05HmZNOBxvsOMoOgKJni+mhZ7YW1XMeStfcdifUM5Mv1yHqpPvclPbhjonf1kW1K3//+9/Nrfi43377xe2bnr+KulBlkBVfp8zccq2jP1G63fII8RxreND+GhJLjvTzS1jorNP6kZRS/Jer72XanyaeucyzU5ljnQaovHTsueee0QcffJBbAGW5+wR2qitNtKVu/kK2oz4WXIMABCAQkgAGqJA0kdVPBDBANVzadRigNHhgP/BuscUWQXLlPhwqjq9//euRZtPY8Zlzzex6+umnM+P2yTTh3eNBBx2UacwyEckYds4558R/Mkj5nL561gyIY445JtJyDJoFMmfOnEh7SGkW15prrpnI1y9/+ctIy/T1sttqq60SeRLj3//+972cJdJekMD+++/fKnvNTtS+IK47+uijE7phL9Hn+tXScXb9zGtj5s2b19pfTEvduX/2hsvuTCzVwTJu0003TaRr+vTpbcHtwVh3b7pnn302MXtT7bPaKLkxY8bEskMboCRfywhpZqc94Ggz1j5VZV2ngw12PHZ6NAiR59CzxYTQszxNqX4/lK7V9czky2GIOuk+N+UNACsd9qyRH//4x4mk6bnG1PNtttkmcS/vR6gyyIqnU2ZN9CdKt1sedrvtnhd9jjU8TLlIDu3vYir080Y7Fh87rR9JKcV/ufqe1/403Rdut9128fuf/S5on9sGd9WtOg1Q+ojy2GOPbf396U9/in77299G3/ve9+LnScWvjytvvPHGzEIoy90nrFNdaaItdfPntp3277LtqI8F1yAAAQiEJIABKiRNZPUTAQxQDZe2Xi5DL8GngUz7QS1vNkPRLPseDpdbbrnoK1/5SrTMMsu0vuDffvvto4MPPrjwjBpXptbD1sO6vnC0X7yVn7/85S/RJ598UjS5lf1pUGqNNdZIsLSX7KocwQAIePTRRxP5WWGFFeIB9gFIDlE2RGDBggWtF1zVo1/96lfeWO0v2eXPnv3jBlA9LNPGfOMb30j4t8PaX8TOnj074U/tSxnnxuPbR8keDBk2bFib+MMOOyyRhn333bfl5+67746v12GAMgmRcVD71qlNtTnpPG/2p5Fhjp0ONpjwOtrtcN4AKHq2hBx6toRF2tkDDzwQbbzxxql/+mAi7aOPkLpW1zOTL98h6qT73JQ3AKx0qE2x25M333wzTp49u1P7QhZ1IcsgK85OmTXRnyjdbnmEfI6l/W3XDPr5JJNO60en7a+r73ntT9N9oVmdI0kp+asb9oAS///6r/9KtMtaoi/NleXuk9OprjTRlrr5C9mO+lhwDQIQgEBIAhigQtJEVj8RwADVcGnXYYBSFlZaaaX4ofZb3/pWkFy5D4dFHvLzInZl2i8yGuTW4K89aCJjXdp+UnlxdXLfHZQfMmRIJ2K6Koz98r7TTjt1VdpITD0E7P2NvvOd70RaUtL90ww/u67lfQm/8sorx/5lqM1y2j/JXuLPxLPDDjtE9nJPGmw298zRt8G4Ly7NpLKXldRMLy2r5z
p7MMRn0JHR6mtf+1oiHRMnTozGjh0bX6vTAGXSO3Xq1EgzywwHc3zwwQeNl9xjp4MNtuAyA6Do2RJy6NkSFmlnt912W5t+Gz03x48++sgb9cCsdgAAIABJREFUPLSu1fHM5Et4iDqZ9dzki1PXJkyYkGA9atSo2Osee+zRuqev8GVUKupCl0FavJ0ya6I/UZqzyqPqcyztb1Ir6OeTPPSr0/rRafubpe/tqYuipvvCIu+m3WCAEquXX365tdep6e9U32Xs8bmy3H0yOtWVJtrSrPxVbUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQG2wwQbxYIMGYtO+Ii6TXffhsMhDfp58V6ZtgFJYLZO37rrrxnnRQ7qWUWjKiZtmeJmXA81K8A1qN5WeEPHoqzKTHwxQIYh2vwwZbk2ZFz2q3fDNIDK5ddsYvSBmuRdffDFhIFJdmj9/flsQe8BNadWSmkXcq6++msij9k7yOXsw5Mwzz/R5iTQ4a3PaaKONIn2taq41YYBSwmR822STTeJ4Ff9RRx3lTbPvYqeDDbYsuzzyZkChZ0vIoWdLWKSddToAKnmhdc1tz0I8M/nyHaJO5j03+eLVs5Rdl3ffffeWNy2PapZR1nJ6ZVzoMkiLuwozO8919CdKc155VHmOtdNP+xtF9PPttaTT+tFp+5un724Km+4Li7ybdosBSqzc5QAvvvhiF2Hrd1nuPiGd6opk2W1RHW1pXv6qtKM+FlyDAAQgEJIABqiQNJHVTwQwQDVc2nqg00t8aLfLLrskBi3z9mIqEr/7cFjkIT9PrivTNUAp/P3335/Ii77SzXsRz4u3zH3NejIDzzpqn6hedhigern0yqfdXrt92223jZ5//vnUP21Ob+u6XlbTnNvGTJo0Kc1rfF2bwBv566+/fnzdPlEajR8dNfOoiHO/8DdL57lhiwyGfPHFF9Fmm22WSMchhxwS/27KAKW062tYM0AsHlqyrKirMthg4rAHHbLaXfTMEFt8RM+SPHy/tNykZvSl/T3++OPeDz7q0DW3PQvxzOTLc4g6WeS5yRe39h4xbavqtQb0tOSeuaYZ30VdHWWQFncVZnX3J0pzkfLo9DmW9jepFfTzSR761Wn96LT9LaLvdiqb7guLvJt2kwHq8ssvj9tgtcXaJ8rnynL3yehUVySr7ra0SP46bUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQGm/JDOooONll11WOWfuw2GRh/y8SF2ZPgOUZGimjp2fP/7xj3mig913lybLm+kRLOKaBGGAqglsl4o9+eST47qTt7+HPVCg+rbnnnum5uqII46I5crvJZdckurX3LBnGWivN5/TZs12XT/ggAN83tquXXHFFYlwo0ePbvOjC3Ye02ZAyZ8MavaSfnaamjRAKS3aC8fErxmZRV2VwQYTR9EBUPTMEFt8RM+SPEL+qkPX6nhm8uU5RJ0s+tzkxn/zzTfH7YjaExmcdtxxx9a1Nddc02vsc2WY33WUgZHtHqswq7s/UVqLlkcnz7G0v0ltoJ9P8tCvKvWjXVr+laL6biQ13RcWeTftJgPUXXfdlWiX//CHPxh0iWNZ7onA//lRRVfqbkuL5q+TdtTHgmsQgAAEQhLAABWSJrL6iQAGqIZLuy4DlGY5mAFLHTWAmedmzpzZWotasxR8s3zch8MiD/l5cboy0wxQzz77bCI/Wh5MS3p16rSMnvnLk7HeeuvFcWtvmF53GKB6vQSLp18zebTnk9qAvCX1jFR7lpLap0WLFplbieMLL7wQ1wvJ33zzzRP3fT80e8e0Sz/72c98XqJJT0+K/cjviiuumJoGW4D9daaW90tbPrDoYIhkH3zwwYm0mLSHMEBphoVmv2ofrDTGJn8ywpm4f/jDH5rLuccqgw1GeJEBUPTM0FpyRM+WsAh5Vpeu1fHM5Mt3iDpZ9LnJjV9tovoB05b8/ve/j3+fcsoprvfU33WVQVqEVZjV3Z8ozUXLo5PnWNrfpFbQzyd56FeV+tEuLf9KUX03kpruC4u8m3aTAWro0KFxm6y2+YQTTjDoEsey3BOB//Ojiq7U3ZYWzV8n7aiPBdcgAAEIhCSAASokTWT1EwEMUA2Xdl0GKGVDA5tmoEHHyZMnZ+ZOAxDG//Tp09v8vvnmm/F9+dNyXVWdK/Poo49OFanZGCZ9Ompmks99+OGHrT1ctI+L/jRQ7rqDDjqoJUuzCbKWlZo7d2607LLLxvH++te/dkX13O9VVlklzs/222/fc+knwcUJ2C/ZRZf6tI0dqmf//ve/UyOU/th1Mm/ZKlv30gxQimzDDTdMyE2bzWQSZg9wKD1ZbZPaQZPm008/3YjwHlX/V1pppdi/CZdlgHLbtGOOOcYre/z48bFc7cWQ5cwsBcW/9957Z3lN3Lv22mvjOBRWy5eUdUUGQNGzdqroWTuTEFfq1LXQz0y+/Iaok0XbGF/822yzTaJNMG2altQr6uosA18aqjKrsz9Ret3yCPEcazjQ/hoSydnL0lv6+cVsqtaPJYSLnbn6nvaMY6Q13Rced9xxJurU4913351oB++8885Uv7pRJs+u3zyD2K677ppIi2aq+pwrN4+7T0ZVXamzLXXzF7Id9bHgGgQgAIGQBDBAhaSJrH4igAGq4dKu0wClQVIzuKCjlr9K21h71qxZiQ1GfQYod5BXswOqOlfmoYcemipSaZLByM7T7bff3ubfHtiV3yOPPLLNjzFA6f5f//rXtvvmgh7w7fiK7kdjwnfbUV8u219Ab7TRRt2WRNITkIA92Fi0vp5//vkJnde+IWnu3nvvTfjV7JwFCxZ4vU+ZMiXhN8sApaWh7OXvVlhhhdbMKJ9gLYlpL+23xhprRB9//LHPa+vaI488EqdDS4rkuUsvvTT2b9qCLAOU26Yddthh3ijsdmrrrbeO9ODqc9p03d4DSuVT1I0cOTKR9ltuuaVo0NhfkQFQ9CzGFZ+gZzGKoCd16lroZyZfxkPUyaJtjC9+t31Xm1b04wQjr84yMHHYx6rM6uxPlE63PEI8x5r80/4uJkE/bzSi/Vi1frRLzL7i6nvaM46R0nRfqOWh85w+ajLPczpqZlCWK5Nn12+WAUrLQNvp+PrXvx4tXLjQmxRXbh53n5CqulJnW+rmL2Q76mPBNQhAAAIhCWCACkkTWf1EAANUw6VdpwFKS8ztt99+iYdbDRxoQNN2mrWw+uqrJ/z5DFD2ZtV6YP7e974Xff7557ao0ueuzM022yxThjZntR/Wv/GNb0Tul7v2wK785hmg5OeCCy5I5EX50ouBbfD63e9+l5m2XrjpbuAsY1TWDLBeyBNp9BO48cYbE3VFS0kWcfqq2K5jWs7ON4tQstTGqF7Y/rfYYotIxibbTZ06NRoyZEjCX5YBSmFPO+20hBFKy1/KAGwb0V9++eXopz/9aSx35ZVXjp544gk76rZz+wX8F7/4Rdt994LaAhlq7TxmGaDcNk08fM5tpzT7wl02UBztuHVu598n177mflnbyd55eQOg6JlNfMk5eraERaizunUt9DOTL98h6
mTRNsYX/yuvvJJoy9Su6av4oq7uMvClIwSzuvoTpdctjxDPsYYD7W8U0c8bbfAfQ9QPv2T/VVff055xTOim+0I9E+Y5d8+/v/3tb5lByuTZ9aul5dW3GKdnuCeffDKyP4RUO6z3sbTZTwrrys3jbuKzjyF0pa621M1fyHbUZsA5BCAAgToIYICqgyoy+4EABqiGS7lOA5TJiowr9qwXGVU0jV7LZ7mGJzPIahug9DWWNpzW4K65b476cnbMmDEmqsLHLJl77LFHpP0YfE57U2kg2sSvo/IwbNiw+Ksxd2DXZ4DSF3K2DJ1rE+7dd9+9tbSfDFv2/X333bfUwK8v7QN5bd68eS1G9hJoJn8/+MEPoksuuSRhgBvItBJ3NQLaT0hLy9nGU1PWu+yyS/Twww97I5g2bVqkl3BfONWxa665xhtOF1X/7DZG5zJ47bzzzi0Din3PpCXPACW548aNi1ZbbbVEXdRsIIVdf/31E3GutdZakfKQ5j766KNIM57sAT2lRXVbX15muccffzyRBp8Bqmyb5rZTSotmfak+apNlzeqyuanuzpgxIyuZ8b1HH3002muvvRJpNtwPPPDA6Lnnnov95p3YvGxjNXrmJ4eeLdXSu7J65qe5+GrTulb1mcmXlxB1smwb40uHrq2zzjpx27D88sunzlq1wzddBoo7BDM7DyH7E8nttDyKPMeadNP+0s8bXXCPoeuHK9/9XVbfB7Iv1JLtvmXnZ8+e3foo0X3O1YdW+vjq3XffTWS7TJ6z/Or5a7nllkvMZjfPZDrq4yLtbeRzWXKz3pdtWaF1JWRb2mn+yrSjNgvOIQABCNRBAANUHVSR2Q8EMEA1XMpNGKCUJQ0677PPPq2BB3tpK/sBWIYd7TWir7VksDBOD3lpYRR+k002MV4LH/NkyuCV5q6//vp48MSkXwO1ZvaAO7DrM0Ap/htuuKG1n4rPsCa5GuiWQarXl90TR3f5M8PNHPXypZcAXO8T0Au0KVffMW0PIfsrVV+4tddeOxOOjDJark8GFLUXtvFE+6ip/XnsscdadUryNRuziFN+tBSHXtDNfmxGtuLRV5L6Kl8DpFlOMyV9+dK14cOHZwVt3dt///3j8MqH68q2aTLmnHTSSS1jk90GKU92e/v9738/GjFiRDR//nw3ytTf2qsuLa+6/uc//zk1rHsjbQAUPXNJLf6Nni02QJXVMz/NxVcHQteqPDP58hKiTpZtY3zp0DV7aWHNki/iBqIMQjBz8xaqP5HcKuWR9xxr0k37u/jDDPp5oxFLjnXUjyXS28/K6vtA94VDhw5ty4SWkMt6NnrggQcSYcrkOc+viVd1Wu/NeibW3svaAzRrNZE8uVnvyyYzdehKqLa0Sv6KtqOGA0cIQAACdRHAAFUXWeQOdgIYoBou4aYMUHa2tEeLpv9PnDixNdNIX9Ub443tr5/OZXDT7AkNOmkJLzHBINNPGkBeQxKQsUTL7k2aNCl65513Iu09ZpwMRapjeQYj498+6uFOs5VefPHF1tKbnciw5ZU5V3ugvOgL2jrc3LlzW22Q2mZ9uavBm25og9IGQOtgUFYmelaWWBR1q56Vz0lzIQbbM5MGDvWhjv7sWY3NEe2OmAayPylKgPb3tY6eFYrydf31az/vcuiG33WXRTfkcbCkoRfa0sHCmnxAAALdSQADVHeWC6nqfgIYoBouo4EwQDWcRaKDAAQgAIEeJdDNA6A9ipRkQwACEChEgPa3ECY8QQACEIAABCAAgQEjgAFqwNATcY8TwADVcAFigGoYONFBAAIQgEBhAgyAFkaFRwhAAAJBCdD+BsWJMAhAAAIQgAAEIBCcAAao4EgR2CcEMEA1XNAYoBoGTnQQgAAEIFCYAAOghVHhEQIQgEBQArS/QXEiDAIQgAAEIAABCAQngAEqOFIE9gkBDFANFzQGqIaBEx0EIAABCBQmwABoYVR4hAAEIBCUAO1vUJwIgwAEIAABCEAAAsEJYIAKjhSBfUIAA1TDBY0BqmHgRAcBCEAAAoUJMABaGBUeIQABCAQlQPsbFCfCIAABCEAAAhCAQHACGKCCI0VgnxDAANVwQWOAahg40UEAAhCAQGECDIAWRoVHCEAAAkEJ0P4GxYkwCEAAAhCAAAQgEJwABqjgSBHYJwQwQDVc0BigGgZOdBCAAAQgUJjAddddF40aNar19/HHHxcOh0cIQAACEKhGgPa3Gj9CQwACEIAABCAAgboJYICqmzDyBysBDFANlywGqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/Nttt1209957R3feeSd/MEAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdKDLdeD222+Pttpqq+jwww9veDSZ6CDQ2wQwQDVcfpoBNWTIkGippZbiDwboADqADqAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA50uQ4ss8wyrTLabbfdGh5NJjoI9DYBDFANl9+OO+4Y7bPPPtE111zDHwzQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB3och24+uqro2222Sb6y1/+0vBoMtFBoLcJYIBquPzYA6ph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAfaAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/BigGgZOdBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEKhDAAFUBHkH7mgAGqIaLHwNUw8CJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBABQIYoCrAI2hfE8AA1XDxY4BqGDjRQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCoQAADVAV4BO1rAhigGi5+DFANAyc6CEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIVCGCAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/ANlgFq0aFH06quvRlOmTGkd33///ejLL79sOPe9Ed0X
X3wRvfbaa9HHH3/cGwkmlRAoSODll1+OXnjhhdbf/PnzC4YavN7q5KE2V+3ISy+9FL355puReIdocz/88MOWvLfffjtSWzUY3WeffRapbKZNmxYpn59++ulgzCZ5ggAEIAABCEAAAhCAAAQgAIEeIoABqocKi6R2FQEMUA0XR5MGKA18HnjggdG3vvWtaOmll46WWmqpxJ/Sst1220V///vfIw34GacBv9VWWy33b80114w22GCDaN99940uvvji6K233jIi2o5lZe65557RiBEjWgO4bcJquKCBzmOPPTbaZJNNoq9+9astTr/5zW9qiCm8yCFDhqSW1VZbbeWNUOWVVsaXXnqpNwwXe5vArFmzEu3AmWeeWThDakt8+rLFFltEegBLc/fff783nGQ98MADrWAy9H77299O9eeLN+3aGmuskZaUtutVeLQJ+8+FV155JTr00ENb+fl//+//Jdpbtb/LLbdc9JOf/CQ6/PDDo4ceeihNTNv18ePHRzvvvHO00kortWQus8wyreNXvvKVaJ111onOOuusaO7cuW3h7At1lqHiyWvjTz31VDs5bee33nprdMghh7Ta4GWXXTbBTv3XD3/4w+h3v/tdNG7cuDjsQOlOnABOIAABCEAAAhCAAAQgAAEIQKBvCGCA6puiJqOBCWCACgw0T1wTBigNyu21116JwWYNfMpYtMoqqyQG9oxRavr06XHSFf5vf/tba7DU3C9ylOHmH//4h/er/E5lahD3oIMOShjI4oQGONHMhN13393LpMxgdoCkdCzihBNOiDQQ7ZbRd77zneiyyy7zyn322Wejrbfeui3MD37wg2jS05O8YbjY2wRkcLJ1ZPPNNy+coQ8++CD685//HBnDhy3nvPPOS5Wj+vWHP/whEa+MCTKM656cZvTY8qqcK31FXRUebhya7bTffvsl2lwZUdZff/1IhnrfBwCHHXaYK6bt9+zZs1sGK5uJ2sT11lsvNkaZe2p/r7/++jYZ5kKdZag4stp4fejw2GOPmaQkjprdJHYmHzp+7WtfaxmcfPomPTRuoHTHxM8RAhCAAAQgAAEIQAACEIAABPqHAAao/ilrchqWAAaosDxzpdVtgNIgo2a9mME8xXfBBRckZiloCa611lor9iO/tgHKZOKqq65K+Dn66KOjhQsXtmTpa/cnnniiNZDsful/+umnGxFtR1fmkUceGS1YsKC1xNLUqVOjW265JTrqqKMiV6ZmFYR211xzTbTCCivEeZSRTgOh9957b6S0KF294q677ro4H6bsr7766szka9aX8aujZmZ88sknmWG42ZsEtPTb97///UR5yyjyzjvvlMrQueeem5AhvVEbo/Ygyx1wwAFxuCOOOCLh1TUiyLjy6KOPtuqflpi78cYb47CK78EHH2wZuRXuvvvua802Mnpc1AAViocyoqX1ZGAxaVh++eWjs88+u9VWmox+9NFH0R//+MfYj/zmGaBmzJiRKDPNZL3jjjsitfHG6beum7h1PP/8881t77GOMrQjctv4H//4x96PEhRGZbjRRhvF6dcsJ83UMu7zzz9vzYS1+4MsA1QTumPSxhECEIAABCAAAQhAAAIQgAAE+osABqj+Km9yG44ABqhwLAtJqtsAZQ+EajB24sSJ3nTNmTMnMRvKZ4DSHhz2wOaJJ57olSWDje1Pg4Vps2hcmZox5XMyRNmDjpKveEI5DRDbad57771bg6Gh5A+EHM16s/N00kknZSbj4Ycfjv1rSTPpBG5wEpBBx9YNcz5q1KhSGdYsFhPWPqr+ZLlLLrkkDjd69OiEV9sApZl8tgFCHm+66aY4rOJ0l66z9bioASoUD6Xvv//7v+P0qc3ScnlpTkvvGW5ZBigZXmS4MX5XXHHFNi4mDu3tp/vGrwyLTz/9tLnddqyjDO1I3DZey+qlObsd1qwn7U3oc1qO1eQvzQDVlO740sc1CEAAAhCAAAQgAAEIQAACEBj8BDBADf4yJof1EMAAVQ/XVKl1GqD++c9/xoN0GqzLWhpLCTz55JNj/z4DlPZIMYN+OqYZoCTL/bpfA4s+58pMM0AprGZc2fFrr6oQzh7MlPyLLroohNgBlzFz5sx4/yrlS8stalmsNLfPPvvEfGXwww1eAqZ+fvOb32zttWTq1a677loq05MnT451xsgwR9cwZAu+9tpr43D33HOPfSuxBJ900nV5Bij533bbbVvyixqgQvG4++6743yJwznnnOMmP/FbM8WURvnNMkDZBjv51UyvLOcy+ulPf5rqvY4ytCNz23gt55rmNOPJ6I/2/UtzmkGmvlN+0wxQTelOWhq5DgEIQAACEIAABCAAAQhAAAKDmwAGqMFdvuSuPgIYoOpj65VclwFKX8xrzx8zmLfyyitnGh+UuHfffTfaZpttop///OfRW2+91ZZeLc9l5OmYZYDSUlC2X99goCJwZWYZoO68886EzJ122qktjWUvaGaAGQBWerXc32Byxx57bIJZmhFSBkczw0yz5nCDl4CMkGbw/phjjmntqWbqqvYNyjJSulQ0O8mE1awnc67jj370o8RSn3bYG264IfbrGlPsGVC33nqrHax17hpXfIauiy++uCW/iAEqFA8tD2gvZbrSSisVWrZT7ajaXC0t6HPaE0myDNvvfve7Pm+Ja3oIXn311eMwCvv4448n/JgfdZShka1j0TZe/Mq0xVdccUXrQ4ebb745jq5p3Ykj5gQCEIAABCAAAQhAAAIQgAAE+o4ABqi+K3IyHIgABqhAIIuKqcsApaX2zICljpo9VNW5A4lZBih7UFPxb7HFFt7oXZlZBqjXXnstkacNN9zQK7PoRRnp1llnnVjmpptuGunaYHLaH8YevNayVpo94Lo99tijxUFGKHfJM9cvv3ubgL2H0nPPPReNGTMmrgOqq3fddVfhDNr1XPuOyYBttztp+w9lGaC0r9xZZ53V+tN+Sq4rYoCaPXt2K/ywYcPc4G2/Q/HQHnh23rNm+rQlIuPCuHHjEnJlVC7ijj/++ES4U045xRusjjK0IyraxssQaPOTLpV1TetO2fThHwIQgAAEIAABCEAAAhCAAAQGDwEMUIOnLMlJswQwQDXLuzUTQV+/h3Yy5NiDee4yV53E5w4kZhmgnnrqqUT8IWZAabDcztOvf/3rTrIRh9Eyc7a8EIxi4V10cu655ybyeeqppyZSZxsrjzzyyMQ9fgw+AmZfuHXXXbeVuQULFkTLLrtsrCNajq6os40XV155ZTRlypR4Jp3qlgzsWmbOdVkGKNev+7uIAcoNk/U7FA/X4BOqPTniiCPishHTJ598Mis78T3Fb7dvW265ZXzPPqm7DN1+I+sjg/XWWy9Os4zh999/v53UyuehdadyghAAAQhAAAIQgAAEIAABCEAAAj1LAANUzxYdCR9gAhigGi6AumZADRkyJB7I0yDk3LlzK+fMHUjMMkBpHyV78HP48OHe+F2ZWYOT119/fUJmWvwyfi2//PLxX9qX/zajDTbYIPryyy+9aez1i1rCa4011ojZSeeMPmjZq4033rh1b9VVV23
…)
![image.png](data:image/png;base64,…)
Utz0Cq/sjxygcDBw0VL4fQgZ+7979qFS5qnxt/kf6021+HwIUTtKmzQAKQZp+DwKWAsgYLYD8HpB1rxoBjUCIQkALICHodlLLTQHEwcEpBF3Vf/tSrl27Lh/544cJv4fGjZ8gzwItCSo24nvaf61u+/adULNWHTx7/lwyb/G7KD3d+0lGqa+11cf/LAL8ojrjFTT9PgT8CCBjtAXk9yGte9YIaARCCgJaAAkpdxKQuAYRQP7DLlgh6Hb+8KXQpWrg4KGS5pYfz1PxCT/cYQANr169DsbF1KhRG7a29cDYBf8xNQE000UagRCJADPDFchncsHSMSAh8hbri9IIaAR+MQJaAPnFgP7N7pQL1n85BuRv4h9Uzs3gaPVBRwZhvzRnxvod42PcxMOHj/DkyZdTFP+O8+o+NQJBCQFtAQlKd0OPRSOgEQgOCGgBJDjcpW8co+GCpS0g34iYrqYR0AhoBH4eAZMAkk/cHpkIQ5NGQCOgEdAIBI6AFkACxydYHdUCSLC6XXqwGgGNQAhBQAsgIeRG6svQCGgE/hgCWgD5Y1D//hNpAeT3Y6zPoBHQCGgE/COgXbD8I6L3NQIaAY1A4AhoASRwfILVURcXcxYs7YIVrO6bHqxGQCMQvBHQAkjwvn969BoBjcCfR0ALIH8e8992Rm0B+W3Q6o41AhoBjcAXEfDrgqW/hP5FoPQBjYBGQCNgRkALICHoUdACSAi6mfpSNAIagWCDgBZAgs2t0gPVCGgEgggCWgAJIjfiVwxDCyC/AkXdh0ZAI6AR+D4EtADyfXjp2hoBjYBGQAsgIegZMAQQ/SX0EHRX9aVoBDQCQR0BLYAE9Tukx6cR0AgENQS0ABLU7shPjMfVtZnkoXdydP6JXnRTjYBGQCOgEfgeBLQA8j1o6boaAY2ARgDQAkgIegqUBcRRW0BC0F3Vl6IR0AgEdQS0ABLU75Aen0ZAIxDUENACSFC7Iz8xntq16ogFpHr1Gj/Ri26qEdAIaAQ0At+DgBZAvgctXVcjoBHQCGgLSIh6BqZMnY7qNWph8uSpIeq69MVoBDQCGoGgjMDTZ8+RKUNGUQANHjIsKA9Vj00joBHQCAQJBLQFJEjcBj0IjYBGQCOgEQiuCDx7/hw2NpUQK3YcTPbSCqDgeh/1uDUCGoE/h4AWQP4c1vpMGgGNgEZAIxACEfj48SPu3LuPq9eug+5YmjQCGgGNgEYgcAS0ABI4PvqoRkAjoBHQCGgENAIaAY2ARkAj8AsR0ALILwRTd6UR0AhoBDQCGgGNgEZAI6AR0AgEjoAWQALHRx/VCGgENAIaAY2ARkAjoBHQCGgEfiECWgD5hWDqrjQCGgGNgEZAI6AR0AhoBDQCGoHAEdACSOD46KMaAY2ARkAjoBHQCGgENAIaAY3AL0RACyC/EEzdlUZAI6AR0AhoBDQCGgGNgEZAIxA4AloACRwffVQjoBHQCGgENAIaAY2ARkAjoBH4hQhoAeQXgqm70ghoBDQCGgGNgEZAI6AR0AhoBAJHIMQLILdu38HsOfPw8OGjwJHQR4MMApO8pmLJshVBZjy/cyCr1qzDlq3bfucpglTfo8eMw6rVa//4mK5dv4FatWzRuHFT3Lx1+4vnv3v3HubN98a9e/e/WEcf+HYE7ty9hyHDRuL0mbPf3ug/UPPff//97Cr37T/4V+e9+QsXYcasuZ+N628UDB/hifUbNv2NU3/xnMuWrcTuPXu/ePxHDizwJuZzfqTpT7WZPGU6Fnov+ak+gnPjT//+iwXei3H06DG5jKdPn2Hm7Lm4cvV6cL6sYDf2HxZA3r17j9Zt2qNOnbpo3rwlnJxd4eDgjDp16sHZpRkuXLz818F4+eo1atW2RYYMmXD/wcPvGs+nT/+CzEiPnu7Yv//AN7ft2q0nqlWriU4dO8PevhF69+mPC5euoHPnrqhn1wB79u775r5+pKL34qWoUaMWOnTohKvXb/xIF7+sza3bd3H9xs3v7m/d+o2/HafvHtQvavDk6VPwuSTNW+CNOHHiY+y4Cb+o96DfzcpVq3Hg4KE/OtBTp06hfYfOGDVqNGrVqoMmDs54+uy5MYZLl6+ACxCpSpXq+N//cuHGzVvG8f/6xvMXL3H2/IVvguHN27d48uSpUZfbi5cux/Ubf3cuMgYUyMabt+8wfORo2Nk1QPt27eHq2gxOzm5wdWuBqlWro5d7HxAL/zR27AScPnPGf7Hss89GjZuiRInSmDN3gZTduXMPb9689VP/6LETSJ06HRydnP2U/8mdHTt3Y8uWv6MMOe1z1s87uWz5ChwxM4d/EoMvnWvZ8pWwsrLC7Dm/VkDbuWs3Nv8FzCnc7dq150uXG2j5iZNnPnt+A20QBA8OGz4K0aLFxMZNW2R0TR2ckCJFGpw64xMERxtyh/TDAgglyE2bt6BMmfKwta2HjRs3Y82adVi9eg0mTPTC/IWL/zpq799/wISJk1Crdl3c/U6N5vkLl1C9Rm2ZdNauXffN17J333507dZD2lWsUAkHDx2RRcvNrbmUTZ485Zv7+pGKDx89RoliJeRc+w/8WUbP/3i79eiNI0eP+y/+T+/PnDMfe/eZBNpTp8+geo06mDJ1+n8ak9998QsWeqN+g4bGafbvP4iXr14Z+8NGeOLkqdOyTwtN7Tr1cPHS31egGAP8yxu79+zDmLETv2kUe/buh9fUGd9UN6hV+vjpE46fOIV6de1k/mzQoJFo4bdt246ZM2fBtp49Dh0+6mfYS5cuh5VVaKxcvcZPOXfu3LmLuvXqo3fvvli+fAW8pkzDP//kwMxZc+DfAEJll6tbc3Tp2v2zfkJ6AXmJvv0H4e3bd0H2Us9fuIgC+Qti6rT/9lx9/eYtbNm2M8jep28dGPk0mwqVscFsZZs3f4HweydPB6xI+NZ+db3vQ+CHBRB1mlGjx2LS5Klq1/h98fIlTp0+jSVLl+HUqTP48OEDnj9/gZUrV/nRgF69eh2zZ8/Bjh1+H+rHZi3akSNHsWqV7+TOB2b9+o3GebjBhYN09uxZWSjUPsso0To6ueHy5StSh/8uXbqMCeMn4tgxk/nNOGCx8erVK5w7dx7VqtaAt/ciiyPA8eMnMW/eArwIQBvGij7nzssCNnz4SKPdjJmzET1qdCxctESuf+/e/cYxtXHx4iVMnz4D+/YfwPsPH1Uxdu3aLabfj5/+xb1790TwMw6aNz58/ATvRYvlOulyljpVWsPtYfv2HaCm5dVrk+ad2jZLDS8xnTNnLh5YuKmdP38B6zdsNOpxEb53/4H/02LVqlVYsmQpLl+5ahyju1u7dh0QJ1YcjPQcjQcW1qd16zaA2q3AiC4yjx8/MaqohWnevPnwOXvOKOfGx4+fMG36DLBfS3pmvjcnTpzEsWMmIejDx4+Cr//nh/dx9uy5WLFipTANqp+Hjx6BDMYrs8VClfN31arVWLp0mWURiNmKlav8lFnuHDx0GHnz5IeTkzOOHDE9e+59+mPtug24eesW5s9fiFev31g2wYmTJzF+/ARcvXrNT7n/nTNnfDBhwkT4+JhcXS5cuIjFS5
bixIlTePv2rfzt2r0Hhw4dMZqqZ2zRoiXYtm2HUc4Nda9PnjyN27fvyLFHjx7Dy2sqtvt7V2nR2LFjl5/2V65cxYwZM42yT+Z39MmTZ2JZVAeoMSdzx2v3r11+/MRklVi/YQO2bfc7PtVe/XJuWbx4KWbMmIXHT3yfne3bd8LFxU0sH/MXLMR9i2f42fPnGDN2PHLlzosBAwZKO1pDWrftIN0eP34ca9Z87i52+MhRTJwwCbzGL9EH8/Xu379f7oNlPVpf3r9/j6vXrvsRdKjQmeI19ZvcRfft2y+47T9w0M9coTDknLEmAMXJseMnMGnSZPB5sSRlEeJzyPlD0e7de1GmbDv8PO0AACAASURBVHnUqF4TXLQV8drnzp0HPjuKrl2/KcqosmXKgVbM1+b5hlZQS40/3bL4zh74ioKE8++4cRM+GyvPx/lh5szZ4DtlSc+ev5Bd4kM8/dPWrdswd958cC74EvG6qO2eMs2vIOVz9jz4XllSh/adpC6tJv6J1m/2c+War1tH587dMH3mbKj3wbLNtBmzMWXaTLHG8drUO0hBZsuWrdi0aQtumi1zZ8+dx7r1G/y8S5Z9cZwTJkzC8eMnLIuN7YULvWV9MwoA3H/wCI8eP7YswrJlKwJ8B1iJ7+zixb73n2Vq3b569SrmzJ2H1/7ms1OnTpvnMxMmdI1s0bI1ihUrgUWLl+LFS5Ni4NHjJ7j/wHe94bvq5TUF69at9zM+yx06tak5jXzHocOmue7Zs+eYOWv2Z14QL1++lPeNa5t/2r17D6ZPn+kHD2cXN6w2zwcLFy7Cw4d+vSo2b94ivMzRY8dBoUrRs+cmqxl5HK7FlsTrVfeU5Zy/16z1e40fPnyE6m3BgoWg4op06/ZteQdemjFj2cePH+WeBTY3sd7tO/cCXGPnz19g9C8nsfh38OAh5MyZG1SmHj58FJwH+T5R0Us6cfKUrMNcN0l8x8iXcPwzps80vCGI25w588D1xD+Rr+I6r65XHeecQeJ6/sAf7qoO12Ou4/ResSTye3x2rl33fQ95nPP8IvPzy3fNwcnNz3zCtXz/AZOykPyspl+PwE8LIN179sYUfxovTo4kvkixY8dFkkRJZf/w4SOIESMOPMeMk32fs2fRsLED6tatjzSp0mLBAm8pHzRoKIoWLYGp02egX/+ByJ0rryxYtEoM8BiMRAmTiADAykePn0DFipWFyRg5agxy5cqH1m3bSz/8t3PXHtg3bIrLV0wCCBm5Tp27oXSpMqhYqSqOHT9p1A1oo379hn4YKdbhAl2tWi3cvns3oCbYvGWrLDzu7r2N4+PGT0LcWHHQqXNX0L81YfyEMnmoClxIW7Vph5o1aiNF8lTo1Kmr8RI2a97StMjVq49xE72QKEFi9OzpLpMN23MxrmBTEdaZMmPYyDHiQpD9fzlxzrxYtu9gWiRbtGiJt+8/IlWqtGjWrKWcun9/D2lXu05d1K1nj4uXTDht2rIN8eMnRPKkyTFuwmSUK1cBObLnxIWLl6QdmQv6rtK9oGTJ0siYwdrQ7B87cQpJEiVB9MhRwH7ZJyfkXr16o0iR4nJvv+R2NGjQEOTLVxBcmN69fy+uM+xjzNgJqFmzDgoXKY6Tp0wT8IsXLzB8xChUrlINWbL8g1GeY2Rs1NjyPGzDSaZFyzZ48OABxk+YhBo1aiNnrjwYN96k0eXi1NO9D2xsKkkbWsvon00GgS4Jbdt1RI+evUEBUtHR4yfRslUbqU//dhKZfO9FS9GmXQfQDc//wsw6U2fMQopkyZEzey5MmzFLMOFz0a17LwwbPhI5cuRGx05d1WlkMmzdph2KFimO8jaVAmTEWJn3hC5GxYuXFIyuX78BaqKTJEmOGNFjSX+cfMkMde/RS/aHDxuBggUKYaTnGLi4NkfaNOlFeKJA17p1W7EY7Nl3EBUqVRUBiYzE4CHDUKliFRQrXtIwXZNx7e8xGM2at8KIkSZhk8zm1KkzYGtrhz59+st7TEwp/KRPlxFLlpgENy4kfft5iNtT7dp1Ua16LZw8eUrGx3vF+0qXmLZt2ws2S5YGLLjSmjF+0hQ0bNQUxYqWQPUateR+syPOF7lz50WRwkVRs5Yt+Gwoooti/QaNkNk6CypUqIxLl6+CSoAG9o0wbfpM9OvvgcyZs2Kyl6+C5YzPObRp2wFFChdD3XoNRIhQ/anfLVu3g0x4u/Yd4TFwCLLnyI2hZmUEhbdMmayFQec8VKlSVWnG+2XfsAkKFSqCcuUr4PYdk9Cn+rT83bl7j7h5VqtWA9bWWbHWzJR5eAxC9uw54DVlOjw8hohLDwUsRRy7s0tzlCldFrlz5wOFFxKFT84d3br1wJx53siRMw8cHJzw+s0bTJ02E9mz55R3n8omEhdrCs716jVAntz5QKaa78vho8eRL29+pEiSDG3bd8Ljp88wcuQoeU/UAs65YNgIusLZCuarAxCSeI4TJ0+L+xPxaNioicz/nHffvjNpyRctWYY6tnYoWaqsMe906tQZeXLnEZcvPlcZM1obzyn75P3t3KU7ihcvhR69+nxmhZCLA+DpOVrelZnmeIjzFy+B99Q/EYcJk6fBOkNGJIgTD/csFDisu27DJoSxCo2s1plB4fftu/fSBZl0S4FM9UvPAc6nnBNKlSqD0mXKg/MTlTuJEyVFxgyZjHVsstc0lChV1rDcqT74e+3adXTp2gOlSpaRd4vKJp7TrVkLcQueNWce7OzsUaRoCWM+nzV7LnLnyY/NFtc5d95Cad/AvrFYbyZOnGQoI/iM0aqYK2ce9OnbX05/+rSP3BNacYYOG4FcufKifcfOxtAoELRq3Q5FixZHufIV5V0jE8v7kSZVapDBp+DBtTBrlmziTcHGd+7ckTm1YoXKMg9y3SGjbUkUvOvVq49iRYtj8bIVgiOVPYzZWL12PQoXLgbbuvUNge3Wrdvo22+AKBjz5i0gVinVH+f3Vm3aI2/e/KIlV8K5k5OLuJvTAsD3p0kTB9UEa9ZtQOUq1c0unDmNeYzMev68+bFi5Wq5Br6ve80u3VQGZMqURZ5tdkQlyohRY+DarKXwCFyz6OpevXpNVLCpgJWr16F69VpyjRQAGLPD98/RyRVKCDl/4bKcp3PX7jJfB8Tk894UKFgEl69ew/sPH4Qf4lw9dtxEcaEvULAwKET5pwkTJws/QEvQ+AmTMXf+QiRMkAgUxkiM24wVOy6mTpkm4+b95LvrOXosbGwqomq1mqJ8PXHqDPLkyY96dvaGuybnj6FDR6BK1RriVdOydVtZH7luNGrUROb2A4ePomz5ilhsXj8sx0flE5/pHDlzw8HRxRCunjx9Bs/R4+T8terUMxSlXHuomKZLMImCsV2DRgZvQYVtw4ZNRNgaNXrcN7ugWo5Jb38dgZ8WQOhLV66sDXr06IU2bdqhbbsOOHbCl6k/e/Y8smTOKtqYO/fu+7F+dO7aQ2IkOEyPgYOEYeH2kKHDETliZFSsVAXv3n8QTU/YMOENH9q+/frLREc5d9eefUiRPCWyZs4KpfVOkzodhgwZKldPrV2jJo64f
/++MIkDBg2TxY0HyWBw8guM6F5G68D30NZt22UB69Onr9GMAU5kAJu5NZcyx6aOiBQhMogJiUJUtqz/yHajho2QMH4iXLxs0rBevHwFUSJGQs7/5cCFy1dhW9sWoa1CGRodam/ZN608FP7J5GZMnxFnzNYCTqCxosVAjv/lwJt3H8RiwYmZTD7bjRtnYlI4sZQo4YtH0yYOcpxxJes3bpbtvn36yRg5gbMtrRnv3n9ElEhRJPZHDgJo0tgBiRMmxm2z5mL1mnVS//TpM6LlihYl+meWLLbt3KUbChYsAiYPILPh7OyGOLHjGkwgffgppJBGjBglwiu3ubiFDxdRrB30qeXYWjRvJfjSojJv/kIMGjxM2hFPCnHUEvFe1ahVV8qfv3gh5z579pxY7CZ5TZNyCpSlS5eT7QULF0GVP3/5CsmTpxJLCBfOeQtME/Eoz9Ho3t3E6Esji3/0K58yxdQvJ1cKpP/7J4cERlNTXLhIMUPD0659J4NJbNmqNQrkL+RHs8ZuabXq7zEE1NyT2rRtL8wpt2/cuIX8+Qpi5KjRWLZilYzpzRuThaV6tZqC0cRJXtJu9OixKFioCB49eYpOnbogU8bMmDl7nuB68+Zt2aZQSuICkCdvAdmm+xjvLYlMD5kFupgNHe4pZTRtN27qJEIdg+3jxoknliYepDWFPvaKSpYsg7JlTThz0UqSOKks+Dw+ePAQ1LW1U1X9/Pb3GAT33qbnkgcoENe1rWfU4RidnFyNfcsNWllcm7UytKV0vbLOlAVlypSTuWf+Am/kzJlHtGpczAYMGmpoecmAcYH1T2TcYsWIKUoOHiMDmCxpCmzZuhUbN25CxPARhTkgk00tPYPya9auazAR9g0aIkf2XKA7pX/iM1OpSg0s9Da5uFIRUb1aDanWpUs3uafNm7eS/a1btyNB/ERiUaEVq0LFquISyoMUTBInSiJa8A0bNyNGtOjIn7+gMIA8njJFKtFUc9tzzHgRkKVTQGIivM2WD2pwU6ZIjZEjRslhcVWywJ7uTP9k+5/x3JLpoAsSafz4iShT1uYzZpLHKKAMGWJ6X6dNm4F4cROAaw3vAQXdE2aXuRkzZ4mgQQ1pV3X9LVpL/yNGesK+UVPZpjVooPn9ZwEFS7pGBUTjx01A2FChxZpDBQAFzeNmwVjVJ8N07vxFfPwX6N9/gODO9cQ/UXjlXMS/4kWLY6WFNd9/3UmTvZAyZWocPmLS3NeubStMFevR2tCmXUejCRlOpQwyCs3KKF7nqdMmC1fPXn2EMb/34CGqVa+J6NFiGHMpBeTyNhVFEOO7SEFz+87d0h0FLgo4VAKR0qRJJ+sTXZmpge/U2ddVLHXqNGJ9JdOaNk06EUI55zKRQ4EChXHHrKhr16GzKEbYX9u27UQpwG1a5MmcPn9uissiZmFChcHOnSarqsegIejRw13GQatY2rQZcOGiX0sUn4tqVaojQrjwRnC1nV19FCxU1NC8U8Gj1vIZs2ZjlKdJoObakTRJcmFayYhyPqBlllSpcjXDEubm6oYY0WKCAgrn3VJlyhvzRrESpcEkAiRnZ1e4uTaT7UaNGsu9p1KKRGFNzXm0LsaLG1+sJjzGd2qemZlv0tRRzst4QeuMmRAlUmRsM1uey5WzEeUp15zXb94iQ0ZrwwI9cdIUcF2iJZDKNEvPBBmAeY2lcMh7xPvr4Ogs8/IkL5N7OHmevn1MQqVqo34puDNJhyIqaPv1G6B2QWXp0CHDwNinOrb1ZI5h/AyJSplChYqKYtU07szg+0vinFCjZh3Z5j/yAIyhJY/HuCgK33Pne4t3iP+4UsayUFhXFCtmHLEOv3v3Dl269TS8CigMUiil8ErrDGMBlTLsjM9Z4RPpakeigKnccJu3aPNVRbU6t/79PgR+WgDp5d4XJUuUwugxY9G3b3/RiviPPaCmzcSsmh5EDpELKRcNmui2btshQop9w8Yyek6unDgYV0Li4mFtnQWHzOZ2z9FjxF+PfZAY9D5x4mTZ5r/GjR3EIsBtujM1dXCRBZDmaEreXFSoaebCSE1mYFS3rp24GgRWx/+xbdt3yvX2NGubeXzi5ClSNs3sQ0ppP2yYsKJp5XG673DSX7hoKdKlToOM6TMZiwg1lxHDR0D3bqZJn9aOmNGiGwxK/fr2iB83vsEYVapYGalSpDYEEE6WcWLGhl29+n6GSiY5QrgIhsm3fr36iBopqsGENLRvhNgxYkobuu/wHnoMGCj71JTTzWPXnv3o2KkLIoQNBze3Fkb/HFOSRIlFkGAhrzdZkqRwcW0GMkixY8XBgIEmQcJoZGaouQCoQGUKE7ROKOIk0rt3H9lt1qK1MCXqGIUhuiuQccuZK6+4T6ljAwcPRYUKldC2bQe4ujYXK9PyFaskFoDPIZ+kzp27iCVHTUK0clAzPHfufLEcsS8y59n/l1208syoFDZ0WPTra2J+ixUrifLlyuPpM5MbiDq35S/bjDcLfCwnU+DhYcKUJnVqPhmwS8bOrn5DkAlhTBE1+Ozfv4sWJ3dq8Nq374R27ToiQ/qM4P1XxMma942afkuikEQNqSJaBvMXKIQbN27Ie1mlak15JnmcDDSFA9s69eDm2lwYKQoHT549F+ErRtQYcHc33RPWp0WuqYMzChcqYmSTUu8qF7t580wuPm7NWoJWS0V79uxDgQKF8PSpKYi5VOlyOGDW0k+e5CXaP1VX/dLdknFoU6f4WinmzZ0vVgflZkhXlKZNnFQTP7+s4+DkapjaGfhKTfCOXSZGjMJBqVJlpc3KlauRJ08+dOjYWawbqVKmBp/zgIjaNEt3vPLlKsCunkmAohWObgaKuOA3a+b77ly+fBVcRGeZGXW6AHJOpPBIxvfixcvCNJJxy/5PduTJnVe6opUrZ448oGZVUYWKlbF9xw45H7ctiZafNm3aShHnBg+PwcZhJs1wcTExUaPHjgOZQBK162QOyKgp4jvBeDnSrFmzUbOmaZv7Bw4eRNlyNsLQ0e2sUWMHmZN5jBaEAwcPyzVJY4t/tWvZGoyNzxkfib9Qh0uVLI1yZcuhVas2sClfQd5BusVSofK//+UwLL+0KNB9jDRlylRZY6idp1UzdszYqFPHVnXp55cKmTChQoMKGCp3+JweNrtMqoq0UG3ZanKn2bTZZPHOmzuvCCSqjvrds28/WrVsLe8hGWsVjK6Oq18yp61am+4Hy5YsXS7zETXjpHp2DbHKHGviPxZF9UFlCdc49sN5mcq/4sVKyuEVq1ajSvVahiWGLnDW1tnElYYV2nfsIpY5btMCSR95RYxl8Rw1WnaptChbtjwaN2oq94Brj7IGFCpYCP3NDCndhXmvLl26YsxnTFJDi2/xYiVAqwSJLrL0RlBuNtdv3BIFx/Zt2+U57+HeT2JzWJfzCBUZSliRDsz/6BZLC4Ei27p2qFKlmtoVZeMSs8sN3eO4HlAh1LBhY2GU53svlnmdz74ijumcOQFDfbsGcHR0kUNkoGmNp1WIRAaWRFfRVClSypzEfSawyZI5m5F5j8pVehMoatmyDUaYraNr165HmjTp
QQUEkz68MVvM6EJKRl8R1whaKElc6zhnbtpk4pU8PceIULNqtUmzr9pY/tJF2tHZDfvMsYi0ynJMao5u3bqNKJMt26htCpFTp/u61nI9s3Q159quBBK6Q9F6o6hunbqobLb4UkCh4m3aNJMyjpbMcRNMWLI+504qNjhnULHK9Yjvd0A0eOgItGjVxjhEt1t6AXBuJ49HgZDW5nx5C8h9VkJ14yZO8o6xIV3fqKg+bXZNHeAxSCzjE8abEsTQw0HTr0fgpwUQuvL0MWvFOTwyPBcumNx0+PCQnjx9LowR3WIsgz9pXqSGm+bkjh07oYn5paJvdPGSZQzN6tJly0X7cvCgyd+XLzGle0XUqo21cDWgm4l68Cn8UANLom9v+XI2olmhiwzjBm7cuBngAqj65mLKGJXvIWUBoXZS0SSzAEI/fdLgwcMQKUIkCdhXdahdHzNuoowxfboMxqRLN7HIESKhfdt2UpWLbvw4cUUbyALGqSSKn9DwGa5gU0lenus3TelGaU2IETU6nP1lWOGiETN6TGzfYWK2GtS3R9RIUXDrjsm1jH7NFHRevnojmloysgMHDlLDlQWDWiS6lyRNnETc4NRBTupcmBjjQ6KwQ3cuavlWrFglJl7llqHa8JcTQIOGTSR2gfv0C2ZmIrUIOzo6wb2XybWNpla6ylgSZVIKwEWKlhTXIR6j33/3Xn3EVMz7vmjRYll0VWY0+jkPHT5KfGe5MCrhh25jZFJ43X1698Xrt+8kQNXV2VWCSukvS79Y5XdPJp4awESJkggjZjkutU2tzqhRJusAy3gNA8wCCCda+kJTADl4+Cgo3FNjThM3hXAypiqOR/U3oL8HalSvJc80Y5W4EN2+c9dYTLhQ5sqRG2lSpzXMy2w7YMAgcdlS8VLEt2DBwjh//ry4Srk1a2VkpaGgSWaVftTz5y3A8mXLJQ5K+S7TEhXKKrRYQnluErVMRYsUEzc+tlNEgUrFzpQtV9Fwm+Nx+q3T/e7e/fsiBNNNY4dZC8qFnlZC/0QFBhkZ3lNFZAJsKlYVDTXLGFjOhTIgunTlqmjC6PNNogBStFhJIwWot/diQ0lB4ZtMCN9vno/+3NTyUhi3JPqr16hVx/Av5rGmTR1Rz7Ye3r77INdh6XfO+IqOHToZXTx68gz58xeS2DO6M9ItgVYduhUp6t7DXbTVLVu0NMZ35MgR0WKTqVdE17ZevfuB1hFmx7MkCkJkNkg8RhcuRWQiODeT6HrFBZnEhZpaRC7wisZPnIz6DUzKI/p6s19FZJhpPaQfOGO52rbvjIuXvx7gT9cICjUHDhzAmLHjUKlydWnP2LzSpcti9OjR4PvH4G4qDKhAWL58pViOlI88lR606pLoVtWoYWOJZ2DMGtcFas0DolEjPeWdp1sSiVZcKsksiTEuZJrorsd3OGO6DIgZNToWLfaNC1u6bKWfWBMKMRXKV0Dc2HHx6LFvpjDVLy0oSuhjGa0lZHL5PpD6DxgozzK3p02fJRZ9OWDxj+tgpUpVjDVuw4aNhtsU3U6rVq9lZHyjS3OBgkUlPpBdkClV8QdPnjwBBVb66nM+ZEwh4xNJ9FyggEMcKezTUsE1hjjly1cAdOslXec6XqwkLl++LFaO3n0GyHzGeYqxFLTa0l+fFrhyNpVw957Jz5/Wm2zZsmP3rt2iOOzctedn1h6LEAs5F/9NnTrNcGnkPhUmtWuZtOp0IaXrKJPkkMgT0ALmvdBb3jPGmjKGiYx4L4t1Wyqb/9WrZydubNylSxstKnQxJDHuoUPHLuKRUc+2LipXNgk+XFsoFCtLBC1SdEFTxHWSQoMieiZw7U+RLAXWrjNZlqnkaGCRRIPvE4USEplsKo4YI6SoVy93RI0SHVQgKvcxdYy/VHbSMsgEOSQqsWjpUZkAmzVrjhYtTFZUy3bcpmsa3e8UUaGorOgso2JXJdqhcom8mSLbOnWNOYjWG1qWGYtEops+XbwUHT9xUlzgqHzhGkBlI5VyARHXSVo6/NPUaTNEAF63dp3MxVxLqeQjERcqpinkk6hQqVe/oR9FA68jTOiw4pURWMyYdKD//RACPy2AcFFkqlr/RM01BQy6BFF7dvnKNZDB5sOiiIwD3UxI1JzxJSDxZpcpVwG0JJD4S5Ph2XOmVJDMJuJioW2n1qN9e1/zNF23PAaatHlXrl5DqzamoFL6liphRDoG5KEO7BshTM+2fr3fAGfV9ku/NM2TaWXaT0V8GVk22tNU1q1rd0SMEAnXzGlqe/ToiYjhIuDVm3doYFcfKZOnxE0zM8d0thRAOnYwXUfNGrUQP2481TWcHF2kb5M9CGICp0vUCgtzf6zoMdHGzGyohkuWMYOLFWjdIBUuWFgmAjW5N2/WApHCR5RjdBdhXWXBefrsGWLHjIMO7drj8bPnCBs6jGjcVN9MzZw0cVIjsN3La5q0V/eU9ejj6v/F5sTUsk0HYaJZZ/uOXRKbovqlVm/o0OGyS6aKrjKKyPjt3X9QNCWMXThsTuNIzbHHoKHy/Km6/KXgwEDDIkVKCAPDMjJ6apKiXz6JjDZNsucvXsagwUPEgiIHzP9oyuZCoOILOKnSnSmgzGt29RoI46TaUxtLBpnERb9q1Rp4+eKlbPO5Vb69PM7+GOhnSXR/cmtucjlR5U+fmlwZ+Dz06echMTh8hqtUq2kEPFIA4UJHxoFEpjp/gcLC1FAB0N4iFuXatWuoULGKH396aqM2bt4qvuF83kmMHWHMDbHYtduUL5/aNbpvMMsPidqoTRtN+f2pLbO0QFLIsjSl0y3p+AmTNp8WgyZNHKUP//9o8SxfvqJR7DVlKui3TmsBaeHCxeJ7blSw2KC21c6+sYEr72X5CpWNd2L9xk0GM0Gmk1piS2LQrv9kFHR/4HOpmBPWJ7OiXO+o1FD553mMjFCqlGkMZoHBxcVL0BJ2U5gCBlRTCKTLDRk2xo3Qj5s01WuqBIZymwkL6JOtAmVZRusQGV+T+1t8g5FmYogixUoa11mubHm4OPu6qVHDqlwdOWermCkyXoUKF/Uz59PtT823oz3HoHGTpuIawiBfWrAoBCnifbJMZUrlQEABs1w3hgwbIfFsffoNEJcO9sH3oUzZCqD1UhGvjckWGKBK7T9jPUiTJk0x7h2xV3Fvqh2DygMiBsJyrqPbrCKuZYr4Xu07cFisQbw+Cs60UseKHkO8AVQ9Wh2ZuteS6HoUL46vtdryGF2iKlc2xQSxnLFpZPaZwIXE+Y3WOFpMOXf5j4NgHTKitCBbkkqiQUaRsXJKmUMGiy5YKq6mU9ceIONH4jkpdLXr0Al29e39CNOMFWjs713kPP3w0ROUKl1WBEb2wTWiSuXqYhHl2Nu06+TnXWGyCK69GzZuEvcbPluPHj3Cnbv3UbBAYZw8aRqLq1tLPy6t1GArodPyOmkBoaVGkaODMxqZXfBYRkXWrl0mty4y0owBsSS6O42fYIqDsCxXGfJaNG8p3hM8xoQwdN8mU0sif0JlGalzpy4iJHN7z569okxggD9p6LCRhoss9ynIKQWnj4+PoUQcOnSYxIe
wDi0ylhZSrhG0wpL4HJQtawO6DZOUoEzFDAUdS6uCVBCF8FO0bN1e5mmW7d67T9wMFf9Ai6alUlm142+Tpk5YsdIkxFGpQFdJS+VvlcrVxILDumvXb4SDo++c4uLkIjEmPMZzFShQBOvMQhaVS7lz5jFORTe8woVNgtoZHx+06+B33jUqcn73XoRsWf+H5+Zgfx6jO/XmzVtFeWOpIKJbskpq06FTNyN+jIokrrfK7ZWutwoPZq5r5Y93sjy/3v5xBH5YAOFNZWBdzuw5UahgYVBbNHXqdHEbYBAoffm5uOTJm18CMTlEarzjxI6HihUqioaTky21NdSs8BsZXDxo+uViFjN6LHTr1lMWHsZScEGYNMlLtFYMRkuaJJnEhrBfBqnypaQ0y+xLFGqYDYMaLvpeUiNNf2MSg4kZ7DlixEjRvm7bscuY4KWC+R/HytgHxjHQDcvymxr0ORw+YiT4TQf/tHzFStF2crxFixTF0mUrZNGcMXOOLC70ZaUGnBoOB7Nlhn3Qj5ttBg0ZLvW4PX2GyT+S/trcr1a1ukwwdGXiPlMMk6jNixolGmxt64ofKd1wKIDMN8ck0ErA+mlSpvbjnsHJi0GEpUqWwsiRnpIiU0Wc6wAAIABJREFUkloDEk3KyRKbzrNu4yb5rgj7KFGspCwo9J3nPgMwR40ej5jRYyBr5myG3yQnMdMYvUzudvfuIXPmLIIn3X94bdQQMghOES0OdANJlDAxRo8ZL64UdJmKFCmKZIDh5JAubXrkzZNPtGeMQ/knW3bUqllbtCdcBOm7y+sNFza8+JCqzF4089esaSv+2ox32LBxiwg/tC6kS5dRAufoP0sfbFp5yOz17jsAAwcOlmd64JARYr3jc09NCZkF3kcKNhTO6Ofs1ryVxDXwGSYmXHT9EzO+MOCZwbiMlaBlgVpaPkvMYhY2TDgMHmyKXxo3foIwb4xn4jUxtoqB9/5p7PgJaOrojBEjR4lvPzWIdBGha5ZigMjQJUuSHAniJRBtHZlp+sH3HzAIY8eOE8Gc2biY0YTaNcZRzZ4z37g/FBwpHJBZ5vPIgEgG1ZKJIbNPoYsB18wvz4/OkQmiX+9krymiGXz27JloUpl8gZpEBuFevXZNhCBaAKZMnQYG+23dvlMWCDK8fKYZc0TNF2M6yLipBdYSAzJ/jEmhNm7yZC/5PtGyZSbNFjPokbnOlNFa4iZUliTVntfLAMn6DexBYaR/Pw9EjhQFvfv0k+vr2rW73BMuciTGFdAlh/MHkxwcOXbcsDapPl+8fC1zBpk0Pl+MPyETTSKzFS5cBHFxUMIln3smArCxqYzhw0dIXMnceaZvRqg+1S/dJFRWKjLhZACiRokqgjoZb8Z5UfinpZUBwEwQoIjJLzjHUogm4zN4iEmQ53Fqiukuw/mRzzwTN1BTSuLzTPzojka8TC6xWdGv/wBwvmDyDAbFkhgMmipVGglSv0Y8+w9ApIiRZV7icbplUCDt27efxCKQSbHMTCadSDxRd1D5M3P2HHn/GAeiMrwxaJ7uQZyDqT3mHE+G1L2Xu8w5FCBo+aPW2MoqlMQcETcynXQT5fzDeLB16/1+6I4MFYOwbWwqSD+0tKhYGzUuxp4wCQExtswktnL1WkQIG17a0e2HAnrXrj1EqbZt+y5xER42bAScXZvLvVL9Wf7S6kNBn88ex0grpIrtUvWcnVxFiOAz8yWaNm2mMIqcD+iuvGu3KfEC/e3p9jJ67HhR4jAInRYkEufj5MlSihKJ7witI3QbpvsiBZdZs+cYbnd0saRbLL+xxf45l7CMColwYcKhUIFCss8MgmHChBM3Np6DzxHdooYMHSbzGV0IiTmxypYlG+iOdPzkabHCcO2guy2fN2YiouBFl2b2QeWGf0sw1wbGG0WNHFWehctXr0psKPkEJmNhJk7GXjWo30Ayc1HjTaGB56B3Au+78s6gZpwpmEePHiMJAZh5kfc6UYKEyJkjlygKqJyi+y2tmuQzaO11cnQSrw1+8yVt6rRiMXZ1cUMoPoMrV4n7LL0VwoUNJ8HxDAJPliSZuCUxWJxZJ23rNhCrE+cWWtMYu8PYUM7btC5zbqXXAl3rbty8LRkJiVWXLl3l+1JcN6mko1DTb8Agw8qhnhW+B1xfkiZOJoIDlWodO3aWd5RKqkdPniBDuvSSdEK5lam2/GV8G5UajKvjHE43Ue7zPeSnFxhXli5tOjDQnFaU6NFjilBLpVTcWLElluPmrTvYuWsvwliFERdKrn18fmhZJU/IzHe0aHBdpbBM63WG9JlEgWgpTKhxsYxWV1rfqPQlz8XkDSQqq+nex3tJHocf2uR95tzJZDxUiEkSk/ETkSRJMomXpEDO+ZjKpsWLlkhSE+JJ7LgO+8/4qMahf78fgR8WQKiFoFtNyxatRFpm8DljK8gEtGnTXrIb8MXli6wmcWoS6HNJ1x9qWUn0XWcwJ/1oqUFgFhD21aVzF9G6UavC+A4GIjGglYHZDHhnkBcfLmoE+w0YKBMFg20dnUw+muybExv9TZs0birxKQoe9kPzMs2FKthMHVO/ZFzJgDJzFK/H0q2B10PTe0BfS6YfL90tyJxSMKL2kOkEVTYKMsnM7uNfw8APQLk2ay6Bl5evXgctJEoYoIa8Tdt2IuRxUejevYcEXCpTN8dMoYgLH7P+0IzOAFRFNN2zPy6a/oMgqXVidg+aeZX5ne2oVWQ5X2imjSQjS3cfxk+cN2u8ySTSpYgmdGr/WZ8xNyRqI3k+ajLUpEHtMl94PhNDhpo0uGqM/GXCAVrTHB2c4Ok5Fpu37hC3kKYOjpJVipp2N7dm4ndMRpV0/fpN1KltK5ou9ZFH4ta6VRs5l2WgJp9HamPt7RvLmNW5OUaawhlcR5cDPn87zRp8XiMnN2ZrUUTGkQJ2pcpVDSGRghQZcmqmmP2DvsMBEd0Im7doJRmDli1fhbZt2sk7xIA3fq+iSVMHCVRUwguDQ/msduve009KSP99U8NboVIVyTrFY3RDIQNGJQGJi0nbNu1BFzYuBlyAqeVes3aDvH8UnEl8prlw8P1isK/SnvIYLWX8IBuDKC2tO1yMmUWFWc5IbEPhlswL41jUO0YLKP3quUAqwZDaY7plUbOmnh1awaidpEae/sV8bphNy8XF1cBbTmTxj4waGX2eb7dFimsqM9gPXTx79e6LuxZpeFVzuu7QesM+GMfWzM3kVsCAfCouGjdxEMFD1We2NZsKlSSuSZVZ/tK6QCUImbp+/Twk0YA6TqGM95huopYY8ri7e1+512TiAyMml6CQ6N67r2Racu/dR+ZOugPyuV+7bqPMo0r4tOyL74ZNxcoSBK/KP338JHMWBU7OS8wmowIwWYfXQyaAiRwU8Z3nnMGFmvdHERkJvj8quJP3jxYRFfDLenwuyLzSdelLX5amqyQVP5xPGQPFYH8KRyrFMudjZqnhc6fSa/bs2Rsurm6CxYOHj4XhcHBw9OPm16fvAHlvKUj6J1o5qN1v2bK1xDvQAkyXGTV/sf7W7TvQvEVrGRtT5SoaOGgIunbthkGDBs
szzoBkWq5Jx46dkDmHQbnMFBUYMW08Y4I4ryrrpGV9atJp5VV+7JbHLLepwaUnAIUgMvkkrh3M2sd0v5zTlMaex4YMGY7GjZrIPMMUrSRixeeMAhGFQVrxFf501+GHh5mhkBmeSEx9S2aW8wcFQrovynzWp78Rn0glJYUsJqxQLrBsS6ZYJQWg4OLsQr/9roZVkEq2qtVqyPquLNNyUvM/Pq/M9ujk6CyxDazPdZuujQyspxsP5wF+oFe9d8y4R1dtXoey2LI7PsOMGWCWuWHmLIcbN20WvoWuifRaIL9CaxbXON4n8hqcbwcNHiqCGK2HnOdpvejQoSM4v/J6ab13cHQS7Bkb5OzkIpgpaxyVR1RcKBcrWkHJR7i6uokLLeNE2zC+p2NnsWxSodHU0eROptYcruO89/7Xel4bnwXOCw5NHYXZ3rp9F/js8j3h/eJ6yee+dau22BhAGmv2QWsMlRiKKBTy48tUUFBBxEQzFK44DzRu3ERieenaynMypuro8VNYu36DfKzZ1a2ZkXmTa4J9g0ZixaKgRWLMGXlKrvucQ5T1Qp1b/ZIP5Htbo2Zt4QtVOX/Jr9WsVUeeHaX04drGPqko5XtKQZp8Yu++/cXqTNfk9Rs2S5IbtTbS4kg3O8t50PI8evv7EfhhAeT7T/X7WtBdJKCX7fedUfesEQjeCOzYsUN8pIP3VQTN0b969Qat23aU+KA/OcKjR48aPtbfe14y8rt3m2LBvrftr65PQZBuOP6J6bdVinf/x/4r+zt27TFcZ773mqmwG2B2Tf5aW34hmkoESyJTSCWOJo2ARkAj8CsQCPYCyP37DyW/P1PzKp/vXwGM7kMjEJIR4EcC6U5g+bGvkHy9f/LaqP2llpcuo3+S6E7DgNeLl0xJQL713Ixj4bdS6L4WFIiaeeb7Z8we3bOYTYlaTGYIpBvEf42YgYcJLqhpnz13Aa5c/fIHML+EDd1KGCdJS5Kyrn6pLsv5TR1qlG/fvi2pten+QwGQFnNNGgGNgEbgVyAQ7AUQfriHMQHMnqOyOvwKYHQfGoGQigCz1TBANnWqNGKGD6nX+Teui+5m/CZJ9v/lEN9o/z78v2tMjHmjq0KaNGnl/HRJ+FZi9hnGZdAKojLgfWvb31GPwevTZ86SAFJ+fJQ+5j169TY+Lvk7zvkr+2SKUUu3rZ/tm/EL/DAv43IYE/AjRJfaHDlySXC3l78PBwfUH11f6CrLAGcmPejQsasfl6mA2ugyjYBGQCPwPQgEewGE/skMVDL9fh70+z1gBPe6tAAF5Dcc3K9Lj//XIkAmmUGbDNQLLAPcrz3rf6M3Mp4yF714KdYl/8GyvwsFvvu8lwwg5vm/x1LAzEPyLDx6HOAXun/XmL/WL+ORLl26LMHw/KhYcCEms+DXun8VMYvfxYuXwKQbP0qM2aCPPdOPMmXstxAzFBJ//qksdt/STtfRCGgENALfgkCwF0C+5SL/K3VoHrdv2FQC4v4r16yvUyOgEdAIBBUEGCTL9Kf8ONzXAsWDypj1ODQCGgGNwN9AQAsgfwP133TOOXPnI3z4iBhm/rLqbzqN7lYjoBHQCGgEAkBgy7YdCGsVGuFChZbvIARQRRdpBDQCGgGNAAAtgISgx6BWrTqSh75w4aJ4Y/HhrBB0ifpSNAIaAY1AkESALkv8KG70SJERI3IUiV1hbI4mjYBGQCOgEfgcAS2AfI5JsCxZtmIVYseIhQRx4iBi2HCYMDFoZLQJlmDqQWsENAIage9EgN8tih0jJhLFiyd/MaJEw3LzNzK+sytdXSOgEdAIhHgEtAASAm4xP4LHj7mFsbJC+tRpEDlsOBQpXAyPA/hSewi4XH0JGgGNgEYgyCFQtVp1RIsYCWlSpES6VKkRNWJElCpVJsiNUw9II6AR0AgEBQS0ABIU7sJPjoFfPo4cIRKSJUqM1MlTyF8Yq1CYNHnKT/asm2sENAIaAY3A1xBYt2ETEsVPgASx4yBtylSiCEqaMBHixIyNBQsXf625Pq4R0AhoBP5zCGgBJJjf8nfvP6BGzdqIEi68CB60gKRPk1Z8kPPnL4gnT58F8yvUw9cIaAQ0AkEbgbJlykn8XfJEicX6QSEkVbLkUla8WEnoWJCgff/06DQCGoE/j4AWQP485r/0jIuXLJNFLmr4CEiZNBkypUsvgkjsqNGkfOiwEb/0fLozjYBGQCOgEfBFYNLkqYgYPiJiRo2OZImSiACSIW1a0ALCmJBQVlYYNmKUbwO9pRHQCGgENAI6C1ZwfgYePX6C4sVLIWb0GMiQLoNYPuiClS51amRMlwFxY8WGtXVW3Lh5Kzhfph67RkAjoBEIsgisWr0OnqPHYsfO3Rg0eChiR4+B5EmSYuy4Cdi2fSdGjhqNNWvWBdnx64FpBDQCGoG/gYC2gPwN1H/ROZ88eYrlK1Zh34FDOHfhEura1kPoUKFgW6cufM5fxP4Dh7Bq9Vr5MvIvOqXuRiOgEdAIaAS+gMDho8cQIWw4xI8dF1euXvtCLV2sEdAIaAQ0AloACUHPQOtWbcTtys2teQi6Kn0pGgGNgEYgeCCwbdsORA4fAQnixsPRY8eDx6D1KDUCGgGNwF9AQAsgfwH033VKV9dmIoA4Ozr/rlPofjUCGgGNgEbgCwhs2bINkcNHFAHk8JGjX6ilizUCGgGNgEZACyAh6BlwcXEVAcRJCyAh6K7qS9EIaASCCwJbtmoBJLjcKz1OjYBG4O8ioAWQv4v/Lz27q4ubCCAOTR1/ab+6M42ARkAjoBH4OgImC0gEJIwbH4ePHPt6A11DI6AR0Aj8RxHQAkgIuvEuziYLiBZAQtBN1ZeiEdAIBBsElAVECyDB5pbpgWoENAJ/CQEtgPwl4H/HabUA8jtQ1X1qBDQCGoFvQ0BZQBiErmNAvg0zXUsjoBH4byKgBZAQdN+1C1YIupn6UjQCGoFgh8DmzVsRPnQY+QbTIR2EHuzunx6wRkAj8OcQ0ALIn8P6t5/JoamDxIA0tG/028+lT6AR0AhoBDQCfhFYu3a9zMFhQ4XB3n0H/B7UexoBjYBGQCNgIKAFEAOK4L9Rv769LH716tUP/hejr0AjoBHQCAQzBE6f8UGLlm3QsVNX/SHCYHbv9HA1AhqBP4uAFkD+LN6/9WwrV61B9x69sXrt+t96Ht25RkAjoBHQCGgENAIaAY2ARuBHEdACyDcid/q0D+bMnotZs2aDmU4siVqvTZu34PyFS3j37p3loV++fePGTVy6dPmH+331+jX2HzgoX+l9/fr1D/cTWENLDHzOnsMZn7N+qr979/634+TnhHpHI6AR+CMIvH7zBuvWb8DmzVtw6fIVrFu3Ht7ei8AvhP9p4rxz/sLF7zrty5ev8Pjxk8/aLFy4CLNnzcG8eQvw7Pnzz44Hp4KHDx/h7LkL+Pfff4PTsPVYNQIagRCGgBZAvuGGzl/gjdgx46B2LVs0bNgYAwcPxfgJkzFnzlxpvXrNOkSKGBmZrbPg3v0H39Djj1epVq0m0qbNiHfv3/9QJy9evkKlSlUQIXxE7N3/632UL1y6glOnf
Yyx/S9bdkSNHA0fPn40ytas24gL38kYGI31hkZAIxBkEXj6/DkGDh6GbFmyIV7suKhdpy5cXJsjcsQooIvox08/z/ROmTYTVMR8jTgfx4+bwM/c87U2K1auRn+PwUa1bdt3wK5+Q5QvXxH2DRrB0ckF6zdugYfHIMyZM8+oF5Q3zl24iJmzTWsVx9mufUfEi5sQd+7eM4a9dftO3Lj5dUyNBnpDI6AR0Aj8JAJaAPkKgBMmTJa4ikGDhvip2blLN8SIHgsvX76U8qKFiyJZ4qR4/fb3WkAOHz6KFStX4e1PWFo8BgyUa9qxc7efa/rZHWo8GzRsiqvXbxhd7dmzDwsWeuOjWQChm5ijsxtevX5j1NEbGgGNQMhCoGePXjLHbN+xSy5s2LARst+0iSNoZfhRmj9/IbJm/QcPHj78ahe7du+Bt/divP/gq/z4WqMNGzdjxao1Um3J0uUIbRUaDRr4TepBxj2zdVZUrFA58O6CiIWhWtUacHJyMcZ6+sxZLFu+Ek+fPZOy7dt3wto6q1isjEp6QyOgEdAI/GYEtAASCMAbN22RRbNKlaoB1lq4aImxmJYpXRYF8xfE3gOH4OjojClTpvlp8/zFS1nIihcviSNHTV/IvXv3ntRt2aIVLly8BE/PMWhg3wiv37zFzVt3YG/fGC2at8Kjx4+lry3btqN1m7bYuGkzPpgXVSoU+/bzQNWq1WFv3xC3bt02zkttXokSpdC2bQc8e/7CKKf2LnrU6Ni3/6BR5n+j/4CBKF68FA4cPCyHrl67Ltaf5s1a4MTJ0xgwYCCaubXAg4emsZ087YNy5Sogfpy4KFumHHbu2oMtW7ejQ8dOWLXatKBv275TtKIJ48VHj569sWzFari6usHBwVEWRJ7Iw2Mw6tSphxOnTvsfkt7XCGgEggkC7r16y9zJOYt09doN2Y8fNz5u372P6dNnwrauHXbv2SPHXVyaYey4CbJ9+85dNG3qhBo1amH8hElSxn/Llq9C5AgRkSBOPLRu2wE3bt6SY1euXgctw87ObkbdmbNmo0MH37ln67YdqFWrDkaPHouDh47IvDt16nSjPjdOnTqNUZ7jpOzl6zdImjgpUiZPiYBsNkeOHsfgocP9tO/QoTMKFy5qzHe0Unfp2h2NmziAyp558xfCzs4evD5LontXnjz50Mu9j2XxZ9vLV6xCgfyF0KVLN+PYkKHD4ejohD37DoDrEa30q9esleMct7P547S5cuSC9+Kl8Dl3AcOHjQDPSdq9Zx+SJUmOeLFiw82tBfoNGAQKifymVI+e7lLHx+csipcojWXLV8i+/qcR0AhoBH4FAloACQTFtm3ayaK5as26QGoBXKyqVqmGRPHiY9wEL9jZNZB2i5csk3bU+BUtWhzt2rUHF8b/Zc+Js2fP4c2bNyhYoLDUbWDfGN5LliNlshRImzY95i3whnvvPnJsktdU6efKlavgAp4ubXp8/PRJykqUKC3no6YvU6bM6NOnv5T36dsfefPkx8qVq5A+TTpUqVJNyvmvfz8PxIgaPcA0kS9fvUa58hXg4OCE2bPnorxNJVmYaekpWNA01tq162KS1zSEsQqFli1aS790PaMAFsbKCi1atMLNW7dB7aeVlZVYhliJefHjxomLksVLYtOmzeKHXKJ4SamzYIG39ENriaOTK+7duy/7+p9GQCMQ/BDo1bMXQllZYc7ceTh77hxsytvIez5j5my5GL7vnBtq166Dm7fvgfMYlQ8vXrwUZpzCwoIFC5ExgzUG9PeQNidOnkKmDBllPiMzzrqbNm3BP//kAIWJnj3d0cI8H9GSwf5TJU8pbalASZEshZQNHzVGlCk8znlT0cGDhzHKc6zsenqOlbqVKn7FygHIXGxXrwHq29lj4UJv5M6dD5u3bJV+qlWrIf1UsKmEaTPnIk6s2KC1nEom0oyZs5AlczbMnzcftWvbolWrNlLu/x/dgPPmzY85s+dIva5du0uVkaNGS/+5c+XBjDnzkTxJMmTL+g8emeNYRo70RIQwYVGjek2c8TmHh4+eIEXS5IgWJZpYoW/euoWCBQrJ2sVz0C3XyclZ+ixVsoyc48GDh/AYOBjHj5/wPyy9rxHQCGgEfhgBLYAEAp2To5MsosdPnAqklkkAsSlfAfFix8GDR4+xdv1GmcAHm922lixdgbChw8LLawqOHT8hx2gxIfXtN0D2lT9xofwF5Zxk4KnhixYlKtyatTDOX6eWLYoWKSb7+/btR/RoMbB7z17Zf/v+veHHO2zoMPTp00+Eo+z//A+pU6Y2+ujf30MsIHv27TfK1MbhI8dkPJ07dsb58xcQLlQYZEyfUQ6PGOkpx2ZMnyn7ieInRPmy5VRT2DewR2irUNi1Z5+U3bx9F5kzZUaO/2WX/fcfPiFH9pywTBNMTWKMGDExwGOQ0Y/e0AhoBII3Av37DUCEMOFQs2ZtOLs2Q9Ys2TBjxizjoq7fuImY0WKADLqldXb+goUyx+zYaXLdKlu6LNKnS49PZncmzrMZ0qU3+mnVsjX4zY2jx45j6pSp0nb27Dly3DqjNfLmzmvUrVvXDgnixse9Bw8lUyAFkH79TAobKolmzZmPK9euS/0WzVtKX1UrV/VjAdmzZ68kIbn/4CEOHz4iqXZPnj6DmNFionOnLvA5e1aEDAo7HPJkL9OYerv3ln6LFikqHylUMXzFixZHhrTpcPnqNTSwa4CwYcLh3n2/7mUUnmLHjI3ixYrj2o2bKF2ytOyzQ65NEcOGR51adaT/BvXtkThREpw4dUb2z52/KPi4m60ZLKxcsTKiR42G/QcOSZ3q1WogccJEsq3+cX1KliQpnjwL3gH36nr0r0ZAIxD0ENACSCD3pFmz5rIILVkWuOn55as34ISdKkUq6W3W7LnSborZcjHScwwSJ0iEUiVKwd6+EerbNYDn6LGysLVv30nq7jcHhKdPm14Y/jfv3ku2qrix46J5i1bGKCvYVIRN+YqyP27cBCRKkFgsDUYFi43zFy/DyaUZokaOgtKlShtHBg4cLIJLQALIxs1b/8/eVUBHkWxRdoEPLO6yLIu7u7u7u2twggR3d3d3X9xdgru7u7sEuf/cN1PNTAgSSFjYvHdOMi1V1dW3q6ueNyKGC4+M6TOgbt36qFi+Irp2tbkGdOjQSfrKLGCvPN4gXOiwKFG8pNVuieIlRGChqZ909vwFxIoRC2lSpZZ9Lqy03tBa5PHWZsHhCWr9yAwwgJ3xI+/s1h2ppP8UAUXgl0OgXdv2CBokKPbtPyhuqp7dmKiICfh7QHSxM+bmBmnBCBwwEBg7RipbphwiRYiEK9dsrqW0HsSNHQce9piyRo2aIEyIUGIVqF2rjgg8Gzduwv2HjxE7VmxkSJfBNA3OnbFjxpJ5d9bsuTLnDBkyVM7fuHETLVq1tbLz9enbX85zTnOk/gMGI2GCRIgSKQoSJUgsyh8qnOiqlTFDRnGbrVqlGrp27Q66x9JFinPb+PETpJmUyVMifpx4sv36zVuxPuTIlh2uzVuhWtUa4oblGBzOgtu2u8u8WrhgIbRya4vy5SpI
EDzPreeX1/0HRIvmLaXNMqXLItgfwcD4FxLdYAP87h8tmreQff6jNSpo4D/gvtOGMTGNGjmKk2vYhImTpd8NGzUVN1tmN1NSBBQBRcAnEVAB5DNoLvhnsUzCxhTtuagJhORCU7hQEUSLEhVkq+fNXyj1xo4ZK1VGjh5r2x9nW4RMO1yUad3gAsVFk5Q4YWIRZOjWdejwUYQKEQpurduaKqIxzJvXZnXYtHmL1B012uY7bRUSf+llSJgwMRibUbpESXDhM8T4DcaAHDhgi0Uxx/m7fcdOabNHj16Oh2W7SdPmcm7lqtWyuEYIG97SvLEAtW/URq5cvVbKn79wSbSVaVOlkX0uYqlTpZV7kAP2f/RDDhk8hMSxUDuppAgoAr82Ah07dETg/wX+ZFZApucm49zHk+WzT5/+Evi9b58t9ixf3nyI/nd0y0pCJU682B8sILRUBArwP9Da6khHT5wElTmZMmS0DnOOjvF3dBFeGGDOeXf4sOFyninUqXx5b8/Sde3GTYkBiRr5T1AZ5EgjRo2Rui1cbUz96jXr4P+33yXmxLEct9u26yhlR48aLafSpkmLWDFs1mhmBkyYIDFogfgc7T9os0rTou2Zlq9YJQJb0yY2JVWF8hVF4cQ5lbRj5y749/c72rRuY1UtVLAQQgULIcIhD9IKEyVCJDxysHa8fQ+kSZVG+r52g21tshrQDUVAEVAEfAABFUA+AyLXovr1bVaQ1q3bWtlU3rx5g1FjxmHR4mVSm65PGdJlRNBAQfD85WvQz5mLW+9eNt9lmvX/CPwH4seNZy3IFy9dFj/g6tVrStnV9o8H/hkpMkIEDYbHT55hz559cs7F5UNwZZZMWcTHlxfmgkGXp4zpM+Lho8eSGYvWB6baZSA42zmkQN0jAAAgAElEQVRy/CTSpU6DWNFjWm4M7dvbFkWzSDlCcPnKNfEPpobv3LnzcmqBtPkCzZrZBJAlS5fh5SsP6Vu+PDY/YRasWbO2uF24tWkvQfV0R6OVJLZ9wWWZ9OkyIn26DHjwyJaBxVybPt900+A3QpQUAUXg10aArlGcA2np8IoOHjws51u2sGnuTZm9+/bD/+8BMGnSZLx69RrRov6F4sU+WFnJLNNCe+fefalCKzCv06B+Q9nnN5mYXOPKtesIFSIkEsVPKMep7OE8GYKxDy9fYfqMmVJv8KAhcr5RY1dxOZUd+z8GjlNRU7Rocdy7b7seTw0dbosPaWaP12BGv/jxEiBk0ODg9UkMjGdGRK4b7F//fv0lBTGFj9DBQ1oClZub7TwtFSS63TJWw5FevnqN8uUryRpi4jAu2zMNMhaG7VerWg1v3r4D52Pum/bYFvcb2vFhRrCc2XPIsQ0bbd+zKlWqtFhETpw6I7F75tr9+g1A1D//8va3VEx9/VUEFAFF4HMIqADyOXQAmdQ5yceJFRdFi5ZAteo1UbRoSYyfOBn8oBOJweZ0hUqVIhWGjhgt7gCRI0VGnjz5xUeYZaipYk76VKnSomv33iKI0I84U6asEgMxctRYzJw9Tywg9MflIjdi5BgEDxYC2bPlFKGC7gz0LY4ZIza2breZ2KdPn4lwocMha9YccG3hJtmnPDzegGmD6efbvmNXdO7SHX9G/hPjxk8Qv2RmZQkS5A8w84xXH6NiMGLI4CGRMmVqVKhUVdwMrl67gYwZMiFK5Cio36ARunTrieTJUuDPyFGtjCq02MSLGx8FChTCnbt3MXfeAnH1YsrKtes2ClYdO3RCqJBhMMeehUUOApgwaSq2u9tiWcwx/VUEFIFfCwEqRZiVL3XqtOJKmjNXHpgkGuZOPN68QfMWbogYPiISJUr6kQsp4yYyZMyCXLnyoVz5SjDMNuszU1TYMOHQr78tAxWZ/2rVqotgkTdvAbRo2Vq+ETJt+kyEChkaKZKnwv6Dh7F1u7tYllOlTI3hI8egfPmKiBQxEgoXLobde/eDc941e1Yt00/+UqCoXr0WsmXPhZq16qJY8VIoU7a8WEsYm2HowKHDskbEiR0PVMAw9oL3ycByxuk1buIq30dhPB1ddQcOHiZVKVyUK1sBsWPGQY2adTB33j8yd5p2ze+r1x4oXLgoov31N2rVdrGEFGaqChM6DGgVHz5qLLJkzoLwYcOjZy9bTN1TJg/JmFm+UUUrz+279yUhCud/E3e3fccOuT7TDd+zZzXkdRnIPnjoCPjWB2vNvemvIqAI+E0EVAD5yudOv9xNm7Zgy5at8utYjQvXyVNncOfufdGiMW0hhQt+iff+fVuaWpZnG/TNpfaPREsKv9R77doN3Lp9WywONP0z0JAf6mOGEi6+tERQG0iXr9Nnzkq+dsdUjiyzdes2K70v26b1ht/luHHzplyL8RhnzpwRAeTKlWs4f/EiTpw46aUAwgo3b9/Brt17pF3ue3h4yGLMOA72mSkruUDxHs+dt1lKWI79v37D5q996fJlsYQw4NQs7u/evQe/Kk+fa0fauHkbWE5JEVAEfl0EaMFkcDYZ93sPHorygvOMIzGFOOdIzmGHjxyVec7xPLePHT8hmaQ8fy+Ibktnz12wrLOmHuekdes2gHMO6fLlKzL/0aLAeZPz65kz53D33n2Zv3h9Zu7j/HX7zpcz7rm77wC/l8HsVkyZ7hXRCr1z126J3TPnL1y8hKtXr+Pq9esyZ7I//PP8hfadO3dh06bNeGBPuW7qe/5lPAjnekO8X6ZspzB0+vQZmUOZwIT3b4hB/kyd/ujRY5nHOc/a+vVhvuW8bawrjvV4T28d4vXMOf1VBBQBReB7EVAB5HsR1PrfjAAtSHv27sODh48km8sdX/6K/Dd3VCsqAoqAIuAHEKBSjHEjjNejZYgpeJUUAUVAEfANBFQA8Q1Utc2vQoAWJfonu9T/kGb4qypqIUVAEVAEFAEfR4ACCN3f4sdPhJ279+Lp0w8fsPXxi2mDioAi4KcRUAHETz/+f/fmqV3r2bMXFi1e8u92RK+uCCgCioAiIAhs2+YuGb2uqkusjghFQBHwRQRUAPFFcLVpRUARUAQUAUVAEVAEFAFFQBFwRkAFEGc8dE8RUAQUAUVAEVAEFAFFQBFQBHwRARVAfBFcbVoRUAQUAUVAEVAEFAFFQBFQBJwRUAHEGQ/dUwQUAUVAEVAEFAFFQBFQBBQBX0RABRBfBFebVgQUAUVAEVAEFAFFQBFQBBQBZwRUAHHGQ/cUAUVAEVAEFAFFQBFQBBQBRcAXEVABxBfB1aYVAUVAEVAEFAFFQBFQBBQBRcAZARVAnPHQPUVAEVAEFAFFQBFQBBQBRUAR8EUEVADxRXC1aUVAEVAEFAFFQBFQBBQBRUARcEZABRBnPHRPEVAEFAFFQBFQBBQBRUARUAR8EQEVQHwRXG1aEVAEFAFFQBFQBBQBRUARUAScEVABxBkP3VMEFAFFQBFQBBQBRUARUAQUAV9EQAUQXwRXm1YEFAFFQBFQBBQBRUARUAQUAWcEVABxxkP3FAFFQBFQBBQBRUARUAQUAUXAFxFQAcQXwdWmFQFFQBFQBBQBRUARUAQUAUXAGQEVQJzx0D1
FQBFQBBQBRUARUAQUAUVAEfBFBFQA8UVwtWlFQBFQBBQBRUARUAQUAUVAEXBGQAUQZzx0TxFQBBQBRUARUAQUAUVAEVAEfBEBFUB8EVxtWhFQBBQBRUARUAQUAUVAEVAEnBFQAcQZD91TBBQBRUARUAQUAUVAEVAEFAFfREAFEF8EV5tWBBQBRUARUAQUAUVAEVAEFAFnBFQAccZD9xQBRUARUAQUAUVAEVAEFAFFwBcRUAHEF8HVphUBRUARUAQUAUVAEVAEFAFFwBkBFUCc8dA9RUARUAQUAUVAEVAEFAFFQBHwRQRUAPFFcLVpRUARUAQUAUVAEVAEFAFFQBFwRkAFEGc8dE8RUAQUAUVAEVAEFAFFQBFQBHwRARVAfBFcbVoR+K8g8Pz5Czx99szpdjw83uDFy5fWMW6/9vCw9rnx7PlzeLx5Yx17/uKFUx3rhA9vPH/+HC9efOjbmzdv8ezZcx++yq/R3Nu3b7+5o+/fv//mup+ryHHg2DTH15eu5d37+FJ7n+vf95x7/fr191TXuoqAIqAI+AkEVADxE49Zb1IR+DYE9u47gOIlSqFY8ZIoVKgoFv6zGNdv3MCly1dw6PARlCxVFqVLl0GVylVRo1YdFCxYBOXKlseNm7flgj169Ebu3HlRrWp1VKlSDdVr1ELVajWRKWNm9Ojey1udGjFiNEaPGffFOu47dsGtTXsUK1ZS+lW1clVUq14TFSpURrYs2XDg4KEvtvFfKnD33n3UruuC7DlyYuOmzV99axcuXsbjx0++uvzXFnzt8Qadu/ZA0aLFZVwULlwUtWrXw/0HDz/bxN17D+Di0hCFCxXBrt17vSy7Zu06xIufEHv27AUFliqVq4Ht79y1x8vy06fPRLx4CbBv/0Hr/NVr13Hs+Elr37sbY8aOx8BBQ71bTcsrAoqAIuCnEFABxE89br1ZReDrEWjRohXChYuADh27CEO3d98+rFq9FvnyF0KXbj1BLfbOnbsQL3ZcBA7wP+zbfwCrVq7CH4ECI17cBDh4+AguXb6MceMnwp8/f8iUIZMILSyXMkVKOdZ/wKCv6tD0GbOkfNcu3T5ZnppnCkRt23fEmTNn4b5jJ6JGjiL15s5bgEOHDiNvnrwIGiQoxk+Y/Ml2/msn3r17hz59+gkOY8aM/arbW7p8JZYuX/1VZb1b6N379zh3/gK6du0mfapRoxaOHDnqZBHxqs23b9+hSuUqUoeCsFd0+vQZpE2bDpvsghaFYY69FSu9vpelS5chTZq0OOgglNZzaYBp02d61fxXHbt1+zZq1q6HAYOGfFV5LaQIKAKKgF9EQAUQv/jU9Z4VgS8gMHLUGGHcBgz8WEDYvGUb2nXsYrVQrGgJxIsb39rv0bOP1B01aowcu379JgIHDITq1WpaZdZv3Cxl8ubJZx178eIFVq5cjcOHj1rHuHH4yHEUK1ZKyk+aPMXpnONOl249UKp0OTx9+sFVLGvmLIgcMTIuXLoiRd8BiBk9Bvz/5h8rVq1xrI75CxaiV68+uHPnrhyn29aCBf9g8ZKlOH7iFB4/eYqdu3ZjxYpV2LdvPx49foKZM2fJsTt372H5ipU4euw4Xr32wLp167F27Tqn9h13Tp85K+2eOn1GDru77wCZYUMUoPr3H4hLl6+aQ/J79ux5DB06HKtWrZH22eeHj55g69ZtWLFyFegwdfXqNcybNx9bt223sNi4cZPgN336DKs93t+QIcMwa9Yc6xiFODLrKZKnQuFChbFg4SK8sbtw3b//QASZhf8ssspzY8uWrZg7bz7okufh4YEJEyeL4OlUyIudLVu2SZ9oMSDRRW7W7DlYvcaG284dO7Fk6XKnmj169ESUSFHgvnMPDhw4iMWLl+Llq1dS5u7du1i0eAn27T9kWW46d+6KBPESgONt2fIVWLZ8pdXelavXsHrNWpw4eVqO8T7bt++AAP5+R+3adZ3G4br1G9CvX3/s2LHLqn/p0hXMmTMP27bvkGP37t2T9un6RZey1GnSYcrU6VZ53VAEFAFFQBH4gIAKIB+w0C1FQBEAQJedzJmy4H/+A+Dc+YteYkIXmDdv3uDN23fIl68A4saOZ5XLkT2nMJZkXkn79h1AoAD/Q40ata0yHTt2ljLz5i+0jtFNhtrqAvkLWcdOnjqNfQcOYdHipfjNnz9MmDDJOue4cfbsOWTImAXnzp23Dr987YH06TLgz0hRsNPBZadLF5vmvUKFilbZPv0GIHeefMiVKw+yZ8uJGzdu4t2793Cp30j6RBezFy9foW69+rJfrVp1EUDoUjZp8lRcv3kb6dJlQOhQodGjV1/UrlsfAf0HxOQp06xrOG5s3LwF/v35R4UKlXDh8lUULlwMoUKGxtt373Hx0mWUKl0WefLkF0365Ss2IeTwkWNImiQ5SpQohdSpUuPPyH9iyLARePj4Ccw9UXN/+sw5aYvXP3josFx20aLF0m8y+CQKPm5ubVG7Tj0kS5IM1arVwKvXr/H6tQfquTTE7/78IWvmrOjew+Ym9/DRIxQpUhw5cuRGtuw5MXHyVOt2+vTtj/8F+B+6dO2ORUuWI1zYCHB1bWGd/9TGP4uWSJ/69O0nRejuRRw4BurXbyhuWqFChELffgOtJmjJiRg+Atp36IR2HboIxhQy+Kw4bjl2WH/BAtu46tGztwicbdp2QNfuvRA+bAQMsFvdKIBkzZIdQQP/gTNnz0msUras2RHYfwAUK1YCGzdvletSGE+ZMg1oqcmSJRvmzFsgx/lupEyRWq5HFy4KS7//FhC0HpF69+6L9Bkyg/FISoqAIqAIKALOCKgA4oyH7ikCPx0C1EqfOn0Wx06cwvGTZ3Dy9FnZ56/j36kz53D67DmcOHUGZ89flLLU3FMr7R1as3a9MKBJEyfFxcs2y8Gn6j9/+QplypRFhLDh4NamnTBukcJHwoRJUyztO12uokSMjITxEqBWrTpwbdYcbq3bOWmT2T4tJUOGDse6dRvkctQiHzl+Srbp20/G0jDQnvtDZi9B/IR48vSpderl6zfIkC69CCC79+63jo8aPU7aKl+2vBybM3ee7DOmhUSLCZlNQ+FCh0H6tOll15StUd1mzXEMdG/WrLm0M2DAQBw+dgLhQodF1ao1TDNOvxTcMmfMjEjhI2LG7PmihXd334kzZ88jVozYaNOmnZRPlSIVKpavKNYXPo909n706dMX4cKGw5Gjx6Xc0RMnETJYCLjUc5H90qXLIoC/33D4iM2aRCsO8Zs5c7acnz5jJv6MEk22x44ZK+dovSEtXbpc9ufP/0f2mViAjH2cWHFkf9euPQgcKIhYh3jg9t17SJk8pbjdMb7m4sXL2O5uswpIhU/8o1DJPvXt198qsXWbuxwrX64CHj55iuRJkiJ1ilTW+SFDhsv5Rg0b4+KVa0icMDEyZsiMp89fSJm1a9fL+Tlz5sp+/4GDZb9dWxueLvXqI8Bv/nH1+g05P3DgIDlPyxaJFg32iRYs0rXrN2S/tVtr2a9TqzYiRYgMxrGQhg4ZJucHDx2OGzdviQWE8VGkI0ePIWXK1Fi/Ya
Pse/cf2zlw6CjOnL2Ak6fO4MSp0+A7zj++96fPnsfxk6et9/3QkWMSm+Xd62h5RUARUAT+DQRUAPk3UNdrKgLeQICMTa5ceZE4YRIkTpgIsWLElLiLuLHjwvEvftz41n7C+AkRPmx4JE+WAjdv3fLG1YDFS5YJU5UgXnycv3jps3WfPnuOIoWLIVu27JgxYxamTJkqwpFjpX0HDiJUsBCoXas2Ro22MbsF8hdwLOLldvv2HRE3dhwUK1IUmTJkRKRw4UEmfO/+Ax+VL1qkKKJH+1usFOYkXaEypssgAsWuPfvMYQwdPlLur1KFSnKsTu06CPibf0tgKl68pGBHSwSpa9fuCB0yNNas24CXL1+haJHi0o8jx07i/IUP+NSv3wABfw+A/QcP4dKVqwgTKgyKFinmlAXMdOLZ85diMaGA8eDhh+DrLVu3S9/y5cmLho2aSD9q16yN46fOyPGyZcpKE1OmTJP9gQMHy/6evfulbBu3NrJfqlQZhAwaHMeOn5B9I4DMmGGLbaBQev3GTUybORvR/oqGKBEjWcKMYcLHjZsodY8eOyHXqmkXymg5+J//gCherIScv3jpCmLHjINaNT9YuOTEF/7RfcqzALLEPvbm2y1jcWLGlsQBpqkePXpJHQqCrzzeIGH8BEiZIhVu370vRRYuXCTnFy60CU+0zlDgmGx33eOzDBQwEJYus1kp+vcfgN/9/Yb99iD00aPHiqVtid0djrFHPD92rC35Qft2HfFHoD/EpYsXpCDo/7ffsWnTFtNF6/f8hYuIGyceunXvaR3zzkbdui4IFCAQ+F7zjwJgvDjxEDdOXGmX2zwWO2ZspEmVBtH/io42bdp75xJaVhFQBBSBfw0BFUD+Nej1worA1yFw7fpN/B01mjBWMaNFF5cZMh1kPsigyV+sOIgrDEpcEU5ix4iFCGHCiS8/GU3v0M3bd5A8aXK5Hi0qXtHbd4ymACiAZM+WA2XKlPOqmBwjc0xGc9jQYbJvtNLV7VaET1Vs1ao1QgUPIfdLYShK+AiIEDY86L7kmQrkK4C//4qGF/Z4AJ6nlpoCCGMGdjhkQWrYsLH0p7VbW2mGWbJCBA0GxkSQihYtgeB/BMOBgzb3pVNnzooLWVm7xeTsufOIHSsOWrm1xZ17NsaX9Ro0aCTM7oYNm8SSQfchuvS8fPVxWtYnT58hcaLEqGgXguTCEk9hi4uoU7uuxJYcO2YTIO7cvY8qVaojSaLEmDt3HkqXKoO8efNbmaM2b90uQlLbNrZ7IrbBgvwhWni2bQSQaQ4xIBReBgwcLIIDhSWT+YlM/O+//Y4x9oxjtGDx+bVs4SbdJB5/BAqCksVLyj7HSPx4CSx3LXMvX/o1FhDjgsXyRgAZN84WFxI/bjxxBTNt9eptiy8aO3Y8Hjx6gmRJkiLG39FBIYg0d+586euChTYXLBFA/H24ly6duyJIoCCglY/Ut08/sfbRTZA0ZqzNOrZ48RLZHztuogiVU+2xHG1at0Xg/wXGytVr5Xyvnr3kvLEeyUH7PwqwxKVjpw/xUo7nv7RNASRo4CDyXvPdlvc9Vmzbex47rggefC8olEcMG07umxZGJUVAEVAEfgUEVAD5FZ6S9tFPI3Dz5m0kTpBIGIxJk6fh1es34vZC15fbd+x/3L57D/cePATTlVKI4D4ZZO9+P4FgM9sVmc6yXggWS5etAF1lDOXNmw8lS5Y2ux/9so9sq3vX7tY5luex0WNsjCZP8JshDJw2gdl0U+IfiYHRtvJeZ3Fq0dJNAuFpoXAkZt6KGC4CbtkDy9k224kbNz4uX70mRQ1Ta4LXKTiUKF4SRshigDmPlSpRymqacSUZ02ew9rlBtyC2TTckBoaHDhESBQt8iGdxLPzuPZAkcVJUqljZ8TBobWDsBuNoDF27ZnMX4v7I0WNRuVIVMC7Bkdx37kbg/wXCgP4D5DA14qGDh8S1G7a6G+1B/yYNr7FE0To1Ythw0fI/ePhI6jKWgfcxc9YcweDe/QdImCARcubIJeeNOxxjX0gUjpImTobuPZw1/RcuXvpkulzW27lzt1xnwsQPcT3u7rvk2OxZNlexODFjgcKlIQbgs2+0tpGSJKJVMPEHQWzzVjm/epUt61WXrj1kf/jwEVKezyjYH8Fw/4HtXkeMsFnDrtjjbCZPnirlN9vjP44et1l/hg6xpdWtUqkKggcNjidPbXEdQwYPEYsJXbU8ExMNpEufUVzaPJ/7mn0+j1u378r7LO/53Xsyjvl+M97l5q07Ms6ev3wtgixxoRugkiKgCCgCvwICKoD8Ck9J++inEaALViK7AOI5A5FvAkNNeOxYcVGiZBnJftSta3fMnjMP3Xr0xtlz58S1i8HkEcJFQKQIkcCUutTsOxJjNipXripMHX35R44aKx+ce/r8Jf6MElWOuzZvCTJb5y5clP1ChQo7NiHbzGBEBmvosOEfneOB/QcOIXmyVJbwsm2bO5q3aCXCBwOqm7m2wOAhQxH97xjiLma0/az7+MkT1K3XAKXLlJNvlbDM/gMfvgvBMjVr1UWDho2ta5ctVwGtHdxdaKVKkyad9LFTl25yn+wv7/HgIeesXmyEQfF/BA6KUCFCWtp4HmeK2mHDR0k7+fPmx8BBQ7BshY2Z7tdvgGjTGa+wefMWLF22HPxmBYkZuqhtpzvV2AmTRWDg9fndFLqiNW3qKm2WKV1WPgRJVyOer13HBa7NW8l2K7c2cn0KDrTwpE6VFjvswfsnTp6SgGvGxjBwmwKfIZNmOU3qtJbViOfoBsZrDB5is3yZ8rQ0zVuwUIRWnmfCAzL+/A6Ii0sDqVOtWk3Mmj0PoYKFFAsD75dUtmwFOV+rZh3MX7hEXMH8+/tdLFzMwlWxYiU5zxgafhhz0OBhqFGrNrp164nWrduK4MnAdNLNW7dFqGIfmjWzBc1v3LRF6lcoXxH37j8UAZjnUqVMg7ZtOyBRoiTo3ccWNM/6OXLkkvLNm7f8yNJFjKNHj4nbduFXLupL/+gex/vgc1ZSBBQBReBXQEAFkF/hKWkf/TQCFECMBYTBwz+Sbty6jQ4dOknGpCZNmmHWbFtwL/vAbzm4urbEiBGjMHjwUHTu0s3SRJs+jhg5WoLOmeq1b9/+4g/PjEokZg7q2bMXGjVqKvEIZBh79uyNZctWmOrW77nz58VdyGR1sk44bNSoWRt16tqCsKnxpwsLs2bRfYaxA26t26KXnfl0qGZtkrGsXq0GmG3KM5HRd/yA4d37D8T6ZMqRaadbELGYMXMWZsyYjZEjR4GpiE18gSnL39179mLU6DHo1q2HxNw4nuM2rUyNGjWWzFLmnMl0xTgTMpv8a9y4mRUQTcHEza0N5sydj8NHj6P/gIGYv+AfEU4mTZqCUaNGSzyLsXSwb4yNYDAzP8hHSxAzm5Eo0BEvE1DNY3Qz4gclB3n6yN6Chf+gV6/eGDhgMLa7f0hTu2nzFtSr54KsWbM7xcF4vHmD0WPHg8Irx0W3bt0xaNAQUIijRWLM6LGYMnUaxk+cLP3q3r0nVq1eIymGmeVr2LAR8kznzlsIM
vlDhw7D0aPHxYLGLFnDho8Azz189BiMV7Hdz0FJLDB9us1ywmMcvwMHDgHHKMcmv5dC4YjCENsxH9NkWQpy9V0aigAuDdrrM43x0GEjpLxn4Ttjxsygu9+PoMr276OoAPIj0NZrKAKKgE8goAKIT6CobSgCvogAGaF/SwDxxdvy8aavXruGvPkKYeKkT38rxMcv+oMa5Dc4unbvieHDR2L37j3yUUW6W2XKnE0C3n9QN7x9mdVrN2DEqDHf5Abo7Yv9JBVevnyJDh07o0nTH+cOxTTRagH5SQaAdkMRUAS+CgEVQL4KJi2kCPx7CDgJIN/xheZ/7w5+3JWZ8WvYiNFwd/hg3I+7uu9dae++/UiQMIlYoPg9ElqRGKNB9ymmaf4ZaeE/SzDDwWL2M/bRN/rEwHXXFq1+qNBVpYpdAGnSzDduSdtUBBQBRcDHEVABxMch1QYVAZ9FwEkA+cEuWD57Jz+utfsPHvy4i/2AK9E9iG5J2bLlAF3NChQsjA4du/xQJte7t3nPIUOYd+v+yuWvXbsucU4/8h6YmEAsICqA/EjY9VqKgCLwHQioAPId4GlVReBHIKACyI9A+de4BgPKHz1+jIcPH0lMxK/Ra+2lbyNgxYCoAOLbUGv7ioAi4EMIqADiQ0BqM4qAbyGgAohvIavtKgL/DQQsFyzNgvXfeKB6F4qAH0BABRA/8JD1Fn9tBP7NLFi/NnLae0XAbyBgCSBqAfEbD1zvUhH4DyCgAsh/4CHqLfy3EVALyH/7+erdKQLfi4AKIN+LoNZXBBSBH42ACiA/GnG9niLgTQTUAuJNwLS4IuDHELAEEHXB8mNPXm9XEfh1EVAB5Nd9dtpzP4KAkwVE0/D6kaeut6kIfD0CKoB8PVZaUhFQBH4OBFQA+Tmeg/ZCEfgkAiqAfBIaPaEIKAIAVADRYaAIKAK/GgIqgPxqT0z76+cQUBcsP/fI9YYVAW8hoGl4vQWXFlYEFIGfAAEVQH6Ch6BdUAQ+h4AKIJ9DR88pAoqAfohQx4AioAj8agioAPKrPTHtr59DQASQhInlS8fTp8/wc/evN6wIKAKfR8AIIM2auX6+oJ5VBBQBReAnQUAFkJ/kQWg3FIFPIUABJGG8BDYBZMasTxXT44qAIuBHEbBcsDQLlh8dAXrbisCvh4AKILwhfPUAACAASURBVL/eM9Me+zEErly9hnBhwokAMnr0WD9293q7ioAi8CUEqlWpJvNDUxVAvgSVnlcEFIGfBAEVQH6SB6HdUAQ+hcD9+w/QvkNnVKteC1u3uX+qmB5XBBQBP4pAieIlRQCpU7uuH0VAb1sRUAR+NQRUAPnVnpj2VxFQBBQBRUARcECgUeOmCBkqNNq0ae9wVDcVAUVAEfh5EVAB5Od9NtozRUARUAQUAUXgiwg8ePAQFy9dwb37979YVgsoAoqAIvAzIKACyM/wFLQPioAioAgoAoqAIqAIKAKKgB9BQAUQP/Kg9TYVAUVAEVAEFAFFQBFQBBSBnwEBFUB+hqegfVAEFAFFQBFQBBQBRUARUAT8CAIqgPiRB623qQgoAoqAIqAIKAKKgCKgCPwMCKgA8jM8Be2DIqAIKAKKgCKgCCgCioAi4EcQ+M8KIC9fv8at27f9yGP8+DbfvX+P6zdufHzCG0cePHwEZlf5Gej6jZt4//79V3Xl2vXvu++vuogWUgS8iYDHm7e4eeuWN2t9XPze/Yd4/OTJxycAnDt/AXv37ffy3L918MbNW3j79u13Xf7mrdt44nDPt27fgccbj+9q07uVfeI+vvaaP/JaX9unH1WO8/fr169/1OV8/TovX73G7Tt3fP065gK7d+/F6TNnzK7+KgI/LQLeFkD27T+AoUOHYdmy5Zg0eQpGjxmLUaPGYMDAQTh67PhPcaNcgDNnyY5Ro7z/1eiNmzajZ8/emDhpio/cy6lTZzB23ASsWrUas2bNxoiRozByxChcvnLVR9r3qpGr164jQ4ZM6N69p9PpV69eOe1/bmfuvPlIly4Ttm3/dz98R8alYaMmKFiw8BcFkNevPVCtag2UKVPuc7fm4+f27T/o5dh/6Q28v7ZTd+/dx9p1G3Dv3r2vrfLJchMmTMKgQUPQr98APH5sY2jZ/ps3b7ys49P38+rVa/B6P5LuP3jgxMj+qGtTMEiTJj2GDBnmdEnvvJOsOGfuPKRIngrHj59wasfsLF2+Av0HDjG7/+rvu3fv0apVaxQoUOibBZA3b9+iU+cuyJevIG7ctCmU2rXvKPseHj9GAHn46BFq1qyNqtVqwqffAc8PiAqf6tVronqNOiDj+q109+59TJ4yDUuWLMPOnbsxa9YcjBo9BuPHT8Su3Xu/qtm7d+9h4T+LsHnzFjx79vyr6nxPoafPnqFBg0YoWbocqPzyLeKYunPnrm8179Tuzl27kSVLdowbP9HpuE/u3Lt33+n59O7bH/MX/OOTl9C2FAFfQcDbAsjGTVuQN19BJE2aHLVq10Wbtu3h6toCtWvXhVubDqCm6t+mVavXIk6c+ML4e6cvO3fvRbcevVG/QSOkTp0OxYqVxNPvnHgPHjqC6jVqI2zosChVsgy4eBYqXAxp0mbA6jXrvNO9ry576vRZRIwYCZ07drbqXL1+A48eP7b2v7QxaPBQxI+fCDt27v5SUV8/T+Ejc6bMXl6Hi8nFy1fk3IuXr5A5YxYUyF/Qy7KfO7hh0xacOv1tWqO0adMjb94CVvMPHz3Ghk1bvygwWRW8sTFm7Hj54vGateu9Ucu5KIXK3HnyoWChomjSpBlcXBqiV6++aN++I5YtX+Uls3jl2g28ePHCuaHv3Lt24xa2u+/6zla8V33Hrr24cPGS9yr5QOn9Bw4hfPgIGDhgoNXalavXvS0MDR48FJEjRsGpU6etdn7mjTKlyyJLpix49+7dN3ezVq06yJUrj8WUVqxYGWlSp8WbN99nVfnaDlEoKFCwMCpWrIJnPvwOeO7DvfsP5F4rVqqCV6++XcCihahd+04IFyYsggQMhBIlS6NT565Inz4jggQKgmnTZnq+9Ef7N27dQfJkKRAxQiQcP+n7441CB9fcUqXL4tHjpx/1x6cOPH76DBs3b/Op5j7bztJlK5AgQSJM8iGFplcX27zN/bu9HbxqV48pAr6NgLcFEHbo0uWrqFXHBfcePHDq35mz53Dt2vVPMl6OZng603zOoYbaM0NfWryo+fZM3Xv2wfARo50OO17f6YR9Z9v2HXj23KbpefToMWLFiivWHVOWzO6X+mLKOv4+f/5CJvJNm7dahwsVLIxof/2N1x4fa5t5Hc/kuNh6vg/P+6xL60eL5i2lGT4X1+atQI3Wp8iz1vvZsxdwadgEO3Z+nkH0rBH0rJX07DZF1zA+W0ccX39Bk3n02DGULFkGb71gYqZNn4leffpbt7V27TqQQSHxWl6RZ3zZbvWadTB79tyPinuFrek77433QuaSlkES9xs3cUW16rVk3/H+PV/3o4t94QCf0cWLl0VwffDw4SctFZ9rZsvW7QgUIBDKl6/oVIyWv5w582LGrDlOx7lDk377jp1B
i8WX6JUX7yLreH7Gly5dRstWbXHg0JEvNSmYfqqQ43vhuczbt85M77z5C9C+Q2dQUPUOcRQ5zkemruPz9DxOPO+zDgW8zp27SvWDBw+jddv2eO6JoeV1PjFsrXemUqUq2LJli+nGV/2aMfs1hT1j6jAVO1X3/EydTtp3Dh48hFKlyuLFy5fWab6Xjvh8qZ1r16+jYRNXHD9xUtqg9adE8VKW1c407PkeHd89U+ZTv16tIY5lt7vvQJNmLUDlgmf61PN69Qk3ok8dN+1u3eYO1xZuePr0+5nwbJmzisJi/YbN0vzzl68QL1YcObZ563ZzSevX84zp5tYGMaLHxIkvCLyeMXDE3vFZWxeyb3h4Wv+o8KrfsMlHFopPjRGv1k/P13Dcp+silaar1nxZgePdtnkdDy+sx1269QQVR45EnB3nD8dzXI8+dc7z8cmTp6J3n/548/bdJ/kuts132vMzcrzmp9ZKxzK6rQj4NALfJIDs3LVHBBBHs+zTp8+kb1z0aapOmjQZli1bIccaNGyEXLny4ey5C7K//8BBVK5SXbQxffr0k2MvXrxE/wGDQEbo6LETyJY9p2W2XL1mPdKnd3Yp2rNnHyZMnAz3HTtlgWvQsAk8PD4w7q1at8OoMR9csE6eOo0yZSsgXboM2LjRNhnLhT/zr3uPXujYqYuUoHameMkyGDlqzGdqeH3q4sVLSJkyDXbv2WcVKFe2PCJFjOwkgFBQadCoKVKkTO1kvTl95izKV6yCmbPnisaUbis1atZBvXr1pb0TJ0+JBr5v/w/a1ZYt3dC9Ww85X6VKNQT/IyiyZ8+NJUuXW30wG+cvXESNWnUwafJUXLx0WQ7fuXsPzVu2BoWmHj17I3ee/Nh/8JCpgidPn6F2HRfkyZMfjRo3sxZm9jFNmnSYPNnmwsbnmy59RgwZOlzqUnNP9zhjWVm3fgPy5C2AgoWKgDgZovtB3XoNUL9BY0ybPgN16rrAM0N5/uIlxI0dD5EjRESVqjVB5nfduvWgxvTqtRuoU7c+SpcqC7pQGJo9Zx4yZc6K4iVKW1rk/gMHI2yoMEidKi3GjHM2lR8/cQoVKlRGrtx5MHTYCGmGbeTOnQ+Hjx6X8VqnTl1s2mxjCAcMGoLgQYIiQdx4qFi5mmVVcd+5G4WLFEee3Hnh7r7TdEcWpvz5CmD5ilVYu34j8uTJh8GDh8i4oBUqS5ZsWLBwkZS/c+ce2rfrgC5duoJMDJmh4sVLolTJUtjuvhPduvcSdxdue0W379xFkkRJ8Lu/3+T5eS7z4sUrrFq9zsm3nsx60aLFETpESBQoWATuO3bh1q1boCb+8JFjKFiwCKZMmSpNzZv/j4yTVm5trKbJFNKdsWSpssiWLQf27rW9A9TwBgsSDLly5bVcyrp264FLl6/gn0VLZW6gSyfnFY71nDnzOLkDcu7pP2AIMmbKgvETJ1vXo5AxceIkmWvKlauIqlWqyTlaPSJFiIxIESKhVu16oI+9I5HJ7dO3P86du4AJEydZ7z1d0ziOUqZKg5kzZ1tVDh46jAoVq2LRoqV49uyZtFexUlW0bNFKyuzesxd58ubH8OEjrTqNGzfFsGG294BMOTHl816/cZOUYZxTlarVxVVr8ZKlVj1uDB85BoULF8OChYvRoH5D7Nr1sWWSmAwfOQoz7ULkmTNn0b17D3B+GDxkODJmzAJ39x1O7XKHgi3fS7rs7NixS7TQrdu0l3L8unaOHHnQq1cfqx6ZS84VfIeyZ8+JrVtt2uTDh4+A7xK1771698OixUtEMC9VqozUvf/gIcpXqITixUvhwkXbPHPsxCl5vvnyF8TJU84WSNfmLcX9cuasuTLHcGyQtm/bjpIlS8v2shUrkTx5Sri4NJB3ggfbtusoc5NnYYHjn3MZXSZZvl072z1yLipeohQqV6mGZ88/WPmoXClbriJ69u6LiZOmgusBGTm6lZUtW8Gy9rdv3+mjeab/gMHIkDEz6tatbylC+C5wPi1arKQ85xcOLppc/6j579tvAMZPmAy3Nu3w/MUHoU1u1v7v3oOHqFK1BlKkSIUOHTqBdb0iMrlZM2cVhQO15IYSJ0goAsiuvR9ihRifVK1aTSRLnhKzZn9QQjR3bYlECRLj1Jmzprr88l4WLVoic+Ldew/QtFlzlC9fyVrP3Fq3RYkSpeXeHz1+glKly6FVqzbgNonzCp9Tjhy5MXXaDKvtFStWoXbdBk4xE3PnLZR5hZg4rgHzFvwj9Wk1MWODrs3Enm7Ybdt1gOOayIu0btMOgQL8Tyxos2bPFSG2d+++4kI7Zsx4tLWPiREjR8uckz9/IaxYuUr6R1enjh274NjxE/Ic06bNAPbXEOerHDnzYPqMWfJVes4LpFat2zqt52fPnUfJUmWQI2dueY9Mff5euXYdteu6SDvLlq+0TnH+qufSUNy5mjVrLsc5BwUPGgJRo/wpSi8+QwpY3Xr0kvXAVN68ZavwB1yr12+wzTU8x3Vs9JhxoNWtdp16qFa95lcpmUy7+qsIfC8C3ySA7DtwSDTGc+bMEwFg06bNoIbFBEZu2LgJEcJHRLVqNaR/HTp2xtDhI2TSIbObr0ARWSTXrVsnkyjdtg4dPopo0aLjr6jRMHb8JIkxSZUyDZo0ccWNW7cxevQYJEmSHFeuXhNJnwuAP3/+ZELhYszFuXoNm9aZF23WvBVGjbYJIKwzYdJUnDt3HqtWr0GBQkW/GKh59eo1jBw9ThgStkeGj8IRBRnvEpmd5MlSomOnzjhw8JAIWFyEpkydZjVF5mHS1BnC1B06dFgWKTLnJOLBibVX777iwkUNydSp05AgfiIRSKhF5cRatnwlPHli05q1aN4K7draFtj58xfYGdl/cPWqc+wJGbxlK9fIBM7+ZM+RR/DlNerUcUGBAoXlGdet6yILlOkwBT7G/+zcsVNc1uLGTSCMx42bN1GkSFHUsD+L/fsPIGSIUMIcs+6tW7exeMky3L//QASaaTNm49Lly8KIV6hU1TSPDp26YvacuWKByZQxM4oWKWqdMxv0GXZr1RqJEiYWwYN9JmMUIVwE9OjVF/S/zZQxCxYstPnDjhkzFgkTJsH69evRrVsP5MtXQJgv+kTHiRUXPXv0Apk2R6ILXrduPWWsNbdblM6dO4fBQ4aBDNXpM+fwvwABUbJEKanGyf5//gOgfLkKwuwR34kTJyNO3PhyLzWr10CoEKGsBXPRkmXSdtQoUbFw8TJxZQz42+8oUaIUlixbKYJM7JixRSPG+6tYoZKNebD7cVMg4XuQO1ce0I0sRNBgKFa0hOMtWNtccFm2SJFin7QC0ILlqEmm9nLa1OmIGyceZsychfMXLqFokeII6D+gCEzU6g8ZPBRbt+8QofLihYto3KQZOnayafpnz5mPgoWLg5pwWuTIlJCB3bN3nyzw7dp1EOa9TZv20jcukmQQu3XrjiJFi6OlWxts2boNdWrXtcYfMR09bqIs1tSGk0mcv2ChaAFTJE8p7fD85s1b5fnT5YzCfdeuPcRtlM/IM9NGATlM6HDo3rM3Ro+ZIMoNCnzTZsw
RIXLfvv0oVry0MOh8DuMnTpG5iG4tPXv1kedD4SJlitR47fEatKA2adpcLGFGu96wYWMYhcucOXMRP258DBs2QpgtCv6co8icUKlSrERpcB4gjZswGX36DRRGv1GjJogXJx4u25lx6+ECWLdhEyJGjILeffrK4U6dugoW5cpVEOtB5UpVQKbeM1FYL1GyDKL++Rc6dOyCLVu2In/+gqhatYbMvUuXLEWyZCkwf/5CqTp5ynQUKFRMnmlzVzKeFUW7SvfS8OEjYsSosSLwkNmhYF7TPheQiSLDs3LlamF0aF2dPnMObt++AwqOpcuUB+cPUucu3eVd2L17DzjX5M6d12L4t27ZhmLFSsg4pcU9Xtz4TvFuFN5oGfVsySFTyfE/YuQYsWzTGkWBevXaDTIvUgClIsZYuyiocp7gnE0BxaxnVHJkyZrDYvK2bN2K+PESggIYqWevvujeoze4LpYuXRYVKlaR4ytWrsayFavkWr169xFB2GilKWTTak3LWKFCRUVw9myNYCNHjtqEfvr4jx49GhHCRwLdor2it+/eI0f2HAgUICAGDBgkCQoWL12ObFmzo0uXblYVvgt0c63v0kD6HC1aDKxevUbO0706YYJEHwkgZFq55tKdi4qordu2y7zSpElTqcf9WLHiyBjm+z506HBZdykEMv6ieYvW2Lxlm1wvU+ZslnBPhp4CP3kJBqIvX7kGx46fFOVUo8ZN0byFm7TP+DUqJQ8cOID69RuidJlyMh7Gjp+I3/z9huYtWmHu/IUYPnK0k1Xi0JGjyF+gkMwntMIOHTZSxsSAAYNlzBCXaTNmShzKtm3b0aNHLxQqXFQsdpMmTZWyGTNkwu69+0URRHdqjpeTp06JUHj5yhVRztSr39gKpG/p1hbsF+nK1auoVLkaOEZprc+SObsINTzHuayZaytZN4h/ugyZZZ7kudJlKqBb9544fPgwOMctXbpc5hjOn8SBSlWOlx49++CPIMGwwa5k3b59h8yPGzZswuLFS5EseSosX74Srzw8kD1HLgQN8geaNmsBzolUhrRs1Vr6qf8UgR+BwDcJICdPn5HBmjevTfudMVNWdO3ey6m/l69cE8aQL4QjnTh5GitW2iY3BmbTMsBYCJouKdlnzZrDCqiiL3+5shWkOrXb+QsWwcJFi2Wf0juDu8xCvHbteoQNE94KzqQJmxo9EhcDSvicUObNWwB//n7DaLtwIgW8+MfJi8yhTxAzYGTJnFW0dnnzFRALwIuXzu4sM2bOlgV42dJlWLNmrV2Aqy6XpxZ85sxZsm3cqChIUIi5bc/0Rdca1xat8dAevMeFw2hjiREnaK+IDGfz5m6y0PL8JXtwPLWHGTNmloQDPL5hw0bxTeY2NUDUaBkNz+MnTxE/fkKMtZuZ6fdKBsoQnyFdzkgMkDdEC0ebtu2k7a5dusH/b/6xZ98BsXxVrmzTXLNsixatkCN7TlPN6XfGjJmioTUHaflJnCiJlW2ITFFvu5Vt5oxZkgyAZcng0gXu5i1bdpIsWXII42Xa8fwbLWo01K1TTw7T/cnQ/YePkDZNeos5IXMcLEhQsB+GVq5cJZrwp89eoEzpMrKI7bFnKjp99hz+jPwnChey4WOEhKaNm0j1qpWrIFzosHj4yKY5HNB/oFgw6BZFWr9ho7Q3cMAg2U+WOKloPWXH0z8u2mTAaLHyirnxVNzaPXnyJKjtu263GlCrnyhhEnABN9SuQyf07dNPhKx69VzkOpwnqKXjO09q3IiMZD5Li1m4aAnLusMxRSGHAg2JDHnkyH/K+8r9efPmiXWH2wwkJ1M6adJkYcrTpk4r13vxygNdOndB6lRp8Nge70QhgEwGacnSZWjf0WbRlAMO/ygQlylTHtTcGiLDWqlSVRkzZJpDhwqD5q4tQE1jlszZsGTJEilq3sljx44Lw2ncjWhNat22g6XJpgDStauN8btw4SLSpctoJaNo0tRV6m7cuFE0y0GDBBPmh1rV/AUKg9pXEpUSZHaPn/g4CJ3vY4dOXUDLGYkMP8fmjBm2uWPBP4tQrXrtj4QvCplTpkyXxBXm/WzVohUSJ0oq7fBfzlx54NbKNqbXrd9oMaQUiGgFkwxfN2+B85uju8nGjZtQqFAR3L5zT1wVHZMOzJk7H1RsbNi4Ef/8s1jm5ZGjRoMuozlz57OE9KVLlyFnrry4cMFmIWVQNIVoY4F337FbXOvoskRml9ZZr+je/fsy/ubPn2+dHjhoCKhY2LZ9OwYNGizjaMu27WKVcpzD+vbtL26gpmKDhk2dGP9y5SviypUrwjwygJz9IJ08eVqYYW6PHjMebq3bgO5cffv1l2vR+sKkErSuGyLTSwWbZ3cbni9dugyCBg5iiooV1DE7mHWClq1375EnV16EChYMOXLkQuyYcRAyeEjs3X/QsZho7Dkv9OjRU+6J28Z9lwoezu2eLSC0RNDCxVhLMsOkkSNHo0D+QlbbXHO225OYML6ucbOWoNKIFrNixW0KGxZmgDatKSRaG2rVdhGmntZHWkhHjBiJzVu2oHQp29zJ95/jioIgiUH7VELRnYtY8H2ZMOGDJdtRocLy9NAwFnkqxLjOMRmHoalTp+OQXZgcMGAg/or6tyiaGEOZNEkyCZRn2Tt37yNL1pzyLlIQ5hzJoH3S+fMX8PLlSxkHzZq3xBQ7L9KipZtY9s21mBwgSpSocj1avzgPGKI1nZZU9p9WYdLRo0eRMmVqDBsxSvZnzpqNosU+YEn8UqdJL0IzC5QqXR69e9u8TLjPtTR+vATSL85pceMlECGQ56hIoeJLSRH4UQh8kwCya/ceWWi44JLoIjRn7sKPFrb27WxaTc+m8MuXL2P4iFFiemcwoTELduna3enlpKm+hV3jQctJ3nyF8I99oqJEX7psBcvszoDOVKnSWi9TU9cWlnmzSOGiMnm1adsBLvUbykvIPn+K6ApBLaxPESeNIkWKC+N15+5dRIkc1ck1g9dp2LARkidLLqZsTqgNGzS0XNhmz54ti1XKFKlw1579iNp9CoBknEiLFi9FS7d21qLMRZWLB4lWCJY1jJIcdPi3/8ABhAsbHgkTJgY1jiROrkw2YDRhixcttgKtqdGnAOLol0uXBgo9pJs3b6FI0ZIgo0KiFpxuViT6cV+/fkPcopIlTQ66otWv3wjVqlXH0MFDQVcBMmZ9+36YNClAGAFGGnH4R60zEwaYjDG8b8dFkIwqGW5De/fuF8GyevVaotmju8ijR08ks9Aiu3Bryjr+9us3UIJ/2X+6Zhjh6/qNW+ICwiw5pBs3biJ40OAgE+5IJ06cQo9efcB7jhguvGgyeZ7uiOHChEN1u7Vw2HCbRm7kSNsCU6RQEVD4Ma4LdL/y7+930AWRxKxIZBim2d0Y4seJKxrOV558q1nWJnz7Axn2x97wL3d3d0eypCksbRwZ+axZsoMMHenh48fg+0ZXPy6wrs1biMuOEVBWrVyNgYOHIk+evJKhjIwVBTUymYZZ5eKeLm0GzJ1rYw7pRhYrZhzs2mWLQSITQr9/krgc5smPsmXLo269+oL1WL
u7Zbt2HWVMSUFA5hgmliDRNYmKCa9iQLjI0+VwqYPbA8dmurTprHeyccPGIgCwLVrTiHu2LNktX30qDvieGx9/Kkko8Bg/dzLrJjPdgQMHZcyRwSAVK1ocmTJkQpNmzUWYbde2nTCD1MJTi26IDCsZphNeCCAs06//IPTpO0CKk9mjNp1jjDR+wiRQ6PPsfsZzdM2km5yx1rg2cxX3Kp4TTXqO3Fb8Co/xmQ4eOlwUUaVLlxNmmQJAvfqNQC2zIbq0RYkYGX9Hi2G54JpzU6fPBBl3Ml20dLdq6SbKjX79+qNY8ZKmmFieixQtAc7LJAogtMIZay+Z4cJFSsi7xxSuc+YtsOp63qC1Zo1D8g9mDKpQsbIICU2buoqAyDbq1XWR527q0y2vUmWbhZYWm8pVa4Axg6RHDx+BbmZkmOn+x0xOnoljgIowjlm6KDZzbYFuXbtL8DBdRh0tU+PGjReriaO7kWmvSKHCiBr5T7z0pMDi/N+mTTuxbHTv3ssWiwlIAoAwIUNj245dwuhzzBYtUsw0J7/UoocKHgKuri2lb66uzcWiwZN8JgkogHiRoIPWrZy581tuu3TTM65xfDa0ou2xj29amlybu0mMJT0JOtpjoZw6YlcSUAChAPfgwQNkz5odlStXESt7rZq1xYL4xD530YrINZ1u1VRQvX1nc8Fu3KyFCHWe2+Y+rT1ly1cEeQ1DdP9dseKDuxOP799/0L5O1ETc2HHFM4HHKciNHGlzw6areKJESUEBmeS+cxdChQqDtGnSWXMMnyHjL41bJBUCZPQNmaQD02fMFqs93cq9Is6XtJa2aNlahCCWJ9ElnO7RXMdI+/YdQNZsOXH06HGJmYwTOy4WL/6gSCXfkCF9JinLuYcuYxTCSK1bt0Ule/ykHNB/ioAvI/BNAgjdkAoVKS6TnOf+kbkkHT5yVNxuuOA2buoq/vk8Th9w+vuOHz9BytHthhMSiT7gNOcaon87g+BIXDS5yBh/abrx0PxpMvPQhStlqrQwQlG7Dp2tVHTlKlTGsWPOGkMGmHkOvDbXZayGWYh5zLMGxZT72l8KXFwwTYA2tTfxEyQW7Zhpg4HL9RvYtN7mGAP/tu/YCWbtIOY1atSU+AVqW6hBpVBhAvzIQLRt38nyYaYmuZM9CxbdR8gYmQA5o53jdYxwSLcSCiwxYsQSJoDnSpUpj5WrVkt3Vq5YKbEA3GGWsRQp04BxIoYKFykGahMNtW7TVtzFLl+9LpMxzccM/jNEC07GTNmcmBVzrk7deqC53RDdIAoW+KBZM8f5y5TQtIQZ6tylm1NZMsQMPCbRSkY3Dz4PkxaVzAQ1pgkTJMa6tZ/OSkZfdS7exNEIO2yTC0ia1OkspoPjNPD/AqGZgyaLTCO1+fRlphUiyP8CyfVZny4VdBmjewyJ2kBeh3EgpBJFiyNWjJjiXsT9UICDSwAAIABJREFUXj17I8Bv/mWB4f6y5Suk/PTptqw28WLHRd48+aSu53/sa+ZMWaT8mXPnPZ+WRZ8aRMfxwULUYlLIY3Y1Et2dsmbNLtpM7tOtgoGj/HUkuj2RCaf2mu4F1CoyxSaJrm2MBaLFh0TBmtnEzH3wHaSm38Q6MNjSuLJwPFew4yWV7YkxyHCTmSpjvwbPUQAdYE9LO3bceBEyTB3H+ySj0KJVG3FjMucZ+0N3Gkci00Fm6vHT53I/dInLTeH+3j0cOHjQekdYh8+7S3db3AD3aRXrb8+CxXmQaXmp7SfRF57xCZ6J8U9MQWuITC+ZmNNeMIQs02/AIFHucJtCHa0PdHcj0S2JbpqOcQ5yAsDqNWtRrnwlKyi+Tes2Et9lztM1bsgQm2WFluVcufPh1OnTomk1z5QJPOq6NBQB0dSjsEgXMGqvU6VK42QBZfAs++SZaGFLnyGz9Z7R+koBxDBZxI7WKjP3sT7jjPju03LGtedTRAGELmYkCgUtW9Ei4RwzxeNNmzYHBStDtO4y7oLErFjNW7bB8ZOnZJ9rUMkSpUUgp8KGsUCeiW02c2350bW4ztAyxvhEQ8NHjLSuZY6Z32JFiiFwwEB48uxDrArPzVuwUDwIOMdyvjPfQsqZPScC/h4AR+zKQsabcX4h9oZoMaP12auMdBSWmAnLxFiYOvylJSdXnvyWhZPpvE0CEGJClx6zHvN9JmZcg7iGU7B2JJM9k14KDPYnXbp0Cdmz57LGgSnP93bM2AlIny6juGat37gZmbNkM6fRsHEzbNnidZYrWifpUjVg4GCrPF3JOMYM0RpDC82Jkydx9+5dmYeMay7nPbqTkWjdSpEitTWeeIxCOAVKujJftXsTUAlBaxGJihta7wwdoktVyjS4f/++KF9at25nTskvlXy0pnAu4txGolWXygTSoEFDxeJj+BX5BEHWHNb6EjlSFLRp88GqO2/+QqRKnU7q0oqeI1deXL12TfY7deoCKuaUFIEfhYC3BRD6ZQoDmiKVmEHp68w/LsDU6tCfnswGswCRSSbRDaZosRKiserRozf+/PMvCU5jKklqxqgdoFaDPrbUmlAwILNOVwoTRMpFhYGg02fYFixaQOgisGzZSlkQGKzMWBNavtlWzdp1rcBJ+kPWqFVXJjS6ZlB7TNcXRwaE/aTmnnEjvAcyM3fu3JG0pCYwm8yqiXP52gdExmbr1u1InSq1aCJN3n8uGFxkqY0jo0Y3qXwFCompln3cu+8AGHxPF5YixUriin0yq1GrnvSP/aB5+siRo6JxZWBk0KDBLe1fjeo1LezOnDmDbNlzgYwb40V4PUPUCLVu21FceXiMWmAuLIxvIN4T7L6r1OzR/My+0beagZ+VKlUW1xAypgyepxuNIT6nuPESYZN9IVi7dq0sfMbaxXLU3tCNjMIIXccWL7UlLdi1aw/+FzAQli9fIcxShfK2uAcz6Zpr8JcMPWNhGIN09PgpVK1aHSmSpxArBTN7UONYq1ZdYVYYtE9cSFwsw4QOK8+EzhL0KSeDRU2Tmcwdr0OtOa11HJPPHII+GaQbOWJkFC5URIrTykQLRYXyFcTthIJu9Wo15d5nzV2AalWryzbHBInMDxmC3DlziUWJQjj3O7bvIBqsjOnTI5D/gBbz1axJMzlvhPZRdr/2Pn36St/DhQqDBPHiWxYTx3vgNjPY0QSfKkVq0CrDAH1aMsgo0uRvxrpjvQMHDiFBgsQSF3Hy9FlRFCRKmMhJK0oXR2pxuWBTE7x9xy6cu3AJmTNlteKBGBBNQY/MBplHKhDo2sJAfyYvSJwwseU+xDkmSOA/sNi+cFPwoisixy/fTQZAk+l89OiR/M2YOQd37j1Andp1bCla376VOaRggcIizNLqsnr1WpkHqIyge9hLh2xYtFwxVSnbNK4vBw4eRvYcuUHLGMc9k29s2LhFBJCSpcrhod0Vr1rNOjh58hSuXb0mwgLnPbbdoH4jhA0TTvz+OS9VoLbf7lrHuY/uKwz45Ty0dv0GYeoZN8BrrVu/SdJLU/kRM2YcNGncVOZPzk8cHwwY9py5hti4tmglVigqFJjUgSlA6epKYrBt5szZPkpDLDEtE
f6y+ZvUgRI4NWC6PZ4/GWLVrKi9yv//ssVhMn2YLLU/+XSrqgqwgzL/GFN1YSHiBTkj59Bim3Tk7UAvLlX7N2gwN/61i43bVbdyRJkgxMqyexAXHiO4J+efzl6zcIHyYcMlqsSixv1rQFyLwYWr9hE7bZg1nJTDFIM1/efBg1chSqVa0m/rvefaSNcSiuVWqINp73gG44fEZItGQZNw5zHjIUCRIkFlc5ljFrRk17DIupw18KIDSXGzcBaor4jHBxihAuosNFhy5ZEcK/369Ff+0CNm11n74DULSYLUsaBYxw4SI4zN8UXunO57CAHDmGXLny4thx71My0qWGvu5Tpth8shlYGJ2+vXZtXZcu3aU97wMnWWoyy9tdXCgc58lbEAcOHJJLpJsEtVzU0NKdqmz5iuK2woNkvhgoS6I1kIGLrEfi88z4JcZ+cMGh+wdp+IiRCB7sL3TtZmPmOnfq7LCKUUCka5hVQGRsCeMHrMS+EyVK6nCb5EIbJEhQMJCeriGMeVix8n0CiBXLV8jCbjTxjRs3RZjQYSWeiszG6jXrxS2pU+euoLDmE5HpS506nWh4T54+I+6W2bPZrKVsQwUDM8GR+O4YDTd90qNGiSbzEGMSqCnesGGTpIskhlYiY877QXcdxjRR+58xY2aHpYB1t23fgdy58zlcFaipnTTlvRbQ9Mf5YcdODyxYtAT9+g+UoFKj+eMx3jM+cwYXvh+cM+i6kjlLDoegzLmTvvu3b9sUJPz+gWHWzLn4S3eLMqXLCQNK7SZdDZlBjYLyqzdvZB5iADW148xolzLFf45r4D1hbINh1qkJNQw3Y2UYT8fvDZGIDd8xYsfroEbcpMy1jofbdKXLk7cAhtjTdtI1aOPmrWA81cBBQ2XOMm2II6+VxPTKgywB7bSKMQjWK1FxQAaLQfWkVP+lRsniJWSbz2OUSFEcAbmM1aDigGO2uUWGd7yjFIjKV6jsiJ8jM8bYLRLnkWzZczkYb8YhMt6G7y/niuRJk6NCeZvAQQHStQqVM2ekL1pKTcIJxkaZNadJ05bgnE/q13+wxM9wm5gXLvSh2yOtGjFjxpH4HNajoq6yxHlJF/KPlu7f/QfEgoWL3xd62WLMj4nNbNSoKdwa2OIBGK9mrD9swjg+CiEHDx6U+YdxnCQKgm3avE+UQcs0YySpFOS1mmQZvI+cL3m/mYSD3+E6eeqsWM2bN28BWgystHf/ASRIkMThEshjtIhQGWN91idPmynlnJMY32QEMtan0rNylWpIkzajvOvW/s32oUNHECdOfDBgnsQ4D1pWSHTVZBp8o32nKzmFOhLvZ9ZsOUUZunnrdgQOFBSt3G3PI92O/o76tyhquK6dv3hJXNLommriS9gH33O6fxYpYru/fA4Zq8f4FhKfb6MQ4z4TaJi1n3wGhRkmMyDRGyRFilSOOYLXw/tP8TJb1hzoZXezo1X872jRHW59w4aNkFgwJpigKzrnOCbAobuiWZ/kBN78c6vvJnwH31tDvDf8PpiVGOtIvoX32CgHeZyKzb+CBhPh2Vqf21wzDV/nsXc/IkeO5rDQEVOlXxOBLxZALl2+CreGjcXFgZM2taPMTkVXiBQpU8sLSOadfpEmnSxfHjIg9JM/e/6imD7jx00AfmejfYfO8r0O+mmSoWCwJ4NkyahQIxDyrxAS5Hv5KoNSKyNokD8dbiKurpXlvLRY0G9+jj3wixNhqZJlECJEKEfmG7pgUFvCrB90x6IPIicEn+jO3buicVlqybJEjQuZLZO5x7u21MRkz55TJg9mXKG7BSe9YcNHOBhmuqRQkxE7RiykTpVGAjnz5S8orjPepf198uSZBLvGi5dQMqwwyJYapFGjx4EZenwiMiUMWGPwHJlaZvEwNHHiFDExp0iWQsbAxcRMCMwGEiRQUFlISpYsgwn2bCScXGrUqicTXJSIkRAi2F8y8XjnQnPz5m3x540TOx6at2wl95P+qD169ZEMJ/xwktEgmTHxfnCRpJBJBox+4PHixv8gSJO3bf7CRahYqbIEfs9faNPGsm3QwEHFPEz3P2bhCRsmrARHnzl3Qcz7fA6pkaclhkGqzI7UqUt3sRxRq8XJsHXrtggZIpS4FdLSRncU7jOLlleiBYqWlGhR/xatMTXMbM/vGpAJvXDpCoqXKCX+9h3sWX0onNJNikIYnw9aPUiMk6JlMFSoMOja1faxQGo2ixUrJUwx462u2bOj0WWI7gp8nosWKyGuZLzfpMlTpgkDxeB4BqozI83KlavFVY0xVDFjxBY3DNZlUDEXWvppd+7S3ceFnIGzFGaZcYtaQmYtYnBt9559kDRpMnELMMIlM70x2JhuFgxUprDId4JxSXSLauHeBu3bdRStMhkiCoPeLTpkSJj+mL7RazdslmxljJWgkHT81GkRHKJEjop16zfKWBiLQbeMHj374t9/YkgcBa+Rpv7ceQuI8Ob1nScTTCGsYeOmWLdxi7i/UenBMZHu3LuPatVryT0xWtQ+ffvJ+z185GjQYsJ7ZnybGVBMfMj80vee93eLPU0lBUrGrfB9pJuKiSniPEeBiUG2tPyRWVi3YTOGDRsp7l1Mg8kgZ69E33LeE/ZJ4YDaXiY/SJHiPwkEb9ykGajxPXTkGPgehwwRUjJhMUsf55sQf4WUZ4VZeGL+G0ueRwqlJMa4JEqUROYbxq4Yt0YGL0eOGEU+HOg14NeMjxYdWgDJXPHaDKNDwYjWQwrzzBRFwZbE9zFypKjynPL54PcX6JvOeYCWACsRm4oVKgnjyIB+ZsJjIo1dHnsETwpSTIZAgaluvQaenmcyd2TiqL3t1rO3QxlCxoxZlSh8tm3XUdwlrR+8mzt/AZIkTY7JU23JEBgzwm8jMKkFLXvMmkjiMzxhwmS0atNerJBMNkHF0phxEySrEl2G9+0/KK6ZTNrAQPbqNWo7mErrddLCwfTm1ILTJZN9erUY09OgTl03B77W9mabcy6VgrSUMXOcsRRQ8CeDy3mI98Rkx+LzyuefMSKc+yjA002Scwwte8wkaZ5FCny0jHFdpZBjfPiZiY3C06zZ83DqzDlJBMLYPKulkO82hXGTvcmMl+tyuQquMh/ROspnQdaxZi0RL14CiX8wWRXZhhZ9CiE+EQVfYsi5dsToscKrUBDnc8N4wOjR/hHlCb+3wvtDhZVJUEOPArosMRMZLWj8rgVjVjm/0NJGCwCJFiwKLrQMc61gjI+x6vAdZx9002zctAVoWeFzQqVAmjTpJI6V18z7FDZMOMmktWOXhwir5IXI81CxlDdPPoktYXIIWnppcWFKfLqn8/tgVHryPaeLY7A/g0vs7ZOnzyU4njwTrfAkBvsz3opzC9d5Pu+0FFuJQiTfcyooYv0bE6n+SyPfMeN7GiF8JJnXjYB64uRpSTaTIF4C/Bk4qLioLV66Qt6/dGnTI3q0v8Wt/ZSXby2Rv2KaY66NtDYyLohzGJU9NWrVxVWLu7d1bLrt3Ah8sQBCn3cy4lyoaLFgFgPur1m7TgKx+HLxQaJ0zgBs0vHjx8UdhZO6cWOgBopt6b
5CLcqqlavla93UPLBvanb37tsni8qJE6dEq8RjDNozPqMMsKb2kMIFNZyGOOnTnLtrl4dknjHlJ06elABlMtqGWTPHvP5yMmV8g/kWBI8zmJCMBhcH34gaFQb4rV69RnzEDYNp2jAbDjXdN27elkVw7RpbXaMtNfW8/vLr56tWrxZm0jfNsbUd4xNoEj5w0KYxNMf4IcKlS5aJhYVuKwx0ZWYKQ8Sa/u3MJmNSB5PBoXaQ7lxkjM+ePQ+a863WHtOeaUOZFYVpHekzbCwktFow4wqZMe+I947PE93w6NJH9xevC69hVo8cPSqawfv3balcOS4+c4cPHxVLA1MYcgHg88IJk9dEzR39ZEl8Fpl+l3Vohmf6YWpmiDPPe+rUKdGy8Tn38NjjcNewjpvXRY0x8eVzy1TU1LayDf37OdGyfz6jxMIQJ33GqJikBCxnW3lud3t4YryoVWZdrzEfFLSpcaO1j+6MVqLQTxcu63PONLJ8J5ipiddpiNfN+89z+0a8j7RKGmaFdYk37xUx4LUaomadzw5x5IJm3Bzo+sjz8L1lkgBmhGJAJbVo3hG/h0Is2QdxZNurV6+KVpT3e/v2HY4gT46DsUkUCvlMsx2JmmsubIxV844ohNDdifeSzwpjdwzjS4sf791uDw9xT6Hlkc8+XSMYBM73nPfl0JGj8jzRNWfTpi3y7vM55hzAVNQUeA0xToLvFjWkHBtdB4kjXf84txghiW5CzJ5jLHGmvfWX13jYLjSwnEwp29AN0Wh2WX7w4CHBjoIE51YyPWzLlOdsw20+49ZsdxwXYz2ssSkcJ79wzPvq27hoVaNLI2Px3lqy9JGhZZKINWvWOdx8mHWL7yWDm7m+XLx0SbaJh3fB91SEED8+c3yuaGFnYDuvgfEpvPcUuL273xw/g/tN8D2xYaKBchUqifWcz5DX+Zp1OD7jjsN9Mph0vWF/XonzDP3jzRzHOsSLLpNXrr5/DmgpPX7c9w8c8hmnFpzPnFdi/OXho95bZa11qbBh0hfOR1ailZRzB98NQ3zHOU8RA1oYSFyj+RzzufKKKa3F7MMIqKYfCp2cz/gs0wrBuZjrniFiYwKZTZn5JUZ08aMga4jzFp9PrinGRZHHOM/N8uNjwVQKsS+en1YePie8Hv7SAs2x02rFd4LPlHmfuD4YpRzPxXtIHMkT0CWMY+H8SuGXrnD08GC/5H2sWaHY96JFi2V9MddjYr+IC9+/s+fOyTrJdYQCFjNF8T3js8N5lWsgnwWu+4x1JP9AMs89lTscEy2DXNv5/vG+8V0gz8R+ONZtO3aK9ZJj5fPH9/D8hfdrAfskz8DngPwXFXmcY/ms0PWUzzDnO8OrMJ6HmUc5h548fVrOz/WP7yfnUgq8xNAIZOb6+UtegNYgurUZ4nPOa/RtbjF19df5EPhiAeRnumRqHcjEKCkCioBzIECrHdOxWomLztHjPidZsNb90dsMdKZlyyvZGJkr6Nvvw2MUmoxbjtd2uv/1ECCDRmvZ51ClytUcmYw+p/2PaEPBcL4v7lc/Ykzf+5y0Fhirzvc+N8/HpBXMWGclKsioLPnZiBnoTNZAMzYqQXxzRTf19FcR+BoI/DICCN0uaGpk8CS15kqKgCLw8yNA9wa6bNCnnv7vdA2ghdNZiBr9bDlyi2sjg1iZUpwuUNSQ05WHrh1l7fFxzKI0Y+YcscY5y/U56zjv3b8v3/j5M8if4PdiPpboa0+feLpmci2x+rB/bB/fu97cefPBzFLr1m+S1LXf+/w/+nzMItavb39xzf7RigtaEehizVi0vn37iXJi2fJVkuTkR+Pk9fy03jA7I2OdOFam5l5ud831Wlf3FYFvgcAvI4DQvaOVe2t5iXzyRf4WAGqfioAi8GUIMMkEF8EmTZqJAGKCsr+s1+/XmhpXftCSaSyZCc2aqYaj4Acb+a0K+ljTDUvp2yPAIGj6tzPDm0kG8TFnpQvc5KnT0KF9R4lZtLr3fEz7H1GHyTUYi/Cjme8fce08J11z27ZrLylvf9QYrOel+5ObW0O4u7eW7yH9jNYPM166GNar6yZB6IxJpIuWkiLwvRD4ZQSQ7wWYnkcRUAQUAUVAEVAEFAFFQBFQBD4fARVAPh87bakIKAKKgCKgCCgCioAiYEdg6vSZkvBEAVEE/EJABRC/EHLi48xM4VeGLie+PB26IqAIKAKKgCKgCPwkCDBVM9MMm48+/yTD0mH8pAioAPKT3pivMayevfqCX0dWUgQUAUVAEVAEFAFF4FsisG7DRvkWWKTwEXHBm1TR3/Lc2rfzIaACiPPds48aMfNsx40TD9myZP+o+lpJEVAEFAFFQBFQBBSBz0GAH5xm5rjAAQIgaMBAkhnwc/rRNv8/CKgA8ovea36l158/fwgeNBg2btr6i16lXtb/2DsL6KqVLQxzcbhwgYu7u7u7tDiUAsW1uLtDcXd3L0WKu7u7FXd3p9j/1r9PE845FKcX2rdnrTbJZDKZfEkms22OElACSkAJKAEl8LsJ7Nq9D/+ECYvY0aLj37D/IH7c+Lh05drvbpae/w8moALIH3xzfrRpT5+/QK6cuaUTCBsyFJycyv9oVXqcElACSkAJKAEloAS+SKBixUr4J1RoJIwbD/Fjx0GoYMHRqnXbLx6jO/+/CagAEgDv/8zZ7ggdIiRiR4+BaBEjIUrEyNi42fbXpgPgZeslKQEloASUgBJQAv8xgW07diFyxEiIHD4CEsWLL3/hQv8tbuDnL17+j1ujp/MvBFQA8S936hvb+fbdB2TJnA18+RPHTyAdQbC/AqNcObWCfCNCLaYElIASUAJKQAl8I4EypZ0QJFAgJIwXT8YciRMklCXdwNu2bf+NtWix/zcCKoAEsDs+ZeoMhA8bDrGiRUMSn06AVpCoESNjzbqNAexq9XKUgBJQAkpACSiB30Vg1hx3BA70l8ScRosUGUkTJhLlZ6TwESQvTOgw2Lx1++9qnp73DyagAsgffHO+t2nPnr9AqhSp5KWPGzMWDC1ErKjRJK94sRJ48+bt91ar5ZWAElACSkAJKAEl8AmBUaPHokGDRpg0eSrq1q0nsaeMA+nSpRvGjp2A2nVcsWTp8k+O0wwloAJIAHoG+vQbgL9DhkbcWHGQKH4CCQajG1bC+AkQK3pMhA75N9w9FgSgK9ZLUQJKQAkoASWgBH4XgQ9WJ16zboMoOxPEi4/7Dx9b7dFVJfApARVAPmXib3M2bd4C93nzceKkF4YMHY6/Q4RAwngJ4LHAE0ePn8Bcdw/s3bff316fNlwJKAEloASUgBL4MwksW7bCIoDEjY9r12/8mY3UVv0xBFQA+WNuxa9tyMZNm6UjiB83Hh4+evJrK9falIASUAJKQAkoASVgRWCR52JTALl4SWe/skKjq74QUAHEFygBIWvJkmVmR3Dp8pWAcEl6DUpACSgBJaAElMAfSmCR5xJz3KECyB96k/6gZqkA8gfdjF/ZlMVLllosIHHi4YLOw/0r0WpdSkAJKAEloASUgB0BFUDsgOjmFwmoAPJFPP53p60Acsn/Xoi2XAkoA
SWgBJSAEvjjCagA8sffoj+qgSqA/FG349c1xnOxxRTKGJALF1UA+XVktSYloASUgBJQAkrAnsCiRRoDYs9Etz9PQAWQz7Px13tUAPHXt08brwSUgBJQAkrAXxHw9AlCjx87Li5oELq/une/o7EqgPwO6v/BORcvto4BUQvIf4BcT6EElIASUAJK4P+WgPs8D4k9DR/2H5w7f+H/loNe+LcRUAHk2zj5u1KGKZQ/QKiaCH93+7TBSkAJKAEloAT8FYFNm7Ygb74CKF/BBVeuXvdXbdfG/vcEVAD575n/J2ec5zFfNBGR/42EM2fP/yfn1JMoASWgBJSAElAC/58EXr/2xsPHT3D/wSO8efPm/xOCXvU3E1AB5JtR+a+Cx0+cRJeuPTBg0BDcuXvPfzVeW6sElIASUAJKQAkoASUQYAmoABJgb61emBJQAkpACSgBJfCnENi+fQe2bt2Ga9dvYO/efVi2fAUOHT76S5v36tWrr9Z36/Yd3Lh586vl7AvcuHETz5+/MLPfvXuP1avXYtXqNfDwWICrV6+Z+2gN8S/p7du30tS79+7htNdZvH33zqbp3t5qzbEB8os2VAD5RSC1GiWgBJSAElACSkAJfI5An74DkCVLNnB6/NKlndCseUvEjhEbNWrWxo1btz932DfnHz95WoSbrx1QqJAD8ubJ/7ViNvufPnuOefM9cf/BA8m/e+8B8ubJh4wZs6B16/aoWLEy2rXviKlTp2P3nn148fKlzfF/4saTp89w8PAR012sd+8+4rq+Z99+s7mHjx7Htu07zW1d+XUEVAD5dSy1JiWgBJSAElACSkAJfJbAhAmTZJA7ZcpUKWMMeqtXq/HZY75lx9p1G9CkWatvKYrVa9bB03PJN5U1CvXtPxCrVq+VzbPnLyB+vIRIkyoNbt++YxTBgUOHESF8JHTr7mbm/ckrrVq3x6LFS80mXrx4CYOHDsPlK1cl7+atW6hVux72HzhkltGVX0dABZDvYLlj525UrVoDNWvWQZOmLfDw4SMx1b148RIHDx1GnTqu6NtvAJ48ffodtX5/0bnuHqhduy7u37doIr6/BmDgoCGoVr0G5i9Y+COHf/GYV6+9bWbA4JTAXbt2xxsfM6dx8KvXr41VXSoBJaAElIASCPAEpkybgaB/BcH8+ZZv76Wr1xE4UCBkzphJrp3fy6pVq2P/gYN4/OQZKlashBkzZplcevToCQeHouCvjhtp7bqNyJQhE+LEjIUaNWrj6LHjsuvZ8xeoXasunMo6mz9ITG1+v34DsGfPXikzf8EiVKlSDavXrBUBonLlqhg+fCQ4rrFOAwcPw5mzZ+U7njlTFgQNFARnzn061a7XmbNY5LkUL19+dAXjmMXRsTiWLlshVVJoad6iFbp26SZjhYEDBqNWrdrYu++A9SnFHap06bLo22+gTb79xsZNW1C8eClMmzbD3NWrVx/Uql0H+/YdEDex6jVq4cTJU7L/xctX6NrNDZykJ2f2nJg5a45M1tOrZ2/Mnj0Xb968xdPnz1GoYGGECRkKtWrVwfKVa9B/wEA0atwEo8eMk3rWrF2HsmWdsXzFKvO8uvLtBFQA+QZWb9+/R/36DZE0aQp06NgZ7u4e4u84asw4FCteCrv37sPtO3cRL0480WzctNIIfEP1312kc6cuCB8uAq5e+/Fp7ub6zNddplSZ7z7/1w6YNdsdvfv0N4v16tUXmTNlxf37D828du074c7du+a2rigBJaAElIASCOgEJk6aiuBBgmHYsOG4e/ceqlapJuOGbt17yqXTkhE8WAgUKVIUCxYtRZkyTmjevKXsa9asJfLnK4ChQ4chW7acoPDAtHP3XoQJGRo5s+XAlKnTcOPmLTx4+AhFi5VE585d0KtXbxRxKIYnT57i/IWLcj7Ww3Tw8GGEDf03/gr0FyZMmoo6tevI/q3bProdrVu/ERs2bpHy23fukv01qteU7a/9Gzx4GCpUqITx4yegVGkncND++vVr1KhRS+op71wBM2bOkfVqVT9agTZs3ITiJUpj1KjRqFmrDgw+9ucbPHgo8ucviEmTpsDFpQrmz7cwad2mndSZMnlKuC/wRPJkyZExQya89vYWIapp0+ayv0hhB+zcuQt3791HsiTJJO/UqdOgIpWCVrzYcTF67DicOHka3Xv0kv2VXCpLM06ePAXn8hVBAUjT9xNQAeQbmNWqWVseugULPW1KX7p0Ga3bdMDkqdMlv3at2kgYNwEeP32Gw0eO4vqNWzblufH06TOsXbsOj588kX2UtE97nTEtBteuXcduH80EC5w9ew6nTp8x62EQGP0rqXkwAqe4k76MS5cuN7UcxgGvXr3G2rXrceDAIXhbTYv37v0HRI8SHdWrVTeKfrJ8+/Ydlq9YaVMn22OYJ2/duo0jR4/h3fv35rGbNm9BjKgxUM6pHC5fuYJHjx9L+8+dvyjt5TFNmzRD5IiRsWz5Shw6fATHjp+QDvPMmbNSz4MHDyWo7fr1G2a9uqIElIASUAJKwL8TmDRlGmJEi45ixUrA1bUBsmTOijlz5+H9B8uVPXryDLly5kHiRElw4eJlyaS3wMyZs33GIZYBdoP6DRA8aHAZOLNQnJix4VyuvImHStNAgQLhyNHj8j3lOoPgmVKnSoPSVsrHQgUKIXiw4Lj/4CG2bdv+yXiHM2qOHDVGjl28dLns53fcOh09dkysNvfuPZBxwcVLl3DuwkUEDhQYTRo1kbqjR4mG2LFiy2EbNmyUenr0sLhrpUyWAo4ORc0qM6bPiITxE+Dxk6do1rSZlD1xwmLBMAoRWaqUqZEhXXqp39mpnJS7d/8BTnmdRfCgwVCmVGkpXs+1PkIFD4m7Pp4js3x49rGyrtStW1+EQ8NSQq+WJEmSgfeE6e27D0iaOBnKlC4Lb29LkL1x34w26fLbCagA8hVWh44eR/AgQZEre05fS968dRubNm+TfVUqV0GShIkwe64HnMqVR/y4CbBl63bzuAePHonrVoYMmdG8ZRtx36IA4lqvoQzIafYcOmI0YkSPiYb1G8rL3LBRU8SMEQuzZs+Veg4eOoJsWXOgSKHCuHnTErRGSb15i9bIn78QYkSPhf79LeZKCkgMeqtcpRoSxE+IZs1agIIH042btxEpQkTUrlnbbJ/1Cme34MuYPl1GMTFeuGj5NfVWrdshWpSo6OHWC0OGj5K2tWvXAR8+fMCHD0BFl8rSAWTJnAVLl63E6bPnkTdPXlSpXFWq37Fzl1xr1EiRhMXQ4aOQI0cuhAkdBs7OLlLmxImTyJApCxYvWWbdJF1XAkpACSgBJeCvCQwbNgLx48bHsmUrZdao53auThy0J0uaHNXtLAxUcFKI2OMfmvOTAAAgAElEQVTjptS3b3/Z3rff4rYUPUpUlCppGWwTULYsWcG89h06o1btuqCl4eKly3j/4QOSJEqCck7OJseM6TIgScLEsj1nzlypl4IS07Nnz+RbbowBNm3ZJvsbNmhkHs9RxeSpM5A7d14RGrJly4EFizxx4OBhRAr/L/LlyYuGjZugeNHi6Nixsxw30ScW
xnPxUvD4+HHiiXXHqDRtqjRIGDc+WrdpL25kFSu4wFBSGmWo7M3GoP44ceFarwEqV6qMsmWcxCNl0+at0s6OHTpJ8dIlSyNqpCjgDGBMI0eOkv3kYyRaNkIFD4FTp70ky8WlMhImSAQG9xuJ7la8DwsXeeLp8xe4c0c9OQw237tUAeQrxBi7wIeNkvDXkmtdVyk7dtwE7Nt/UNZpfmTi1HWZM2cVVy5uFyniKPEkXOf0dTxH3rz5QH9QV9d6st2wYWMZwCeKnwBpU6eVevivUkUX2U+TIVOxYiWlfXyJ+/Xtjy5dukl+ixatEO6f8LLet19/0ZYYpkK6jNH/sWaNWrLf+h9NlM7OFcxZMlzr1kO+vBZz7cpVq+XcfOmPnfRC3tx5kCBeAty6bXkJt2zbgb8CBUKH9h3MKlOnTIV//g4LwzWtVIlSiBkthvxYEQutXbceQQMHRVHHYuYx/PFEmmk1KQEloASUgBIIKARGjhyNOLHjmvEI9td15dp1sVA0sBrgs0wzH5chegwwtW7dVr7FxmA5YfyEKFmilFldlkxZ5Nv80T/Bsuvp85dInjS5CCRG4WyZsyJB3Phg2QULFkq9kyZbBJAlS5dj5KixRlE8fPQY6dJmAK0Zt+1+Y2zSpMly7NChw6W8MQ5q06adebyxwlgWjnsYc8GxS6IECVGwYGFjN2JFi4GC+S3jDjPTbuXlq9dIniSZCCF2uzBrjjuCBPoLTZo0lV0VnCsgWuSoYilhxtRp0+X8nTpbxkvMq1atBkIFCwHGsTC51q2PRAkSgx4cRuJsYCmSp0S6NOmwY9de0xJi7NfltxNQAeQrrOrVayAPaX27zsC3w6pWroKggYPIQJuSOl8uYzYIL6+zMjAv6uCI8RMnywA8VoxYUg1dslh27Njxst2oURPZXrPGMuNEymTJkT5NOvOUjRs3Qdi/w8j22XPnpOz06RY3MLMQAPonco7uA4eOwNHBEUH/Cmz6jIoAEjEyatb41I+TAfWBA/0FmmVpdk2cMDHSpkkvVW/ZshVBAgVGu3btZZtm3CiRo4BT1THt2LVH2sPgMiMVKVQEf4cMjd17LVPb5cmdB3FjxYa15sfgbJTx60B+o226VAJKQAkoASXwXxHg4Dzs32E/O7XrpctXEStmbNNrwGgX3bojR4qCIUOGibtWqpRpULJEabzz+c2KJImSonDBwrh245bkMUid44ohQ4ZKFS9evABdsl+8eo3YMWOjqONHd6cUSZMjXJiwePcBmDvXXY6bMsUypqCrtH2Q9cEjR/FP2PBIkzot6FZtJMN6MnHSFMm6d/8+UqZIhXBh/8G585aAdWNKW8MCwZgVpsgRIoo7GoURpmrVaso4hEpPpn0HDuKK1e+MSCaAgvkLSnuNGbo2b9km5Vb7jKvq1q4jRYs5FBXh4s5di+LW4FO5cjVwgiGmCuUrSF38IWemli1aI0qkqDKGkgyff6NGj0XwoCHEhdw6X9e/j4AKIF/hNWOWJTiqUMFCny353icGolrV6ggWOCjOX7iEw4ePyMvTsmUbOY5BTRzU16pZCwsWLgJnujhx0vKQu/sEhNMFi6l2LUsQ2IoVK2U7RbIUyJo5i6zzXz3XeqYAwpky2MnMmDHT3G+9smXrNtDNiTNphA/zDxYttsycQTNkpH8j+eqCtdnHdEmNy8JFizF//gKJ02C9nLqPFo6WLVuLtoRmXGoMjE6F7mhsT7u2HzUeBfLlR/iw4XD0mEVzkyN7TsSOEQsPHj02m7p9xy78HSqUBImd9jpn5uuKElACSkAJKIGAQKB33/7IlCmrKPVy5c4HBnfbJ86wRAGFSr9Fi23dkHfu2oOcOfNIADoDuznLlZE4vW+0qNHB2aqMMQndtKJHjY4yZcth2vRZ8nsXm7duR9w48UTIOXLsBDZs2ow4seJIXEq/AYPRqVNXhAwZCnXr1sP2nbsxc5bF/ds4j7G8eOmKuEwxsLtFy9aoXccVLpWqYtz4ieICZZTbu/+guDGlSpUGjRo3w+Ejx8AYjVKlyyJC+AgoV648xk+cIl4eFLDoksX06MlTFCtaAlEjR4VLpSqYOGU6HlmNGYz6KZgxXiZu7HioXKU6ODkQY1maNm2BkKFCo3ixkhgxehwKFyqC0KFCo2OnLnKoBOk7FkO8uAmkTZcuXxHXNwpLg30sOGvWrEPYMP+gQ6eueG8V7MH7NmGiRcgy2qHL7yegAshXmNFfsnixEjKo5stvnd6+eSMD79s+sznVd60v0+vxxaG/JAfivXr3k0M4YxW3W/jMZmFdz5IlS2Xf5MmWecENCwgtI0xpU6eROArjmBbNWyCCj2sVX6IQQUPY+HM+8Qlwp0WFAWB79x3EwIGDxTqzboOlw/N++w7RokRD/Xr1jWrN5cXLVxAqRCiZms/M9Fnhi0cBpH37jpJToUJFRAgXQWaIYAbny+Z19u1juW7mORZ2QMTw/5plcufKjXix44jZ1drNiuZjHrtrzz6fs+lCCSgBJaAElEDAIMAgcAZ58xvLyWHOnjv/yYVxUhoOqg8dOgwvr48T0BgFT546LUrBp88sgdFGPpe7d+/FhQu2U+PSo8HDYz4uX74iRa9dvw7WsXfffnBSG8aK0q2Lbkf79x/AuXPnZWKc48dPiCXhS7NtcqKa1avXSJD70qXL5OcIrNtjrN978FCm+V23boNkse2cbIfnPnDwkASt0/JDhep5H0sJC759916uacGCRfDteo36+aOIjC9duHCROf0v6+I1cRIfnosWGFo29ln9yOCjx09gxNA8fvxY1nnMaS9LDAjrp8BkWESM8zGwf/+Bw8amLn+QgAog3wCOPn/Fi5dE4sTJMG78BCxevAQrV67GXPf56NGzj2gVrt+4idQpU8sAevrM2ZjnE9dRsEBh8CFngHbnzpZ4kpYtWmLZ8hVY5jN3tJubZWo3Bp0xTiJf3vxSz/gJkySIi36MoUOEkk6BPo/ZsmaX/QsWWGblmjpthmzTLYpmyK0+v9rZokVLye83YJDMDc7BfbdubjLwZzlusy76dNqnmT7T4nH6P7pxHT1usdYMGz5SjnMo4oD9Bw8hWZKksj1j5mypgrN2sN78+fLLjF+0cjDgjXlTplrm6G7brr1sj58w2Zz9iwfT15RTBZKXJiWgBJSAElACSkAJ/AkE3r97j1OnvOSX4CmknDmrnho/e19UAPkOgmPGjkelSlXlR2k4fR59DQ1T59ZtO8Cg8SaNm4C/e8GpeWmpcHWtb/Ogcs7qokWLoUHDxjInN2ePGjR4GBhwxtkh9uw7iJ49e8txNEWOmzAJjRo2khkxOM3utes3xbTIHyLkD+kYafKUqSiQv6DMGMFp65gYAFazdh35EUDOntWmdVv06t0X3t5vMH78RLFw0Bx52ssScGXUZSw57R+DwvijizSZMjEmpK5rPXTv7oYpU2eKJaR+/QagEMREMyVn4aIwdfnKNTHF8rdTqlSpCkNIoTaE19++QycbsyZ9PxnEbj1dsFSq/5SAElACSkAJKAEl8JsIcMZSzkiaNl1G3PGZAOg3NSXAnFYFEH9
3K40QrR9vOIWePzHR9Gz966l/Yhu1TUpACSgBJaAElMD/H4ERw0eCv7D+8tXHX3n//6Pw665YBZBfx1Jr+gEC/A0TWkLmz1+I7Tv34O69ez9Qix6iBJSAElACSkAJKAEl4F8IqADiX+5UAG0nXbacnSsiTux48sulEqASQK9VL0sJKAEloASUgBJQAkoAUAFEn4LfTuDN27cyK8crNWv+9nuhDVACSkAJKAEloASUgF8TUAHErwlr/UpACSgBJaAElIASUAJKQAmYBFQAMVHoihJQAkpACSgBJaAElIASUAJ+TUAFEL8mrPUrASWgBJSAElACSkAJKAElYBJQAcREoStKQAkoASWgBJSAElACSkAJ+DUBFUD8mrDWrwSUgBJQAkpACSgBJaAElIBJQAUQE4WuKAEloASUgBJQAkpACSgBJeDXBFQA8WvCWr8SUAJKQAkoASWgBJSAElACJgEVQEwUuqIElIASUAJKQAkoASWgBJSAXxNQAcSvCWv9SkAJKAEloASUgBJQAkpACZgEVAAxUeiKElACSkAJKAEloASUgBJQAn5NQAUQvyas9SsBJaAElIASUAJKQAkoASVgElABxEShK0pACSgBJaAElIASUAJKQAn4NQEVQPyasNavBJSAElACSkAJKAEloASUgElABRATha4oASWgBJSAElACSkAJKAEl4NcEVADxa8JavxJQAkpACSgBJaAElIASUAImARVATBS6ogSUgBJQAkpACSgBJaAElIBfE1ABxK8Ja/1KQAkoASWgBJSAElACSkAJmARUADFR6IoSUAJKQAkoASWgBJSAElACfk1ABRC/Jqz1KwEloASUgBJQAkpACSgBJWASUAHERKErSkAJKAEloASUgBJQAkpACfg1ARVA/Jqw1q8ElIASUAJKQAkoASWgBJSASUAFEBOFrigBJaAElIASUAJKQAkoASXg1wRUAPFrwlq/ElACSkAJKAEloASUgBJQAiYBFUBMFLqiBJSAElACSkAJKAEloASUgF8TUAHErwlr/UpACSgBJaAElIASUAJKQAmYBFQAMVHoihJQAkpACSgBJaAElIASUAJ+TUAFEL8mrPUrASWgBJSAElACSkAJKAElYBJQAcREoStKQAkoASWgBJSAElACSkAJ+DUBFUD8mrDWrwSUgBJQAkpACSgBJaAElIBJQAUQE4WuKAEloASUgBJQAkpACSgBJeDXBFQA8WvCWr8SUAJKQAkoASWgBJSAElACJgEVQEwUuqIElIASUAJKQAkoASWgBJSAXxNQAcSvCWv9SkAJKAEloASUgBJQAkpACZgEVAAxUeiKElACSkAJKAEloASUgBJQAn5NQAUQvyas9SsBJaAElIASUAJKQAkoASVgElABxEShK0pACSgBJaAElIASUAJKQAn4NQEVQPyasNavBJTAbydw9NhxbN26Ddu377Bpy7Xr17Fz127cvXcfN27ews6du3Dk6DF4v3lrltuzZy+OHDmKJ0+f4+LFS9i9ew8OHDiIy1eummW+tuJ15ix27Nz1tWLm/tevvfHo0WPZvnP3LjZs3ISDBw+Z+9++/dg+M9Nu5e7de7h0+QruP3iId+/e2e39ts03b958W8FfVOrBgwe4cvWaTW03b96Sa7//8BGuXb8h/A8dOox3797blPuZjffv3+PkyVM4c/Ycnj57hg8fPvxMdeaxrOfDhx9vJ6+Tz6MmJaAElEBAI6ACSEC7o3o9SkAJmARu3rqN9h06IUvWHKhYsRJcKlWF5+Jl8Fy8RAabW7ZtR8GCRZAhXQbkypkbDRs3ReZMWZAje054nT0PDkPbte+EPHnyI02qNChUqAgaN22OylWq49/wEdCwYWM8f/7CPJ9vKytXrUaO7LmQPn0mdOve07ciNnn79x9E/wGDRcA5c+YsqlWvhSJFiqJzl+7S9tlz5uHR4yc2x/i2sX3nbiRPllKu6+btO74V+Wwex9+HjxzD06fPPlvmW3a89n6DRYuX4lsFGc/FS9GxU1ebqrfv2IXChR2RIlkK5M9XAPUbNEb6tBlQuGAR3L57z6bsj27wet169UH06DFQo0YtvHj56kersjlu3YZNpiBps+MrG97ebzBpyjSULlMOEcL9K/eQgqQmJaAElEBAIaACSEC5k3odSkAJ2BCg1SNB/ERInz4j9h84hLfv3uHN27dYumwFcubOh/ETJ0v5CxcvI2K4CMieNZsIHDt27ESo4CEQL258nL9wUcrMmeuOQIECoXHjJuBglQJA4UJFJG/o0OE257XeoPY63D8RMGzYcNHex4geE82atfisRWL/gYNIniI1li1fIdXky18Q4ydMMqscMGAw0qRJ/4mVwCxgtUKtfsZ0GRAx/L94+uy51Z6vrw4dPgoUdH42jRo1BqVKlf2map49e47lK1fj+ImTn5TfvHmbsC7qWEwsH7t27UHoEKHk3p45e/6T8j+aQSGUwiitXT+bZs6ei959B/5QNStWrkb06DFx7vwFrF+/ASWLl0Ldeg1Bq5YmJaAElEBAIKACSEC4i3oNSkAJ2BCg5aJQwcIIEugvXLl23WYfN/btP4ghw0ZK/oNHj5EoQSKUKlnaLFehQiUZ8FL7zrRmzTrZ7t27r1lm3PiJktepUxczjxaLZs1bYpuPq5erawMEDxIMO3fvlTK1ataSYwzBxjwQwPMXL0TjPX7CRMl+/uIlEidKgl49e1sXw+SpM3Dnzl1cuXoVzVu0wtAhw3D77l0RrNq0bYeVq9aY5QsVKIRC+Qth7/5DaNy4KXr0cMNrK7eqkydPo24dV9Sv3wg7d+0RAYzXFSdWHGTMkBETJ03Bw0ePMXrMOHTv4SYD4CdPn6Fjp87YsmWbnOf8hUtyzQ3qN8Tq1R/PPXHSZIQIGgwJ4yfAyFFj8dLHqnDm3HlUrVJd6jYbCoACY3c322s19h8+chQRwkVArVp1jSyUKVNOWC5evBQPHz9Gx05dMH/BQtm/cOEiNGnSDC9fvpTtNevWo1q1Gujarbt5vLGyes06sWRt27EbdevWg4NDUREQ6T7F+z1w4GDQkrN123Y0b9ESU6dNx2tvb+NwrF6zFlWqVEeTps2xd98+yZ8+YyYihA2H5EmSYczYCWZZ6xXvt2+lfl7TtOkzzV179+1HlsxZETp4CDRt2lzuCXfyeso5V8C79z/u0mWeRFeUgBJQAr+ZgAogv/kG6OmVgBL49QTWb9yMv0OFRtbMWcwBnPVZ6BJ04eIlyTp/8TISxk+IIoWKyMBy3dr1CBE0OJImToaLl65ImWXLViDwX3/JgJDuMUxly5ZDpgyZcfHSZdnmP4/5C2VQ3KOHxdWqatXqsk03JKaePdxEIDl2/FMt//TpM5ErVx6zLq64uFSW40uWKIlp02fY7Ltz9x7y5skn++kuxRgTWmlKlypjlnMq44SoUaKgZ88+6NdvAP4J+w8aNmwi+w8fPoIsWbKjT99+GDFyFPoNGCys2HbWU6lSZSxZuhwvX71G6zZtJW/goCE4dfos4sRJAHeP+SKE5MyZB2PHTUDxYiXE4nT2nMUiQQtAuL
/DIEumLFiw0BNv3rzF8uUrUKiQA9zceqKiS2XMmDnbbCutVMuWrzS3rVcoHEUIFx4VylcUQW3d+g0IGTwEsmfPicdPnuLJ06eIGjkqkiZOCgoSnbt0Q4zosXD9xg3s2rUbmbNkR89evZEvX35Uq1bTrJr1/BMmHDp27IzZcz2QIF5CODtXwHuJ3QCqV7cIjBTqDh46LAyiRYmGZz5udxS4EiZIhJ49e8PRoSgcHYtL3SNHjhbhq6hjUbl284Q+K69ev0aFCi7ImSM3hg0bgUwZs8DNrZfs9fI6A2fn8ogSMRK6dOmG23fuSj750DrGmCVNSkAJKAH/TkAFEP9+B7X9SsCOALWzt27fwa07d8FBqn/+Y+zCg4eP7K7w65u9evWRwWLFCpXw9t2XA4rpgpUtSzbRVjdu2gIZM2SWweGlKx+Doem2xcF0poyZUKa0k/jkDx1usaBYt+bhw0fYu++AWCiYf+jwUUQIHwH58+WXmI7GjRojRJBgoBXAPlFwyJs7r032w8dPUKlSFbkWCgW5c+Ux3cJYcNq06bLv1GkvOS565GhwqVjJrKOck8VKYFgGXF1dRQBigb1798mxi5dYhCMjqHvNmrU++cvMehgEz/O3bNUGL168lGeKO6dOnSYxM1yfNWuOlBkzdrwc9/b9BxHsKDQYqUghB8SOGQvv3n9Am9ZtpDzjdHjuzl174MFD3+Mc9uzdjwTx4iNdmnRo0rSFxIDwOp89t1g4WH/1ajXELWvMuAl49dpbhJJLl68ierTocCpbTpqwdOlyOadhoSqQvyDixoor+ygU0F2NghSPZ1q2YjWC/hUEUyZPkW2eP2niJBKkLi5uGbOgnmt92cd/Bw5YJgrYvWcf6G43aPBQc5/1Ci1r5DnRx72uWtVqiBwxsgh7LDdnjjvC/xNBJkYwjqPlKl/eAiLsGHnfs6TQRNYf+4P7Vuv+u5/4eE1fvw5OOMHyN2/dwbPnP+9q9z33QMsqASXwkYAKIB9Z6JoSCBAEtmzdhiSJkyJVipQyqE6SMBGSJkqCpIkS+yy5br3NfGOfsW4sv1bWer91Hb7lG+c06jbK+1bWUuafv8OIRvp7bwy1yhzgMU7jy+IH4HXmHNKkTotGjZrg2LHjMDT41udc5LkYQYNYBqLky7o5eP2WtGPnbjB2oWmzlkiTKjUyZ8rsaxB56pSpbASQl68+BkIfPX4SlVwsgkiCeAlw+44lqHzggEEIEiiwBNSzLZH/jYga1T9q+Is5FkOEf8Jj206L1rxFs+YIFTwkLvnM4DVmzHi5ljKly+LZC0swvbv7PMmzjj2hSxOvefacuZ9cMi0k7h4LRSgLHiQoJk6aKmU4S1isGDFRptRH1zaHwg5IljgpmjZviSpVqsHRoRg4QxjvEYPsKcD5ljZv2YqIESKCguXp017wOnPGphiPL1LYAenSprOJd6FbF9vdoX1HKb92/UaEDhkKbdu2g/fbd0iXNr1YnXg8/1ImT4kiRRxlHw+ghSZUiFCYO9cdr7zfIlWKVPJecd/BQ0cQKNBfNu5TRqMWLlqMSBEjo73PeY18Yzlw0FAEDvQXPDzmS1aTxk0RXu6TxeVv1KjRIoAwJshIdANjjAotND+SBg8ehr9Dhkai+Al96Qes30lj3XgvjaWR/y1L4103lp87xn6/9baxzqXx97l6mG+U962M5XhLX5hYJpSIEzOOuBb+CEs9RgkogZ8noALIzzPUGpTAH0VgzZr1+CtQIIQOFlx8+ePHSyBa59gx4yB2zNif/sWyy+O2/FmVN8rEimPZx3qMPFn/Ur5VPcb57eu3ybe0J07suDJ4LOJQ9Lv5XrtxC7FjxJKZqp69+Kglt67ooY+23evsOSRNklzclKz3W68vXrJM2rJ0icUqwJgGDmydnJzxniPXb0gbN22WgHS6afmW0qZOi3xWFhAGsJ84abFsGOUZ70CXoTVr10tW//4DpB3nz1uC5aNEjIxatWoZxVGiWAmxeKxYaYnNaNG8hZSn0GUkd3cPxIgWA2XKOkvWpk2bEeSvIDYxGp6ei+W4xYuXGIfJkrEoFHgoyIybMAn/hPkHU6dOl33UMMeOERtlrFzCaOHJkS2HxFRYV8QB+5Gjx62zbNZ37dkr1z15skW4sdnps0F3tAL5C9jsOnnqtLS7i0+cziLPJcKjT59+YoVhnAtnPDNSssTJxLXO2GYMTIigIbBokadkUWBJlTylrDOOh0JDzZq1jeLmct36jYgWJbrEpZiZVivjxk8SxkuXLZdcV9d6CPt3WBz2YTBp8hQRYChAGentu/fIn78gqlsJmMa+b1kaVsG4seNKv2C+v3ynjffvS0t5Z42+warPMPKNY637BebZbxt5Rr5xfvtto5xRr7HNcr79GeWkj7Lqj4z8mLERP258xIsTF2FChpTngm6JmpSAEvg9BFQA+T3c9axKwM8IbNq8FWFChkbMaNHhscATp06fkSDoXXv2wT/97dl3ABs2bcXe/Qd+iBX99oMGDooK5V3wxup3MPi7GgMGDTWtBjdv30XcOPHQrl2Hz55n1WqLW5L1AJgDQQohgwZ9dLPhoLRL127Ytm27WRflkyFDhspUsi1atv6sRYYB6/nz5jePO3vuAjJnzo6hQ0eYeQxQTpc+M074zBTVu7fF1Yy/YUKLCdtTq8ZHASRPztySd9rrrNSRL18B0aLzd0T27T8AzrbERFelFClSg/Et/O0J1jN69Fjcu29xiVqxYoXkUQCyTsagdt2GjejatbuUoSsW0ytvb0SPGk2EIOOYCuUriHBszFxF68f2nbvQr/8gMID/c4kxLkH/CooRI0Z9rogINpkzZrbZ/+LlS2TMmNmcYKBrl64IHCiwaQGiFSZM6LByDIPrw/0dFuHDhjN/s2X+AovlhwLI1es35frSpEhlnqNmjdqSd9ZHALx8+YpYcXh9jBVp3rylWdZ6hS5ztJ4M93Hjy5UzF5IlTW4WGTBgoNRrLYBwJrO8eQtg3Djfg9rNgz+zQssepwXed+AQdu/dL30BJ0fwT33Cz7SV13royDEcPHwMdV3rC98RIz51o/wMPs1WAkrgFxNQAeQXA9XqlMDvJrBpyxaEChYCUf6NZBMg/bvb9TvOv3HjZuTMmRuOxUqgUeOmqFe/IcZNmIx5HgtBH366UxUvUQrx4sRD0iTJQNcY68RBffcePcGBe8QI/8rveQwYOESKvHzljdSp0ojmuq5rAwlgX7tugwxsunW1zLY0fMQoZM2aA1kyZ4OHxwLrqj9Z59SymTJlk+l6uZPB7ePGTwBjTRo3aSazOLm4VLUJamYwe4rkqcBYlznzFiBe7HiIGjkadu+xzLpFdx0KElOmzhAByLm8C65evyHnpjBTqrQTatSoKYHpc+bOw/v3H+S3P4oVLYFkyVLiyLETMn1x9RqWYOyCBQrbTAE8bdoMESjKlSuPvv0GghYY1mnE7RQrWlx+x2LxUoumn4ITB9pJkyZHzVp10KffABkUM8bDmCXLHgxnLCtatAT+DRcBWbNmR6fOXcEZwqzTwUNH5V6ECh4Ko8aMs96F8xcvo
XyFSihd2glp06TDuHGWGBUW4g81ZsyQCY6OxeDWq68M8CtXropjx09IHddv3pL2pk+XAYOHjgAFnAUOViQAACAASURBVDChw2CijyXm+vUbyJY1O5InS4FOnbthw6YtEntCAY+WFVofDx4+atMeY2MI68ucDS4uVZAvX0HQzYyJ8SGJEyZB8MBB4eRUHhREmVasWIUIESLh2DFL2yRT//0QgeEjR8t7Oniw5V3+oUr0ICWgBH6KgAogP4VPD1YCfx6BTVu2yhSekf+NBE5f+v+e+Jsd9LWfNWu2TKFqHeNx/PhJzJw1WzjxBwNp6bBO3m/eYOFCTzAGxMvrLJYvXynT3VJ4Ybpx8zZWrlyNuXPngUHMz58/By1Q/PVuplWrVoNWk5cvX8v21/7VcW2AKlWrSzHrXztn7AUHzkd9+VXsK1evy/Szp73O4Ny5C+CMXUaMxFWfKYj37TuACRMmyS++W7eB5UePGWu6GBn7OPUu3ZVev34tv51C97H9+w9I3betftSQvxq+Z+8+0G2LMQrU7K9bt8H88T1aZsjVGNCz/us3bsoPQY4fPxEcwDO9sop3MdpgLHm/FixYJL8IvmLlKrlW/lK8deKvp+/Ztx+c1Wr7jp3Wu2SdwdcTJ07Cps1bPtnH39agIMVZxG7dtsw4ZV2IguCChYtw6tRpEazoXmUIeCzHuJUZM2ZhnscC8LdMjHTx4mV5nm7cuGlkfbKk0DFp0hSbuKMTJ0/J78AcOnxElrwXTHT34xTPmn6eQA83y0xvgz8zScDPn0FrUAJK4GsEVAD5GiHdrwT8GQEOgPkbApzG85AKIP7q7t29dw81a7tiwqSp4lLlrxqvjfUTAvw1+s5du6Nd+05+Uv//Y6U93Hr7WEBsLZ7/jyz0mpXA7yKgAsjvIq/nVQJ+REAFED8C+x9Vy5iR+Qs9cfzEqf/ojHqaP5kArSTde1h+I+RPbqd/apsKIP7pbmlbAyoBFUAC6p3V6/q/JaACSMC49YabV8C4Gr2KHyWgz8GPkvv8cT3ceqkF5PN4dI8S+E8IqADyn2DWkyiB/44ANaahg4eUIHR1wfrvuOuZlIAS8B8E3NQFy3/cKG1lgCagAkiAvr16cf+PBAwBhEHoDGTVpASUgBJQAh8JqAvWRxa6pgR+FwEVQH4XeT2vEvAjAiqA+BFYrVYJKIEAQUAFkABxG/Ui/DkBFUD8+Q3U5isBewJGDIhaQOzJ6LYSUAJKAFABRJ8CJfD7CagA8vvvgbZACfxSAoYAotPw/lKsWpkSUAIBhIDGgASQG6mX4a8JqADir2+fNl4JfEpABZBPmWiOElACSsAgoLNgGSR0qQR+HwEVQH4fez2zEvATAkYMSBQGoesPEfoJY61UCSgB/0tAXbD8773TlgccAiqABJx7qVeiBITAJpmG1+eX0I8eUypKQAkoASVgRcCtZx/9HRArHrqqBH4HARVAfgd1PacS8EMCq1avkY9roECBsGfvfj88k1atBJSAEvB/BLr36Cl95KBBQ/xf47XFSiCAEFABJIDcSL0MJWAQ2LtvP/LkyQ9Hh2I4dcrLyNalElACSkAJAGjcpLkIIF27dlceSkAJ/CYCKoD8JvB6WiXgVwTevn2Hp89f4NmLl3j37p1fnUbrVQJKQAn4SwKcqKN7dzfs3LXbX7ZfG60EAgIBFUACwl3Ua1ACSkAJKAEloASUgBJQAv6EgAog/uRGaTOVgBJQAkpACSgBJaAElEBAIKACSEC4i3oNSkAJKAEloASUgBJQAkrAnxBQAcSf3ChtphJQAkpACSgBJaAElIASCAgEVAAJCHdRr0EJKAEloASUgBJQAkpACfgTAiqA+JMbpc1UAkpACSgBJaAElIASUAIBgYAKIAHhLuo1KAEloASUgBJQAkpACSgBf0JABRB/cqO0mUpACSgBJaAElIASUAJKICAQCLACyO0793Dp8pU/6h6d9jqDJ0+f/lFt+tnGnDh5Ch8+fPjZagLE8WfPnsfz588DxLV8z0W8ePESXmfO+pvnYPOWrTh+/OT3XOJPlz1x8jT4A5G/Il25eh23b9/57qo2b9mGQ4ePfPdx9gfcun0XV69es8/W7V9EgM/JmbPn/PxHRA8eOoxNm7d8tdXXb9zA2vUb8fQ/+nbt2bMPI0eOxuUrV7/atv+nApcuX8W9e/f/00s+cOAQvL3f/PA5b926jXv3H3zx+Nfe3li3fiPOX7j4xXK688cIcHR2+sw5vH792qzg5KnT5vrvXPluAWTnrj3o0aMnZs6cJZ3EwIGD0b/fAHTr1gO79+z9nddinnv7jl1IlTodRo8ZZ+Z9z8o8jwXo2qUbOnTshCNHj3310OfPX2Dq1Onyy6ojRoyCh8d8jBo9Br169cGwYcNx/8EjDBg4CJmzZJcPy1cr9AcF3r1/j2pVq6NggUI2A8/rN27C+82Pd1h/2qV/i3D16rU3WrRohdy58+HO3XvfdQkvX77C4ydPPnvM6tVr0LNnLwwcOATDR4zEIs/FmD9/AWbOnC3HnDt3Xp6zJUuWYe5cd8yYMRN79h7AmLHjMWvWbCxfsRLDh4/EgAGDMG7cBDx9aisgbd+5C127dkPfvv3x6tXHDuqzDbLbQSG/UGEHNGnawuY5sCv2R22OHT8RK1ev/ek2sUO/dfv2F+t5++49atWqg4IFCv+SAeXChYuQOHEysB/+3jRu/EQsXrLsew+Dt7c3njyxKE7Wb9iI9BkyYYbP8/fdlfkc8ObtWzx6/Pnn/kfr/S+O4wfdr3Qu9x88RJky5VC5SnV4v3nr6+U8fPT4l/SxnouXSD/h60msMo8fP4Ehw0bizp27kvv+/Xuwn+fyVyf2hQMHDUXxEqVQqHBRnDt/wddTTJ8+Ez16uKFb9x7YuHGzWca+TRxoDRs2AjNnzMLFi5fNcsYK36Mz587LJp/tkSNG4eix48bury7nL1gI9sF+mfi8jR03AanTpMe+/QdtTkXhYPKUaejbpx/27ttvs89648aNmxgyZKh8B+7d/zYhZvjIMeg/cIjZr0+ZOh2NGzfF0qXf3odQiOzUpTv27N1n3Ryb9RcvXsjztWv39/Vp27btkGfAY54Hho8Yhf79B6J37z42/cqVq9fQrbsbpk+fgekzZmLQoCHo06cfhgwZhpdW37vVa9eDY7epU6eha7ceOP+Z586m4T4bc909hIu7u4e5m+OAESNHYfy48TJeNsbGDx4+xKCBQzBp4iQMGjQYhw4flWOePHmC7j16okuXbnjy9JlZz8+usI+tVLkqypV3EUGS4xkXl8ooXrzkz1b9S47/bgGED4mTkzNSp0qL5s1bok/f/ujStTtat2mLjp2748bNW7+kYT9TyabNW5EhY2ZMnjz1u6t57f0Wnbt2R7NmzdG4STNQA/C1RO1QRZeqcOvZCw0bNUHiREnRuk07eaDq1W+IvfsPgoJagQKF4XXm3Neq8xf73717j/LOFVG4UBGzve8/AOs3bvGTD5N5kv9whZ3SkaO+f4yePX+BhZ5LpDVv3rxF02YtULBgka9qe+ybT2F54SJLPfb7zp67
gJgx48j7Nmz4SPTq3Q9JEidFoECBkD17TilOAaBkydIIGSw4ggT6Cz3ceuHAoaPoP2AQ4sWOh+BBgqFdu47o0LGzHOfgUBRPn1mEkHkeC5E+fSbUr99IOqn6DRrj/oMva6vs28gBQsGCheUc9vsC+jYtEbv3fP7Dyut/++4dXCpWRqGChX/JoHXWrDmIGzveFz/ov5o7LbezZrtLtatXr0WGDJkxY9acnzoNtZ3Tps/6qTp+18EeCzzBQZNfpKdPn6FUqbKoUbMOXn9G8zx+4hTc/4pW2S/aZtT57PlLbN663dj8Zctbt+9gwOBh5nXPmDELp0+fsan/xq3bqFKlGqJFjY4OHTrJgM1zyXK0bdcBPXv2tinLDQ6AHYoUlb5v5kzbZ3bipClo2aotrl2/jtmz5yJE8FBSLmnipNi5+9P3+tmz59ixczcePX5snmfW7LlwKlcB++0EA7PAL1oZOWoM0qXPiMNHLANWo9o3b99h/ITJlnYnSW5kf7KsUaOWlOnUuRs4CP5aWui5GC1bt5PnnEqyYydOYdTocahduy6yZMmOmbMsCjDresj64MHD1lmyvmHjJtSoVRdnf7Ggtn3HTnlX4sWNj6ZNW2DI0OHImDEzMmfKits+wrIIQJ27IWmSZMibNz8GDxkmY9Yc2XMhe7acoHWPaf2GTciTOy8SJUiE9h06f3NbqdBp0qyFKCD5HPTs1Ufqo5KA4+ME8ROhfHkXs7/ms8PvcbQo0ZAzZ25QQOZzNWOWOzp27orcufLK9/TmzZtSz8/+e/DwEco5V0DVqjVkXEYBpEwZJxQv5k8FEAK5dv0m6jds8onmllL2zVu38ejRY5uPLU3K7Fhfv/Y2eT5+/PiLgx1rU+PLly/x4MHnX5rbt2/D3gmoT7+BGDPW1gJCyf9LLx/P06VrD/CDa5+oWbl37x6ePftUOj1w8DAuXLwkh9y+cwelSjmZgzxm3r13HxcvXUaTZi3lxWBdz3xx1fF+7Q0ytE/WbbbW8Dx+/MTUStkfY7/98tUrUzBgHex07RM/qrwvRuK1vrdS9VELSs2lkfbs2Qsnp/KySXYtW7XB+AmTwHMZ6e43WASsr/nNmzemtpV18IUxBsxGnVyynPVxtvve4uYXBGEe99bqOngsr5UdrZHIvGLFyhg7djzoYmSdyG/ZshUoVbosqGFhOnf+Ipq1aIP79++LqwIFNPvEZ8PQJHMf35U6deuhX/+BeGXFzDhu9Zr1mDp9prGJk6fPIEL4CAgVPAT2WwnGJ73OIEK48IgVPSbuP3xklncqWw7BAgfF3XsWoWLosOHyEerdpx9evHqNGFGjI0XylGb56NFiyEfdzPBl5RUtNnaaa2qA2rbvhPeUQH0SB0gPrdry4uVLeXZevX5tmoJpQWC/YDxTZPMtAzu6H3Fg/y3pS88fz8928RHndTHxuWDfY+1K9/zFi0+etWPHjqNO3frYvGW7jeWI7w87fevEwUlZJ+evujLQFcHaesiPEDXi9qlatRpYvnylTTYtMdbPr83Or2w8ePAAtGj6ltiXc3DXpk178xnt0bMvFi9ZLsWt+ybr4/n+fc6ixjo7duyCNm07+N4PWllQKRDbP2/W5/FNYP7Su8/njRZr+8T+5Pp1y2DEfp+xzXuza9ce0R4eOnRELEPGPusl66LryecS3XB9c8fjs/j+3XsZ5HLwx7ZaJ9Y7f/5CODtX/MRFi/eQ/YuReD+fPnsm95XfXeO7wXti/R02yhtLvrf2z6+xj0u+G40aNcHMWXPkXWUb373/IH2kcQ5+C/h9e/eZd5R9nT1rttd93nyxfLBf9M3y/OTZc+TLV0D6sIMHbRWDrVq2Rpo06cB3yEhGv71jxy45xsNjgbELHh4LkSdfQdy4cUPORevg0mXLMXToCCnLgeg1u+eB97RHr77mt96ozH2eBwoXKfrF8QzLfsk7gBpw6++mUTdZsh9l19q6bQfQfdS31LFTV2n3pElTPtlNJVWCBIlRp069T/Yxw3qsxe3nL15i8NARuHvPYs3ndVsLDxQMs2TJ9kmfQcXv6LETfD3H1OmzPrvPOMB4foxtLr8maF+4eBlFHIvbjA9TpUiF0qWdrKtBnTqu6N9/kJnH94CCCz0XjDR79hzUqlXX2Pzqku/XqjVrwW8aE/udnLnyYfnKVbLN97xRk+bynbevzMnZBYuXLJVsultae9rwXhkeDvbHGdt3OJaw6x+4j/0A/6wTXW/ZDgpFTNu374BzuQrWRWw4+3YfbAr/wo3vtoDw3Dt37UVd14Z4YTVge+gjVfNjXq16LcSOFdsEXLNmbeTNWxAXfWIyNmzcLBrXrFmyoVOnznI5/Mh06tRVXEz27N2PPHnyY+iwETJAWLlqrUi17dq2l7LsnGguHTBwMFatXgvn8hVRrUYtPLQaGLEDtxZA6PtcoWJlZMqUBYs/Y0Kkewq1uSWKl8KKFZaHyGDNTrl0mXIYNXqskeXr8viJEyharMQnpmMOWOo1aIzDR4+Jaxg12NYmU0rB7ESoXeQH3xjM0X+ybr1G6N23v/jZ0x2C6eq166hdpx7SpkkPdp72ycJoE7p1cwM1BTTpG+4X5MYOpHYdV9MMyY9f8+atRPNGsyBdR2jmpjbBiKUZMWoM0qfPiC0+neCKFStRtmw5OTXNj8GDBhdNQ/8BgyWPQhc1H5OmTMeJE7773G/bvkMsB9Omz8TFi5dw8dIVFC1aXEyarIQCEM9Rv35D8xL54tV1rY/cefJh9px54o9uaAyoNezSzQ158xbAnLnzcOXqVRw7fsI8dszYCciRIzcqVapiDu44bO7arSeat2yDU6e85LkeN2ESokaOgnRp0mHYiNE2ne3lq9dRokQpRP43IsqUdYbX2XO4fPkqOnTqKh0SO/pyzhVtfFpXrVmHkiXLIFu2nOZHhNqouHHiIVvW7Jg241ON0mmvs2a7r16/gaSJkshHpp9VR8oC6zduRvQoURE/bjwRhJjn/eYdihUtjgj/hDe1QatWr5HjGzRohKfPXyJzxkyy7TF/AW7evoPoUaKjfYdO5jmNlcOHj4qJe9/+A/JOUgNGTaCRqJ1r176TqXRYs3Y9ChdxRKFCDmjWvKUUY6xQ+nQZ4VDE0YwfoJtYWafy8qGnG1utWq5iddx34KD5zBnn4JLvBIU13j8Kvgd9iWegUEF3MlocGXfB+0ALhPGROHP2LGrXdsW1a9flw5ApYxbkyJ4T1KIynTx5Clmz5QRdK5g4CKlRozby5S2Azp27SR7/Va9RC6FDhBJL1IqVqyX/1Okz0sdkSJ8Jo636CVoN+EE0BC2zEkA0mh07dcGWrdvFijZi5GjZzQGeo2NxEXInTp5q9gesgxqs1WssLmS83q3bd4qGq1jxkjh3zneXFS+vM2jUuCnotsA0ddp0dO7cVTSb9eo3kvvim/V64OBhSBA/IdKlTQ+6rjG59ewt79bmrduQPXsu6c+s9BSYPccdBQoWRslSZXDOF79u9iNx48ZHqpSppE4OzPr2HYBdu/fKILRJk+byLK1bvwnlK1QC79HQocPk3By4tm7THtS
qrl2/AVmz5rDp/+jq4eBQDFOmzsDhI8dEkORzS7cePi9ubr2RJ29+6UulQlE+PJfnNFfOPGjQsDFo3WQ6eeqUvA+nvM6Abrl58xVE8eKlkDhBIhQrXgpr1m4wqjCX1Lg2bdYSjg7FUNSxOG75KEKmTZ8h1smz5y/IudgXWPO+c/eu9A+9+/QXTTMFentBic8IteAJEyREXdcG5vHbd+5G1Wo15b2g2zATn5PmLVojVszY4i7NPLrMkiXdQyho00WYbidG4vtSoWIlZMiQSe6hkT969Bj07GWxLvD9Cxk8JPLkzodhw0eBz0fa1GnRoEFjUynAb2/9Bk1MjkY9XPJ7xv4xZ8488p2j9ZjpyrXroHU2aeIkcCxaAkuWWt5H62Np3aX1d/qMTy1n7MM5TjAEeD6DqVOnRb8Bg8H+KXTwEFi4yFOqu3T5MnLkzINtO3bKtr3gXreuK8KGDgtaOO2T+/xF8m20z6ebZY2ate2zZZtC0chRY5EvX0HR2F+7ZhF09x84iG7de8qz3KZNOxQq5IgDVoIVFW90ba1brwFmz/VA4ybNwfgY+8SxEwf49EYpXNgBVJgYiYPRLdt2YNqMWSLA0I2Owh77R7ojHT5yXN5T9pGGUqdHj16YMHGSUYXZ9xgZ+/btR6nSTqZQa+Tze792/SZj02ZJBp279rC5PqPAzp275JmgsoguZRQU+G2gt0jZss7iVeKbcMbj+R0qWqykjTI8ZfKUKF26rFG9LCtXqSYhA9aZtDZUrlzVzBo7dhwqV65mbn9thX2vfRo2YgyW+4wdGRdZq0598z21LutSuRomTpwsWcYza+zff/DwF13AOOYl/6xZs5sxXBzvnT5zFs1btEG2bDkwf+EiozosXOgJ1/qNTNc0T8/F4lVhFPDyOiv9x4iRY3Dp0mVR7hr7/Hr5QwIIB6jVa9TGuHHjsWLlKol5WLZ8pfkQUJpLliwFKlasJB+SkaPHYt68+fKA8wNZtFhprF23AUeOHAE/1tQiHz12AjGix0LUKNEwc9Zc8CHnILlW7boSQ7Fo4SKkTZtRbgxfIHaU7IwMfz2ey6lceVPr0qpte0zwucEcII6fNFVMp/wYFSzkAD709omDe770FG54rj59B5gvJTt0+oZy4PKlRH/Z4sVLgwHJ1un6jVvIlSsP8ubJL4MO+iHmyp1PXmJez+Rps3D46HE8evRIhIAhw0bI4WPGTZSOcOXK1eBAgenRkycYO2GyDCwvX7kiHfb8BR8fOJahabZZ81YIHjQY+GAtWrwUe/cdgLvHArjPWyBa+mbNWsjAheX54eWghx+IosVKYc26DaDFiIOPBT6DMX4ws2bLgREjRko7li9fYb7o1FRQm0/3PAosvKfuHgtFwOFHji+MvVaLg9LBw0aKxmXq9BkoVKSYDBTd3efBgR9vHzPqhPETRRCSk36AvCx9+w0Q8yXNnHR5o5RPyb19xy4YO26iDCQbNmyCxImTipmdjNk5Ll+xWjpoDqIaN20uVdIFauu2HThz5hxq1nbF1avXQYHauZwzunfvIZqNj7p9iJZt0aLF4lZDIZIfMD5jHHC71msg7SIH46NNYTZjxiy4dOmSmGLpj3nmzFnRqjdq0kw+HLdvW/yrpUG+/DM+vrly5DYFf4Pnhk1bEI0CSJx45sCKFo6yZcoh3N9hMWv2HMzzmI9IESIhTuw4praFg+tcOXPJexQ7Zmy0bdveRtAymtGjZ2+EDhlatI8nTp3G7DlzRVA+e9biTsgPPIVnpsNHjiBv/sKgq6aXl5cIXLSUUeNLJrVq1jGqlffA03MpOAipWbMONmzYhPMXLsClcnXs9sVnmG6NHLicPu2F9u07ihBoVuazQsE8WZLkCBYkGMZNmIxTp71QIH9BicNgET6LFJSPHj0mVlM+y+nSZTQtSnQDYQfPDzffBQow7u7zcerUafAjRjM7B2/0EabWk4N4DgDYNyRMkASbN28BBbbkKVJjw4aN0irG8dCv3zcBpG//gQgeNAQ6dOqCZStWgf0TPxgcxJLv/v0HkDBBYnFzZWW85xRA1vgIIHyv6RJEixs1vMVLlPZV40b3hChRomOHz6CrqGMxue9UFvBZ5ICZz4l9onBAqzCtm4ZlgYOl2LHiYMFCT/kApkqV1lRK8B1bsGiJuBXQb71c+Yp4+MjWIkQLVtt2HUUJwEEHFSMR/42IBg2bSJ8h9W7ZhhKlnHDk6FGsWrVKPra0nFC4jRwpChIkSCS8GLyaMlUasN/lgH3w0JFyblrlHIuWxLUbN8UHPGTwEHIvqckdMngocuXKK+V4vXQ9HDJ0GCiktWzZWr4PtMZR0xsxYmT07N0Pc93nY85cd1Ee0OWR95TtsU4cPNWt11DOx+sqXLAIhg0dLt9AWsBChQglgsPxEydRpUpVUDHHxOeioksVGXTRNY31V61a/ZP+ks/ZHHcP+T6w3+FzSNeRLt16gn0prQK5cuZG69ZtpV4q3dKkTY85cyzucyVKlMaggRblEAcrQf4KilYtLRpgPgMcfFFZw2sLGyYcOFBhv5coQWKUKVVG6uT3j5Y/9rG0JFPgypkjtwzypQAg3/aVq1abCgkjn0quWrVdxTWa7zC1zUWLlhDNLAdh9L8vUbK0KObs4xTYfzsUdkDY0H+b3wWjXvslFW6RI0eFm1tPnDzlhdy58iBI4MASD8eyVA5QgPpcopuRcW/YB/C9YrzMvHkeaN++E2bMnCWWqM2bt5oCD/v4tOky4LIvkzPQeki3ulu3bomwzneb7zHfPVqdK7pUln6qRfOWMt4x2tWhY1cRfC9cuCjKQY6pfBPoKVTQtejdhw8iIFOwNBJdfEePHS9Kn5at2olCl/eBlu+/AgXCvPkLceTIUVHm7d1riSGpUrU6Ro78KJgadRlLvnMUqJjogksL0LJly+UZoPDCuEN+w7nkM2qk6tVriVLT2DaWVIhFCP+vWFgpHLM/Dvt3GPTtN1C+VRxQM4bRt8SYGCobGedD6xDHOjlz5rJROvK4hg0bo5yTM5YsXSbjGQo41arWxGErJdaYMeNQ5TsEEPv2cHw1drxFqOC+K1euolLlaqDnAS3WPPfKlatAZW3+/IVs3NhorWOMChVbX4rvo7KLimx+1+iOy+8MhQb2zWPHT7KMsc9fEAUJFV9MizyXyDFGzN2iRZ4oV87iuUKhc8LkqfIO0iWRVunPWS7tr/dXbP+QAELNrMVSUFK0GLRuuPX8+NCzYQ8ePkaa1GnR3MrExXx2sLSgMM2eNUe0FBRGmFq3boeChYqYkjU14dWr15R9fGn4QTEG2gzGpc+9YSbctn0nIv4bWQYWPKBp81aY5BMDwsERX6ply1eIZM+Od8KEjxK+nMDuH82edMGZNm2G3Z4vb1LTX7p0OZyxE0AuX70qJmIGBDPxA8LBFDsiDjr4cRg3drwEGKdOlQZZs2SXcnxQGjZoKIN048wc8LFDW7x4Cai9/idMOLRtY7EOGWW4PHb8pAySjAAomsr58WjarDmoDa9apRqiRIoih9B/tnSpMjh29BjOX7iEY8ctsQ/FipXAooUWzR
EL0vJAwZNp+fLl8rGUDQDt2ndEUx+NN13S0qXPjAH9B8ju+w8ffvJB5UeaGh5OakATs+HGdvLkabjWbyxWHh7MD2sZH0sLLUXUXhlBYgw6K+ETUEXTfeTI0bB+veV5YsdYvGQZMW1z0FPO2QU9uruJwFzeuQLCh4uAV95v0Lt3XxEmLvtY6IxO09W1vgjOxvVZLzmA5WDUSBRgaC1iZ87Up3dfUJPGxE5u1SqLlpyDvCyZs4HaLyYG6HGw9KV0/ORpRPo3IoIHC+6r7zUtIBRAEsSND2rhmV6/eYvy5Svi37D/iKWLMSrO5cqDM99YpzlzPcTvlcI8LUa+JQ7aqOkeb2Vp4/tPHWeEIgAAIABJREFUgYWJFjj6zTJxIGNoLznY5YeO7x0TPy6Nm7TAaZ8ZOLhtpGrVaqJ6tRryoaTw7JugT7M0By5M1NhxgGefOGBisCWVF4aJevmyFWJ94TaVDHx3DAsCj69WvaYE8HOdriOGJpDacj4zRmKnzz7GECxKly0Hd4/5snvr1u0S5M8NDlwjRYwskwBwmwM6sYD4ElR84+ZtlCxd1hTyWZ7aY2vWtOjwI2tYRZ3KOpvPOANQ69VrIJZj9i28j9T22yf6RDds1AyrVq2RXXQ3oUKE7yATn0M3q4GL9fHDR4zGoMEWCwTz2QcYHzBusx9eutTiTlCpcnV07NhZ3Fn40f83QiTTKmddJ9vYomVryeLgsmSJUuagj5nsR42A26FDhiJjpixmPBYHMlQ4GYmxdbTG8r1t1aa9TIhCYYDCKC1f9JvPlj2naZHn/c2cOZs8pxygFXYoZlpDHz95ilix4khfzPr53WAQq5HoCkxh0jfXL1pO1q6zCJ0nTpwQTWSr1u3kUA6KU6RMLcotZnCWp7JlLG4iHAxVcKlsnAIDBgyUQalxv80dgEz0wm+bkapWqyH9p7HNYNtkyVKafScFalqMevWhBf2jUoxcqC3v3q27HMoJWygcGKl7dzczhpLa8ipVqhu70KZdR9Cia6RtO3aJtYUCIJUJn3PhovcABWTebyYqqaJFiyGCB7epFKQg5lu6c+8BsmTOihhRo311oo+2bdrJe2AIvnSz4XthCO38zjkWcfTtNFi7br0EexuzBFHxQP/5unXro27deijrVE7iIJhHBaVhpaJQGTNGbEya9GncKb/T9ICgOyUFXAr7tBjyWeM7SOUQE8txzMNEF1t6khjWDPZX6TNklmdZClj9oxtkpy4W6ywHuFR2Ge0aNGSYuBfze8P3nwoFplGjRiN//oLmYNO1XkOz/+czT2HQt8R7t3HzRzcwPgculaqiceNmoGsoXXto9WagMy1w1m6YVCbTq8I+8fvC66byiYnPYqZMWU03RvZvTRo3tT9MtmmdZH9OYTZVqjTmeNG+MJUmyZMmF0Urx5S0QPbq29+mGAUQa4uIzc6vbNB1mFYoa9dovnu08FNh1KxZS7FAt2zZRizw9FyhoslIjG2qV68hYsaIiYjhI+L4iVPGLnPJ+1y+QmUbzxnGhXISBD5ffCY5EQ2F4RjRYoiQw4PXrF1nI4B4UgBxcpZ6eT/JvpdbL7x85S3ulIa3gHliP1z5IQGEA1pqqI0ZKm7duoPFS1fYPGxsc7eu3eTF5/Rx1omDCPqhM3CIGpuNmyxTAbKjpzbKSKVKlpEXltsUXKhx9Fxs+dCxo3Au72L6TVLzQPelrVstL0eLlm1lIMBj6YpSp3ZdUGverl0HmbWLnd/XUr/+A9C3n+1D+rVjqEEqUbKMjc8kj6EWI3+BwuZgjIMyWkOYJk6chGRJk6Nb1+5o1aoNOnfqjAU+Fo3Tp0/j3wgR8XfoMNi12+L2Ur1adZQuVVpcTagJ5mwgfADtEx9ium/xBWfiB50D/p5uvdC1a3dxf7P2i+3cxXK/OLMIP9L8oNPfmPEORqKmmh85pqVLltlooRmAz8GJkTZv2YJQwUOJRnqvLxptlqMWPmb0mBJcbfj1rlu3HtVq1DYHR1u3bkM5n8E+20VTNgd5TA0bNDa16nxx6jdoiIGDh8g+Bso5FLUEW+3bu0/O0b59B7Ru1Qb8SJH76zdvxH85Z/ac8qxypigjubhUwbjxH7eNfC63bdsOx6LFTasfBWAKkUZsDWeuYodsJA746Fdbv0EjJE6YWJ4H7qO7giEoG2Xtl9SG8gNK32sjUSA3OjtafxjPYbGAWKwSL197w9GhqORbx4UYx/MDxfbRHY9BpSVKlJRzcEIJ+0QNW4lSZbFw0WJzFz8uhrmaAghdRozEQe2EiVPEbStN6jTYuPnjLDUMrjV8XydP/SjcUwtco6YlUPJLs6zQvL10+SqUdy4PZ59O1DivsRw7ZhwKF3bEG584HGrHOaMOg/qZaIkxBiPc5gdv8JDhso8aT0MA5QQLrq4NJN/4V7JEabjPtWiU2R9RE2wkWno44KQWiz7kS3x8fCmA8CP5xvujNtA4hlbFBo2aYtcuy7tNIbqIQ3FzUMJye/fslYBFYzBCbTotLUwMDKW2lq6WnEGFs7scPfbpzH306+Z5jHsobbJyU6C1lH++pd59B8gECMY+9tHUlDHRdY4zfFEo433Pli27DIg5KyL7pilTppl9tHE8l7SqcIIOI5EZB+LWif0dtaAc/DGAlAoVJg6WOnTsIuucNYnCmcd8yyCOCilajOLHS4AdPlZu+ljTT5zunkZyqVwVo8aMw6BBg8Rly8jnku9bn94WhRoFGsOlgvtoJeF7/7mZj/gcDxw0BHSTpOa9S7ceUjWfjfwFipiTkFDpwUEaU8UKLqbVgtt0BSEP3xI1oFQiGbGIRYo4yow6RtlDhw4jb4HCYtE18ipVqirvNq0V1okDoy6dLRy7dnNDtx49rXeb60OHDhdrFTMoYDRq3Bwe8z9a3Okew28ereB0RV1g1U+YlQCo5FJZXGqs8yjMDfbpr6mkoaLJ0NRal+NzVrxocYQIGgwnTlmUENb7rddbtGiJYIGD4MoVyzT8dHVl/7lli+WdyZA+IwoVKGR9iKzT6tKseWtRCH6yE5DneLb7fJsgdKMclRO0EnftarnfRr6xZB85dsIkscbSDZWJLuwlSpYVFyxuc9bCUj6WpnHjJ0mcreGPT68Mjh/oLmOfKBRTycr4Qya6+U6eYhGE2vm41NKt3bVBY4lFZZnx4yaAz4WR6J7OiU6Y2K9/TgCh+zxdi3xLR4+dxJJltrFp1uX4Hk/1RaHLd5buo6dOW6aGJSsKqoYiiO9j82YtrKsy1+mW2KBxM9mmJYBuq9Yu10ZBTlzA8ZyRGLvk4OAoAqARAzNmzFiZ4MAoYyz5TtMVnApNCqEUiOgybiTeI7r/2U/MwGeCHhX0jrBOVHo6lavoqxKcLqYcC3ISGfvEqc8LFSpiCmbW++cv9BTLaKdOXdCpc1f07tXHtPhx/EYXLEMg9/T0hFM5iwDCOujJwj4zTpx4YAjBf5l+SACh4MEBiWGSNxpM/3sjXoBafUJkUBQHvYY2kmbL/PkLm9Jf+YqVTQGE05AZbkask1o2Q8tKabJU6XLY6vMRoZaVQhADgZj4gmXJlhNnz
lg0wN3deovJi/voLmav+aXJiw/WlxL9W+lL+j2Jgg0149YzZfB4ds4OjiVEM8htCl38eDBxWlV2RNbp0pWr8kAzuPnhoycYOWKkaO04Kxf9cBf4+LMax/DDYMSHGHnUANZr0ASGZp9uIm69+xm7zSXjLoyAYQYo0b2BmhomDsIpEBiJvsZ0rWDasnmLWIk+7qsh95rb1AjRXMwXvWOHjhKrwhfSOpEJ3RioDaLWL2mS5BIHQoYtWrUzrT6cq57Txhm+ujt37RZfe2orGjZqahPQyGumD3jLFq1FI0DBgIkDvNx5C+CRz3SizHv67AW4ny6FHKTTNJktaw4ZkHI/XW5odmcy3J1kA5ABID8WhmsW3R3KOlUwZ9WgqwNd3Jg2bdmGylVq4OLFizIhAU3MV69aBgNNmrUyB4W+mT459SE/njlz5AKFCiPRP5YfDaaz5y8iWuSoSBg/AW7d+WhVoIaVFq43bz4N2GYnzXrZqTKdu3BJtqklsk8UEvPkLSAmYmOfi0tVMx6KwaPGc0W3K8bF0MLGxI8H3TGMRB99CiFUYtDlzeDKjxAT3T7Ix7cptJnHuCdyonmfgoRviW4vvI+0+DFt274dhQo7mgoSl0pVsNtqykf6J7Nemr6tg745sOesY0bivgwZspgWEMYE0F2IiVYs+pyv9rlW8lq40BJHQgHa+mNv1MclB9DNWrYBg5qZOJjjR7RzF4tLG/PYP7D9xmCkapXqphaMrkyGUCEV+AS7GuvGkn1z85ZtxVWReRTAKBQZibE/3Xv0MjZtlt3deoFutEZq2bqtKYDww+zoWAJ79uyR7wE/uPaJ7kwfzDfFspcfyVatLK5CzKEwYj2zFvt38iU7WvPYPxqKFH5POnexCMpUOpCXMbg0XBn5DNASQIscrZ+cAn3tWks/RusIv1+MNaI7BBU81vEYufPkx4T/sXcW0FEkWxjGJciiiy/u7rK4u7s83J2FxR2Cu7u7uzvB3d2DOwlJsP+d/870pDMEFlhmidw6J5mW6urqr7ur6krdnmIRLKntN/oc1pOacQ6WjEGL+Vo5eKD2uGMni9WD/R/vDxOtoLyHfOaZOKhvYJ3wygh15kmzDOFOi4g5TKgcxMHC+o1o2doyr4rbqPVsaGK+Y+dOcTUz3icK1Gx/2rRuI/ebWncjMTrOwIHOskrXFbobmhP7KL5BHIxyEi8ThXPeY7p+MRltFgdkf3XoJINT4z5JBtM/WsbixY0vQis3U6jgfTGiVbq4uIjLiukQH4ucE8o2q2pVb6ukkYHjAEPA6tK5q+R7/tziImdEADRcicuXq4gSxUsYh9p+Fy9bIW2psYHjG078NxLf1ZFjxvt4Vox9tNjFjhlHgrAY2/jLeQJUEnD+BhOtzLQYGqlshcrYs9cSUYwhtmtYLUAbNm4WgcOYSE+Fa5asOXy1gPCeUFDnRHYmHpsyZRrxSDDuN/snvv90MWeaPNmnAELrlzGopjWa837s0/qNm7F2nffcWOPeG/no1TFn3kJj9bNfWh44F8o+0d2Zgqhh9aaQUrZ8JZsAwjlDtMj5lui637xlG1vbzrYwQYJEn33ugM8v57yYExVEIpju2SubKQDS/c4+3bv/QLjRksB60GPDsPIzL719DO8N87FUyLRs3Q483j7RUmxYvuz3TZ0+C9NnzLbfLK6+fEeNd8+cgUpMzh8zJ8PDgO8xx1NG4Au2/bzHTEb/y2Uqpml5+t5wyOZzfu/ydwsgnNnPlyhN6nRiMqTUyT9qhjgxjn5wHOxx8iC1eUycp5ErV26R3KihYyNErSoHIXxYKPHeuXsX1WvUFO02Xzpq+1KnSgNG8qF/M/OmTJFatGR88KnNSpc2g7hkUSDiAIJ+zkwUVjgxnS5dbEAoDNWr31jmlVBzxQaZ2lAzfB7HOrEhuXL1qlwjNWTGhwM5wD937sJn0XDkhNZ/FGio6aZ5ja4ORoPA3Tt27sIffyQUrTDX+bDT/5MRt2hqJ7uOf3eRDvPAocNi4uakLpqk6W7Gjp5aZnZAbFTYCWzeskU06RxEc8BvDFBYPpnRl53zTMwT6qnp4iRPCmrkRp9+Rkfo2KmzDCooDNAKw3jfTJxcx3kWFGJYXriw4UANEgdt1ATzHhkRX/oPHITixUuKv/y+/QfRoGFTHDx4WF5eNn7GIEsKBkQbw0lajFLCzohaNPp8cwJ8hUrVJAIVBwYMh8yGgoNxJk4unTlzjviF8jkwhFAKfcOGjxaLAl0cOE/D7CrBia4ctNM9gqZ1Wu0oMNNFgcIv04RJU2TCP5dpNaPFhANPe+GO4QYLFiwsUXEoGPE7HEmTpRDLCDvt2rVqg372NEGzfA5AKeQxXnnokKHFZYTnoCWhceMmuHbjps3iIxUBcOrMWURwCi/XTksFhTXO+aEQmiZ1WlSpWl0GAJy7Qz4hgwaX54zn5PPNyaHcTqHf0J4bZXNQxlCW1avVELcnDpYSJkzsQ9tr5GUUrcyZs4AuaeTFuVVFi5WU+jCiDucH8DnlfaCwET9+IhEkOdjiHJxx4yaI77FRHsMfsj0wJ7ob0I+Z7KgZZlQb+1S0SHG0bNFKnld2FIx2Yh9Nh8fMmjUb0aNGl2vhPApaVo3BNfPzmeWgx/z+16vfUOZS0bJkJGqc0qfLIGZ9tm/0keVcDcNETQssTftPnjyTjoZukxSq6Y5Hf/9BzoOlE50xYyboVnn7rk+rK8/P54gD06nTpttcrGhhpAsX2xJOmB0xcqytQ6LfPes0bZrF15j+6JyEycmpbId5bkajsU+0HhQtWkLuBduyiRMmIknipFJfCicUpvk8GS5Z5uPpAla5chVx67l+8xbKli1nsei8ey8CNa0Ng62WYk6Y51wdcqdGc89eF5lLZi6Py/TpLlu6nLjj7t6zHyVLlJT22hhwUdPIUO+ci8PJwwniJ8KqVWukHaLihu6F1JCyj+H5p06ZKt/NaN6qnXTQFDKoCDpx8iRevXGT4BjUDpIR3U0NiypZ0KpFzSK1f/SXplWS7QYVBnTvomXGuOd8fmhdY9tiRJUxro33IEKESFi5cjUYmTFPnnwSQINtJwde1DBSk81ElxLeR87roMKFgQ/IhPeXbS/bCFpM2e6bE63YbHfWrFknGnQOFpIkTSHPMwNuDBw42BaOllbxpSZLBec9cJB87doNuR6en64Y7CMZDSpt2gzo1r0HGKjh6rVrMq+JZ2/Xth2yZcluURYB6N6jj7xPVGJQecXE6wjnFEEGveb6mpfJi1b/ShWriCslvyVDAY/vDPt0DnoZRpVR4+hqa58okHGiN9s0tkXsyxm4hO5F7dp3tCkZGUCCefhdiOs3b0ugCK7TEsJENyCGQDUS3S7p1sznqGK58sifN58ofDjB2dxGsJ6ck2kMlI3j+cs5EKnpj2/n7UHFKwXp4cNGiGsarU60gF+7dk0+Csr5U4Y7ON2VU6VIhfsPHggP3i+6SfOZZXAKXgPdZYwxiXF+uhgyyAzdQA33tiyZsorrD/NQqcHolJyH6eJyUCz+bF8zps8o/RoFK7bHdJ3ic07vCMMawuPZ19Nr
gG00w9bSm2P9xk3YsdNnO8Nnk6Hdv5RoNTVbIY18dL1LmiQZXA4cEIGNLnMpU1rcCHk9BfIXFPdwszsXj+Uzx2eIAh3nfxjvSp3adWVOCZWpHFeSX/58BdCwYSMJSsP3hBbb9Okzyhwk9u/sHxnkKGuWbPJccWzE+0xFRuvWbW2CtlFn/nK8RWUo+18+i/TS4TtHF3LWm3Wi1YpKDuMjnjyGynJGbKNAzneP4wcqZzn2ZJ/D957CnG+J47+KlavJXEv2SRQueC4eS6UXBSNeLwNKbNy8TdpIzo+hUpb1YxozeowEFaEShccyoi2Dk7DtbdW6vQiwrBcVCd8SldK3en7rtu8WQGgC5gQaugDRL5QRZerVbyQmM0Zv4iC8Vas2opVyc7eEtuRk3N+jx5DBFxt8Ttzk/IdOnbti8tTpokWnD1/5chXA+MRHjp2wWDSyZhe/dUYOoR9yrpy5RFJ2dX2A8xcvitaME6z4YnGAZiRGMOIH8jjQMW4k3VTKlKkgE2mNmPZGfuOX/vAsq2ixEtJYmcMVPn36XCwwDMv6pURTG/3Jad7NX6AQVltdl2RyYuOmyJgho4SzpEWD7mDUrtIVgYlcqM3n/AC+qEa3wwaW/p2MCmZ2UaDvIwd95Ej3EcNtxKgbfctZXhYZODazxVZnY8Jz5syVB/UbNLG5pfB7BoxWRX9OCiNGhDNq0LiNJtG58xeJCxsHeOz8aVLlYG78BAsTuvVwIja1m0wUHizzeoqJidKom/FLLR0FVt5bulVxgqmRGLOdAxDGaecLzsbRsBpwgEONCRtJ8Q9t0178gtlZ9OvvLM8fJ+LRikMN4XGrdpll0zWCJupatevi3j2LPyx9K6kB5KCa81gMQW7hwsWgNtQc8cmoH59zPrNdunTH5as3ZK4T3Y2oZWH0FM6d4YRxDgY5aOHEUkZHoxm+das24trGsqiJp+Z8zbr1nzVyjJCSNXNWeQfo2kYhle4n9F/NkjmrDPI4yZIRh8qVKYcihYvI4G/Hrr0yIC5VsjT4R60JJ6rbJw5eOPDiwJONOIV83xJN7hQyGPWGfrwcAFLgZaKbC2Oqc1C0cvVaEXwp5NK/eNiI0TJHjO8Uw9UaiY02Y9ebQxVTC8gQjsxLv1bfrJOcBMxOg65Cs+cukHrQldM+8WNifDYZbYnP1cBBQ2wdMydPpkyRSqKuPDJ9VZgaQpbLIBDmRGUG72XOXLlBf2pzovmfUeEYgIODeLrIcWDD+Q19+/aXAQAjmtCKS4FxpNXFwSiDwSS6du+FnDlySQANnstIdEuhgoUWx23bvV3Y+K6lTp1W2l3DpM5vEVC4oBadkarMgyaWx4673wBneR6rV68Bl4OHxeUndarUYqk6ceqMzAegm5M57LNRF3aqnN/E62Q0KD57tD6xDdq6dbsoJDjvzdC4UXHDgTv9n48f9/3L6+w8GXWKlqO58xciR7acIgRROcTEdofubxzEU3jk/AEO3hi5jy611N7Twsr+iANpWqs4N5HuVmwnef7Vay2RlGSA2qETRo4aKxZduvkY8f95LrZltHbSd96wrHA7Az+wbA7uGCDESCJwV6spwpexjb+UFagkYZvMyGnUKLMv5FxCWpI5kKIVjwJ940ZNkCpVapvfPeei8Lj27Tuhb39nmcdCRkY/YJyH65z/xL7XeEfWbdgkAwyGBWffZ/QFbCt4XiZaMRn5L1mSZDKf58ChIyhTqozMU2EbzETrJOd6MYId7yETlSZ0hc6YPhPmW+eIUWDiJH4+U+Y+csnS5bY5UXKwL/9oEedzyuhl5neXChi6+xYqUAjFipeUwbQvh8smvndsc6pWrSGKHSoCxFpjEtYGOlu+mcS+jlHmhg8babtfrvfvi4WAAj4To0TRmyJnthzIkS0HGEWJ993ekkMhiYKRveafZfD542Ry3xI13VmzZpf2e8q0GWjUsIkoaOlmV7hQYVGqcG4GJ2NnzJBJnlOWQ2GU7TMHj0OHj5R2kRZ5Q5PNPPxYJd2sMmXIjMaNm9usM5yLxEE0E58ttu98b+gCzKAVdEmiUMm+kX0DI0KxDd+9d59cIyM/GpGnOIZim0llVYe/OonbNT8pYG6vpC5e73womuTk1n8cOI8YPc7WDpv30W0tebLkImSxLnxP+Ldm7XowzC6jyXFcROWKOTF4Bdt35qUF58lT75DljPjGa2rctLn0WRS0qbSgopr9aN68+aV98Xpv8Q5gwBzOzSlbppzkp6DbpFkL6W84b9S3xKirnFdDhUSDRk0sY78ixaVP5BiCln7WjXWk8paJc/E4nsqXJ598n+b02XPg+JZjObZZtGIwcAYFgy8lae+LlZRxKkP+Go89PyrM6HxsByjY8zkVj5CiJWSqA9sGKq+bNW8h/REVakynTp0RzxWx6C+zWPTZjjZt3losvl+qx8/Y/t0CCH1PDTcFamQZf57aVFoPjMSXlA8vB7tM1DCwgzZHDaF5k5I3E030fNk4UKf2m3HCeTwno/KPy5TE6FLBBowdBs3p1HrwWJ7fnGh5YMNIc6NZk0IfT0rEvjUgPJ7Xxuuh5Gs04ka5HCxRq8XzfSmx3rxGSuLUJBqaeT6M1P6w/hwMUJvO+rHxoBRvJHYujJxgtpxwH+tCLRr5mBM1Idzu2zckKMGyruTOc5gHeyyDkjbraAy2jXLZMVMYMieus16GhsHYx/vH6CXma+C6mRFN0ObO3jjW/Ms6+KZV4nNmf2/ZKdCaxEnoNN1SiidzxsDnc0YtGbWmjJPNXz5XHKwazyzPS4GA33SxT2TC58ucuG5v/TD203LF/cL6Nb+R8kEm/zF2Oy06/DPK473lNX6yugWZv5/AZ9S3c/B6jMaFy3z++Gd2/+B7YbjPsV48H7Ut1NZZTyXL5vfAqD9/+QzevXvXJnCa9xnLtBbRlEwNCp8Zc1lc5h81Z8b7zOPYHhj5uN2sSWGAhhcvfX+PfHsOjHrwlxN9DYsbn0c+A/bJ2XkQOnexuL4Ic1MG451g3ezfJ2azH7wbh/L5f//B53vBfdRw8n4bic+bocXn+8/7QQ0Xz2Xmw/xsx/jucB/rZf8M8B7au7n6rL+3Sx6fCT7XxrmN+vCX75dxHl432w3Wie81f1l/1oFtk/ndNZfB94ptFtsaKic4CDLaabbZvC+GlYDHkbu5TzCXZSyzTNaN7fHbt55SFz7n5sRyDKsUfd1573luDr5ZX9bfcn43eHp6f8eHxxmJwi19+6mUYTthHjQbefjL9998L8mM7zCZGYMxI7+5zTO2Gb+0xJrvG+8LubINYH3ZJrKdtLTN3pG0qMCxb++MMs2/bBPsteDs6/iMmhPbBaNteOvx1nYNfC55LrYPrI/xnvJYsiE7o+8SDS3vvbUfMcrn9RjzUOQ4Ly8JvGIIRUa+L/2yrizbSHyW+K6wj+Q9MrcXRh7zL/PxWSBnvie+JfYL9goFIx8FaWqMqS1mMlyTjP3f87ts+QqZ32NYg3w7lhp2tg1GstT7qbTvfL753BljHvM4yRgT2I9HjHL4/jA/y+e9tm9DmI/fNKKygm295Zl4Lu8q33/
j3NJ3ubtLH8pjZs+Zj569+1o19Jb+jO8rw93zOvj8fGtiOOsGjZra5izaH8dnmc8X2y4+V7wWy7P5Vt5xyxjQ08fzxjL4vLBN4DPH67Ift7BNpDWJzwn7CrZxxjqPMyf2I9Lf8rttjx/LM8i89u+Z+Ri2W6wvz81+kWMOHmOMk9jHs63iuYx3jM88x6G8PraZvO/cxmPJ1XzvzeeyX2Y7wXbFvr9ifbnd6Be9+5Z3wo/n4j3ndvM0AdaDigU+T0xso8iP+RyZvlsAcWRlvqfsnTv3yMdVvucYzev/CVBTaf6gEK+IYXMpOFILZrjaGFdKFw1qRs2dnbFPf7+NALUo7EDoEvgz0rSZc2Sy6s8oy7cyOJ+oeXNLyGrf9uu2wEWAH4qt37CJDFgC15X/d1fLARgt6PaDwP+uBt9/ppmz5kpgBsM15ftLsMzp4Jw3I4Ttj5ThV4+h6yoZ/ZtEy06Hjl1swYP+TVl6bMAj4C8FEJqH6MvPEI90ZdIUeAgwtCvddOhaQN9MfjuAHR+tRtSqtWjZRkzz9KOuW7e+RGSxt/4EHlr//kqppeccnKhRoglbe7P7t56B38bgxwP79XMWt78vafS+tbwv5aMWMkOGTPIBNsOs7NieAAAgAElEQVSd50t5dXvAJ0CXA7peMNwrXQNfvfJp4Qz4BBx7hZwLQ5cqRm26a3VpdewZf27pdE8yf4X6e0unJcX4HtL3Husf8tM9798o7zhn0D74jH+4bq3jf0PAXwog1Mjyoy5LFi+1RTb5b3DpWfwCAYZxZqQYfqOFfoy0fpjTnDlz5SujDLNLc6emHydA8zbD+DFCC/147U3X31oyI9SMHj1GPsj0rcf8SD6anzlZmdG0/s3A4kfOrcf4PQJ0keMgmd9Y4Bwzexcvv1dj/1Ujfihz+PCR4BxCTUpACSiB7yHgLwWQ77lAzasElIASUAJKQAkoASWgBJSA3yGgAojfuRdaEyWgBJSAElACSkAJKAElEOAJqAAS4G+xXmBgJHDlyjWJahEYr12vWQkoASXwNQKMVsS5pJqUgBL4dQRUAPl17PXMSsBhBLp07SHx4R12Ai1YCSgBJeBPCfBbCvzGjiYloAR+HQEVQH4dez2zEnAIgSfPniNl8lRI8EcC28e3HHIiLVQJKAEl4A8J8KOWoUOHwYGDvn981R9eklZZCfg7AiqA+LtbphVWAl8nwNjtwYMEQYggQeWL41/PrXuVgBJQAoGHAL8C/3vU6AgSJAjatm0feC5cr1QJ+DECKoD4sRui1VEC/4aAu4cHsmbJhigRI8pfpoyZ8fo7vlr7b86txyoBJaAE/DoBfhfGKWQoxIgSFbFixMKJU2f9epW1fkogQBJQASRA3la9qMBKgB9l/C1cBMSLGQvx48RF+LBOGDN2fGDFodetBJSAErAR2LPPBfHixEX0SJGQ6I/4Iog0b97Ktl8XlIAS+O8IqADy37HWMykBhxJ4//ETsmfLKZ1qiqTJkDxxEoQNEQI5cuSC21sPh55bC1cCSkAJ+HUCtWv/T9xTkyZKhCQJEiJ6pMiIHTM2zl+87NerrvVTAgGOgAogAe6W6gUFVgITJk5G5Ii/IUHceNK5Jo6fAH/Eji0WkTHjJgRWLHrdSkAJKAHs2r0X8eLEQ6zovyNZosTSRiaMGw/hQ4eFWkH0AVEC/z0BFUD+e+Z6RiXw0wm43n+ApImSyMTKWNGiI2nCREiWOAliRI0m25ImToJbd+7+9PNqgUpACSgBv07g7VsPlC5VRtpCul8lSZgIqVKkROzolsnoYUKGwo5de/z6ZWj9lECAIqACSIC6nXoxgZXAiFFjET9eAuTPmx85suVAwnh/yF+O7DlRIH9BxI4VByNHjQmsePS6lYASCMQENmzcgmRJUyBn9pzIlSOXWEDix46DLJmyoEjhImIZadq0ZSAmpJeuBP57AiqA/PfM9YxK4KcTuHrtOk6fPQ93D0+sXrsev4ULjxjRomP7zt1w9/DC2XMXcEW//PvTuWuBSkAJ+H0Ct27fwbETp/DqtRuOnTiNlMmSI1iQIJg+YxbeenrhwsUruHjpMj59+uT3L0ZrqAQCCAEVQALIjdTLUAIGgcNHjsEpVBiJdU/BRJMSUAJKQAlYCLx6/QYpk6UUd6z1GzYqFiWgBH4RARVAfhF4Pa0ScBQBTrZ0Ck0BJBpOnT7jqNNouUpACSgBf0fg7j1XpEiaXASQpcuW+7v6a4WVQEAhoAJIQLmTeh1KwEpg1+49FgtIlGg4eeq0clECSkAJKAErgTt379kEkCVLlykXJaAEfhEBFUB+EXg9rRJwFAFDAIkeJRpOnDzlqNNouUpACSgBf0fgzl1XpEiWQiwgS5aqBcTf3UCtcIAhoAJIgLmVeiFKwEJg5y5aQEKLC9YJtYDoY6EElIASsBFQAcSGQheUwC8loALIL8WvJ1cCP5+ACiA/n6mWqASUQMAgIAJIUsMCoi5YAeOu6lX4RwIqgPjHu6Z1VgJfIWC4YHESulpAvgJKdykBJRDoCKgFJNDdcr1gP0pABRA/emO0WkrgRwls275D/JudQobG0eMnfrQYPU4JKAElEOAI3Lp9F3FjxZE2cv6CRQHu+vSClIB/IaACiH+5U1pPJfCNBLZt3wmnME6IHu13HDumAsg3YtNsSkAJBAICN2/dlq+iBwkSFPMXqgASCG65XqIfJaACiB+9MVotJfCjBB48eIhFS5Zj+co1ePr02Y8Wo8cpASWgBAIcgdev32Dt+o2Yv3AJrl+/EeCuTy9ICfgXAiqA+Jc7pfVUAkpACSgBJeBHCUyfPhP9+g1Anz79cOPGTanlx0+fcP/BQ8yaPQdLliyDq+t9h9b+4cNHmDN3Hq5eu/7D59m9Zy9GjR6Drdt2wNPT84fL+dKB7m898PHjR9n97NlzrFm7Hqy3ffpkv0HXlUAAI6ACSAC7oXo5XyfADnHc+Eno1r0Hpk2bIZ1lx45/o1u3Hnjw8PHXD/6Ove/evftqbu4fPHgo5syZ99V89jvfenji+o1bePfuvW3XocNH0KBBI3T4qxM6d+mGI0eP2fbhk//oxszV3LxlK8aOHQ9PLy/bdXz8+MnXTtqWQReUgBL4JQT27z+A0qXLIV++gvjrr05gezptxix0+rsLli5bgSdPn6JsmfIy52LZ8pUOrePKVavlPBSGfjRt2bYdIYOHRKwYsfHs+csfLcbX4169foOBzkPg9c7Stq3bsAmhQzlh5crVtvwbNm7Grt17beu6oAQCKgEVQALqndXr8pUAh+Or1qxFmlRppKNq164DJk2eihjRYyBL5qw4d+GSr8d968ar129g+IjR/5j9/YcPcs7SpUr/Y15zhqnTZ2L2nPmy6eMnoHfvvkiYMDEaN26KKVOnYeasOZg5ex4KFSqCtWvXmw/1s8u8pq3bttvqRw1qwoRJcPeeRVvq9e4dlq1Yjec/eTBgO6EuKAEl8EME9uzdh7BhwqJAgUJ4/sJ7sH7vnisaNWmGLl17SLkrlq+U9nb79p1fPY+bu7uP/Xz3zcmslOB2toFG8vJ6h9t37oJz4J
48eWpslt9Xr17j/fsPPrZx5c2bN3jr4fHZ9qpVqiF2jFh49cbts33GBg8PTx9KEp7fnKg0Mad379+jVas2yJwpC15Kfd6L0mv/gUNS30+fPuH4iZOIGzsexowZh7dv34LnMJJhNXnj5ib7jO36qwT8KwEVQPzrndN6/ysC3bv3RIigwXHg4GEpZ936jdJB5smdV4wG7LCemOZPuN5/gNemzujuPVccNVsaAHByY4niJZEpQ2ZcvHQZ9DU20qXLV3Dn7j1jFbfv3MGRoyfEPYEbHz56hHsm94Rr13z3TaZmkZ0UU6NGTaXOQ4eOsJXLBXaoOXP+ie49evnY/vzFC5w+fca2jfXjdbx+Y+n0799/4GMQYcsI4MSJUz46Q/M+Y5l8Dh85anMvYIf6+MkTvHjxSrKws7/n+sDILvVcv2ETfo8WAwMHOuPRo8fgAOTmrTu4dPmquD+8eeOGIUNHoGDBIjh2/IQMMDjQefHyFV68tAx46NZx7tx5uL99aytbF5SAEnAsgXfvP6BcuQrSBt28dfezk3H4PX/BEtk+Z85chAkRCgsXLUW/fgORI0cuLLX7Cvn2HbuQM2duNGzUBLT0Mk2eOgO5/8yD2bPngtaCPHnyoXWrNtJOLViwCFmzZMfUaTMk74sXL9G2XQc0a9YCJ0+ekm38R4GkSNESyJ4tJ/r26QdDLDh46IgISQXyF0TzZi3hZbIqlytbHokTJv6iAMJQ57ly5UbRIsVx9tx5Odf8BYvB/mP06HFYu34TihcvifbtOtjqMXbcRGEVL3Yc9O3vjJu372Ho0OFo1bI13n/4KG1a1qw55COyPD/bvXr1G6JggYIoUaIUTp85K2VVqFgF7dt3tJWrC0rAvxJQAcS/3jmt978i0LRJc4QL44RDRyzuShs3bZHOoUC+AiKAjBo9FnHjxgM7RdcHDxEndjw0btRUznn02AmULlMemTJmQZOmzcGOmKlHz94IESQYUiRNhnoNGuOC1ZqyfuNmZMueE8WKl8L5ixYLy8hRY5E9ew6MHDFKjp0wcTJi/B4T9erWw+KlK6SzbN68FdzcvQfV+10OYNXqdZL/0pVrUt9SJX23oLiLq5bFD5sHnDl7HnXrNkDGjFkwdZrFPeH6jZvImjU7UiZLLpaTDh06IXOmrNjnckDOwX8Uwvr1d0bGjFnRomVreNpp+YyMDx4+Qo0atZE6TXqw3kaqU7su4sWLj4HOgzFuwmSkTJEaHDgw3bp9BxkyZIJTyFAiMPG6T5w8LR1uu7bt5T4cOHQEsWLEQvw48VCz1v8wfuIUlC9fCb/HiAEKkUwLFi5C6jQZsN/loKzrPyWgBBxPgO3Eb+EjImXylDZFiv1ZDVfUWbNmI1jQYKhYsTJ279mPSBEiolQJ77Zr8pTp+DN3Phw/fgJdu3ZHpcpVpahTp89IO+cUOgyWrVyNnj17y3qhQoWxYfNWFC1SFGFDhxVFCg8YMMBZ9s+fv0COp0tW3rwF4HLgIAYOcAYH+HTDPXbsOIIFDYHlK1bKX4hgITFm7Hhb9cuVKScCiFnpZOyki2iKFKmxd+8+TJkyFZmzZBPlzJVr1xE+bDiECRESCxYvQ+1adRAiWAjs2ecih1JQSRg/IbJmyoIrV6+CFpFGDRtLfTdv3YYPHz6iV6/eCBk8BCZPnirXtHPXbkQMHwGhQ4bGlavXpJy1azfgiLXfMuqkv0rAPxJQAcQ/3jWt878m0KFDR9E08Xfa9BmIFTM2cuX80yYgbNiwSTqGsuUqYM/+gxgyZBjYqdGSkSB+IkyYMFEmVMaOFQd/d+4q9eFg/Y+48VCmVBmbdp4dScqUqfHo0RMsXLQYqVOlxbPnz/H8+QuECBoM6dNlkGMvXrqCqJGiyDnXbtgsFoEgQYJg9559tmsdNHioaMy4YcTIMZK3g0nDZstot0DrTYzoMeE8cBA4AZJCx8yZsyRX/foNpZw6deri8NETiBguPKpYO39mqFChEnLnyiOCR9269WWuiV3x0qHT5at27TqyK3/+gmjSxCKs9enbX8ovkL8ADhw6hlw5ciJyxEh49PiJ5B0/boLsHzxkGOjC8PTZC/wR9w/Ejxcfbm89xQryR+y4KJi/IKjh9PD0wqhRY+WY+vUaSBlubm4iKBpl2tdP15WAEvj5BDjZm20U282H1vf5S2eZPn2G5B01yuKemiFtOhQuWNiWPWP6jKBlgEqWunXqSl4qKJjoCkXLAhPnR/CcNapVl3UO2Ll+8tRpy/4NFkv2NqtLJ9uRYkWKyT7+M9ykOJin++3du66YO3e+lNG8eUtbPlogEiVIiJcmKzZ30lKRPVt2RIscBdt27BQLB89vWLdTpUyNlMlTwMPrHaZOnY4QwYJjxszZUu6Hj5+QLk065M2dx3YeZ+dBcu4RIy1cqJBieXv37bfloWWF24x5LfZuaLaMuqAE/BkBFUD82Q3T6v4cAm3btpcOj5MmOVmyc+eu4pdrlE7XrN/C/4Ya1WuKpsrY3qHDX9IZ0IWKqVKlKogdM7bNXStRgkSoWKGikR1FChdFRKdwIuT8z9qx7t69R/YnTpAI+fLkk2XX+w8t2rHMWWR9xoyZcp4NGzfJOjvOjZu34Zo1ugutEuyUzCb+t289xNLQp09fzJs7H3379sPJ02dw5OhxRI0cBdWqVsPEiZMRKngIJE2cVMqlYMVy+PFCpmiRo6JCuQqyTE1hpgyZkD5tOsydvwC5c+VGyOChcNXOPez02fNImCCRDCjou5wiWQqEDhkGz1++xqrVa6X8Af0HSJnkwS+0G65YixYtkf2TJ0+R/a9euyF71hziJ+1mdalKnjSZcJQM1n/ly1VEtCjRJNoN3bGePX9h3q3LSkAJOJgArQpOYcKKgPDw0dcDeHAwznaGc9Q+fAJSJU+JPH/mkWVWM1OGjEiWKIlYWzkHbPjwEeLC+vjxU0SPEg0VK1SSq5k9xyL0/NXB4oLUpnVbqcP5Cxdl/6JFlsE6XaSYfosQETWr15Rl3/7NW7AYNWvWQbiwYdGpU2dblnJly4HtM9sjc6JlmdvTp00L50FDxFozatQYcSelAopuWxRQvN59kEAavOZhw0dKEU+fvxBrUc7sOWxF9u83QLiMHjNOtjEYCo9Zu857/h6ty0kSJUGC+Amx3+WQ5KN7qyYl4N8JqADi3++g1v+HCNA8njxpcps/sH0hW7ZuQ8TwETF7zlwfu5o0bopgQYLC6HDrN2gkHcax45Z5GQnixRcLiHFQsSJFZVA/YOAgjBs7HsuWrQDnNfBbHTGjx0Ahqxbw2o1biBcnHrJlySaHjhs7Tspdv2GjrPO3V+9+RrFYvWa97G/erIVtGzVj9IcuWrS47CtduiwuX7mKPXtdpBMvX64C2FlOmzYdBw9aOjL6ErPD27Jlm3S2FFSqWi0gbm89RADJ/WduTJw0BbNnz8GmTVvw4oXPwf6WbTsQJ1YcGVCMGDFKNHV0U/D09BK3BpbvPNBZ6kl/6zgxY9vmu8yYMUvOP9KqAaQwQW1o9mw5xNrBg5InSYb8efPDErjScrl79x+Q45o2aYrXbu54+
JZQ7L6wgnTOPqtQ/EW40vDtgIMWPGqg/vwvNtYryAjXCuoUaNmkgrp3COsoUFmzEJ8VzMZepFEVNSsmQpIaySNRII0GYn/h2oCR5ce1IEc/1SPqUlopXD+8A+1j/vkmWEGCy8ihhaWEsNkcSA2BS8L8TmwXOJNWT9GThoiM4thDXmVSWnIYY5lzdPfk2nzHWAOyG4GaV/7tz51pgAAuS+vjK/GPN2AnLDmgBf5H1Z9wy8hZgJIwTC/xEClzjlBWICgCtBPHORIsXUOm+uzf2BtaFwwufsCRMiO3QUrO+eCCGZdQaCbzQMb6xxg6YtBjjibFDgQSRUrRZgGU1Rguk7CKWMGBrez3ifUZDgByjuwJV4z8Cg2hoDaK4PaoMED0AaeX6zdnEcY+OkSQ6vYN9+g2T9hodjlhglYkXs/UuCkdVrHEouxgjWHdpB8AsD4eU3XniNx4rqpDCtw4cdhidtbPsPuY81wmTFpHaRj08ZNTbOmOma1ASeiuKP4Q8CWgjyxE4YbIjDPHLsfWtN5DhjEvieoR49eioUEagxSAVPtGjhYk2b7QxJlosXL2kCFCC0GDtQQEyMFt4dnnvpUofhgTE4LOFhggxgxX37OxKuEPs4dpwrfNAgE9yfg7W8caMmAlqDVL4o4uXK+rnIGe7n/Fl+PxEFhMC+smUraG54Oo58+QSU3n/wiS7gWI0NPIqJlDp1Wg3uJUi7Vas20q//AO1vsjbkzpnHcucuWbpcfEqWVpciisDzz71kCWDgdwcNdkxsFjDSYH7yqcM9iGUzKCDI4zfEwg404O9//0qPE/A73xnkNnvOXMVZ2y2rWF2xpPaO7+Ni9WOhA2v88ccOFyHWZgYZNHLkKIVqGSwqQU6NmziwmhzHFWwCL6dPmyF58xSwXJtAZBCGjeBDeyArvn4VLKgNaSoLFiys98Jah8V/gfMdiCUpXaqM0LcQ2PoePeNVGcKFDf7QuAoJhCYtHO8C4VXge0DcM3euPDJndtK0nSwu4HKZhDdu3tagzTy582o8i54soovVNCc0p2XLCMmTK48eIsgxXdr0sm+/I1APfClMGSIQHCgLhMu9SfMIDSA+sP+guooNNA0GZaACGzZukldTpREEHAgrkWFGLJ5YK2H4hoYMTZASJXysBAPgtOk/vCoQWOcqlaua5qp0DnBCOvbu3St4I8ZPmKixLp9+5lB4rca2PxIShitMDKsuBD52wkSHQIirO1fufEJqPwjrkrkHQgCxTnjj7MQCmDdfQcv7wTFqXaROlUamTZuhTREmQ+o6vBcsbKGhYXLOabFHUNq7zyEkNazfUK3cRkklHsdcY+q06aqAvLfvoC7+QAQQsL76yjFfiPMCBgHdvvOxQh+9ihbXgHnGWk8n1JDjO3bt0Qx52tj2H4ssMLet2x7CswhghGcEOxVrW3P9k+9mD4xFEcubt6BLznZw7CzIWKmhWXPmCjA0DCGGgHUOGDTM8hAilGZ8/U0h05Qh+NaKVWuthQmBjXoGZoEhoQJKnhGyOA/jyHCnUGquA/8iyNOdgNHBr3gWO2H9xAI92wlnsR9DgWXcG3iFOYYw1bptpGzY4KqwEJ+TOXNWjfkxbRHK33ori6YdZh/wMDyqx2zzw7TF4gvMhhTMdsIyyvzYvTtpggqgoQQh/xwKqVNPBg16mLkMHlW5chWFHNmhsAgJg4cOFwPLTO4effr0VYGD4yi4QPOw3EN4VkwQKoVHiR9AYIaIx0DAga598KFkyphJ29sVO46hOPmULGXN18uXr0qunHms9Q34FwIrhLGN1MRcm6QsCcMSdC7pQe4ZVFPIAgYB6YWnGgWEINsEpxeYd8qVI5cmMrn90UeSI3suC+6KQcyruLdcdPIvc222xAkAjTWxiQjCJHGBUMqI1wDiA/373//RNQihD9w9sOZ/fudIHELsGJb62bMcawEe1Jw5cul5rMsZM2S0rPrEm8DPv/7GMedmzZptwaJQov0Da1oBzfQtY95AfvWCItKsaQvl0fw+ePCQ5MtX0PJo+1f3t4x5GBry5ysgc5yQarwtKI7MCSguLl5K+pSxYETqzXYG+sfGxkmZ0r7mlopQQBH3RGSNi4rqKCdPndV1AkWahC2GmrdsbaVnBQb0+mtvWB4p+I/JoAVfQZHEiMg4GjZ8tLVOM+8xtGBExUuFpxAC3oRCwnpO7RwgfB2cySiQE5o2j7CMu3jO7QHYrJd4r+2F9WrXDrEgayBXSC1LshSIbHQGDYKi/f6JUzJ12gwBgoYRLDnCGMR3AD2g1xkxUkLq1pd//+ffKsukTp3OQlKwRmGYxMMBNW/RKkkWLBINlClTTuUK4ymlLV5ZgyxBPsEbb5Qn+o8MhYZmJ85XAwTGsvTpXtOYSo6Rrr2DE2aI9445h+cWJZJsfaAUTFwS75Mvf0FLPgqtV1/Spk2n0DUUqFp16ioEi+vCnxLnucb2sZ95Nnq0q4LFftYOjId45f7M9KsVEJhKeOOmkumNTGqZePDpZyrgvvLKq5rliM6FqVInBM8ERcRgfAj7YH3JA4+34ObtO0KmlxdfeEk6de4qH318T70dGdK/Jh06dNLBBN6e9J1k0sK1SV7+iNbtpHp1fyF2hFz49+5/onnlgTkY17f9AyN0sSCQQSeiTaRmauE4mSjQfvPlK6BZf8w5LMS169SV6TNmqWsdC8KFS5e0PgLWO4LepkydqVYe8KUdOnZWrOSbGTPJpi1b5fqNm1LWt5y88UYmzfpx4OARDerFFYvGjaKB8FK0WAmFGSE0eaq0iYCIQoDHok/fAQIjwT1N7YB8efNpxpdPPvtcsbqpUqWWOYnzNVgLCzXvNGGSA7rAxMUtW9KntNY8OOu06hJXQWwGWavufHxPrR+kRUWAB1fpTgTBg0fdumO3pgDlO/FMXB9iIYZZ8p1w7YObhnETOMdYAVP88b37KnASLzM0YYRmTYKJYHlr0DDcEtCw9oGlHTl6nAQEBuv72xctYn4KFCik1i8SCNCvFBrsHhMradOkl1at2lqC2/37n6ilnsXm62//T4DsAJ8jkBOhGzcvChLCLMFwb77xliYquHj5qqxfv0HHCEGqLNDEAJHVw64smn4iWJSgaizjKHkoH1hkDeGuJ7MaMAms9AiDXKffgEGqbOFNQCg3RPIE+sROPCOB6CgvLLwEMjKODQEPrFuvoVSoWFkVIDKmQfQ96WeBBoLRx6vD4gahuOTIkUuhFggIcb3i5eWXUwnpdiFgBMBBgDegLAIFJPMRWVgg3pfnwFMyY+ZsyyqmBxX/uk2Ca9bRIFCyiZGBBpw6lj2yCxFU7YnwlLHAGuLZ5hJcO3matG4TqVBJFj2svNCCRYs1qBdFh2xIBPsyXwsX8XKBt2GlxsNVt14DPQ7kkgxgwNxYhMkWhgeU2DAy7BGjhZX46LETVnYteBm4YjKIkUgDwoL94ouvSM3g2nLOCcfQA9xv5GjlV/CgnTt3y3mSeAQE6zhxt/qbc1D2jSXV7DPb+N79ZKpbJimOYRHGiodgjWLQunWkJmvgGIYLPC2+ZctJ9x6xloWZYyjz8L
W/PvsX7RPGJoQSSz+SgILvj0XZEMqXV7ESgiGCLHkmS5I57r5FuQBqQUwd8XyMAww11AEhQBrh3U5YPo03C4GndZv2HoUi5gSWb+bFxMnTFfpJsTPSGBfIX0DHO15LMqRlfSebehNQZvHQE6TMvBuaMFIDcrt1jXZRXnmehOGjNHCXLEMPPv1cZsycJfBbBCC8Q8Ba6HMgoyTzwMswdux4ef/kaY3Vw8BGzM3Z8xelcMHCqlicv3BZDQDp0mVQa//nX/5dExWQfalXfF+FX7K2MI7599abrLXdFeKMQsFax5g0vNfeb6yBBQsVkarVamj/OaAt/9SYlwwZXpchCSOsGB2CyOvWq6/ogoGDBqtBDWEQL8mwoQnaP0BOSHXNmoY3gvckBgsPGokxiOMhuJ2g3xOnzioPZG0gcQuEAMpYL1e+omaoQ6F1J83OV81fkx3E9eqraxyxDIy9/Hnz6xp1+85dWbVmrWRIl0E9ciT+wPBI2mfWErOGxsb11nsFBtVUXsG98KoQU/h6howaA0QCAmBTmd7KbGW6tD8ThTWzZc2hAiYeTRIZMP7JmgnpfCnurcJxvwGDVf7B88CYRlnF440hE+jTK6leVWjguQuXdBwwnvDEkcEKQqGFn4FaAMJbpWp1lyxnKC+MC+DRBKSbTIP0d968+TRe7/CRY3ot1kiegTpC8DlqI6VNm15hxfsPHtFspcT5oGAhOwC9JU33CmfsE5BFguAZA8hkGPaMJ0pv4PwPoyLJEHgXvFW0M/GaNCGOEBmE9RAPmIGfbt+xS9PSItPxPIZQijESkqDHTm3btFVvLbFKjCGC+A0xhwn+B/GAvIYBD/6NYSYoMFjRMngfSedtj2NB2QWdQZ0o5spbb76lvM/Ez8HLUAiRExh/zD9ThBQDE6EH8C88XXaDCc/FOPPyKqnrJ/E2dsLAVSB/QVXkiSu0eyLt7f7X//7VCghuULRQNGTw/QT7Acnit4Fd0Yngxnfu2uXiDgSvj8CDJR73IxYAMNNYubBuoFXi4rW7/kiTeuDAQbVGgivGuoUlGOs5VZFZLLkv2rvJKOLpIxLchoBmCOEM+M7775+0FjZg0yy66zdsFNIJkrqPyr0XL19WGBPMc/v2HQIGEusobm3c5Wzpizt37qhF49KlKwqdwNOANYIJyz+zsPMMKGm8Cxah5IiFF4s/Fl3ORbsmaPXqtWuK49R3v35daGfuhcscLCjPaIhvhkUVz5UhMKu0pc9J+8dzApfCTWms36YtW9y6HGdBAybA96ZvjPWZNlgf6DOCUFFiYCiMA7DI3A+XNM8KRtJeyXrXrj3aHyZQd+7cBZoikAxRMBS7h8o8EzEW9I2B32ApxgJEnATvZVzetAcHynvBoBmruGVpy3Px3fg2xCnwj785ZtIeMz63b9uh3jCwvDAtO1zKPA/BtQQDE9cElMMTg4GRURUceCGEAoJFkTnBWLRnPMJahdBkJyx+7aM6KZ597dp1VkYQexveD6/jlSsObwvHWODWrt+g9+E8k1banEd/mWrczD/6kO9kCBwv8TAXLlwQUlG7E/EcGzZsEvdMUbSjjxESYfAE1JLJjjECrO6naM7cBer2Zlz8598PgQg7d+3WcWG8PVyHb8r4gy8wR7gP4w9e5akqM9+Y+USclfEIch08kggYXAsesG37dsvaa56Xe+Fl49rH339fd/PdOY/xjpHGTnxfvgswplu3bmnCA6Bu9j62twcyMWz4qCTCsGkDfAehGT7lTrzLps2bdW6YMUwbPBvMOTxcZMUx8AmOwUcZA2SGw9JnvCDMVwLjWezpC/rMEO9PW8bLsePHLYiVOe6+BSLJ96AfOIcxwHcEEmk8TeYc7jNl2rsWbyQVKAs68WWeCB6GZ5P5Ay/j2+3es0d5G2sOXsZbt29r9jWe2fQL34k5RZzi/yUTNwRvZ46SMAOlm3WD56e/DL9i/QJGynVNWmw8HIwHng1LOLyMZ2HtIzEJ34nrsv4YoRwewFoHrwXawxjGIwC/Z42jHdeDlzLOzHdy7xPmL3wG2DBEO56X8cY98Rjw7Cj/eNAJFOZ7sEaMHT/JWrPpK74RvJV3sPNz5gBxSgjI9AfPRGD8+fMX5f0TJ/RdzXPxzlybNskRfcJcJDsaAinjlGeCPzvWqL9baxQ8+d69B3pfxgrvZBQQro/MgPEIgRTC2AKvOHfuguAJA46HN4H5wHu4E2sH3jT6j+ci3Twp4ZnjhhDM4aXMHeA+Ki/s3qPXpQ+AcDJW6HP+GSGY+Q/c0owdvBHUgQIehZfR0/MQhE5cpJ1PMS7hK9TssfMbDDWMO4i+ZHxdunxZ+S9yE2s44xDZgT5gvWN8QXgF+AbEe8HTeW4MM/Y0x7SDJ5BuGp7O2LHXI9ILEXt29rzOLbuQDmyQPuc57F4azqG4KePETkCDyRbJe9K/7sSc4nkxvJj+BIWBckw/MLdNv5tz+W0UCtAs9Ad9aI+bYf3gfvbUwuZ8nn3VqtVy+sxDXmiOkTKbecZYMWn+zTHGH+MW2Yx1Aw/kn5F+tQLyvx+HB3MAACAASURBVNxpWEbtbj3eFcZGhgv3AMn/5X74o9+NnPB4Mf5IQunDa2EWcfMsQHFYEN2JbFAGJ+5+7Jf8Pn7ilKUYm/MR9LAi/1wCyrPRmabw5577R7aHSUd3j1Vrv4kh+COf57e+N95aFFw8RAg+jyIUu4jWkZr9zJNH7lHnPq3HEBhJujBsxGgLesuzEmdEYUTS16bQk+kBhE9iUNwJAxwxfyn0+/QAHrSY2N5JjEK/z90f3gWUCd4qOxG/gRJiRx9w/OrV6z+reK39mp7+RoHEC+ZOQLk9Zd9zb2f/jfGGrG0p9HT2QIoC8ojvQqYDv3KVFEsK1IYAw67RPVxSFz7i9JRDT6AHcGMSGAcMyFNQ6BO4xWNdgoBrMtqwSBOLQEpeoHD79rvi+LkYXizcq8BLsCL/GgJ6B1QKbLadwJDCkKmxA7zBbvGzt3P/G0sWbnjgch86IVnubZ7235u3brdieJ72Z/01z8c3pgCWSbv5U9cCjrFx81bL+vdT7Z/24+C/jxx1DUTlmYnHISvc/4qi9TR8Bzx9wHuJGRg6ZJjCOvG4YgU31uSn4Tn/l58BKz3JKkilDvTOjpD4vd+bBCbERpLsZ8TIUUJ2vvg+/V0MATyTPnO37vLCCy8lSabzc58ZSBYwfZQfd88PiRxefukVadggPFlPsPv9vvzqK4Xbs0buP5h0nXZvn/L79++BFAXkJ/ocNyDZYQjEIpsN8JEU+v16AGEeKBPZq+wpKn+/J3C9ExZpMlkQGwQ21hPhFUFBGTBgoJVZx1O7x9lHrAlVag18wJwD3pvq6qQVpgq9Cbw0x5Pbgo2lL/HOEAydQik9kNIDKT1geoBieTHdewqJTlasSJpAwbRL2T75HgCmTIFbii+SGcodLvTk7/joKwJzBL6E4ZX1jmr37sQzU0iQ7H5kPPs1RC0U0rcDW3In1i3uQSyYyXbq3sb9NzBB6m1xHoHmKfT09UCKAvL0fZOUJ0rpgZQeSOmBlB5I6YGUHkjpg
ZQeSOmB/9keSFFA/mc/bcqLpfRASg+k9EBKD6T0QEoPpPRASg+k9MDT1wNPXAG5e+++lZLz6Xvd3+aJqBPwuETGIXsmiMc972lvR7CoO0zo93hmYh9MyjzuR2YVsp78VkT6UrJj/ZmI7DieMlr9nD4guNI9ePHnnJ/S9n+3B0j7/WvH1/9u7/z0mxG/QeFXslD9GoKPcq0UerI9QIrbx4UNJXdnEnAQv5dcHRzixTwVgU3uer/1flJd80y/hghEf1Tx119z7af5XDJ5khH2z0BPVAEh40DCiDFWGjaKe/WMjZMxY8dZaRlJ4Ql+nQqZpAelQOGIESM19dlPdfj06TNk6NAErST52eeOooNmy7mkUJs8dZpmavB0rZOnz2qaR0/H2Pef//xbjhw9/khBiZiQd2fN1nbmOgQIT546w2O6WtOGLYOqZ2xvca8wTJo7cvxTAIo0iI+iDz74UPOte2pDmjnyt4PJpCrxvHnzZcSIUUJxOdIN7t13QJ+ddrShVoc9laana7rvo489pTcmWLZX737uzX/T32Bky/r6KU7V3Ig4kSHDHlYvNfs9bUlH/HOUJt6dApBTpkxLcjlSKJP5inSPU6dOVwwvsSuXnBXW3U+gDTUjjNCAcE7tEvYvWLBQFi1aonEa06fPdKkf4n6dX/qbAF4K6v1UIC8FFcnTTrDhLyEyOXXq2Flz4X/prCfxS67j6ZwhQ4YJudjtRJHHmJiewjyxEwIAcTn21JnmOHFGJ20pZc3+5LZr167XuTNx0hSNw5k4cbKm3zTtSdkZ37uPUJTNThQ/ZT5S7NITUW+AVJKPSyh1AwYMEmrOmKKn5twPbtzQ6s0zPdxrxcrVWnPJtDVbgr6pBUG2m9+DyCwX16uP1Ampn2z9F56DNMHEJgwaNNTKBMe8XbN2vQwbNlzGjZ8go0aN1hSdJt01qb0JnLUbJ8w7IWST/vOnxr5p/zRuSfFpeBdjmvoGvybhxchRo7UWz2VnUdqn8Z3/G59p4cIlUq5CZdnpoXDnz3kfYvyo92NiUEkMYIqSsoZTS2T4iNE/55K/qC3ZPx+VgVAL6XboJPUbNtIU2L/oJs6TKOyXXKptT9c188HTsf+WfaSmp7DppEmOosV/9HOj+Jpx9ls8yxNTQMjP3T6qs9bA4EGpC0H1Z3I3U0Bw9Zr1WqCKHPIoIRRPS5Mqtbz0/AtatIzMJskRQh0F8igGRkVoKke/O3uuFj6kGBlETY/Ur6bRiq2vZ3jNpSAbVa4pSJQ6VWotDJbcfciFX6JEKVnuQQlgsZr57hytckpVWIrs7bEVfaMg2qQpFG1KWheB+1Evg0q+7gLBsOEjpUu3GC0O17lzVy1ON85ZBdXTcyJclSr1sIKrafPP777TokPNmrXQgokUWqLSLgUDqQBLBottO3aJd4mSkuXtLDJg0BBp0ripvPH6m1qMyVznUVtydVNQcZaH6uic13/AYF30H3WNJ3kMoTJP7nzStcvDAnWPe32Eb0/pcx91Pl6ejBnfEBRhd0L5pkorFYLJZIKw26Jlay10NtnGTMjBT+E/Cn5RqZfq4cwTLEaORaavZH47i1SvVkP69OmvhQ8pQHjjw4c5593v/Ut+8y1HjBr7k6cy3yiYRj2FX0LUbKG4I8XlvviF13C/LznXKVpH1h4UDkMTJk7WLGUtWrbS9MSmzsD9+/elW/ceQvHPiDbt5Natj8wpup07f7Hs2OkoKuZywMMP8sMTKEohx/j4vjJkSILyJuqeQIbXtGvXXpq3iJCZsxItQZEF1VEoq55WibdfnuKWvNPjCv/UWqB9x46dtRgciRFMql7em4JZTZs212Jxdn5CcdTAoFpaAXje/EX2R5CLl67I5KkzrboYLgd/gx8IMrwDmeUePPCseDFPe8b11lSaFDg7/v5JfRKUCNJiR0V1kv79B0pYw3AtBokSj7GlQVgjLSrYvkNnl3o6nDzj3UQtYvbfqoB8+49/yJWrSVN//5pPFBvXS6tAo9Sm0JPrAQpVUnB2zxP2yh84cMgqmIzRoIxvuSea9j25HoC/2auTu7cjEx8FXxuGNfZYN8O9/ZP6feL0Gasw7JO65i+9Dsldbt/5+BedTp0dCiPPmp34i85/0idRq2ryFM/Jdp7EvZ6YAkIFaiaCoaCgWtKhQ0fzUwvRtG7bQYuymJ1lfEpL6lde1WrhZp/7dvGSZapUNGgQJv/8/gfrMNboBg0baZpKmDHVJBPnztPMHc//5W9StEgxawKQI3/48FHy8gsvanVX6yJufyTOna/3Ir2pO31487ZMmDRFi85s3rxNXkv/mgoe9nYdO0eLqUJq38/fFEacNNVVcEUwy54tpxbkM+1ZALLnyCVjx00wu6wtFv+oTl0lQ/rXXTwwNAD6RtYHQzE9462sUVjXqUoNTZo0WYUP0y46urs8/9wLSa5njtu3V6/fkKxZs2sKVwQDTxTTo5dW/fV0zOyjiBxpbQ3B0JLTsk1hQdPWbA2cZ/HipdK2bXuz2+MWxfJfPz4sXIcVB2vS9p27LeHQnOip2BDHTHEwPHBY3j3RhUuXhGqrdiKNYZbMWa1UqmTOahXxMC85wmLD8KYuCnPT5hGWYE1V5HeyZJX69RvaL6t/o9Ta6VGWKfP8tEdJnjZ9ho5f9tstR/aCdLT99LPPhCrExnOAkoSya78Xrnb797Q/E3+TUpFFiSKV7pZ6Y7E2hZh4FgwUyY0HXPLFi5cUvA52OnT4iFSoWMWaSygJoaFhWuBp2dJlUrNmHW1OP+L5M7Rh0xaPhSTNcfctWV/ciYrPeNMo3linbn3rW9InVarU0EKdFAELqdtAT6VInZ9fBWtM7d1/QFM6uxf4cr+P/TfKLcW/DNEnmzY5lKDI9lFa3Zhj9CvWUQqXQVTtpaAdxfZKepeyMu1QQPbd2fO0IKy55uNuk5sznG8fJ/brme9LnYnuPeK0grj9OH9TgI+FmKrRdmKM0N/uNGv2XN01LGG4VnnmR1hYYxlvGytUU587b6H7qb/oN4UI3cuHMb7dC3vaL04/e6LkFHz6z15gEC8aXqN9+w96zHxn5wnGGMbzMG/NPKcQp/s3o3I9VawpimYv9PovZ6FRzmX+mmt6egf7Psa+nedyDIWPPjPk3ndmP1v35zPHON9e7NbsN1v3+lyPmlOML09FSeFvj/qG5l5suUZyUCPznFT/JouTnegL+3elf/lGdmKOcH13Yn/XrtESHt7Y+lZUaJ89Z5429XSOuYb7e5l11Bw3W/1Wtvov8OnBg4dpxkX3lO/u6YKRgaJj4iyDCF5wvoP93qw77gVHzb1/amv61bSjkCLGHhAm9pTRPHNy882ca7aKhvAwGPgmyc1NzqWf7Gsac4eq6Xjm3ecKbe1ETTHQQO4U0yNWU/2777f/pjbPP7/73r7L+pu+dV/HzUFkLftabb6/feyZscgxsuFhyLK/I9fy1K+/pF7QE1FANm7eIqPHTnDRjFEYXk31qhw9/rB+AR124KCjuA2fomxpX8n4WkbZf8BzjmY+TqEChSVd6jTyo+u30/4Ej79sxSqNqbC/PF6A
XDlyJfm4xYuVkJA6dc23cNnCuJevXCOvvPSyZM+aXW7edsX52ycZqVhzZMuhFUDtF9n93l4ZN2FKEqYBTGzAoKFaRdS0v3DpsryRMZMsdYOQcBwoxjPPPKtFxUx7trvf2ye79x6QiFatkwikWP4MMVgiO3SSOYkOhsR+MxkGDRosZcuWl6++dhSXQnHBozN8+KNhS7Rbu36TLFu+St584y2tJG7uZ9/iiWrZqo19l/7N9+nXb6AsWbpMdu56T7xLlhYs1lDivAVSooSP0H+GeN6x4yZKSZ8ymprPPmnwfNSsFSLDEkaoMEnVcYiKtniUgGYYmjR5qtSqXVfKlCkrVEWFuO8rL70iRQoXsZ6BidkzLl6KeZWQuF69XQQLCiNVrVpDvRUwB+Brnmjzlq3i7++qgGzesk3eeSe7JWBx7wL5C2klZXOND27ctKqxwlzDwpsKFnFDzZo2l7y585qf1havIv22Zu063UeqxMDAYIWdwPB5d6BhKL8I58BYoPdPnNbzcmbLIW0jo+TuPYdQ3blLNylbtpw0atzMStF7+dJlCQqupYoI5/boESely5TVschvFru4+L7i61tOVtv6XW/k/I9qzuGNmiiet1//QerV+fjuXT3aPSZWfEqWknHOPiWeqkKFKjLZA8yNBbtJ05YKweRvo7xwIQRLavQYIgd8pcrVtQItVZdDQx3Cf7MmzaRHTE9ttnLVGunRs5c55Rdt5y9YKDOcXpjDhw5LuXIVxK40d4jqJDt27tJKypWqVNd7bNiwUbK+k90aY1R4t8/fx3kQBHO/chUFAR5au36jwGf4B6QJ2JKhkaPGSvPmrfRnlSrV5PLly1pQs3ChogoBZK517tpdNm3eak55rC38d+iwEVo7olatOpanAUjpqNHjBB4XHd1DatepJzdvP/Q64akJqVtfBem58xdKbFwfrfTtftOvv/5WgmvW0TENbMwQC7n7Yr5p8zbrGiya3BfC+9vfqXC+t++AVvY213mc7dZt26VXr96K4e/cuZssWODwGqEA+AcES+06da0K7Vxv4qSp0qlLtFacRsBgMZ87d57CNletWquW6m7RPbRaNu0RFPCMValaQ71WVP02hMEpok2k5C9Q2PK6klb05RdfluJe3vLurDkqGAAJNAYm5iBzODAgyIpVa9KkhXpjGWMoMPBIn9K+ikww90IhrVOnniqgQAhz5cwtDeo3lGvXHR6RtevWi4+Pr45jc05y24WLlkjp0mUlKLi28LehVavX6HfftHmLegrdv6FpN3HyVClVpqzUq1ffRRFZscpxfg3/IBk9dpxpLnv37pNe8X2Uz8EDgoNrqxeM8VmrVohERXVWuCknUDGcdYj4AhAJIAXsc4U5AC+jHsr0Ge/qPRCchw4drhXLgfjixYQoXt2te08p61deSFlrCAUhNjZeeR4QIhRs5oSdUM6GDhsppUqV0crtnFMvtIGULlVGK6DTlr5j7QJaRzVtjKwY/jZs3KLfp2CBgurF5lpAoBcsWirUy6Dvp05zNXaae3fpGq3zyQi+1LKqWqW6GiIQ3uG78DRkh6JFiyvElHN5v1dfflVy58rtArVGGS5Txk8iItpYCifpdDGUQqyrlatUV9SGMc6yntYLbSjFi3trn5pns2+BebdtG2mlsWfMTJk6Q3lKs+YtdY0y8jype9O+mlpKl/FTWD/XYV61bhMp3j6lFW1jv7b73xjO4UfwmvdPODystAGVEBISKpUqVZWBg4ZacxZ45+ChCVqpHe8yiBQqyEMLFi2RtK+mkXx58ipvZJ0idfDqNevUCF63bn1td/X6BxLaIFz8/YOka7cYF6W6bbv2Qt255Ah5mTHOfan7ZKfde/Yq/4c3xcf3s8Y9bVDq8SLDT8aPdxi4+d65cuVWpIy5Tu8+/bXdwsXLdG0uUqiIDBqSYBkPGNOgkeJ79zWnyCeffqbfZFjCSA2BuP8gqaHOamz744koIAiAMGk7YZH821/+psJt9ao1tHiU/TiTxrdUGcmY4XU54FZx07RbunyF/PUvf5Eqlatag9scM1u74mH2hdQNVdiH+c32iy+/kjy58khoPccAsB/jbxanO3fva85rBHIjrLm3A2bF8Qzp0svHbsHnPAsTC43cTgxOmLG9inbv3v0kf76CQoAaxEJl6PZHH+s9Bg4cZHYpLGKyk6msWLFSXsuQMdmAQRS9qI5dZf78pJa+Pn36CgzcUGLiXL3Xli3bzC6PWwb9tBkO7DpCdocOUR7bHdh/QAVWu8JAw0NHjsmrr7yqBfqAE61ft0Hy5smnAgEeKmINYHgfOV2XsxPnC8LJp59+6oCSNG6q92Ogt20XpXm9jxw9Ju9kzqp50zm4aMkyyZw5my7M/FYGXidUTp8+I3PmJCojQcDnHpUrVdExQlDfP//5Txk+aqwqfECTWHyMlRyGhJB+8NBhQQjK+PobsmjhYn0W9/9wi1eqWFkWLVosW7ZsFWJtOkR1lO7RMZawjKWhefOW2ufdukbLg08/c7kMFj4skSwChkoULyEVK1Y2P60tcKDq1f2lW7do3bd//wG1rBOnBFwsbep0kvmtzLJk2UrZt2+/Ci0TJ05Sa9SUqdOkZs3a2jcorAjoWMdPnTolTZs0lw4dOuk1L168qAsjP4hPGDd+kio8WEC4z6Ily7Uw1PnzFyS4ZogGSloP6Pzj6tVrqrAjBF+8eFnHDkow/U6fZcmSTRY6+5T3p+Ck+xziUjNnzpK3Mr0ts+bMkyZNm0u1av4qcHNs2vR3pUCBQvLNN/+nd8VegSBx+PBhuXXrloSGNlSFjLF75MgR9eKMGjtBwNIzL1evXuPRqu58hWQ3BP+uW79RjwPpy5kjt+za/RDOBcNHOEKIQYAYP36ioMROnzFL5zyeY5P3nlz1V65cS/Ze7gcQkOBFwL0MEVPEwkTdIkNjx45XryW/8eBhPUUQYaGDEC5YIKG9e/fLkSPH9O+f+g+lhYX75s2byv8RbLF2UjgTIblOSD05ceKkFsw01YgZa/CmxYuXCGMmPLyJLqaerIy0ZSEjHqpAgcIKMXG3fppnZI4i2EAIKwgOU6ZMlXr1Gqj3DmEFAQ168MknAg/9qeQheI8wWAHdxfiC8L9y5WpVpHfsek+Yfwh64Y2byffff6+K/87dexXiVze0oRw8eEgQggsVLCLP/+05WbFyjQqSxYoVt6CqGEtY6L/44ktJGD7S8mrdvHlbeqqn44A+a/HiPrJ//0G5/dFHUta3rPbpnTsfq9JVrFgJ9fTwbngsixUtJk0aN9F35T/GqDFSTJg0VQ4eOqJeueYtWilUlDZ8QwRrFBHmYK2atRUajTcAQkhjPfkqGQ+ONhKROYlzJePrb8quXbvUuMY8Z34Ri7Ny9TpVgDp26iLVqvubU6wt33bchMkydfpMOXL0qAQEBEv7KAcf4vljevZSfrV3337Jnj2nxj5xcps2kToPEH6uXrsm9ZWX+cvJ0+fk8OEj4l3CxxKkMdIwZ5o0baFGIXgz/IdvfezYccmQIaNs3LhJ3ntvr/iUKiN3793Tds8+86zCyfcfPKw
GDGIxGBMIuidOnJLgWiFW5XAUfuKPUHYQVIsUKeaCDjEvfP36B6pIzHQqOrNnz5G33soi27Y7alYgAGO8uXv/gXqhfcuW11g8YJrA0PGAcA3GNvFoeMqnTZ8pO3fukiJFvfRccy+z3bVrt+TLW0BjoNh39tx5/X3mzFkB5ZHq5VSSOVNmlcnwlGbPnktWrVqj4xOYY6VKVeTcufPqealSuZrynhs3bkiF8pUso9nadRsEwxKEok7NqR07dso333yja0afvgPVO4JhJl++gro2mecz2x49YuX5517U8YjHBUgX8mRE63aq3AcH15L4+D7afPbsRCnuVULX3Hv37mkfNWsRIaBnkENZI1GQ4CfuRLHNMeMm6rfielWq+itfxiiD8oeh5+TJU1KrdohgVIeQTShuWKRwMZ1LxJ9VqlxV+4QEQ0ULF5PIdu312Yn9+eszf5HwsMbqGR83boLsO3BQQwJ4Nvhjtao1pEaNAOvR4JX0mSfCIBLeqKkQK4LhoUgRLwsST5ICFD+UVf5hiGhQ3/HMyDUYCoECrl27Top5eauySpw249MOZcbAvmHjZkXUUAOmcaMmcu3adV3nl61cI9c/+FC/X4eozhrHCs9fs36jGENq3nwFH7to9BNRQLp07ioMGHfiZYMCg+XZZ57RSQ8221h4fvjxRyntU1oVEAapJxo0cLCe17Bh+GPj+xCKqlUPTFKxEwUkd87cySogf3e6wv7+1TeS6uWXpVKFSvLVN0ndY8BDEhPn6XOV8PJ2UYywwDOYzpw54/I6CFlounbm3bRJM8maJZsuUDSGARol5M7de/J6hozSv/8A6zpYbOYtfGhNwrJjPAhWI+cfuIQ7dY6WBR4EZbTxrO9k03cgIDbTm5ktxuF+HfObBQkrLUGj0M5de5RxsN+dEDARTNyDP5n8bdu2k1Y2eBsCY69eDiZy6dIVKVigsAoLuGaBmPD+LAYNG4RJhnQZ5IMPb0n79lHqFjT3DWvYSIidgVAAifWBqUBYgy5ddhRPQsGpUKGyfPKpQ+GrW5eK5g6vz/oNG1VYnTVrjjKxUj6lxcurhFptsVhQjdxQhQqVZPJkzwFiZ89dkJLePhq/AbwuJKSeWgUYk+4U36u3ZM+WQ5776/Mu1n6Ux6bNWkj9+g3UctqqVWspXLiYxji5X4PfJC8wwiZCN32PAIw1q3VEGxXSjfcLQbR0aUf8EMpbM6dVnOugcJuAeJih+U64tfGqEB+A95JxaGjDxk1aaXbNmrWqeLyW4TW1KJrjZsvixqJnt2JTZddYR/kO450eECrfcy9P1Lt3H8maJascPnpcwBojvAYEBKkXBgEQgRclCiGIhA65c+dVRs214DG8u1ncN2zaKgZO1bVrdylUqJjExfdRxcTTvT3tow+nO6GNHNeFsmG4KlvAQQk2L1HcWwVojgMb9Q8IkrZtIvVy8xYsktFjHJ4uGH758pWlevVAy+rn6Z72fSz0/v4Bki5tessyz3EWmDx58suA/gNVCEQpbtHC4QHhOAIgcUUQVnbjuSIwu3z5Smqx3Llztx5/1H/cH08WHg2KtaLQYNUm/g/PHEI+RDIMBFpozdoNEt394VpB0oWAoFr6PbVBMv+hoGBYQgl1p1u3P1I4JbzZELGBpUr7qlEBRXfY8FHWmkA8XKHCRbXatCehxFyD7eEjR9Vjt3Xrdt3NHGN+xnTvobwpLraXrgWMhZ2796hhA4v1Dz/+RxVsTiLesV7dUOuyKKHwGBRWvJRDBg8VvGK9esWr5/vWR3d0Po6wBRVjWQW6ByF8jRw5Rv9mnuOVRDBjDYEoTDrKOa5+/PFf1hoD/8a6iRCEAF2rZi1JlzaD/PCvH+XLL79UT5XdAhwYVFOOHz+u17x9+6PHik/Cw20SqUydMk2KFfVShYaYHXj64sUO4w2Cr1nv9AYisnr1WrVCm98INMCeeavWrdsJ/MsQf2OF5Z0xVjC33z9xQg+jeGbLmsPyRBKHGhHRWo9t3rxV8uXNL/bx7VvGT2GJ8CnWdmjr1m2S6c23ZOmyFcpLg4NravypHsRrnzhPyperoN8Nz/dLL7wkYQ3CBMgsCjaJLaBTp06LX7kKsiuZIHRiNFHYIXh/2jTpZZRT+MTYZggDYbv2neSmM36Ntb+H0/NPm5iYWBchFkVh/rykhQGxyDdoEK6Cubk28NTNmzert4I1uKdNlmvZsrXUqR2iTfEc422AiEPqHtNTFbBLly4rcsCsw3j9Bw0dLnv2HpCNm10Nm8R8MX7xzONNeunFV4QECO6EwMxz7dt3QA9RJb1wkWKyb58jw+Xo0WN0jeYgCgJGQ0PEHKM8GMKgAqIEI5M7YUDBQw4xRuHneAqN98m0J9AfKDUKA553vDoUOIQwaObInkuVY36j8DDHoPsPPlUlwY4wGTxkmIDSMXTg4CE1sMA3IQxKnhQQ+BieDbt3BON/5rff0bnUt98ATcBirrtp02blw8jcWhjZBj1GyUBBg4hlbhcZZcHX7PBHPDwGYQIki7FDch0U2YYNw5T3fXz3nuD1IuQC2RN+bAzJ5lmS2z4RBQSrQmysAwbDje58/LFLFhoshLhzsTyQKQjC3l/ap5S88QgI1qLFS/UcH28fC66Q3IuY/YMGJ3jMOAEsAgUE64g77dt/QC0MQHr48BnTv6b3Bd6VHAHDeT3D63Lj5sPgYBQQhDUsH3bC+owCYsfl4crHWmogFLQ3giJ4e/rKDsMh6BUrLoMX5psndx5119rvY/7GktShYxePCgjVswlEj42LV88C2cgMXbl2Xd3yBLYSMwPsCsKCjVV18OAhaqXDLcCKmwAAIABJREFUgps+3Wse4WPg/Qmqd1dAuI4GjPZzWGZhrsW9SqpFmGO4DrEMwgSYiAgPHdpHCRCWjh07qTC+fed7miHCYN3NNbEGGerbf5AG/prfCBDzFixWaxHxGZ85szkFBgYp1Ip2EydMkpzZc2qFc56xe3R3XURR+GDIH374MKOSKiRTk2bB4joIKYGBNeXrbx3wNvZh1Xo709sy17kY2D12WEc7deqsViYWbggLPgsSHgms1Hx3E3irDdz+ow1BuBAW2dZt21sxCNHdSALRzDqDhbJi5WrqlsWyBjPBemGIhWPTlm3iW8ZXiA2C8GR5FfXSOCEjqJr2eCpQVhA+o7v3kNiesbJ8mWtWKtqyQCCQ7tn7MD1y4ybNVODnOAwrLLyJWq4Q4pN7X8YCVmhDWIGyZs1pvS8CFkHzxH/4eJdSCIcxeJhz2BKHY5RUYmHwpjBnsHAlznXNXGU/z/1vICXAaewEZh9jTK/43hogXta3nMCg3QnFbtFixzdH6apVq64ybbJa1UkGJmqugZA/J3G+CtDsA8aJwIWCYwjrmFHWUXQ9CUBkyoqOiVVPAIIsiyqePpR+AtU9eSXM9c2WhRZ4QtmyfhLRuq3uRgAPDK6tizU7EEQxIkF4ykm6YAgjSYOwxpawaPZ72i5cvFQFQvdjMT3iLMXS/Ri/x46fZHlF8ZSRLQhhD8/FDKf12dN57Dt3/ryENgizUpwC1UXJZT1j7rWP7C
ADBwxUgZP4g9C6ocq7+zn5HNcYNWqMREZ2sG6B8IsQtm3nboXp4GXRud62vUyYMFHHYqfOXWTJEofHxjoRSMu//qUCDkKWIbLpATcxsRsISCSEwfMLGWH45KmzFo9DeGWuGC85ln6gcvasiPAVBF3WJbzU9vgSc29PW+AlK1evVX6PooWSC+EVRhAMCqqpFlX3cxcuWiytnWPIfoz4lKrVAlzWG4Q2IJbMtwcPPtHYO+PRIjOal5e3ZewMC2uk3guuCS/yKVnG5T1BRZBJDsIosXjJcsXgv5P5HVm+3KFEE0tkV4CAb+JNZS0k6QhC+6GDB1WZr1uvgSIuuB5rWe2QUMs7ojex/UdMG/2O5R5q0TJC2ju9z7t271UoGfuPHDsu7aO6WNn2yAgHPzQEFAo+bAi+TDZFd8JrhmcJb48hjDg7dzggZHiLRjplNI5PnjJV6jiF+aiojoLhzhCKKx5VjH7eJbwtr97JU2f0e7zw/IuW0sw56lWoVUdatmil8B0MIcRIXvaQKfK7H75X7yVWfYgkRUHBdeTmLYe8NTxhhJQsWVqPAW8rV66iGJh8o/DG0rFjFz3GfyiwGFYm2WLBzEHQBubdzT62rI0GRmb2A8Ne41Ri6jdoZHntmTP58xdUzxNtSUZjjIKsn0DDMLQYAqJuRwzx3DUCggWDDgSvGDUmKQSLeQTczQ6VhV+XL19R+We7yA4y2nberdu3VWG5cfO2Pg8oEE+E17RRk+bCPMMoYo/lhreYviQTJnGHwFp7xvbSdxgzeqwVeuFbpqykS5NeiMt9XHoiCgiZmbDeGDJ4Rffc7ljem9s0v3Jl/VSIP3PO1UKMsgBuHbwjUBk8KBedlmxzD7ZcH7ehofUbt8iadQ44hDlujrEFex/uXAjt+1m4+/TuqzEFTDjS7L74t+ekaxcHtIW2ZLPYstXhGuX3jZs3paR3aZe83F9//bUQfA+0wE4ISgGBwfKtTTBFWCKYHK+AO7G45M2b3xJcyB4Eg+RfQsIITfE6btx4VUBIG+yJunWPFQQkd2JRsSs29uPgfYcOG67WPYLgGZAQAh1uRiAC3J80tH379NMkAPbz+fvIkaNSr35DdbW7H0O4xyoLoU2T3hXXMQQO19u7lFoNEc6CaobIZ5+7pn5lPIB9ZEE31KJFhLqjze8hw0ZqXBC/Ef7BVuMyRjkEImSspExaA7PD+o57250U7uBXXtY5GQPHUTAmT57q3lR/852DnZZee4Nc2XOq5Z59wMrA69uJyYywCLGg9urd3/Lc2Nt5+rt7954WXIxAunbtO8pep0eR/gaKYghBtUmzlmqp552aNnVYYWDQKN5GMKTfDKwH6FDdkFAV0on9sGdzmfHuLOlrE4S4D9hi8Mx2Onr0mOTMmceK9eLbEzSIQmSIRAIoWwcPHU1iGTVt6Cd7jA0LV8fO3VQ4Mm3YwihJA4mL25004LvfQIUDcKxLl27SpYvDg8ZYIonC4xAel337Dz3yOyHsEh/C+9oJpW/OvAWW0E2fkgkKwkoHD3kUwTsQ8BlLhhBAgV+4E8+AQcCdEAYGDh4mGzdu1kMYjcD4sxh+8MEHUtLHV867BX/brwFkCMGeuCGUFzwOxPlAQPVq1amnMBp+I+QbDwzWf7t1ksQfBO5/7jbX7fcyfwNXs8d3mf3EirEQe6IzZ87Jug2Od+Q40BujsMMD2ngQeO3XgZdzjkniwGJdxrd8klTqnMP8+/vX3+o39CtbTrHd7EdwxeNtiLUEIRBFof/AIUmuxffF8uxuGcZjAISTeDZ4siF4Np5PO3WL7i6tWrdVgZrvA527cFHvZ28HTwWG+MUXXyg/B9JjCMU5IqKtbN+5R7baYhzMcU/bPXvek/oNw4UsbRjcWPcwKtF/8BkEYPoCXm+38HMtxj4WXjvxzihdwMN693mIOact14awtqLU3LvvMJiBbyd20ATWAjUz/BXYD7AVeLUh4GBAroBqktgGxAKEULvMaVBBIJ1i4/vjJ06SoQmucZM4oFD0WUvw6ENAKqtU85f39u3X357+I35q8uRpyrc4TpISDJQYTwxsG6E+ukcvK/gXA6LdA0I8C8HPhmrWqqMp4c1vsyVDXP0G4VZMIvtRqHfscMg2hQoVkcE2SzmxVCZ+rkXzloIyB9FXJLeIcMZ7Dhk63PLknD1/UfCe9ezRU2N88XRAGE4c8WmuMb/Ie2Zd1oYiClnFKEVSBOjosfcluJYjHobf8BESeUCMuTKly+rf/NeyZSuFd5sdGFJLlvL1KA/hNTc8kPbwZsYN/evj41Bw2I8sxJg9dMjx7EDCUJgh5kzRol6WwcWnZGlLocXQUa58JQFdYYgYU7uR6e7du1KxUlVrzACR9RwD+Z1mXwU2aAgjfXX/IF1zkcNRDgxhPMEzhBJEOQw8sXayJ2iAD6PU8u1On3loQCezKc8LYYSwJ9Ax18KAZtLHo+RgWB87NmkSJdPevn0iCggBS7i1DZGNKSCwpi7mWBQcNSj2S/Uager6hRGBb8z6dha1FjFAYR648rFQEAS7bLkj6BDmUrhQEcmbJ79aUHH5c73z5y9q7nOsOzAphGK8BkEBgeoKxfJosNm4Hcmkkuqll6Wcr59aXA1TRiiDgdvpH9/9IMWKeTmfbY0uFATxcn2sUgxogl7nzHV1cQK1Idgcd7adGGBglI0lyhxbsmSZ4k/JkEERId4NLdi3bAUBzgPhDcHq7gkTj9UZCxMDA4EIwiXNOcSiDB4y1AVSwsLWvHkLadK4mTVg9KRk/kP4XLJshWbocW8CBAa8KG5Ze+YR4hCMMGU/BwYTFBgkjcMb626spOnSprOYKAJx+vQZZP8Bh8sVdyLu0Rsf3NDvbbTy+fMXqJv65MmTuoChdb/04ktaxwHvAtYFrOSMCRg7CiwCIEJ13rwF5Ny5C7oQIizhHWB83nvwiQbpIkxhYQEHDcOHcD8SkMk+4FxZ3soiObLnlA9vPVR8aQccbcHCRVK4cFG1rjGWWcBRzF9/LaNluR01aqwULlLc2ea+WrQQfE1tEcY1iyDC5ONYHIFgcT4K1plz5zW4mfECDRk8RLJnzSHnL17S++BmNbA9mG6Af6BmqOL9fX39ZOyYsQrXqF07RIKDgjXwVt3D+QrIrdt3NE4g1Sup1WVNDAhCBtY7IBdYId9//6RMnzknSVYYFuNMb74tCcNH6DxftXqddO3Ww3L58qwsNNmy5Xpkyj/idcqVq6SQOPgF7mFT8wXFDSsi1uWyfhWTzE3uwZibv3CJS/YV4Cp4FlGcgfhgceNaYF4JoEyO6Jd2kQ+z/NEOnoIVllgELEowbuPVNNfheKfO3RRja/Z9/vkXyvNIwdiuXaQKfRzjHT3FPLBIksWNoEn6BGEAXLCBlQClYx8KG2MBodlOCDVbtu2QfbbkH8wdBBMWNBTsxk2aaz/cuXPHY6wEfQmOeNWq1SokBQYECzwXPvbe3v1aU8Jk3ho4YJAGM6KcMk7wplKviEUrLq6X/OXZv2owL/1uJwTYW7du6z88pDPenWN5Iky7VWvWJZvFDOGPdO12zzMwwEGDh
ykGnneE32LIAvLhidatXaexU0CGDBF7Q98TKMv7kryE4G7WG74ttHnLdl2f+HvpkqVStIiX8nSMK1jI5813eNow6JAy+cMbH8qDBw+U71D4k+x4GEwQmFnvgBBev+HwxCIMM+eJBQDygKEBD7sRArgnmSGZc3Pd1igy2vXt11/4rtyPeJBPP/1cvfbAToFvmuBwtq0i2ugct6dJht8b77i+rO0/hCASMfBceLcwOiKgUV9k6PDRyl+Ac6HUuXsGUX5ZJ4GJoahgYNh/wKFYwmPSpE6n8T14eknBTMwHtGPHLhU4jcenY1RHeTPjmxqjiTAJVKpKpSo6F5mbWTK/o7Ge+u1WrJLI9g5PwoL5CxTXj7JEzGPaNOkUSkO/+pb2lc6duljGEeI6A4Nqy7r1GzT73aFDR2TpspW6tgCbxavAewJNYZ0ksUZyqZMR3qvVCLLgVXifkTUMjIW1bPnyVRpnQoIACGNkWFi4fHz3vty7/4nCkFFqMf4gWBJfClTGBGrrSU6jbdNmrdQISXyViSMjvgPq0aOnxnMgVGK4Q2YjsQwUF9dbE9iQ1hz+kSZNek0AwNpHPASeJJRK6h9hqYeAK/MuxEXCH7lWw/Amem36//KVa3L16vUkSTiQlUgQQHp7viFwIWILjHLYpXMXyfRGJlXIuA4xkiBN4NmUekAhIDU5hhWMa8QruPMXno/AdvgRcideQBRtIJAk8wDuSywH1yD5DrIdMhG/vUuWsgx/eGDSpk1vQU6B/JFA4f2Tp5XHIi/EdI+xxg4G7/TpX9caRqzb8A0QKxAxVxjZSKTBvdyJfiSBCUkh4AtNm7WU2XMc2f+o80ZsCvEcfAf6jLgpCGQK/ARYFvIoHiUj43AcGN2zz/w1iXGVMQQE/qOPPlYFuWdsvCbrgX9wf9ZTZFUSaWx2wlT79BtgJXCAfzNfk6MnooDAbLCGGaw7bhygDAgeCcNH6QcGr23w+MQ4oEkxqf1r+KsAAD4Uq0/FCpUV+2fPJAMmDVgQbjSwy2ADp02fZWGlsXSioHh7lRDv4t6SKeObuhiaDB50DJbziuUrin+NAIUXECyEdkptgEyZ3laXMZ2EG5tMPUCpgJ6gWR86fEyFl5LeJcWrWAmpUzdUJ6B7p8b3HeCSDcN+HCwki6E7nTt/UVq1bqeWaqw8uARZ3A3xLGlSp5WOnbqq5drsx3VLf1GjAUH9/ZOn9BDMEu2cAn3ly1VUSI05h5oEMOPyfuVVULf3sWlj327dtkO1d4KVCQy0E0F2hQoU1DTCs23ZtrBGrrdZHM05S5ev1Iwf5fzKKzwLCFvuXHl0Ib1+46aMGzdRcuTIKbgRDWHhYeFmkpk6GEB0YArgeGEIuMBZ9LDIYlkkoxJej9NnzqoAjTKERRkLEa5yMr2gdNy5c1eZ6fgJjkxcMC4WxWJFi6slxwh+CBYI5Fg1iFUA5zh06DAXpYvnnT79XR3nxDwh2LRq1Ua/AxAuYkwMkUcdizfzgmdj/IEDh1jYyfDBAk5Qn3vqRnMN+xahBIUdoWTNug3SpUu0LFu2QpsMG5qgCh5MA28TC7ZRVAnEZXwQIM27E/QOxpYaJQj2WMVQ5lDSyIJh4Adk5CJ2yeCWWUAIuOU9aM/c9kR4ZQjkIwZi6rR3k+DJMRJg8QKH/ShiscUNDOabQEcWCggli7gooF12GIm5FrhemCSeVXfCAJE/fyGZ7HSTgzv29S2vhTsZb54IXkdwpp2wJKMIkHAiuVSvfAM7htecT5IGjBzEF6AMIPyRTYlMR54IJQTIKBYuoIV2tzy4eYJ/gZoZy739GsTxwIsQbOzEos/1yDBHEDTUuk07xQeDP3YncPBFChfV8U4dJLwcZPIBroJhgPlLog68DMTjmHfh+5Cph+84YOAQtTaC8XZ/HoQc+CHzAaXB/TjPg4fHQBfsz8eYpn4I8WruNHL0WDVGGE8wQfj1QsOs9cS0Z8y0aRspefLklR6xvawYOI7jfapYqYr2lwngRxDDE8RcAOtu0qJPnzFTYxjoIzLq2OtMoQzGxvaS0qV8dezYvyPB40DhsPRSs8d40k6cOq28hvteunJN5yrC2sjRDzND8YxY/QkkthOGETyffn7lpU3bDpb3kKxNuXLlkaiOXeTBJw8FBgQW1kI7obQ0atJCTnmYq3xbxlD9BmEaN0jsD7FBjO9rH9xQ2CDJIyje64nUixsZJXgfmEtmDactVlWs0BiPDDyamAv6Onv2HDregX+V96ug8aUIR6dOnxP/6gFSqGBh/b7AXoFaEnuF4QFPLAofRHAtPCTAP1jnR7u27cW/RqC0jGijiXDIIIfRxdCBQ4fVos9+1i0jaPEOeA2A7LB+o1AlJs5PNskFyi3ZtAxCgrULY5GBVCIPYJQinsx4qVFQ/7+98wCvqtjacKSHZkWaBREEC02ld2lSVLxXRUC8otIu2BVEpBNaqGIoKhY6iEAgBEQpUhWNKAJCwNA7hAQCgZDw/c+3kn3cOQTEa0IO5//W8yRnl9lT3r337FmzZs1wFjv6VNGfjdY8+rJEbPjVnnl+W1kW744y5p3KE9tZrOP5zeQ6WrNS/LXoF9q5y6u2DAB75de4rMj0Z6CSQ+tAYtIFc36m4tGxY2eEjJtgXHlv2PimUzidw88mnDe/yGLFins6XWiFZp1FH55PUhrPDlPnl3Uy19RiJ8rS5SvtO1S+bHmz/O3df8C+yaVL3wfOjEbh6AxaUjmBD4UNcKbB54W9+I41zE56/aPlne9lg/qP2jPE956ykVO+V3jI3pXhrmUR6MxN3yY6eB86chRDhgbjzjuLmR8Or+MIoEfqN8SsL+fYJC10IaBDuNM+Zhj6ItGSyXbpsBEfWJ3P4/SrpL8k3xFnJAOPu4XvPSfsoeWOQwHdwjqZs4uxY5CTBrknA2InTtPHmtu3gR3o3nX6mJBxNtOnOz52DD7wQDnPcSpFXPSUShLzyDgpbNvznWQbPXh4cj3EsOxscepZd7zOdrooIIyMGhV7I9kL6f5QHDt2DL9v3epxcGVYKiz79+/39ETyxU3u6dpt2iU/rmkJK3TO1sGxy+4Pa0xMTKr4vecfZ8PA6eGiyZlj+qkN86FkLxJ7DGmCpjDvrKSdG8cH2e08zvBM31vYQxU0aOhFDSsnHHuMOdNSWgsussHBnkzOJsGyuIXpsWeIv46TIc9zRgn2nFKJYk+LY8Lkh5dlZZy8xm2NYaOTvZA0J5I3hwBcTvhB5TW0zFBTdwt7MlkJMT7OVEXh3NdUptJ62ZlXWmAYJ+8d88UPKvPNffZC0uTM3lqnB45xslG0d9/FvZP0MWFPjFu4DgOVVabFdCgsI3vgnLnRGZdjWWDeqbg4Qr70d2HPoLcwDjdL7/NU/NgryGePPTLky2eVDeu0hPeHa0PwvjvCe8dreY4WJg5duBJh2Zg/Z8Ya5xp+SPiBoRKV1ur1VHic557X8D3ke0nh88+8scfeGabhxMtjLJ8jZB0ZGWnvjXMsrV/eG/L1HqLFsHzvOBsQ791fCZ8X9ug4DTKGZ2XKNXRY1rSE8fM697PlDue9OCHr
LSorznvlDnupbfbw0frInrRLCeO9lPDeu8tkiuGUGfb+Xeoa9qgzTbfwPvLZupSQlTO0wzsMr3W/E+yQ4YfT3SnivoZTYrPeolBXi9q502au4kec94K9seTOZ5pxO8J9Pl+Xq4NYx9E/iNaoS+WXzNP6XvB+e1t+nLTJmPWHW7nkM82FWp26gWEZL8vHZ4B1qXdeWe/yvfMWsme96QgbR7SGMV3ySUtoYaX11bu+ILOt27Z5hgw615ItGbJDgQ1e5tHN1wmX1i/LSKbsYXfeB5aP9TN/3T2vfFfZK+yEc+LjMA1nVjHnmPObzJdrMiSZRYD1Ir/5lOPHj9v3ygmb1i/vC+srpy5yh+G32c2caTl5JwfWafRdYj3M7zYb9ay/+M3jc09LE4cRsROKvL3XM2Ce+U453wuOu+f7xfYCwzpKgpMnq1sjI80K4hxzfnlP3W0H5/jf/eUzwWeZ98d9j9mByPeCf8nDmPiuxZnFj88DVyW/1HvDNlFa3wTOfjhi5Ch7tpw2kzu/ZOkMdeNx1ll79iR/C3gvWGYyYzim7wjbUe66j73nfAYvVb/yntJyybYPv1H8Ljj3gHWK3c+z51LV92wfur8trIPYs8969K+E+eG3yZsXy0FOTr3ANgLfbT4ffN74y3qKeeK+IzzOuon5YRwcFsxn0y38bjJu51njOda9LDOftcs9O2xvbN+R9mKkbM+yLO732EmXHDmahml4Cydicvy13Of4bLmtyDzHupv32ltofHCs/nyX6Ififma9w6ebAsKIJ02ehnEpswh4J+TP+3Q0e79XP7vplysnP+Q9evZFVNSuywW7Js+FLliIF9q2u2xj6Zos2DWcaVp8xntZrny1OLRI/hix4Yo+FhldhrVr11kPNytp74ZXRqftxM8P3qTJU83Z/EoVUefa9PqlQssefY6/dj4q6RW3L8XDXnvOp0+lhI2G9JaPPp6YykE4vePPyPjYw8rGnlvYqONUs5yS9loTdg5wamZv/9RrrRwZld+uXbsjeNif/kUZlY7i9S0C7Iz4fFLyGkdXM2fpqoAw4xyS4Z7G62oWJrPS4nAoZyjIX+WB2qC3L8hfXXMtnKcfD/16JL5BgGOSucgnhyq5x3r6Ru7+zAX9Fjj0ijOVOcPD/jybOVucEnfsuNRDDq92TthAGjJ0uPUSXu20nfSYB84YxWGi/ixLl6+w5y8jykhLAYdf3HJTAc8kDBmRTnrGScWb951DSTktqbtXmenQiu/4kqVnuhkdFwdTcoKXrFmzpznNaUan7+vxc7gihz2VKln6kv4qvl4G5e/vEeCQTzquc3j5D+uT/Yv+Xgz/LHS6KyD/LDu6WgREID0IcOwm/Wy4bgIXJfRVoaJEPx73sApfzavyJQJ/lwCdnvkOcnpdTgl+LQiHinCBRfoD+JNQAZkxcxZGjhpta1D4U9nSoyycPIIW8zFjQvC712LK6RG/4vA9AnRcf6drt1Q+hFczl36rgLCXhjOkSERABERABERABERABERABHyHgN8qIJzGjAvV0ElbIgIiIAIiIAIiIAIiIAIi4BsE/FIBiYk9heLFitv80wvCFvkGaeVCBERABERABERABERABEQAfqmADBs+EvkCcyNnlqy2GF1CBsxsomdHBERABERABERABERABETg7xPwOwUk7ky8LTqUPzAQtxUshDy5Am0e87+PRleIgAiIgAiIgAiIgAiIgAikNwG/U0AGDwlGvtx5cEfhIihR7C7kCwxEk8apV3JNb4iKTwREQAREQAREQAREQARE4MoI+JUCwtUmKz5cCbmz50DJ4nejxF3FUfiWArgx3/UIX7zkyogolAiIgAiIgAiIgAiIgAiIQIYR8CsFpF+/IFwXEGCWj5J3FTcFhFYQ+oI82qhJhkFUxCIgAiIgAiIgAiIgAiIgAldGwG8UEK72XKRQUZv56ub8N4AKSKkSJVHgxpuQJSDAjo/5cOyVUVEoERABERABERABERABERCBDCHgNwpI6PyFaN36eQQPG4F33umGu+64EwWuvwEvvdQOIWMnoG3blzB8+KgMgahIRUAEREAEREAEREAEREAEroyA3yggiYlJnhIfPHwUd9x2u1k9li5b4TmuDREQAREQAREQAREQAREQgcwl4DcKiBtj1M5duL3IbeYPsvjrb9yntC0CIiACIiACIiACIiACIpCJBPxSAdkWud0UEPp+LFiwMBPxKmkREAEREAEREAEREAEREAE3Af9VQIomW0CkgLhvt7ZFQAREQAREQAREQAREIHMJ+K8CUuQ2m/1qQVh45hJW6iIgAiIgAiIgAiIgAiIgAh4CUkA8KLQhAiIgAiIgAiIgAiIgAiKQ0QSkgGQ0YcUvAiIgAiIgAiIgAiIgAiLgIeC3CshthYvaLFhhGoLludnaEAEREAEREAEREAEREIHMJuCXCsimLb8jMEdOWwdk9uyvMpux0hcBERABERABERABERABEUgh4JcKSFTULjRr9jiqVq2B71au1s0WAREQAREQAREQAREQARHwEQJ+qYCcP38eR44et7+z5xJ8BLWyIQIiIAIiIAIiIAIiIAIi4JcKiG6rCIiACIiACIiACIiACIiAbxKQAuKb90W5EgEREAEREAEREAEREAG/JCAFxC9vqwolAiIgAiIgAiIgAiIgAr5JIGDzlt/Rv38Qpk6ZhpkzZ2HQ4KHo06cfli5bkW45Pp+YCP5dTnbt2o3PP5+EgwcPXS7YRedOnjyFbZHbUx2fMmUaevToiV69+mDkyA885+Lj4z3bvr5x+vRpy2JSUhKmz5iFteu+T5XlQ4cOg2WXiIAIiIAIiIAIiIAIiMC1RCBga+R2vPLq67g+b34UKVTEFJCnn26BHNlyYNToDxF/9tw/Ks83S5cj4ucNfxnH+PEf2bS5K1as/MuwToCYmFhMmjIdmzZvsUPRJ2Lx0ovtcMtNBfDuu++hb9/+GD5iFN56uyu6deuOEzGxzqU++5twPhGz54Qi/uxZy2Ni0gUUKlgYzZo94cnzkaPHMHvufJz9h/fGE6E2REAEREAEREAEREAEROAqEbAhWAmJSSjzQFlUr1bDk2zdOo98NzaBAAALRklEQVSYQrAgZSG/c+cScMFzFvCeXerQoUNweu0ZjDNRbdy0GRUrVcUnEz/FyZMnXVcjlaXj7NmziNz+B9Z+vx6nz5wB92Nj/wwfFxeHRHfiKTHR8tH13R62dzz6BGrWrGN5XrV6Taq0Joz/CEWK3I7du/ekOk4Fhuk5ciImJpXCxfNpyfnziTh+PDqtU6mOnTx1KlW4+PizoILhSFyKlcPZP30m3hTA2nXq4dDhIzgTH2+Wo58iNmDzlq0WjMc6deqCbt3eA/MbF5dsKeHJxMREXLhwAdHRJ3DmTLztO3HrVwREQAREQAREQAREQAR8gYApILt278X99z0AKh2OtGz5nDXmF3+9BImJSWhQvxE6tO+EuJRGcom77wGVDsqHIeNRqVJVPPpoE1M6eGznrt2oXecR3Jj/elSuVBUDggY7UaPbu++hbNny6NW7nx2j8vDKK6+h3cv
tcepUHKKjo9GoYWPUqlkLc+aGolfvvqhSpRpCFyz0xMGhSWvW/oB1636wY4MGDbH8jgkZ6wnj3tgWuQP7Dxz0HFr/UwRq1aprloWDh4/Y8ZGjx6BUqdLo+X4vzA9bhDq166Jz51dTKUPRMbF48aV2qFixChYu+toTn/cGh7A1btwMtWs/grXfJ+dx9dp1qFSxCpo2aYrwRUvw8svt0bp1G5w8FWeXT546A7feXAB3Fi2Kho0aY8OvGzF16nQ0adoMofMXWJhBg4ORO2dOlCtTFv36D8TY8R+jZvWalldampKSLqBhg0fxzDMtEashWt63RfsiIAIiIAIiIAIiIAKZTCBZAdmzD5UrVsF9pe/FgrCF6NWrN7JnyYaOHTsj7nSyheCZp1ugYIGC6Nt/ICZNmYY333wbO3fuxsSJE1GyRCmsX/8jgoIGolSp+6yhTwvIq6+8hiwBAZg560scOHDQlIs2bf6Dtm1fwt69+1CzZm0MGDDQEHTs0NEUiB8jImx/0KDBtt+gQSNEbPgVBW68CXVq1fXgOh4djS6vvoFz55KHiDVt0gzZrsuCTSmWAk/ANDZmzpqNEiVK4aeInzFp0hTUq9fQykm/iruLFbd0Q8PCMWjgINtem6Lk7N6zD2XKVMCwYSNs2FflKtXx7dLlF6Xw84ZfkDswL+bMnQeuyn7//WWxadNm0CJStkw5i7NvvyB8MXmqbffu3dfioNXj/lL3okjBQghbuAgJCecxKSVMi2eetTDTps9A1uuy2PAy5nf3nr24t9S9Fs++FAUrLGwhJn76uVlDLsqcDoiACIiACIiACIiACIhAJhIwBWRH1E5Uq1od1apUxRtvdcVTTz2DkLHjU2Wrf/+BCMyVGxMmfOw5TstJ1oAseLL5k3aMw6hyZs9p1gweGDd2vDWMf/tts53/5ttltt/2hbYIC1+MvIF5UL5sBTv3wegPzO8kYsMvtr9gfpiF/eSTT23/3ntKo0qlKriQMhSLw6OGjxiNhIQEO9agfkPkzpELv21KTosXbfxtEwYOGoK5c0Px1VdzMG7cBMSejMMbr7+JbNdlRej8MFOaAgICMG3aDEunTq3auL3IbbY9d85cy8OixUtsf/bsObbfp3cfcGgar6tbp56dc/8LCRmPG/LfgCFDhhovhmvz3PMWhIoc87lv/wHs3XfA4ujUsZPn8mqVq+L2orfDscqEL16CfLnzokP7jhZm2bIVyJktR6r7s2rNWuTLkw9jU+4ZLUoSERABERABERABERABEfBFAqaAbN8RhdKl7kX37sn+FGlltPN/u6BcuQo4cPCw5/T2HX8ge9Zs6NAhuXG8ZWukDbmq+ODDFsZRQL77bpXtL1wYblaKti+8iKHBwzFgQBAWpzTuB/QbYMoLrQeUSZOnWON86rTptn9PiZKmgJxNOG/7n30+yePcTqWkdavkIWNbft9m5/lvwy+/4s23u6JwwcLInTMXevfpj5jYU3jrjbdw8/U3oHuPnhgaPAzDR4zE1q3J11V86GGULlnK4vh04qeWh9lz5tn+9OkzkTN7DrRr1x6DBwdj8KAhCEvxkfEkCti5G/LmR4tnWthwrqCgQaBfysm40za0LG+u3Dh27Lj5dVA5eeuttz2XUwEpdGshkCXlq7mhyJMrN8ifMn9+mFmnhg0fafvOv3//6ynkCcyDyB1R2Ltvv3NYvyIgAiIgAiIgAiIgAiLgUwRMATkVdxrlylZA9/fev2TmXn/9DTz0UEWccDmH79m3H1kCrkPrVq3tOvo6sEH9zjvdbH9Y8DDb/3Xjb7YfHr7I9idPmXpROkEDBiIwRyAid+ywc/PmhVrYWV/Otn0qIHVr/zkEq3/QELNwOBFxtq1sWbOBlhBvKV+mLPLnzec53PLZlsiRNbtn371Ro1oN3HN3STs0Y/oMy8OKFAXqyy+/sv2ly1NPUeztHz923AQLx2FUbjl89Dgef7w5ArPnNAdyWkHIixYVR+jbUeyOYtj+x04bQrXk22XImysP3nwjWUmhInPTjTchJGScc4n9zp033+Jq/uS/Ux3XjgiIgAiIgAiIgAiIgAj4EoGA06fPIDx8MfLlyYvmjzcHrRp08HbLuYQEPPHEkzZkauWqNZ7ZsNjwHjI0GGXKlMfq1WvQ7uV2oHP6sZQhQB988KE1ikePHoNDh48iJjYWLZ9tbT31HGLFtH759TdraDs+IMwL4x3QP8iu7dunH/YfPIT8ufOgaKHCiD5xAht+2QgOTaKfiVs45S4b9LQGbN68BX/8EWW+F1WrVLMZvpwZqD6Z+JmFa/H0s2b5mDptBlavWYvDR46g4C0FkCNbduzasxfBQ4Mt3MhRyWuJbN223awpJe4ugXXr1mHNuu8x/qOJF/lacG2VPHnyWn5Xrlxl5eRMVjt370Hpe0pbnNwPX/S1bTd/7AmbtYplqV61hlmRevUZYMO0QhckD0Vr1vQxJCYlmc/HrQUKglMl79v/p1M9r61fvxG6v/ueG4m2RUAEREAEREAEREAERMCnCAT8sP4nPPZYc7CBy9mT2nXodNHsSVG79qBFi1aoVLEy3uvRC7SYuCVk3ARUqlTFZsHa4nICP3X6NDp26mzH6e9AiT+XYDM/0Rm7c5fXbTG96BMxaN/hv3j44YqgQ/aevfvQsmVr1KtbD61btUHI+I/xwn9exIPlHzTfi48/+QwLw1NbF5z8RG7fgfr1GuLJfz2FNs+3tbJNmz7L40zvhBsxYhQqlH/QGu1Dg0eAU+KO+XCc+cI0bdwUffoPxOAhw0xxadnqOXMg57WrVq9F9eo1wCl/23fsjF1eU/s68X+95Fub2rhu3fro0bOPMY3YsAE1qtdC8yeeRMi4j9Cly2toVL8hatSoheXLv7NLqZhUqVwNPXv2Bqc+pvJTrnwFtGrVxpNWv779UblyNfz8y0YnOfsdMWoM/ojameqYdkRABERABERABERABETAlwgEcHXwEydiLE8J58/j6LFjNu2uO5N09OZUvBRbkyONVc25ON65FP8M97Xc9lZYeIyO0pxul0JLBtfWoHCdC+aJvxQuyOesIZJ0AbZuB2e+4poXl5MjR46aRYO/lxKWiLNzORYf97ofMTExnjJzTQ0ycAt9ONxrlbjPube5JocTL1eDZxkoZ86csVnBuM01VTj9sCPuIV3OQpCcXpfrozjC406+nWNr1q3HkaOXLq8TTr8iIAIiIAIiIAIiIAIikFkEzAcksxJXuv+cQETEz5gx80tzaOewsaSkyytm/zxFxSACIiACIiACIiACIiAC/zsBKSD/OzufuPKLLyabH8mQoSMu8kXxiQwqEyIgAiIgAiIgAiIgAiLgIiAFxAXjWtyMPXkS80IXIEq+H9fi7VOeRUAEREAEREAEROD/HYH/A/TSTRP4kbr6AAAAAElFTkSuQmCC) ChemProt RE works well with `ner_chemprot_clinical` find relationships between the following entities`CHEMICAL`: Chemical entity 
- `CHEMICAL`: chemical entity mention type
- `GENE-Y`: gene/protein mention type that can be normalized or associated with a biological database identifier
- `GENE-N`: gene/protein mention type that cannot be normalized to a database identifier
###Code
ner_tagger = NerDLModel()\
    .pretrained("ner_chemprot_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

ner_converter = NerConverterInternal() \
    .setInputCols(["sentences", "tokens", "ner_tags"]) \
    .setOutputCol("ner_chunk")

chemprot_re_model = RelationExtractionModel()\
    .pretrained("re_chemprot_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

chemprot_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    ner_tagger,
    ner_converter,
    dependency_parser,
    chemprot_re_model
])

empty_data = spark.createDataFrame([[""]]).toDF("text")

chemprot_model = chemprot_pipeline.fit(empty_data)

loaded_lmodel = LightPipeline(chemprot_model)

text='''
In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels (Kir6.2/SUR1, Kir6.2/SUR2A, and Kir6.2/SUR2B) reconstituted in COS-1 cells, and compared them to another meglitinide-related compound, nateglinide. Patch-clamp analysis using inside-out recording configuration showed that mitiglinide inhibits the Kir6.2/SUR1 channel currents in a dose-dependent manner (IC50 value, 100 nM) but does not significantly inhibit either Kir6.2/SUR2A or Kir6.2/SUR2B channel currents even at high doses (more than 10 microM). Nateglinide inhibits Kir6.2/SUR1 and Kir6.2/SUR2B channels at 100 nM, and inhibits Kir6.2/SUR2A channels at high concentrations (1 microM). Binding experiments on mitiglinide, nateglinide, and repaglinide to SUR1 expressed in COS-1 cells revealed that they inhibit the binding of [3H]glibenclamide to SUR1 (IC50 values: mitiglinide, 280 nM; nateglinide, 8 microM; repaglinide, 1.6 microM), suggesting that they all share a glibenclamide binding site. The insulin responses to glucose, mitiglinide, tolbutamide, and glibenclamide in MIN6 cells after chronic mitiglinide, nateglinide, or repaglinide treatment were comparable to those after chronic tolbutamide and glibenclamide treatment. These results indicate that, similar to the sulfonylureas, mitiglinide is highly specific to the Kir6.2/SUR1 complex, i.e., the pancreatic beta-cell K(ATP) channel, and suggest that mitiglinide may be a clinically useful anti-diabetic drug.
'''

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df[rel_df.entity1!=rel_df.entity2]
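# (Added sketch, not part of the original notebook.) get_relations_df also returns a
# confidence score per relation (as a string); assuming the helper defined earlier in
# this notebook, a quick pandas filter keeps only higher-confidence cross-entity pairs.
# The 0.5 threshold is illustrative.
rel_df["confidence"] = rel_df["confidence"].astype(float)
rel_df[(rel_df.entity1 != rel_df.entity2) & (rel_df.confidence > 0.5)]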
###Output
_____no_output_____
###Markdown
Train a Relation Extraction Model
###Code
data = spark.read.option("header","true").format("csv").load("i2b2_clinical_relfeatures.csv")

data = data.select( 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel','dataset')

data.show(10)

# you only need these columns >> 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel'
# ('dataset' column is optional)

data.groupby('dataset').count().show()

# Annotation structure
annotationType = T.StructType([
    T.StructField('annotatorType', T.StringType(), False),
    T.StructField('begin', T.IntegerType(), False),
    T.StructField('end', T.IntegerType(), False),
    T.StructField('result', T.StringType(), False),
    T.StructField('metadata', T.MapType(T.StringType(), T.StringType()), False),
    T.StructField('embeddings', T.ArrayType(T.FloatType()), False)
])

# UDF to convert the training data into named-entity annotations
@F.udf(T.ArrayType(annotationType))
def createTrainAnnotations(begin1, end1, begin2, end2, chunk1, chunk2, label1, label2):
    entity1 = sparknlp.annotation.Annotation("chunk", begin1, end1, chunk1, {'entity': label1.upper(), 'sentence': '0'}, [])
    entity2 = sparknlp.annotation.Annotation("chunk", begin2, end2, chunk2, {'entity': label2.upper(), 'sentence': '0'}, [])
    entity1.annotatorType = "chunk"
    entity2.annotatorType = "chunk"
    return [entity1, entity2]

# list of valid relations
rels = ["TrIP", "TrAP", "TeCP", "TrNAP", "TrCP", "PIP", "TrWP", "TeRP"]

# a query to select the list of valid relations
valid_rel_query = "(" + " OR ".join(["rel = '{}'".format(rel) for rel in rels]) + ")"

data = data\
    .withColumn("begin1i", F.expr("cast(firstCharEnt1 AS Int)"))\
    .withColumn("end1i", F.expr("cast(lastCharEnt1 AS Int)"))\
    .withColumn("begin2i", F.expr("cast(firstCharEnt2 AS Int)"))\
    .withColumn("end2i", F.expr("cast(lastCharEnt2 AS Int)"))\
    .where("begin1i IS NOT NULL")\
    .where("end1i IS NOT NULL")\
    .where("begin2i IS NOT NULL")\
    .where("end2i IS NOT NULL")\
    .where(valid_rel_query)\
    .withColumn(
        "train_ner_chunks",
        createTrainAnnotations(
            "begin1i", "end1i", "begin2i", "end2i", "chunk1", "chunk2", "label1", "label2"
        ).alias("train_ner_chunks", metadata={'annotatorType': "chunk"}))

train_data = data.where("dataset='train'")
test_data = data.where("dataset='test'")
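# (Added sketch, not part of the original notebook.) A quick sanity check before training:
# confirm each row carries two chunk annotations and inspect the label balance --
# setFixImbalance(True) below assumes some relation classes are under-represented.
train_data.select("sentence", "train_ner_chunks").show(3, truncate=80)
train_data.groupby("rel").count().show()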
!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/generic_classifier_graph/RE_in1200D_out20.pb

documenter = sparknlp.DocumentAssembler()\
    .setInputCol("sentence")\
    .setOutputCol("sentences")

tokenizer = sparknlp.annotators.Tokenizer()\
    .setInputCols(["sentences"])\
    .setOutputCol("tokens")

words_embedder = WordEmbeddingsModel()\
    .pretrained("embeddings_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("embeddings")

pos_tagger = PerceptronModel()\
    .pretrained("pos_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("pos_tags")

dependency_parser = sparknlp.annotators.DependencyParserModel()\
    .pretrained("dependency_conllu", "en")\
    .setInputCols(["sentences", "pos_tags", "tokens"])\
    .setOutputCol("dependencies")

# set training params and upload the model graph (see ../Healthcare/8.Generic_Classifier.ipynb)
reApproach = sparknlp_jsl.annotator.RelationExtractionApproach()\
    .setInputCols(["embeddings", "pos_tags", "train_ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setLabelColumn("rel")\
    .setEpochsNumber(50)\
    .setBatchSize(200)\
    .setDropout(0.5)\
    .setLearningRate(0.001)\
    .setModelFile("/content/RE_in1200D_out20.pb")\
    .setFixImbalance(True)\
    .setFromEntity("begin1i", "end1i", "label1")\
    .setToEntity("begin2i", "end2i", "label2")\
    .setOutputLogsPath('/content')

finisher = sparknlp.Finisher()\
    .setInputCols(["relations"])\
    .setOutputCols(["relations_out"])\
    .setCleanAnnotations(False)\
    .setValueSplitSymbol(",")\
    .setAnnotationSplitSymbol(",")\
    .setOutputAsArray(False)

train_pipeline = Pipeline(stages=[
    documenter,
    tokenizer,
    words_embedder,
    pos_tagger,
    dependency_parser,
    reApproach,
    finisher
])

%time rel_model = train_pipeline.fit(train_data)

rel_model.stages[-2]

rel_model.stages[-2].write().overwrite().save('custom_RE_model')

result = rel_model.transform(test_data)

recall = result\
    .groupBy("rel")\
    .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("recall"))\
    .select(
        F.col("rel").alias("relation"),
        F.format_number("recall", 2).alias("recall"))\
    .show()

performance = result\
    .where("relations_out <> ''")\
    .groupBy("relations_out")\
    .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("precision"))\
    .select(
        F.col("relations_out").alias("relation"),
        F.format_number("precision", 2).alias("precision"))\
    .show()

result_df = result.select(F.explode(F.arrays_zip('relations.result', 'relations.metadata')).alias("cols")) \
    .select(F.expr("cols['0']").alias("relation"),
            F.expr("cols['1']['entity1']").alias("entity1"),
            F.expr("cols['1']['entity1_begin']").alias("entity1_begin"),
            F.expr("cols['1']['entity1_end']").alias("entity1_end"),
            F.expr("cols['1']['chunk1']").alias("chunk1"),
            F.expr("cols['1']['entity2']").alias("entity2"),
            F.expr("cols['1']['entity2_begin']").alias("entity2_begin"),
            F.expr("cols['1']['entity2_end']").alias("entity2_end"),
            F.expr("cols['1']['chunk2']").alias("chunk2"),
            F.expr("cols['1']['confidence']").alias("confidence")
            )

result_df.show(50, truncate=100)
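# (Added sketch, not part of the original notebook.) The two cells above print recall and
# precision separately; joining them gives a per-relation F1 in one table. This recomputes
# the same aggregations and is illustrative only.
recall_df = result.groupBy("rel")\
    .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("recall"))\
    .withColumnRenamed("rel", "relation")

precision_df = result.where("relations_out <> ''")\
    .groupBy("relations_out")\
    .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("precision"))\
    .withColumnRenamed("relations_out", "relation")

recall_df.join(precision_df, "relation")\
    .withColumn("f1", F.expr("2 * precision * recall / (precision + recall)"))\
    .show()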
###Output
_____no_output_____
###Markdown
Load trained model from disk
###Code
import pandas as pd

def get_relations_df (results, col='relations'):
    rel_pairs=[]
    for rel in results[0][col]:
        rel_pairs.append((
            rel.result,
            rel.metadata['entity1'],
            rel.metadata['entity1_begin'],
            rel.metadata['entity1_end'],
            rel.metadata['chunk1'],
            rel.metadata['entity2'],
            rel.metadata['entity2_begin'],
            rel.metadata['entity2_end'],
            rel.metadata['chunk2'],
            rel.metadata['confidence']
        ))

    rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence'])

    return rel_df

documenter = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

sentencer = SentenceDetector()\
    .setInputCols(["document"])\
    .setOutputCol("sentences")

clinical_ner_tagger = sparknlp.annotators.NerDLModel()\
    .load("ner_clinical")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

loaded_re_Model = RelationExtractionModel()\
    .load("custom_RE_model")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"]) \
    .setOutputCol("relations")\
    .setRelationPairs(["problem-test", "problem-treatment"])\
    .setPredictionThreshold(0.9)\
    .setMaxSyntacticDistance(4)

trained_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    clinical_ner_tagger,
    ner_chunker,
    dependency_parser,
    loaded_re_Model
])

empty_data = spark.createDataFrame([[""]]).toDF("text")

loaded_re_model = trained_pipeline.fit(empty_data)

text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge .
"""

loaded_re_model_light = LightPipeline(loaded_re_model)

annotations = loaded_re_model_light.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df[rel_df.relation!="O"]
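# (Added sketch, not part of the original notebook.) Persist the predictions for
# downstream review; the output file name is illustrative.
rel_df[rel_df.relation != "O"].to_csv("custom_re_predictions.csv", index=False)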
###Output
_____no_output_____
###Markdown
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/10.Clinical_Relation_Extraction.ipynb)

10. Clinical Relation Extraction Model

Colab Setup
###Code
import json

from google.colab import files

license_keys = files.upload()

with open(list(license_keys.keys())[0]) as f:
    license_keys = json.load(f)

license_keys.keys()

import os

# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null

os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]

! java -version

secret = license_keys['SECRET']

os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']

version = license_keys['PUBLIC_VERSION']
jsl_version = license_keys['JSL_VERSION']

! pip install --ignore-installed -q pyspark==2.4.4

! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret

! pip install --ignore-installed -q spark-nlp==$version

import sparknlp

print (sparknlp.version())

import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl

params = {"spark.driver.memory":"16G",
          "spark.kryoserializer.buffer.max":"2000M",
          "spark.driver.maxResultSize":"2000M"}

spark = sparknlp_jsl.start(secret, params=params)
###Output
_____no_output_____
###Markdown
1. Posology Relation Extraction

This is a demonstration of using SparkNLP for extracting posology relations. The following relations are supported:

DRUG-DOSAGE
DRUG-FREQUENCY
DRUG-ADE (Adverse Drug Events)
DRUG-FORM
DRUG-ROUTE
DRUG-DURATION
DRUG-REASON
DRUG-STRENGTH

The model has been validated against the posology dataset described in (Magge, Scotch, & Gonzalez-Hernandez, 2018).

| Relation | Recall | Precision | F1 | F1 (Magge, Scotch, & Gonzalez-Hernandez, 2018) |
| --- | --- | --- | --- | --- |
| DRUG-ADE | 0.66 | 1.00 | **0.80** | 0.76 |
| DRUG-DOSAGE | 0.89 | 1.00 | **0.94** | 0.91 |
| DRUG-DURATION | 0.75 | 1.00 | **0.85** | 0.92 |
| DRUG-FORM | 0.88 | 1.00 | **0.94** | 0.95* |
| DRUG-FREQUENCY | 0.79 | 1.00 | **0.88** | 0.90 |
| DRUG-REASON | 0.60 | 1.00 | **0.75** | 0.70 |
| DRUG-ROUTE | 0.79 | 1.00 | **0.88** | 0.95* |
| DRUG-STRENGTH | 0.95 | 1.00 | **0.98** | 0.97 |

*Magge, Scotch, Gonzalez-Hernandez (2018) collapsed DRUG-FORM and DRUG-ROUTE into a single relation.
###Code
import functools
import numpy as np
from scipy import spatial
import pyspark.sql.functions as F
import pyspark.sql.types as T
from sparknlp.base import *
###Output
_____no_output_____
###Markdown
**Build a pipeline using Spark NLP pretrained models and the relation extraction model optimized for posology.** The precision of the RE model is controlled by "setMaxSyntacticDistance(4)", which sets the maximum syntactic distance between named entities to 4. A larger value will improve recall at the expense of lower precision. A value of 4 yields perfect precision on this dataset (i.e. the model doesn't produce any false positives) and reasonably good recall.
###Code
documenter = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

sentencer = SentenceDetector()\
    .setInputCols(["document"])\
    .setOutputCol("sentences")

tokenizer = sparknlp.annotators.Tokenizer()\
    .setInputCols(["sentences"])\
    .setOutputCol("tokens")

words_embedder = WordEmbeddingsModel()\
    .pretrained("embeddings_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("embeddings")

pos_tagger = PerceptronModel()\
    .pretrained("pos_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("pos_tags")

ner_tagger = NerDLModel()\
    .pretrained("ner_posology", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

# events NER tagger (defined here but not added to the posology pipeline stages below)
ner_event = NerDLModel()\
    .pretrained("ner_events_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

ner_chunker = NerConverter()\
    .setInputCols(["sentences", "tokens", "ner_tags"])\
    .setOutputCol("ner_chunks")

dependency_parser = DependencyParserModel()\
    .pretrained("dependency_conllu", "en")\
    .setInputCols(["sentences", "pos_tags", "tokens"])\
    .setOutputCol("dependencies")

reModel = RelationExtractionModel()\
    .pretrained("posology_re")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    ner_tagger,
    ner_chunker,
    dependency_parser,
    reModel
])

empty_data = spark.createDataFrame([[""]]).toDF("text")

model = pipeline.fit(empty_data)
###Output
embeddings_clinical download started this may take some time.
Approximate size to download 1.6 GB
[OK!]
pos_clinical download started this may take some time.
Approximate size to download 1.7 MB
[OK!]
ner_posology download started this may take some time.
Approximate size to download 13.7 MB
[OK!]
ner_events_clinical download started this may take some time.
Approximate size to download 13.7 MB
[OK!]
dependency_conllu download started this may take some time.
Approximate size to download 16.6 MB
[OK!]
###Markdown
**Create empty dataframe**
###Code
empty_data = spark.createDataFrame([[""]]).toDF("text")
###Output
_____no_output_____
###Markdown
**Create a light pipeline for annotating free text**
###Code
model = pipeline.fit(empty_data)

lmodel = sparknlp.base.LightPipeline(model)

text = """
The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily.
He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day.
"""

results = lmodel.fullAnnotate(text)

results[0]['ner_chunks']
###Output
_____no_output_____
###Markdown
**Sample free text**
###Code
text = """
The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily.
He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day.
""" results = lmodel.fullAnnotate(text) ###Output _____no_output_____ ###Markdown **Show extracted relations** ###Code for rel in results[0]["relations"]: print("{}({}={} - {}={})".format( rel.result, rel.metadata['entity1'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['chunk2'] )) import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df rel_df = get_relations_df (results) rel_df text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . 
""" annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown 2. Clinical RE **The set of relations defined in the 2010 i2b2 relation challenge**TrIP: A certain treatment has improved or cured a medical problem (eg, ‘infection resolved with antibiotic course’)TrWP: A patient's medical problem has deteriorated or worsened because of or in spite of a treatment being administered (eg, ‘the tumor was growing despite the drain’)TrCP: A treatment caused a medical problem (eg, ‘penicillin causes a rash’)TrAP: A treatment administered for a medical problem (eg, ‘Dexamphetamine for narcolepsy’)TrNAP: The administration of a treatment was avoided because of a medical problem (eg, ‘Ralafen which is contra-indicated because of ulcers’)TeRP: A test has revealed some medical problem (eg, ‘an echocardiogram revealed a pericardial effusion’)TeCP: A test was performed to investigate a medical problem (eg, ‘chest x-ray done to rule out pneumonia’)PIP: Two problems are related to each other (eg, ‘Azotemia presumed secondary to sepsis’) ###Code clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . 
However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge .
"""

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df[rel_df.relation!="O"]
###Output
_____no_output_____
###Markdown
3. Clinical Temporal Events RE

Temporal relations, or temporal links (denoted by the TLINK tag), indicate whether and how two EVENTs, two TIMEs, or an EVENT and a TIME are related to each other in the clinical timeline. There are 3 types of relations here, and below are some examples, with square brackets indicating the EVENT and TIME connected by a temporal link:

**`BEFORE`**

The patient was given stress dose steroids prior to his surgery. ([stress dose steroids] `BEFORE` [his surgery])

The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `BEFORE` [admission])

His nasogastric tube was discontinued on 05-26-98. ([His nasogastric] `BEFORE` [05-26-98])

**`AFTER`**

Before admission, he had another serious concussion. ([admission] `AFTER` [another serious concussion])

On postoperative day No 1, he was started on Percocet. ([Percocet] `AFTER` [postoperative day No 1])

**`OVERLAP`**

She denies any fevers or chills. ([fevers] `OVERLAP` [chills])

The patient's serum creatinine on discharge date, 2012-05-06, was 1.9. ([discharge date] `OVERLAP` [2012-05-06])

His preoperative workup was completed and included a normal white count ([a normal white count] `OVERLAP` [His preoperative workup])

The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `OVERLAP` [admission])

| Relation | Recall | Precision | F1 |
| --- | --- | --- | --- |
| OVERLAP | 0.81 | 0.73 | **0.77** |
| BEFORE | 0.85 | 0.88 | **0.86** |
| AFTER | 0.38 | 0.46 | **0.43** |

This RE model works with the `ner_events_clinical` NER model and expects the following entities as inputs: [`OCCURRENCE`, `DATE`, `DURATION`, `EVIDENTIAL`, `TEST`, `PROBLEM`, `TREATMENT`, `CLINICAL_DEPT`, `FREQUENCY`, `TIME`]
###Code
events_ner_tagger = sparknlp.annotators.NerDLModel()\
    .pretrained("ner_events_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

clinical_re_Model = RelationExtractionModel()\
    .pretrained("re_temporal_events_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)\
    .setPredictionThreshold(0.9)

loaded_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    events_ner_tagger,
    ner_chunker,
    dependency_parser,
    clinical_re_Model
])

events_ner_tagger.getClasses()

loaded_model = loaded_pipeline.fit(empty_data)

loaded_lmodel = LightPipeline(loaded_model)

text ="She is diagnosed as cancer in 1991. Then she was admitted to Mayo Clinic in May 2000 and discharged in October 2001"

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df = rel_df[(rel_df.relation!="O")]

rel_df

text ="On 9–28-92, the patient will return for chemotherapy and she will follow up with her primary doctor, for PT and Coumadin dosing on Monday."

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df.confidence = rel_df.confidence.astype(float)

rel_df = rel_df[(rel_df.relation!="O")]

rel_df[(rel_df.relation!="O")&(rel_df.entity1!=rel_df.entity2)]

text ="""She is admitted to The John Hopkins Hospital 2 days ago with a history of gestational diabetes mellitus diagnosed. She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine, 12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge.
"""

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df.confidence = rel_df.confidence.astype(float)

rel_df[(rel_df.relation!="O")]
###Output
_____no_output_____
###Markdown
4. Human Phenotype - Gene RE

https://github.com/lasigeBioTM/PGR

Human phenotype-gene relations are fundamental to fully understand the origin of some phenotypic abnormalities and their associated diseases. Biomedical literature is the most comprehensive source of these relations; however, we need Relation Extraction tools to automatically recognize them. We present the Phenotype-Gene Relations (PGR) model, trained on a silver standard corpus of human phenotype and gene annotations and their relations.
It extracts 2 labels: `True` or `False`.
###Code
pgr_ner_tagger = sparknlp.annotators.NerDLModel()\
    .pretrained("ner_human_phenotype_gene_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

pgr_re_Model = RelationExtractionModel()\
    .pretrained("re_human_phenotype_gene_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setRelationPairs(["hp-gene",'gene-hp'])\
    .setMaxSyntacticDistance(4)

loaded_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    pgr_ner_tagger,
    ner_chunker,
    dependency_parser,
    pgr_re_Model
])

loaded_model = loaded_pipeline.fit(empty_data)

loaded_lmodel = LightPipeline(loaded_model)

text = "She has a retinal degeneration, hearing loss and renal failure, short stature, \
Mutations in the SH3PXD2B gene coding for the Tks4 protein are responsible for the autosomal recessive."

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df = rel_df[(rel_df.relation != "O")]  # keep only predicted relations (the original compared against the integer 0, which never filters string labels)

rel_df
###Output
_____no_output_____
###Markdown
5. Drug-Drug Interaction RE

In clinical applications, two or more drugs are often used in combination to achieve conducive results, such as synergistic effects, increasing the therapeutic effect, and reducing or delaying the occurrence of drug resistance. However, there is a potential for harmful drug-drug interactions (DDIs) to occur when two or more drugs are taken at the same time or at a certain interval, which can reduce or invalidate the efficacy of the drugs, and increase toxicity or even cause death. Therefore, in order to prevent harmful drug-drug interactions (DDIs), medical staff often spend much time reviewing the relevant drug alert literature and drug knowledge bases.
**ref**: *Drug-drug interaction extraction via hybrid neural networks on biomedical literature*, https://www-sciencedirect-com.ezproxy.leidenuniv.nl:2443/science/article/pii/S1532046420300605

*(embedded figure omitted)*
JhOgJSsIwr0Lb11lv30KJ/4Kgp9BPjIB8SLrzwwup9vsC/+eab+14fdUdACM6Flx5T6xYbKIsjpAXz1VZbrcAArkMOR8CUPq1p8bn2qTP5aJ6+8847a7NucgQksS5nmPrKZZddVqCvSZvh8PzGG2+U9KfS5GNMT3EEHBX+1P1x1VVXrW17udk27nA0srQD8b333iuvZY19Fw50vWLH4zZHwGHICgGua56RS0b6yIGmhX2h1TfW7dPW73z4sM0RMAef+dLqU/cceommJ7Zva1rb5GjsHFby9+VX0nf1c8nTp5/yAZ3IIz5AZE5PSB0XhAaJ27CTNF1xDlnig0kq76bykqZxItptcvNlXb/JJetD+LKOjqZ7vvmm8GGsTMqFne7HdfryeHAEpP1S2oD3u3AgTZvM0e/HyoMcslPj0DQ3pC5t+i/Pc/QvH31nLBwBc8m2n//85wUfyjI2TzHFFMVcc81VfoiPXtRko8nRxr5yKYd+Rl8gdPUX0mgeqJMlpGnjobKgoihicJV3u+LUcb8rf3musYiVB+SV4gj4qU99SsjpiY877rhKn1x99dV7nukfg2wHyvHRkXPJkC65mEMm+PJligxItU104SDt39U2OcYHytJ8kiIzhG6LDQFDwBAwBAwBQ8AQMAQGg4A5AhZFwa5MTP754q4r7LPPPtWki3ceeeSRnlfEkMAzFoKbju71dQTka51pppmmKlO2POeIz7bQpfgzYYRG/vgC8je/+U1bdtUzvcDMV5KUY8EQGDYCxrP+PKuN//D717/+9YE1V5uB8dBDDy1wnr7tttt6ysepaa211qrkETIuJmgnv6bdSnUaOWqdskJpS8FULwTTHsccc0xtdQflCNjkuIdDuIwJs846a4Hje13g2GBJx0L273//+7pkjfd23HHH6n2MbXVhPDgC5sJLG/Pq2ubZZ5+t8AJ3jMluCHUErCsnpU+79HT97qqzvO9jWGxzBBy0vvKHP/yhlGnbbLONkFzFv/jFL3rajSPPCKk0+RjTdRkYm93ArnjCw8RCG+lGhT+1MX+ZZZZxq9Dzu23cQc7rurIb6SCC78JBDj5rcgQclqwQ/LrmGblkpI8c8FmUFLrbYt9+58OHbY6AOfjMl9a2+sqzHHpJbN+OkaOxc1ipry+/kr6rn0uePv104403ruQRTv0StMwOmVvHYCdltsW5ZIkPJqm8m8pLE91uE8uXbf3DfZZL1ofwpUtD22/ffGP5kLJjZVIu7Lp4TfcDnBbrwgEHHFDJp7q5uM+4pzEM1T+hSb8fIgulPl04kK5N5qTKg1yy03ee1Kb/Utcc/cun3cfCEVD3afT8WPvW7373u2LGGWcs+/4DDzwgXakxztXGvnIph34mlenqL6RL5SEpKxRXec8nTh33fcogTao8kHJCHQG1kx801AWdps0RcJDtAF0+OnIuGdIlF3PIBB++TJUBqbaJLhykv3S1TY7xgbJyyQz5aFbot9gQMAQMAUPAEDAEDAFDIC8Ck7wjIDtHySJh0y5SGnK94M57X/nKV/TjQk/KeM5xvu7uPbywwQYbVOVieGsLW2yxRZVWaD3zzDPbXumclHF8kORF/IlPfKLxq8eXX365qgO7DOj3cJ6sqx/E4SDSVbfWSthDQ6AGAePZfzvw+vIsEC666KIV3/L1X9OuSPDyeeedV9xwww01yHffajMwbrrppiUNLF67AUc8kSsHH3yw+9jr9yKLLFLl0bSjXZMjYAxtsZhilNG7hXHNuOIGjNGCCXHKkZbaYFS3MEPZN954Y095V155pUtSScPiiy9epdtyyy370nTdYEyUemF456hhN3zyk5+s0mA8dEMOg2JqHrnw6mqb559/vsDhUjCrw+OOO+6onmOMqwtd5fBObJ+uK6/tng8tvO9jWGxzBBy0vvLoo4+WuFMfN2DMnGqqqcrn7I4l/TyVJp9+qxdS6/i9zRFwVPhTOzkhI9vkX9u4Q7vgSCj8wy6kV199tdtc1W+OposJPgsHkm8qnzU5Ag5TVlCXrgWGXDLSRw60LewL7j6xb7/z4cM2R8AcfOZLq0+9c+klMX07Ro7GzmEFixB+7ernkmdXP/3xj39cyaEpp5yyoN4SYseFGOykzLY4lyzpwgQaUnk3lZcmut0GjGP4sq1/uM9yyfoQvnRpaPvtm28sH1J2rEzKhV0Xr2kHiVF2BExpA9qhCwfStMmcVHmQS3b6zpO69N8c/ctH3xkLR0DaMods22uvvaqx+YILLiiwM+Lo0xRytbGvXMqln1Gfrv5CmlQeEtxCcZX3fOLUcd+nDNKkygMpJ9QR8NJLL636JPNWnPncoGlrcwQcZDtAk4+OnEuG+MjFVJngw5c5ZECKbcIHB5+2yTE+UE6qzHjooYeKlVZaqZh88smLz3zmMwVrjxYMAUPAEDAEDAFDwBAwBPIjMMk7Am6//fblRGvuuef2RneppZaqJmdzzDFHdeQbGaC4TjfddNVzFh/XWWedcuGRXfdwrlh//fV7FvaZoLUFnHJkEZMY54mmnQYlnxdffLHnHb3gQBocflC4db5MnK699triL3/5S7kbFLsd4pSD88xPf/rTMmsWY5deeume9zjaE0cEFrr5w4CCoYFJAQvhFgyBnAgYz/7bERDe9eFZsEfuaGcijkE/99xzi6effrpsGnYVwfi57LLLlrx91FFHRTXZrrvuWskGnOt0EGe7L3zhC/p26eAhOzxSJxZLYwJyVeQZRjNkF4YFjlCnrhytIc+Jb7311qqYGNpSMD3wwAN7aEGms6j/q1/9qjxK5eijjy5mm222njRNx9NUlWi5mG+++aq8Fl544VLGS3LkvQTGKsGInRjcHW932mmn6jnjwp/+9Cd51Tt2v2zXX/CzUKV31YCWI444oi9v2lboXHnllfuecwMHKEmzwgor9KXJkUcOvHzaRn+RDq8I7hgi4VVtEMTBoM4536eclD7dB3DLDR9aMCxTF2lD+kZdmHfeeas0GM51GLS+Ik4Y0CiyVMpH5gjt6FoSUmny6bfauMyRUm5AlghtxBz3LWFU+POcc87pofHUU08VEguc9XQfbxt3eOn6668vGBOkzuyCfdBBBxU4W9MeyMDrrruuQJbgYB8TNI+ecsoprVmk8hljqNSFXTF00HQMUlZQZtc8gzSpMtKVAyxa1AV9LGPshwzk69vvNB/ieFsXzj777Kqd+FBBhxx85kurLrftOodeEtO3Y+Ro7BxW6q/5pItfffo5+TYthPH+fvvtVzmGw7ssiuoQOy7EYKfLbbvWGMXIkmHxbiovTXS7DW0cw5dtfaPuWaqsJ0/d57r4so6Gpnu++cbyIeWmyKRU7Hx4TTsqTj/99D22S8FNO4xsvvnmcruK9bjXNPdK0T8pKKUNfHCgjDZ9IYc80P0tRnZCo888iXRd+i9pUvuXT7t3zbmhwzdo/LrkQA7Zht4vOrWOsYNgW+KjDveUBE1jbBvrPLrqmUM/A/+u/pKDh6SdY3CVd7vi1HG/K395nkMekJeml7Uj5FxbwHap+yL2XuY2jz/+eGknRafUDs0f//jHG
7MbZDtQqI+OnEuG+MjFVJngy5c6XYwMSLFN+ODg2zap40MOmeFuevLVr361sT/bA0PAEDAEDAFDwBAwBAyBeAQmWUfAs846q8fIwiLhcsstV2y11VaNaHIkIgs++qheJmnsVsJRvxJQ7GUnGD2Jk2t2h5FrYn6zsHfzzTdLFj0xk8U555yzemfnnXfuea5/4BjAV3JLLrlklZ4yPvKRjxQ4cegjD1j81F9NCU04DE0xxRQ97+vdEp977rniAx/4QM9z3qXObr3XXHNNTZ5dGwLRCBjPFqXDQgzPAvoJJ5zQx7PwrRyHIvyP4ROH4JBw/vnnF+zCp+UGcpKjheToW3G2o5wVV1yx3C0UZwztWLzddtuFFNuTVu8UKXXRsd4xkPvgiEx86qmniljaYjHFaLLGGmvUtofQPO200/Y85wvVfffdt6fOvj/WXnvtnrzYFZI2mH322XscX3AwZxwUGnAwow3Z2ZUxRO7jpPiTn/zEt/iedDjx6EUY8sQoKveI9fjCMcX0Cwx7GFK5nnrqqStaeJ8ddi+88MKyHMY4dszSYyZfmNLGOBPlyEMqlAMvn7Y57LDDeurLODv//PNXzk0syEnbENO3dtttt/IrbaHVpxzSxvZpKccnbqNl1VVXLWnX/ZA6YfRE9xHHVRadtPMvaTDE0vb33XdfRcYg9RXthIEcZYdkjN0stIguxIchfASiQwxNPv0W58N11123py8gR6BLMNl///17ZC64ffSjHy2xRRaONX8KTnfffXdPPaCTj3DQbXEQ5eMUn3FH8jvttNP68iNPVx/HYTwk0N58DINzIfnxh9xETrV95BPDZ4cffng5R9FzEPoXY6847w9DVoTMM2JlJO/BR64cmGeeeXrkAE7zehED/BnrZWwPaUvSdvU7HGvrxqCNNtqoGoMee+yxUg5pJ2XmVaQRx9wUPpM6ddEqH3BJ+q44l14S2rdj5WjMHDaEX0P6OdhqR8APfvCDpaxCnxa5QLzgggs26k4x40Isdl19geexsoR2GSbv5uCliW63oT1D+dKnj+g0sbKePEL4UpfZdR2TbwwfCh0xMol3Y7HzGSfZ2QzdWc+JkEV8oCLjEfNn7JBaH0J3ZP7nO2/KoX8KjqFt4IMDefvqC6nyIFZ2MgZLaJsn4dgTov/G9i+feUfXnFvq4xPH8Cv5psg2xnnsIXqcrrtm93m9K1tKG8fUM1U/6+ovuXkoFleffkKaHOO+b1kp8oCNEpC/Cy20UE8fg4e53/bh0mqrrdbzju6X2LWwecg97K+bbbZZKa91vQbZDj46cm4Z0iUXpe4xMiGUL1NkgNAZa5vowsGnbYSG2PEhp8zADiV9mfjII48U8iw2BAwBQ8AQMAQMAUPAEMiIwCTrCKi3xteKJ04HTUHvwqHf4ZrFKR2Y2C2xxBI9u3AxSWORkWMgOVLRzePEE0/UWfRcf/nLX67Sy4JuT4L//OA4Xjdf/ZtdT3RgZxiO5eTrYJ2Oa5wtWOD8+c9/rl8pr3kPJx591Ka8j/GQxei77rqr7z27YQjEImA8+2/kYnmWt1kYZnc0vTsgfMtvDErszvbaa68FN9E222zTJz9EHuBARsAAL85e8kxivmo9/vjjq+Mzgwn4zwvIUL0YS/7kjUGBozrr5BXHJKfQFospRt1tt922zxETJzt2i2E3PsFHYhz4YsI999xTMLZJPhK/733v6zMaghMGNH18saRnnNhkk00KDEwpgbppAyb5k/c+++xTHsdz5pln9tG6xx57lM+FFjeW46BxYnWfye/11lsvSx667ql4+bYNBkqph8TLL798wXEzBHY1lvsS8yW7BN9ySB/bp6WsrriNFr2rhNRDxzirEdwdjXWaSy65pIeEQekrf/7zn0vnNO0EpunAUNukr4XSBG/ovPW19H0+ZNH39bXslljH15KOdiGMJX/qhsOBUWiTGCdA+JgdGH3GHZ3fbbfdVu76J3npGAcddmWlXUICztQ6H33NB0ZtIZTP9A4AuhyuWWSSMGhZETrPiJGR7g4Tbn1FDsguxu5zfjO2x4S2fqePZXbLFD7ESd59Jr/ZkVdCLJ/J+8RttLq7lOr3mq5z6SUhfTtVjobMYUP4NbSf6515aW/0apxvPvnJT5Yf6H3nO9/pcc6va4PQcSEFu7ry3XsxsuSJJ55o7P/gMgjezcFLE91uQ9uG8KXbF3x+x8h68g3hSx86JE1svqF8KOUR826ITJJ3Y7DzGSfb+FF2qj3ppJMaeRZ73jD1T8EjpA18cCDfEH0hVR7EyM6QOVuo/hvTv3zavWvOLe3pE8fyK3nHyDZ2mpdxmznAeeedVzpYYt/meErXNk4aHWLbOLaeKfpZV3/JyUOpuGqM265zjPtt+etnsfKgy6aBM1lTePLJJ/ucVNErWUtifqOPBpY5BvNjCYNuBx8dObcMabMf6ZMDwCBUJsTwZawMkDYijrFNdOHg0zaahpjxIafMOOOMMyodhA9r6fsWDAFDwBAwBAwBQ8AQMATyIzDJOgLmh7I+RxZi2KmI46z4cmZUw9/+9rfyCM3LLrus3HmJHS26tq2nLhyFwrFyV111VbnTEu/pI9tGtb5GlyHQhMBE51nq/corr5THeXPcCQYIfg8jIFPYHYvjf/k6maOKHn744WQHQE37Cy+8UMpb6saXuBg3fEIqbbGYvvnmm+VusIwT+ohOH5pD0kAfuGPoBveuspDjHEFCWhyrMLjmlO0YtO+8887yyB2czRmDxnNIwcu3bTDssTPi1VdfXXAdGnzLkXxj+7S83xaH0tKWl+8z2mgQ+grGUD58uOKKK8oj1pE77MLiEwZFk0/ZbWlGhT/5cAYnxosvvrgcq3Lo0c8880w1RrATjl58bcNkUM8GwWfDlhU+2KTISJ/8c6YZRL+roy8Hnw2C1lx6SUjfTpGjsXPYujYZhXuh40IKdl31TZUlXfnnep6Dl6DF5oDpLTKeZH1bbUP5UOcVK5MmCnYai5TrlDZIKVfeTZEHqbJzEPOkid6/fHUObE+yAyYfjNZ94Mi46n784e7wntrG0s9C4lz6WUiZvmlz4epbXq5x37e8FHngW4ZOh22SXTeZB7PWQn/zCcNuBx+acqUJlYu+MiGWvlwyINQ2EYqDT/3Gcnygz15++eXF66+/7kOqpTEEDAFDwBAwBAwBQ8AQiEDAHAEjQLNXDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUNgtBHgFArZRY3TGZrC3//+92K66aar0v76179uSmr3i6I83cNwHfuuYP177NvAKDAEDAFDwBAwBAwBQ8AQMARGDQFzBBy1FjF6DAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgGYGtt966cu7bbrvtGvNjh6w55pijTDvPPPM0prMH/0bAcB2NnmDtMBrtYFQYAoaAIWAIGAKGgCFgCBgCo4SAOQKOUmsYLYaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhkAWBk08+uXIEnGaaaYrHH3+8Nt9v
fetbVTp2WbPQjoDh2o7PsJ5aOwwLaSvHEDAEDAFDwBAwBAwBQ8AQGD8ImCPg+Gkro9QQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBDwRwPHvHe94R+XkN9dccxXXX3998eabb5Y5PPvss8UXv/jFYrLJJivT7Lnnnp45T9rJDNfRaH9rh9FoB6PCEDAEDAFDwBAwBAwBQ8AQGCUEzBFwlFrDaDEEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMgWwI3HLLLcV0001XOQO+5S1vKaaaaqpilllmqe7NPffcxemnn15wRLAFPwQMVz+cBp3K2mHQCFv+hoAhYAgYAoaAIWAIGAKGwPhCwBwBx1d7GbWGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCAwFNPPVUcdNBBxUorrVTMN998xWyzzVasvPLKxQ477FCcccYZxd///veA3CypIGC4ChJjG1s7jC3+VrohYAgYAoaAIWAIGAKGgCEwSgiYI+AotYbRYggYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgEImCOgIGAWXJDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDYJQQMEfAUWoNo8UQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQCETAHAEDAbPkhoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChsAoIWCOgKPUGkaLIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCIgDkCBgJmyQ0BQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAyBUULAHAFHqTWMFkPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgEAFzBAwEzJIbAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAqOEgDkCjlJrGC2GgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGQCAC5ggYCJglNwQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMARGCQFzBBxQa7z++uvFj370o2L//fcvdtppp+KSSy4puOeGv/zlL8VVV11V7LXXXsUee+xRXH755cWbb77pJrPfhoAhMAQEHn30i8dgkQAAIABJREFU0eK0004rtt122+JrX/tacf/99/eV+q9//au49957i29+85vFNttsUxx33HHFI4880pfObhgCEw0B+v4vfvGL4qCDDio233zz4tJLLx3JKv7tb38rfvCDHxS77LJLSedvf/vbkaTTiDIEDAFDYCIgYDJ3IrRiWB3Giz4QVitLPREReOyxx8q52hZbbFEcc8wxE7GKVqchIvCrX/2qOOSQQ8r5xbnnnjvEknuLevHFF4vvfve7pc2CORky2cLYIjAqfWNsUWgv/aWXXiouvPDCqt/W2cfbc2h++tprr5V2vF133bXYcccdS/549dVXm18YgyfGt2MAuhVpCBgChoAhYAhkQsDG8UxAWjZjioDZR8YUfivcEBgzBMwRcADQX3TRRcXb3/724i1veUvP3+67795T2lFHHVW87W1v60nDOzgFWjAEDIHhIfDyyy8XK620Uh8vTjPNNMXTTz9dEfL73/++WHDBBfvSTTfddMUzzzxTpRvPF9///veLtdZaq9h+++2LF154YTxXJTvtkzI2OLTPNttsPX3/gAMOyI5xSobw57rrrlvAt3r8vfPOO1OytXcNAUPAEGhF4Lnnniu+9KUvFauvvnr54U9r4gn00GTuBGrMgKqMB30goDojkXRS1i8H1QAnnnhi8YEPfKBHH1xjjTUGVdxQ8p0U+smo1nGfffYp5pxzzp7+hMPRsMMtt9xSrLzyysXkk0/eQ8s///nPPlImVd2kD4gB3wjpG8Nqk2GV4wvtXXfdVay66qrFFFNM0dNvWVDPEa688spipplm6skbW8Caa65ZZn/22WcXq622WrHvvvsWfLwy7GB8mxfxUR0n8tbScjMEDIHxjsCojcXjHc+xpD9kHB9LOgdR9qD7sY3pg2i1+jwnon2kvqaT7t1B8+uki+zEqPkk7QjIV+Gbbrpp1N/Pfvaz2h5wzjnnFJNNNlnx1re+tdh7772L2WefvTJIzDrrrNU7Bx98cHl/6qmnLhcPZ5hhhirdJz/5ySqdXRgChsB/ERgEz7Ir5zLLLFPy31JLLVV+4a+diL797W+XBDz++OPFvPPOW6bDkPjpT3+64lnS33bbbf8ldJxePfvss8WUU05Z1YudES38G4FJHZuFFlqobwFs1BwB77777pJGd6HOHAGNiw0BQ2CQCOy5557VuMkHPjjITQrBZO6k0Mr9dRwP+kA/1aN7Z1LXLwfVMuyS5uqD49kRcFLoJ6Ncx6WXXrqvP+22226D6r6N+Z566qklHXyEqO0VdY6Ak6pu0gjegB6E9I1htcmwyvGFlJ36kcfvete7evotdrjU8L3vfa/6uJ56zzHHHFUZs8wyS3mSh+aVQw89NLXI4PeNb4Mha3xhlMeJRqLtgSFgCEySCIzaWDxJNkKmSoeM45mKHJlsBtmPbUwfbjNPNPvIcNEbH6UNkl/HBwJGZRsCk7QjIF8IaqNAyDVHGriBLxpx7COfz3/+8+Vj7m2wwQalYyDHBBPuu+++qtwjjjiivPfkk0+WO5Lxde/pp59e3rN/hoAh0ItAbp4ld3bxgWdx4JUjRDEoYqjEkRfeJHz2s58t080888wFOwgSTjrppJLnF1tssYLjSMZ7+MlPflLJJjDZcMMNx3uVstFv2Pwbyv/5n/+p+sioOQLqxn7HO95R0WmOgBoZuzYEDIHcCLg7Cj/44IO5ixj5/EzmjnwTZSdwvOgD2SueOUPTLzMD6mT3v//7v5U+OJ4dASeFfjIe6vj//t//q/rTWDgCSvf+9a9/XdHBnL3OEdB0E0FrOLFP3xhWmwyrnFBk//znP/f021RHQHa9wDYHD2yyySYlOdjf0U/4MH+//fYrzjzzzJ4yWSAbq2B8m478eBgn0mtpORgChsBEQGBUx+KJgO1Y1cFnHB8r2gZV7iD7sY3
pg2q19nwnin2kvZaT5tNB8uukiejEqvUk7Qj497//vTz2E0c9jAf8sRvWww8/XPvH14OSrs4R8Nhjj62eM5jp8Mc//rH6yZGbks9TTz1V3ceAh3FkLMPrr79e7LjjjqXBZCzpsLINgToEcvPs//3f/xXvfve7S35ksNQBw6QcHcLuPnIEz9Zbb62TFXzB8o9//KPn3jB/5ORZ8pJjtDje/Oqrrx5mVUa6LMPm380zXhb+zSllpNlpaMRdfPHFpU4z1rrV0CpsBfUhMIw+cMYZZ1R6/XLLLddHw6Rww2TupNDKvXUcL/pAL9Wj98v0y8G2yUQxdE8K/WQ81NHH2WuwPfrfufssRJpuMoyW+G8ZPn1jWG0yrHL+W3u/q9yOgFq+X3PNNT1EiP39mWeeKd7znveUevq0005b/PKXv+xJN8wfxrfpaI+HcSK9lpbDRERgGDaJiYjbeK7TqI7F4xnTsabdZxwPoXE8yIVB9mPfMZ105qvg17N8sNL683j+UNIPkYmTykdeDJJfJw6Sk25NJmlHQGn2I488slrAm2qqqeR2X/zoo49W6eocAdkVTBz8+DqxLrz55pvFNNNMU6abfvrp65KM6b1TTjmlpG3VVVcdUzqscEOgDYFcPHv55ZdXPLvTTjs1FqnLw+F3lEJunsXZEkfmP/3pT6NUzZGgxbApyi/sZZyzHQFHolsaEQ0I4KAtxwLecsstDans9kRGYJh94PHHHy/oZ//6178mMqSNdTNHwEZoJuwDcwTM17SmX+bD0s1pIhm6J4V+Mup19HH2cvvgIH77LkRO6rrJILBvytO3bwyrTYZVThMedfdzOwLKB6zYJvQH9m7Zr776avHDH/6wOtXDfT6s38a3eZAe9XEiTy0tl4mEwDBtEhMJt4lQl1EciycCrmNVB99x3Ie+8SQXBtmPfcb03OuePu0zXtP4YDWR7CPjtZ1C6Q6RF4Pk11C6Lf1oIWCOgEVRaCefNkdAFvf4ihBDQ50jIO/y7G1ve1vt8Rw0PceMihPF/PPPP1q9oSiKRRddtKTPHAFHrmmMIIVALp7V+Rx00EGqhN5LjvoWvj333HN7H47xL+PZMW6ASaz48bLwb04pk1jHrKnuZZddVsltcwSsAWgSuGV9YHiNbDJ3eFiPSknjRR8YFbyMjrFBwAzdY4P7RC3V19lr0PXPuRA5aFonlfxHpW+MMt45HQHfeOON8vhfsdFx0seoB+PbUW8ho88QGAwCZpMYDK6WqyEwbARyjuMmF/xbz9Y982Jl9hF/PEclpcmLUWmJ8U2HOQIGOALS1Pfee29x5513Fi+88EJPy3M8qBghpphiip5n+sett95apVtooYX0o9ZrDBs///nPix/84AcFx5SGBHb2+vGPf1zuVIJXcFM4++yzK9rMEbAJJbs/CghoB742511obeJZnrG1tPDtEUcc0Vi1j3/841W6OifguhdTeJYvYqD7uuuuK+65557ilVdeqSuiGATPYlS9+eabi+9///vFX//619py5SZHrtx2223F9ddfX9x9993F888/L4+SYpyun3jiieJnP/tZce211xb33XdfJy1SIMc5P/TQQyV2yNtHHnmkYCfWphCSPgQbynvxxRfL8QJ8HnjggaQjpJHjfMn+m9/8ptHRvKmOf/jDH8o2oq2oQ0hgbLvhhhuKm266qWBX3A022KDihfG4IyBjN05hYEndQkIKT+tyUtpS51N3ncI7dfnV3Yvl+5R+KHTAy3fddVfZhhzf3hRYaFp22WWrvhriCDjW7ewr/92656LbzVf/jqWNdkOOw3ePPfZYkiwcdB94+eWXS3ktco9+67PLHx/6MG4yZocE3/roPGN5UOfhcw0Wt99+ezmHePrpp1tfaXIEjJW5se1QRyRj14033ui9EwzHyDFuN8mYl156qfjpT39a6j/oEL4hR51853QuTexUz1wQvaqpXu477u9R0gdixtEc+Asm6FJg+Ytf/EJu9cTIDPQ1+hH6bFcI1S/JL7YvdNHiPs9dV51/yrjlOx41Gbr/+c9/Fg8++GA5z8DG4iPnoT2nnhUq/337ScjcRreHvh6UHNRl1F371rHu3Ri5QD6+fYm0Ps5efLHPnAudoEnekoZxpC2NriN9RWwDyJ077rij0nGxZdCf68IwdRPKh070PPQ9MAida9XVwb2XkwfdvN3fufsG+Q+rTULLiRlTQsfVnI6A2MTFjvfWt77Vbbqe3+wIiH3rRz/6kbeslwxi5Qrvjxe+zWmzEtxC45BxK3ScyFm/0HEbHAY9b8uh40MnbcDJMDi6+OpEde0c0pZ17+e4NwwdBvnnM09OsUvR33LZUAahz49lW+fWBVJkfVufDR2LJa8YWSPvxozn8i6xr+0kxr6b0m6xsjSGTsEjdhyX95viUZELTfS590P7cYj+3DWmD2Ld061f6u9RGYd9scptHxH8UniNPHz0kNC5h9DmxsydmbPKejZ9NiSE1BXZdc0115RrWU1lYG/HLoDPkRtC5cUg+dWlzX6PLwTMEdDDERBFhQXB+++/v691ERTnnXdesfHGG/cYItZZZ52i7m+ZZZap0rG7oE5z8skn9+V/xRVXFIsvvngx+eSTV+9h8MCJ8Pzzz+9LLzcwduy+++7FLLPM0vMe73KEwkUXXSRJS6Pihz70oZ50lPfOd76z56/JwFhlZBeGwJAQ6HIEbONZSISXDz/88GKuueaq+j18oflRX3OMtxgaP/zhD/ekcw38sTwLXSxUL7300gXOxFIe8ZRTTllsvvnmlQMyCwE5eZbJzcUXX1xssskmxQwzzFCVXec4DLZnnnlmMe+881bpNK0LL7xwufgZ0xVQjnbeeedi9tln78sbAy/0NS0o/O53vysxmmyyyfrenW666Yp99923hyTf9CHYUAD4XHDBBcX73//+Pjpmnnnmsn5nnHFGof+QrRizTzjhhOITn/hE8a53vavYaKONSqeFww47rC+vueeeu1xc6amQ8wOD51ZbbdU3BnA0/YYbbtjqtInxb/311+/hD93Gch3jCEi/dccW+Y3BQgfGPp4tsMACPQtcKP70EXlP4gMPPLB6XTulsFiPE/1qq61W6P7BOLfNNtuUC4DVizUXoTyduy1rSOq7lcI7fZnV3Ijl+9B+CL+hbyEH5ptvvrKNf/nLX5aOf2uuuWa1K7P0QdLhSKDDl770pWLqqafu4T9kuPQTYsYQN4x1O/vK/1S63fd9fsfSBt/hkOm2B2Majvj0WzeMVR9AbuP07+rb9LX3vve9xaWXXuqSWjqaH3roocWSSy5Z9bctt9yyJ12O+pBhLA/2EFPzA4MD84/VV1+9YIxi7MHJYN111y13OBdeYwxmBzjGzrqQS+bGtANjKA4HX/ziF8s5DjyOTsNcSM/P2LEdp3yMNieeeGLPeIvD4pe//OWe8fbtb397sd5661UfY+BQDC5aR+N67733bl0wi6mTxjhkTqffY6zcfvvta8fyRRZZpLj88st18trrGH2gbZzn4wwJ9Dktl7n+4Ac/WD7OOY7G4A/f4ji5yy679IxFGAqhG11KeA
N9WD4SAnM+LHJ1ZE4BwOlMB8rw1b3lvdi+IO/XxcOoq1tu6Hir3w8dj1xDN4bUr3zlK31zDeaEbR8NxOhZOeR/SD/xnduA57DloG5D99q3jjnlAjSE9iXeaXIEZBzCeM+84t3vfnclH7QxX9JsvfXW5XgrMoR5Sl0gPf0XPUpOHpF33Ji0EpCzw9JNpEzahrrr8REa0R2WW2654lvf+lbP3LfJiVryq4tjeLAuH597OfsG5Q2rTXzK0fWPHVNixlXKRf7qvuva0TRtTdfY37FFrbXWWj15adudvv7IRz7So9sjb3TILVfGC9/G2qw0djmufcct33FCaIqtH+Wk2iKgYVDzNqlfrI4PD55++umlPZuxaqmlliqz5ONr5oHCn8wHQ4NvW+aYJwxLh0mdJ8fapWJtKIOeu0if8G1rSZ8zjtUFcsv6tjr5jMW5ZI3QETKeM06F2E6kDOJQ+668G9tusbI0lk7ojR3Hpa5d8bDkAh8lIuddewu/WRshYOOse47PgE8/duvqqz/7jOm+656c2lZXB+4xN5GAo3tdOpznYsIojcO+WMl8MZd9BNxCeS1WD4mZe/DhHX0S/xjWFWl/ZB/1n2OOOSqdB92H39/5zndau0JIXXHGO/roo8t5MPNhylhjjTV68n/qqafKNB/96EerXc6xNergKy8Gya+aHrse3wiYI6CHIyAKIww755xz9rW2PJMJU0q8ww47VPmzE9cXvvCFSiixCMlk7FOf+lSPIwNOIm5g5ybt4LTiiisWpEPwifAhZrJHYICfaaaZCr2Ix0Io9/SfDBhuefbbEBg2Al2OgMKXdTwLrSiDKbyq3xXnpRSe5QsYTROOYNtuu22pnOAMJeWhHBBy86zgJeVIXOcIiCyR5yxKfPrTny4222yz0lFC7rPDQWhgQWDWWWct82aBFXm36667FixWS77EK620Ul/WKJKzzTZblY4FV45zXmWVVSqZh4OdhJD0IdiQP4s7Qi+OTMcee2y5KKwXheS5xDjUybWOkb/6t77GYdPdmVbqh6IrSi2yHsxwosSBUPKANhRTN5xzzjk9C9ykw5Fiu+22K1ZeeeXSKVXyiHEExHFLO8STF9u8b7rppgW7LElg1zcphxhlXQITLtLjMMAz6sjY+N3vfleS9IxnON3rvNxr+lldiOHpQw45pLas2Laso8u9l8I7bl5Nv2P4PrQf0q4zzjhjH35tvENbuv0QpyzwFn2HNDgDa33mq1/9alXVsW7nUPkvhMfQLe/6xrG04ZwJX0kbwIM4VO22224V39Iu3GdXGwlj1Qe0vs2Yi5MiY5B29oFejEc66HGb5/xpR8Bc9aHMGB7UtNZdf+1rX6vaSOgnxmFO/9bXOGFjsHCDnkPEytzYdmCOpGnk+qSTTirQmdz7OHZJv9TP9Acf+j7XH/vYx0rnL/e+/n3ccce5kJS/Y+skmYXO6eQ9dqjQzmjoUvAfjoy6X9PPm+Z3sfpA3TjPRyLoISwCSMCopXfcZmGQuXDOcTQGf75Exkio25frunuSBqdR5tltY/173vOe8itnqX+ofhnbF6S8unhYdZWyU8at2PFIG7ppLy2rpP0kxmmdxWU3xOhZueS/bz8JmdscddRRQ5WDLp7ub5865pQLsX0JupscAZmTST/SsXYExOlJP5PrOkdA2pO5q6Sh3zKfo3yc8vWcjjRajg9TNwETnJz1uE/53/72t8vxRuh3Y+R/SIjhwZD8Je0g+gZ5D6tNusqRehLHjikx46qUS7/WfSHGEbBJXuh8265ZfJaQU66Q53ji2xiblfvxneAYG4eMW03tXmevhJ6Y+vHBcQ5bBOUPYt4mOMfq+Hz45c7xsJtiW9Yfy8I/a6+9thTnFYe0Zeo8YVg6TI55cqhdKtaGMkx9PqStvTpPQKJYXSC3rO8iuWsszjVHEDpCx/MQ2wknP0kIte/Ke7HtxvsxsjSWTspLGcelvl3xsOTCb3/729LxSK+VId9ZJ91///1LMnF8x/FIxgbsRNi+WNPr6se6nqH6s8+Y7rvuSf/n4xDWUUX/wzaD3QuboATWmJhDSf9njeAzn/lM6SgmaXzjURuHfbGS+WIO+whYhfJarB4SM/doGheljzTF7vqS9InQurIe4pbhOgJin3XTuI6AvvJikPwqGFg8/hEwR0APR0AGRhgTxwo38EUigwm78wnz4kTHvbo/dpuQdPPMM09PGn10ELtXSbrPfvaz1W4UlM/XxvKMCRvbNktgO1q9aO4a2RgM5V2EhA7f/OY3q2d2NLBGxq5HDYEuR8A2nqUufMEGf6LgCj9gWK/jWe7pryPZ6UOnE2NiLM9Cj56Y4sTLlzsScFgRGolRmCXk4lkMDhzhw1bFuizXsEbZMkFA9mhHMpQs2YE0xhEQOSdl49SlwyWXXFI9I427YIKTmryLA6UOX//618tn2hEwJL0vNpTJl0hCB3JYO+pxpIM8I2YCAm20IV9hoQCusMIKPWlwdDv44IPL8YUvd3V/JQ+MYG7AMVV2dWRRhuOlJbz++uvlIpLQoR1WSHPqqadW5bOjAzsmuQG65f0mBdl9x/3NQrDeIQxnAzfsueeeVTmUx05PbpD+j2OhG/RCL2XhOMiOi3xFtN9++/UsBLM7GcYYN8Tw9GWXXZatLV16mn6n8E5Tnvp+DN/H9EP6J3zAn96VTdqP/sgEl36nFxy55ksvN2injbZdfsa6nWPlfwzdLkZdv3PQhmMMu7BJ4KtedrgVOYLBhq/XCGPRB1zZjFFHAvRox2V3Us7iN2OnlotaruaqTwwPSh3a4iuvvLJPXoljNcYhdiNgnMJZW9qLGGOEG1Jlbko77LHHHmU9RAfRtGLg0x8U4JxfN96SBtnCLu/HH398325h6DzsSsozdn9jPqfnVOwC7IaUOpFX7JwO+asdG9F5dMCgDy6CE86RbkjVBzAG6/agP9UFdnyFDvqPOATkGkdj8Yf2prGID1/Y2Yp+ouWYYIlhfZ999in4spwvivUR9aTRHyyE6JexfaEOc31vWHWVMlPGrdjxyDV045R60EEHlbr1KaecUvDBpLQfMbzthhg9K5f89+0nIXObOtlP3QchB10s63771DGXXKD82L7Eu02OgNgD0Af4Y14hfUo7Auo02iHbndcyp+ZDLMkDOeI6qLJbqzwnloUdaByWbkJZyBBOVRBa2IlQhw022KB6hpxHp+CP47hDQgwPhuQvaQfRN8h7WG3SVY7UM3ZMiR1XpVwW2KWvEMu4L8994lD7O+OuLlNsd5SVU66MJ76NtVkxh8sZQsYtn3FCaIut32uvvdao/2FL8rVFDGreRv1SdHzGJ/RbnPw0T8hOrpw6JR+0hDoChrQl9UiZJwxLh6krJ3aeHGOXCrGhDFOfD21r4csccawukFPW+9SjayzONUeAlpjxPMR2Io6AMfZdwSq23WJkaQqdqeO41Nc3HrRcEDrQvfSaJqeQaV0I2xbrfNi72FVUQlc/lnTEofpzyJgu6z6MW22+Ct/4xjeqsQ35ib5YF7DpkVfdnL8uvXtvVMdh6PTFKod9JIbXYvSQ2LlH0
7jIx7k46eHHw1ozc3v6vtaLOL1Ch5i6Pvzww6VdgDVqydt1BJQ02r7grjkIHV3yYpD8KjRYPP4RMEdAxxEQ5md3BP0nRzzWOQJKF9CCiYlUU9AOLUy06gKCQBaX8FR/5ZVX+pLx5ZYIEgSGBH0EFrthuUHy5V19jCLpfAcMN0/7bQgMGwHtCBjLs9CsJyQ4+DUFFo2E3+T4L502hWeRHZovb775Zp11oR0BcXjRDku5eRZlXOpJ7DoCcoycPGdRwg1MKHge4wioHZhdR0AUOHFuI/+zzjqrp+jFFlusokt2OpUE4tyoHQFD05NXFzakkQkFNNY5yelyXUds3tfKOLvkuIFFHo3TFlts4SYpuCdtdMwxx/Q914ZReIfjLgiMM3rRniN36oJ2eKmrY907dffYSVLo5AhjHTAyy46GkmbBBRfUScprFGSe1x3zqp1Sbr311r53cXCRvIn5SlGHFJ4mnxxtqelpu9Z9IpR32vKVZzF8H9sPpcyu9sMRQ7cfzt1u6JookX6s2zlW/qfS7WJV9zuWNhzZ9ZhWd/woMl0vHKO7umFYfYBFHd2XtCMgNLEbgDznA566wEc3kkY7Auq0KfWJ4UFddte1lld1hgf0DtdZBiOODl3165K5Odrhwx/+cNUOtAcOWRh0CSwAMGZxnANB17luvEWPkTYlrtvxD0d9nUbKKgsoinLBUD8P7Vuxczr6oJS7xBJL1DpK48gmaVjUwqlEQi59APyljKY5L0cXk4adW9zQ1UZdOlGOPtXVr/WOXRictTMO9aFP6A8fcIRzg49+GdsX3LLafg+6rinjVux4RH27+hE6J0dHSl9Fl3dDqp7Vha2PTtPVT/Qcw2cu5INNDjnoYtn2u6uOPjR3yYWUvkT52lCPQ0Vd0O2tHQF1Wj2vdR0B+dBL+iN2yLqFrDZHQCln0LoJ5TB/Elo5KYDFbR20/kI6FnVjQioP+pQ5jL4xjDahrl3lxI4pqeNqDkdAaUvaS/pem/39scceq9KRXi9+S15d40SXXCGf8cS3qTYrwS01jhm3fMaJHPXTcrzOltQ2bmu5l9tmmqrj02Z6Ywl4gg/RxdkHpyLmN8ybQkJMWw56npBLh9HyIXae7GOXGpYNJXXuEtPWIX2pLW2qLqDbsm7+7yPr2+hzn3WNxaRPkTW8Hzue826I7STFvhvbbjGyNIXOXOO42w+afg9TLlx88cU9utBXvvKVkiych+Qje+yedaGrH6fozz5juu+6Jxtx6I+x6tZumYewDobjo96Mqa7eTfdGdRyGXl+sumShj30khddC9JDUuQe4aDnrzrt5Dn/ozSiWW265nuZPqavG2nUElEKwNcucpk7PIJ2PvCDdIPlV6LV4/CJgjoCOI6AwXl08LEdAnCKk/L322qu2d7GduqSR40I5qkvuEV911VV97+60007l4ivHM+HRrIPvgKHfsWtDYCwQ0I6Aus+71208C925HAFjeRYa9LEVSy21VB+cHInKUUDswMMOIzrk5tkuJfy0006rZAzGftd5CmMLRqPnn39ek+l1jbzC6MQuHTiruUEb9FCkdNBOYziq6YVYJgPQpA14oekpqwsb0sjRxvRDvi5xg148YtHRDT4KIkceSz93v4bCGUqOPCTWTqNSFkeeSBryuf7668tHOEhIvjjoaAzlXeJcjoBamXf5lN3boIVjr+SrZH6ziCyBL7ZlkqcdGOS5VvR9FuHc8TKFp6EhtS2lHj5xCu/45B/K9yn9UOjpaj8m7dJfiet2/POZKI11O8fK/1S6Bee2OJY2vUsWX9vVLbhRruYRJt3ubjfD6gPIRHQKxh7+3EVqvcgNTXWha7LNOyn1CeXBOhrb7um2aDJOsPu47AgMz7HLjw5d9SOtdnxwZW6OdtDG7DqHK01vV53ZZVTBDacfAAAgAElEQVTLEBb83OAuMLlHJqfUKXZOx4K3HuPr6KYe6DR6fNUfleXSBxiztZzWehg0vPrqq9XOhHW7Q3W1EXm06UQp+Etbd/VrbbRjd9O6oBfc6hweu/TL2L5QR0vbvUHXNWXcih2PqK9PP9LtWLfjaaqe1YWtj07T1U9i5jZd2OSQg219zn3WVUff9myTCyl9ifL1XG4QjoDMW/VYqXcR1XgNwhGwbr7U1Tc5Aljk/AILLKBJLK+Zi8pzYr1TfV/ilhupPNiSdfVoGH1j0PqiVKatnJQxJXVcHQ+OgE16cJtcGW98m2qzkn6WGseMWz7jRI76pYzbg5q35dDxaTO9AM9cRz4MTmnPmLYc9Dwhlw7TpSuBW9c8Wc8p6+xW5DEsG4rWeWPmLjFtndK39LupuoBPW7bJek2Lz3XbWCzvp8ialPGc8n1tJ6n23dh2C5WlKXTmHMelbbviYcoFaNFrOTjD8aEuH2uin6+//vqN5Hb14xT92WdMD1n31I6xdc6+8iEvRwLHhFEeh6mPL1Y+slCPFa59JIXXoDNED0mde1Bel5wljXb2gyfkVKPUuoZiPWhHwBR+BScL4xsBcwR0HAFxLOBrKP3HkYQIARTepuD7RaLPjoCLL754ZTDjCzJ2jnL/EMLQxB8LrQQtSFkEYkANCb4DRkieltYQGAQC2hEwlmehK5cjYCzPQoOefIVuTZ2bZ7uUcBQgkTvE7HJCW7hf/+dsc46MPOOMMwp2hJOy3S+VXIVt+eWXL49vbqIjND35dGFDmtlnn72ikQmGG7Rxh2N+3eCjIGoHOjcPtq8WjPgy3h035LekIT755JNLMvTxl4cddphLWvVbTx5TdgRkt0lNh3bm23HHHctn5K/bSre71LVplyEfRZ8FK6GBoz90SOFp8kltS01L7LUP7/jkHcr30jZgG9oPhR6f9tNflt50003yahX7GFbGup1j5X8q3RVILRextOkddDGkNgW+ehf+I+bYbh2G1Qd0mfoaZ+MbbrihPDZA6GTMqwtdxjHeSalPKA/W0dh2z0de8b7eyRWHcR186tcmc3Ve+jqkHXSfrfugQOfrU+eVV1656qN1DnXs0iR9g/jJJ5/URTRe+9Qpdk6n55ldNH3uc5+r6Ne77ubUB1ZYYYWqDMZzHTiWFRrZTbou+LRRm05Ulyf3fPCXd7v69Y033ljVr2kxDd1e+kmdTOzSL2P7gtTBNx50XVPGLc3boXMln37ExzvSRuyi6Rt89awubCmvS6fp6idaX6YuXXMhyvTBZlBysA7jrjr60twmF1L6EuUP2hFQ757L7so4MtSFYTkCUnZb39THyPORoLszrrsbW52+Xlc/33u+POiT3zD6xqD1RalnWzmDGFN8x9Xx7AjYJlfGG9+m2qykn6XGMeOWzziRo34p4/ag5m05dHzaTMsATprKEWLaknIHPU/IocP46ErUpW2e7GOXGpYNJXXuEtvWOfpZUx6+uoBPW7bJ+qbym+63jcXyToqs0bwcswas9Z4220kO+67UV8dd7RYqS1PozDmO6zq2XQ9TLkAHG3bgPyBzXolZU6k7iVBo7+rHuh+FztN9xvSQdU/st1IveOK3v/2tVKOcU4kjM7wTE0Z5HKY+vlj5yMI2+0gKr0Gn
ll2xeojv3IPyfOTsXXfdVfUd+tBFF11UdpHUuvpgrZ0uB+0ImMKvJSD2b1wjYI6AjiMgBiw34BSIEBiGIyC7MOkjhGQAa4unmWaakmR9hChfwYUG3wEjNF9LbwjkRkA7AsbyLDTlcARM4Vn3Xb6qDwm5edZHCd911117lCNkEwsEKC51RxeF1EfSsmvMDjvsUB5TUSf7tEMY7/ClBseou2n5ygcnbTeEpud9H2w23XTTigZ3xz+21taTrjonOh8FEcc9qafrCKjHAEnTFR999NElPNNPP32Vb90W6oJhLkdA8tPGLnGyYMFLjihmket73/teRZfenvsLX/hCeZ9JaV3wUfQ5bkvw0Y6ALl9KmrZYxmGhJbUtJZ/QOJR3fPMP4fuUfij0+LSf5qe6hcUuw8pYt7Nbvq/8d99r65fyzO2fgnNT7JbhSxu7/+ljgdlhrymwWCz0EdNvdBhGH9Dlcc2xsYyrfD2q6yF0DtoRsK1Ph/CgW6+u3z7yijwOPPDAqs1oHx182qtJ5up8uI5tB23UaDNmU4ZPnRljpe1ljNK0wifynLjNETC0TlqOhszpDjnkkIomdnBsciKhHuyaKPTzUQ16CiGnPnDWWWdVZcA/L730UgWhfNR2+umnV/f0hU8btelEOq9Q/OXdrn7N2CMYNjkC6h0WYxwBY/uC1ME3HmRd3TFFMGuLZdxy3/Udj6TePv2IXdeEli5HwBg9qwtbaG2T/zzvmofEzG18sMkpB6VNmuKuOvKeD81NciG1L1H+oB0Bjz322KovNn3sBB3DdARs65vsgiS8Q+zu+HfSSSdVz9Gr/vrXvzY1v/f9GB7synxYfaNrMRU6c8iLtnJyjSkx4+p4dgRskiu02Xjj21SbVRc/+T6PGbd8xokc9Uvlw0HM23Lo+LRNjgV4t41j2pI8Bj1PyKHD+Ogd1KVtntxllxqmDSV17hLb1m6fSf0dowv4tGWbrA+luW0slrxSZE3qeO5rO9HlaJ2v7VrWGaSeEoe2W4gsTaEz5zgude2KhykXhBbt1ET7cTKL3pRB0um4rR+n6s8+Y3rIuif2rHnnnbeae2jHRHampM7zzDNP4wlYut5116M8DkOvL1Y+srDNPpLCa9AZq4fEzD0oz0fOMg6zU6bINVl3Tq2rD9bDcgRM5dc6nrB74wsBcwT0cATk6CJ2vlhllVUaWzfXjoAsIonQIWaQfuaZZ1r/5IhffeTOfPPN10hr0wPfAaPpfbtvCAwLgS5HQB+ehdYcjoApPOset3POOecEQZibZ32UcAg8/vjjexaKRWahcLvHBYdUiIX7RRZZpJKBLDzsvvvuBV8trrPOOtV9Uch03uwwxxHKQovELITvsssufYp+aHofbFBm5Ug+FEh+S9AGBSZ87JDgBh8FUefjOgLqYww4Mr5r7OA5X9H88Y9/7MENvJtCTkdA2W2XtqJ9CT/84Q9LWmQhlgUjUdppSxYQmNzNNttsZbqf/exntaTKO+Rdd9QVLzU5paTwtBCT2paSj2+cwju+ZfjyfWw/1HT4tF/bwiR5dRlWxrqdY+V/Dro11nXXsbS5O31+4xvfqMu+uqcn2nvvvXd1n4th9AEpEFmIDi0f4iDHcQZkpxsWtWU8GUtHQGj15UGpl2/sI6/IS+sc7Papncx82qtJ5gqdqe3ga8ymPJ8651g8iq1T7Jxuyy23rPorR/+2BXb/lb5Nn0d3zq0PsGO0dixklwUCRwFT9owzzti4q7RPG7XpRJQTi7/g1tWvUxfTKKdLv4ztC1IH33iQdU0Zt2LHI6m3Tz9qM3RLPil6Vhe2lNGl03T1E/IIndv4YJNDDgqGXbFPHX1obpILqX0J+gftCLj99ttXcpmdjJrCqDgCQp/eRXbVVVct55bc5yinRRddtKpP01HKTXV076fwoJuX+3tYfaNtMVVoyiEv2spJHVNSxtWJ6gg43vg21WYlfTVHHDpu+YwTOeqXgw9zz9tSdXxpL/AR/T92Jx7JS8ehbcm7g54n5NBhfPQO6tI2T+6ySw3ThpJj7hLT1rqvpFyn6AI+bdmkQ8bQ3DYWS34psiZ1PPe1neSw76a0m68sTaEz5zgubdsVD1MuaFrWWmutagzABnTzzTfrx33Xbf04VX/2GdO1bGWe0RW0jYsNQ3DwIrBRCGMfjl2xYdTHYV+sfGRhm30khdfAPlQPSZl7UJ6PnCWd5kmcPgmpdfXBeliOgKn8WgJi/8Y1AuYI6OEI6NPCuRwB2SlBJmXE7IbkG/bcc8/qXb64xdM3JPgOGCF5WlpDYBAIdDkC+paZwxEwhWfZflvze9uRrHV1ys2zPkq40PHcc88VOHJNO+20PXVAcdLbb0v6thjHrs0226zKh0Xjo446qud4oQ033LB6XucIKPmzpTu78WlcucahsC74pvfFRu98gOPaUkstVSy22GIVPTPPPHNxyy231JHi5ZjQZpigjlLvD33oQ7Vl1N3EmVzeI2YL8KaQ0xGQCaeUO8MMM5ROJVtvvXV5T39BuN5661XpTjvttNLZlPdwBpQdjFx6fRT9JqeUFJ4WOnyU/ba2lHy64py801UWz334PrYf6vJ92q9r0VxP4up4bqzbOVb+56BbY113HUsbjsXC08RtOwK6x6rCMzoMow9Q3u23314gl4VuFgzYCVzCgw8+WD0ba0dAaPLhQaHdN/aRV+R1zDHHVFjMOeecPdn7tFeTzCWjHO3ga8ymPJ86py4epdQpdk6HQ630ZRxttbNmT4MVRbHffvtVadl9jfFkEPqANqxzPCxBdvVtcwzxaaO2cTQFf8Gqq1/nWEzr0i9j+4LUwTceZF1Txq3Y8Ujq7dOP2gzdOfSsLmyhtUun6eonUl9i37mNDzapclDT1XXtU0cfmpvkQmpfgv5BOwKyI77I8AUWWKARslFyBEQv+eAHP1jRzUkBODHi6C114Sj6tvGosaJFUY5NuewETeUMq2+0LaYKbTnkRVs5KWNK6rg6UR0BxyPfptispK/mjH3HLZ9xArpS65eDD6Ej57wtVceX9gpdgJf3fGPftpT8BjlPyKHD+Ogd1KVtntxllxqmDSXH3EXaLrSt5b2YOIc+7tOWTTpkDM1tY7HklyJrUsZzyve1naTYd3O0G7T6yNIUOnOO49K2XfEw5YLQ8vTTT1cnMImOzoYefBDaFNr6car+7DOmh657/uEPf+jZ2Y0NlvgokfU5fCXQRWPDqI/Dvlj5yMI2+0gKr4F9iB6SOvegPB85S7rpppuumrvKqSWpdfXBeliOgKn8Gss39t7oIGCOgBGOgNdff31xyimnFL/73e+qlszlCEiGWhlAsfMNHFslAznxvffe6/tqmc53wAjK1BIbAgNAINQRsI5nISuHIyD5xPIs73KUmPAtDk8hITfP+ijhLn0o0Rj3pQ7EtE9I4ChaeZ+F67ovknwdAaXcyy67rGeCg8PiG2+8IY/74q70Idiw0w2OeNSJXZPmmmuucncpHBjbJlg+CmKbYUL3ByY4TJZ9g1aM9fbp7vs5HQH5MksvEmGUYgcjJmhM3iSwU6b0D75eO/jgg8v
fLMY1BV2f0B0ByTOFp3k/tS2b6uXeHwTvuGXU/W7j+5R+KGX5tF/XorluwzpHQMrSaUL0LaEztZ1j5X8q3UJ/WxxLGwvAwq849jYFHMYlHfE111zTk3RYfYCj94SO1VZbrc+5eNQcAQWkNh6UNL6xTz8mrx133LHC6lOf+lRP9j7t1eYImKMdfI3ZEO5T59TFo5Q6xc7p2HFP+jPxU0891dNO+sfmm29epV166aWrR7otc+gDsvuf0PXjH/+4wPGQ321H0fi0UZtOlIK/gKGxqNMlciymdemXsX1B6uAbD7quKeNW7HhE3X36UZuhO4ee1YUtdHbpNF39pK6du+Y2PtikysE6upru+dTRh+Y2uZDSl6A71BGQxYu6wAdQIhP17ubMFeU+c7mmeeMoOQJSP+iURQx2F+GjPhy/OaLzjjvuqIPA+14OHvQpbBh9o20xVWjMIS/aykkZU1LHVXRX6d/Ef/nLX6TawbGv/Z1TGHSZsjOMLjBVroxXvo21WWnscl93jVs+44TQlFK/HHwodBDnmLfl0PGhJWQBXtch9LqrLSW/Qc4TcugwPvKBurTNk7UO3GSXGpYNJcfcRdpOYt+2lvQxcQ5dwKct23TIULrbxmLJK0XWpIznlO9rO0mx7+ZoN8GKuE2WptCZcxzX9LZdD1MuQAf6zworrFDqRGyqoE9n2XbbbRtJ7erHKfqzz5iu29VnR0AqIrv/of9xfcABB5T13njjjRvr6fNg1MdhX6x8ZGGbfUSXE7r2CM4hekjq3IPyfOQsO9nr+YLM4VPrqrF2bejS52QOTfmrr7663O6JfeQFLwySX3sIsh/jEgFzBIxwBMSoBXNecsklVaP7GiJ4RwQLwqwu6MkKu5QwMPoEjIiSNzFfzoYELdw44sOCITCqCIQ6AtbxLHXL5QgYy7PQwCAvfIsD1G9+8xtv2HPzbJcSfuihhxbLL798cdttt/XQyFdeentxtk4OCdpowmS0LjQ5AuIwBk3bbLNN32scUyzYEvMVUGh6ybQLG0lHjJO4OLg98MAD+lHrtVYQ11hjjdq0bYYJbdShvl//+tdr86i7KY6LvDfLLLMUHMlbF3I6ApL/RhttVLURjgiUDz/p8OKLLxaTTz55+YwduWTXCRx8m4KPot/mlJLC09CU2pZN9XLvp/COm1fT71C+T+mHQoNP+3UtmuuJkutkJuWMdTvHyv9UuqX+bXEsbXoHz1lnnbVxBxiODYbf+WPR+Pe//30POcPoA+5kH4OEG0bBETCUB906dP32kVfs4MjRGtJme+21V0+2Pu3VJHNztYOvMRvCfeqs+QxDuxvYeV3wIEbHkJBap9g53T333NNDkxzFK3RJjCGYsV7oZ4c+CYPQB3SeMp6vssoqUmRt7NNGTTpRKv5CUFe/1uMdRvC6IIZnsK7Tj7v0y9i+UEdL271B11XzU4h9A5pjxyPe9elHbYbuHHpWF7bQ2aXTtPWT2LmNDza63ULlYFt/q3vWVkdJ70Nzk1wgj5S+xPs+joDaya/pdA+dRjsCkl7kMjE7DNWFUXMEhMZdd921pH399devIzn6Xg4e9Cl8GH2ja3EGOnPIi7ZyYseUHOPqRHUEHK98G2uzEn5q+8hW0jTFseOWzzghZabUL4UPBzVvy6Hjg03IArxg2RbHtqXOc1DzhBw6jI/e0TVP9rFLDcuGkjJ3SWnrFHlBX8mhC/i0ZZsOqfusz3XbWCzvp8ia2PFcyva1neg+g24ass6Q0m6hsjSFzpzjuODbFQ9TLkCL7CDJWhXjIyd86TlHk828qx+n6M8+Y3rMuqfbnuL0WLfpSFc76eejOg4Ljb5Y+cjCNvtICq9Bq68ekmPuQXk+clY7VtNfONGCkFpX7eSHrlMXdBr4qS74yAveGyS/1tFl98YXAuYIGOgIyMIJ3s4MloNyBORrGj0Ys0UxjjZ1gYVTFhgk4MAn7yK4rr76annUF4tQkwd6wGBxKPRoYcnHYkNg0AiEOAI28Sw05nIETOFZttMXniX+xCc+0fj1/8svv9wjC3LzbJcSzlf90Ei5bmCSJvVgx7aQsMsuu1TvMinBkOKGT37yk1UalFYJjz76aHkfpcgNGBummmqq8jlym3xD00ueXdhIOmIcJASLCy64oDzyEAW2K/go412GiUUXXbQqm69Km3aGZUw577zzihtuuKEkSyue0E6b1I07G2ywQZW/Hnu66tb0/Oyzz67yE8xOPfXUvuQf//jHe9KxgNZmTPJR9JucUig8had5P0db9oFQcyOFd2qyq70Vw/ex/VAI8Gm/rkVzPVGqW7ymrLFu51j5n0q34NwWx9J244039vDqlVde2VcM+qU46MP3W265ZV+aYfSB559/vnRCFNmjxxYhiJ1s5Dk01YWuyTbvpNQnhgfr6Gy65yOvvv3tb1c4sMMuCwE6+NSvSebmagdfYzZ0+9Q5ZfEoR51i53QcySh9dqmllqodyy+//PIqDfqR/ghlEPqA1leFtosvvlh3ob5rnzZq0oly4A9BXf1aGwYH5QgIHbF9oQ/UlhuDrmvKuBU7HlFdn37UZujOoWd1YQudXTpN2zwkdm7jg02KHGzpbrWP2uooL/jQ3CQXyCOlL/G+jyPgIossUsnXpp2RmxwBwUA7aXPNBwlu4EMzkaXEdXa7QesmmibsirLIxseB1113Xbnja92cXr/nc52DB33KGUbfGFabdJUTM6bkGFcnqiPgeOXbWJsV8ucjH/lIefLFpz/96aidHWPHLZ9xQvg9tn68nzJuD3LelqrjUzffBXjBsSuObUud76DmCTl0GB+9o2ue7GOXGpYNJWXuEtvWBx10UIH9gI9E0bljQg5dwKct23TIULq7xmLyS5E1vB8znks9QmwnsfbdlHaLkaWxdOYcxwXfrniYcuHSSy8t5w18gH3VVVeVpFFn2WSBuQQf/b7wwgt9ZHf14xT92WdM1+ODr68C68B6bk39Fl544b66xdwYxXFY6uGLlY8sbLOPUF4sr/Gurx6SY+5BeV1y9m9/+1sxzzzzVHNrjgPWIaWuwnv0QebKOOG6QdtffRwBm9a3yHeQ/OrSbb/HHwLmCFgU1RaxMCVOI21Bb1uuHQHvvvvuSmCw00GdMY58tdMDQqbO0YJ3ZWckaOKPozdZjMSYxh9blMLcCDNNMzskiRGO99hhDKWbyTr5cvwChjkGLhyOdNDHL/KudsbAuFdHq37frg2BYSGgd9fQ/b+u/CaeJe3aa69d8e0hhxxS93rZ7+ebb74q3RlnnNGXLoVn4auVVlqpyh/eQ8m49tprS379xz/+UTzyyCPlcagsGvz0pz+tys/Ns+y8Rvnyx0RfB5mI6Z1jeE79F1hggeo9jn0LCe7XOvoLMwwV+gsjaDviiCOq7MUYwf2nn366us/FueeeW9EExoTQ9JJhFzaSjhjZKhjqmB1Q2CUBhZp2dYMcecs7yOi6oL9SYVt3N+B0w+ROyuWoXXAQbHDewDlx2WWXLdMcddRRZRY4mU433XTVe7y/zjrrlM7kOAiQL7TrvN0doVxafH7jIMk4JfTikADWbj
jhhBOqNKTdZJNN3CTVbyaTU045ZZWePlQX5p133ioNyrkOKTxNPjnaUtPTdJ3CO015uvdj+D62H1K2234YReuCPoJAHFp1Oi23mfTr46fkeqzbOVb+p9KtcWq6jqWN/JAdwtPIPcYwHXbaaafqOePan/70J/14qH1A70DAOCa0YHRAPmoDHXKlTheWXXCoM/zihtQ+HcODLg1tv7sMQTiNy0dQ1NHdocitX4zMzdEOOo9TTjmlrcpeMlob1jkuxg0chSj9nJjjrnXQ9MT0rdg5HXNE+QgCunbbbTdNVumcoeUn80kdBqEPMK5rmlgIanPmh57UcTQVf7df141F2vDNUZh1ddIL0RzH7AYf/TK2L7hlNf0eRl1Txq2U8Uj3IxaE64K2z+DEpUOqnuWDLeVpnqzTadr6SezcRmPTNO9IlYMay67rtjrKuz40t82VUvoSNDAHFrnPLid1gfmSpMEuB80PPfRQcf/995dzMo4FkufEt956a082Bx54YM9zPpJjYedXv/pVgV3j6KOPLjjSS+dRd4TwoHUTTTT2CU2PXFN/HNLZdaRu4UPn0XSdyoNN+br3h9E3htUmXeXEjimp4yp2COkbxD4fSrrtJL997e8PP/xwT5ks9rkhVa6Q33jk21ib1RZbbNGD6bHHHutC2vk7dtzyGSek8Nj6pY7bg5y3per4YKNl6hxzzNG4diU4dsWxbanzpV0HMU/IocOkzpOpp49dinTDsKGkzF1i2pr5sZb72KeZE4QG3W/JL2TNQMrKIeslL5+4ayxOlTXQEDue864e07tsJ7H23ZR2i5GlsXSCR65x3KdvkGZYcgFbLHYK+Ma1+9x11109PgR1R+d29eMU/dlnTI9d93R3PGxznvJtM9KN4jgs9PtipWVhjH2E8lJ4TcuFLj1Ey6kYmyq0tjkC4vwqR2bDI8yxX3nlFYG0jFPqyvxfj4Gs92PrefzxxwvWzvfbb78ep1U2QakLvvJikPxaR5fdG18ITNKOgF/96lcLjiXSDgMwJwbgpj+UVmFgHAH5QpcFTQxccp94tdVWK3beeedCjvq45ZZbyi+IWRDV6TAE8oUECrUOzz33XPGBD3ygJy3vMTnSEyTurbnmmvrV4rTTTut7j3R6EY/f7tnk2pjCc/6o15JLLllipB2Qegq0H4bAkBDIwbOQyqQRA5bmJb4W2XbbbUtjvVQHJRleFn4gZqFm++23L3BM0iGFZ3FK0V8YSHk4XU0xxRQ95aN0SsjFszg8IIfgdSmbmC99kW9yvK1MxHi24oorlk7UOBprx+XttttOyPOOcTTWC2DkjzIo94i1PGQBmXJQxrQxgoUSdlBFqUX5kfbFwU12uwlN74uNVJb0YKNxrLtmZyRZEEEJpj4ce6vTsvvehRdeWGZNG7ALxZxzzlmlwemcNtFO2yR2neYkT/CRa2LGIxxOJWBEEMx0Orl2xxB+syCWur26OCVSDs65dYGFA+2EWLeTEAvwODwst9xyPfVEYWY8FucvJoB6kY5ymVCA73333VcVH8PTuduyIqbhIoV3GrLsux3L96H9kOOo4Vu3/fhoQrcfC6DaSEr7obMhq5566qmKfu3oTRp2yIQ3Z5999p4PIca6nWPlfwzdFTieF7G00Za6HXGm40hMnPgZV2gP/phk/+QnP6moGYs+cNhhh1X0QBMycP75568MYvqLT55zbDtyBsPt+eefX7Dbjx6np5lmmrKuOMvlqk8sD1bAdlzoBQ6MhDgrHX744cXee+9dfPSjH61kL07bfB3JF7aEnDI3pR0Y8/lgSjuV0+cYV12HdR8ZjfP8uuuu29Mv2MUA/ULGiP33379H96FvgBWySuRQSp2kyWLndOy+pj8Mgzb0NXbf1HNZ+i/91A2D0Adw4Acn/sCvKfi0kY9OFIs/cq9rLMJ5gbbWOhn14qMTcRpFX0dH0roT/QhZiN4Wql/G9oUmnLnvI6Ny1FVoSBm3QscjTkWo06032mijSrd+7LHHSt1PfxiCrkkaacdYPQs9tasfgUuXTuPTT0LnNj48lksOStu3xT519KHZRy5AR2hf4h3GxK222qpgjBc5xvwSGSofVUkd4X1JUxejs+r72AC0Dot+ga6h07jXyBJ9j11d9t1335KEYekmUl9i2kfTU3ctTpH6PZ/rWB7EThAaBtU3htUmXeVoPCXbILgAACAASURBVGLGlNhxlQ/a6eN6zk8fYeGNsZSj1nyDr/0duUjebpk4h2H34sOfnHJlvPFtjM1K2kg7V9GOTfYbSV8Xh45bPuOELiemfj46EWV0jduDnrfF6vjwITyx0EIL9chreIL7dR8haEybrkPbsimfnPOEnDpM7DxZ19PXLkUfHJQNJYc+H9PW2vFQdAPWMUJDii6QU9b70N01FueSNUJL6HgeYjuRMohD7bu8k9JusbI0hk5oTR3HNVY+14OWC3feeWeBYx/2b+E9frPrpdSXtU13QwgckZjbdPVjXcdQ/TlkTI9d92QNUOrN/I2+mCuM2jgs9erCCvtGDvuIlBfKazF6SOzcA36WoB0B0WGxEXPSIDYfvRM/64LwTV0IravOw/UrkH5JzLou5co91hY222yzvnXeLnkxSH7VdbHr8Y3AJO0IqI84FIYLiXEEZKGk7R2+MCfwlVxburodtPiyl0UbfXSI5IHxj0UqvPfrwm233VbuKCXpdbzggguWXxXXfTnMIpdOyzWOkuutt161o1RdeXbPEBgGAjl4FjpZEHX7ufxmMUiCfDUjz3SMQ4kbUniWdzlet65MHMRY5GYXADfk4FmOGNd1c6/ZRZSA0Uuc89w0OFIef/zxtcf6ujTX/eYrJa38kD9Y7LPPPuXX4meeeWYfjXvssUfBETM4LGsHAE0bypIs3FNuaHpfbMibXZBk9yjkLLsooYwhxz/zmc/0tS1pCNRR06yv5UgpHPb0fX2NfHYDjtsY17XzHO/wG5xxhn3ttdfc10oD4BJLLNHzHooojuMY3nEi0GVzfeKJJ/blE3JDHyt90UUXNb4qRmfGpLrx6/XXX++jTdMqzhnuDpw6jd7pF0JCeXoQbdkIyH8exPJOV77yPIXvQ/qh+zWgbheupf3cBR2dTh+FzaISTsP6Odfve9/7+iZVY93OsfI/lG5p05A4ljacxJgs64m1tAWyHWM/BiAdxqoP6C8ihcbll1++PLIA+uaee+6+fsSuadtss03ffXkfh8dc9UnhQY1v07Ve4BD6dcwYgMM/cwsdcsvc2HbQxk1NN9c4HujgI6Nx9nDzkd+ye2xdv5Y0ekE7tk6a5tg5HTtQuQYjaEQPQP+Quuiy9DULgjn1gWuuuabElTYRea7Lk2ufNvLViWLwf+KJJxrbH/ygvS0NTj6Ek046qTEf5vAh+qVgE9sX5H039pFROeqqy00Zt0LGI59+hCO68K0b6+ODYvQsfYSsm7f0I3Dp0ml8+kno3MYHm5xyULd/3bVPHX1o9pUL0BDSl0ivv8J32xOnCjcwN9KLDrzDXJlFB/SjOvue1mFZvOADRfcjLj6gYMymT7p08MELYVi6CWVBpzhvM
LbgYIUTLvof9Nd96MjO9KEhhgexE8SEQfSNYbVJVzkuHjFjSsy4yk5Dbn/Vv+XDR5e+ut++9nd0GF2Ge41DS265Ml74NtZmJe3BR5U4H8smB9iWQkPouOUzTggNsfXz0Ykoo2vcHvS8DRpidHy9W67LD/xmsT0mhLZlUxk55wk5dZjYebKuZ4hdalA2lBz6fExb43zD2ok+hjb2eOBYXSC3rNdtW3fdNRbnkjW67JDxPMR2osvgOsS+K+/GtluKLI2hE3pTxnGpr288aLmgj3bVMn+ttdYqScQhVX84qtNgy+zqx249Q/TnkDGdcmLXPWXTEuYkucMojcO6bm1YtdknZO3R1z4iZYbwWqweEjP3wF4vwZ2T677ONXNonAPhibYQUledz5NPPtm3YQzzZtZZmf/ro4GFNnedt0teDJJfdV3senwjMEk7Ao6XpmObXbbTvuqqq8qvFvl6nXs+4Zlnnik4yggFgK+TtSBseh9nDxaH+JIdZbJLEDblY/cNgUkVgRSe5bgStg7mCxO+ZIffu7bOHybPQgtf1uC8jJMbX/hx5ApHlqcGJl18fYG8wumx7uiWpjKYTOMYfcUVV5RH3/KlSdtxM6Hpm8qV+2Agu76w0OM6uJCOMl3FW3YqlHxyx2xpDRZgijx3t7huKo+jhDHKM37YGFCUY27sONyEcc77KbzjQ0cq38f2Qx/a2tJQLrIKp1xklXt0p/tuiux284r5HSP/KWcYdKfQxrb34I+jLYZAXx02BkP3Hd8+gDGKMZcFbK5HLaTyYFt99AIHuxCwyyvzAByzcaRnQWJYYdTbIQaHXHWKmdNBL0ZZdCqcL2hbn7mgrmcufYBdMjFs4Zw4zJAL/2HS3FVWbF/oyneYz1PGrdjxKKV+g9azUmiTd3PPbSTfiRwPsi9x1JDY4f4/e+cBtkdR7m8gVAMo0ns7CBEUDggIalBEypGuoHQOEEqoRinCgYBIsf1REVCEIEUMVUpAURApihQREaQTilISQk+AUOZ//facZ5l3vy2z5S3f+95zXd83++7OzjxzT595dkZjsSptqXbgVb2tMVlRH7Yb6bSrd0RolhLJ73//+2inf1vY0MeNVUyny2A780aV+LfznbJtSj+2q03y7eVy2+SclT52VbnWR5ZVTdPtVpPxqxonvdfOcZsvV90+vu9X3eu6admtcUJRvJsaJ4fOSZg86id3cw7F5Eizq6S16kVTOvJPhEjzP+9ep/sCebL04rOy7XnVOJSd362abnXr0rJyGo9OteP9VC8Yu3b1n8uue0oO+2Chyi6kFp8iu5faYZO1LCt7r45dtayFhlln7OErAmqTGa1NaI1IHw1Jwb2sqRJX1WU6vUDz7NLvUXzKmrL1RYj/7SqvIWHjpvMEUATsPHNChAAEIACBPiOg3RBtgSPvayMpTPpbrz/wwAN9RoLoQAACEIDAcCLgL3DoOEIMBJomoIkv21HLdpluOgz8gwAEIACBzhFQvW474WsMrMXnLKOdEG2c/JWvfCXLGfchAIE2E2hyzmqDDTaIyrUdS95m0YO8bzJ+QQHiqBECvTxOYJzcSBJHH0aoH6DTXXSMKAYCEIBAOwnYqWLa3RAz2AR8RcCso38HmxCxHxQCKAIOSkoTTwhAAAIQaBsBbaNtCxx77713Zjj6snOppZaK3C633HKZ7ngAAQhAAAIQ6AQBFjg6QXmwwzj//POjfo+OiezkjqCDTZ3YQwACEGgfAR2TbmNf2XmKgGeeeWbs9he/+EX7hMJnCEAgl0BTc1Z23LN220k7CSNXiDY+bCp+bRQRr1MI9PI4gXFySoKVvPXcc8+5NdZYI+oHHHfccSXfxjkEIACBcgS0o+Mqq6wS1Tk6lQYz2ARQBBzs9Cf27xNAEfB9FlxBAAIQgAAEKhE444wz4gWOkSNHRsc4pHl06qmnxu70xTIGAhCAAAQg0E0CLHB0k37/hf3666+7Nddc022xxRbulltucVOnTnVLLLFE1PfRURgYCEAAAhDoDwKLLbZYPK4dO3ZsaqR0jNHyyy8fuZOtIwUxEIBAdwjUnbOaNGmS23jjjaPyvOCCC0a7fHUnJumh1o1fuq/cbZLAcBsnME6unvpq7w899FC3yCKLRHXGDjvs4HRCDgYCEIBAkwTOPfdct/TSS7uTTz7Zvfzyy+6EE06I6pzVV1/dacdZzGATQBFwsNOf2L9PAEXA91lwBQEIQAACEKhEYPLkyc7vXC6zzDLuuuuuc/oSSWbKlCnusMMOcyNGjIgGJOPGjasUDi9BAAIQgAAEmiIwbdq0eEFPO/qsvPLK7vHHH3fvvPNOU0Hgz4ARuOCCC6J+ju0UpY8jdL3RRhsNGAmiCwEIQKC/CUj5z+p62fvuu697/vnno0hrDHzjjTe6UaNGRW6kCPDII4/0NxBiB4EeJ1B3zmrbbbd1888/v9MJGPKr10zd+PVafPpRnuE0TmCcXC8H3nvvvVH7v/baazsd04mBAAQg0A4C//Ef/xGPR2zuabbZZnM33XRTO4LDz2FCQGPRa6+91ikv2HhVG7K88sorwyQGiAmBZgmgCNgsT3yDAAQgAIEBJXDzzTe7+eabL+5gqqM599xzx19A6veyyy7rzj77bI7GG9A8QrQhAAEI9AqBI488sqW9sskR2VJaf/DBB3tFVOQYRgQuuuiiIflKfR8dC4WBAAQgAIH+ITBz5ky33XbbDanzF110UTfXXHNF97X4stNOO0UfGfRPzIkJBIYvgTpzVq+99ppTue9lUyd+vRyvfpFtuIwTGCc3k+OkTImBAAQg0E4C9tGRP5950kkntTNI/O5xAjNmzIg3YvHzhV1rbIqBwKARQBFw0FKc+EIAAhCAQNsIPPXUU278+PFu9OjRboUVVoiOw9tggw2iHRImTJjAUQhtI4/HEIAABCBQhsDll1/u9ttvv8y/Z599tox3uIVAREBf3n7jG9+Idpf8yEc+EvV/WAQic0AAAhDoTwLaQfjKK69022+/vVtjjTWclABXXXVV9+Uvf9kdffTR7r777uvPiBMrCAxjAv0+Z9Xv8RvGWS86MWU4jBMYJw/nXIbsEIDAIBG49dZbo1NOFl98cfe5z33Oqf7GDDYBHUOfN9d91llnDTYgYj+QBFAEHMhkJ9IQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0C8EUATsl5QkHhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwkARQBBzIZCfSEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEINAvBFAE7JeUJB4QgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMJAEUAQcyGQn0hCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCDQLwRQBOyXlCQeEIAABCAAAQhAAAIQgAAE
IAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIDCQBFAEHMhkJ9IQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0C8EUATsl5QkHhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwkARQBBzIZCfSEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEINAvBFAE7JeUJB4QgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMJAEUAQcyGQn0hCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCDQLwRQBOyXlCQeEBhgAnfddZc79thj3R577OFOPPFEd/fdd3eExssvv+wmTpzoxowZ43bZZRc3ffr0joRLIBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEPAJoAjo0+AaAhAYVgReffVVt/XWW7tZZpml5W/EiBHu9ttvrx2XSZMmuS222MLts88+btq0abF/d9xxh9twww3dHHPM0RLuiy++GLvhAgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAp0igCJgp0gTDgQg0CiBmTNnunXWWSdSxFtrrbWiHfl8hcAJEybUCm/KlClurrnmihX9tOufmWuvvdYtvfTSbsEFF4yfK+yXXnrJnGBDAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgYwRQBOwYagKCAASaJKCjgKV8p93/HnvsscjrK6+80i288MJuySWXdE888USt4P7whz+0KPltt912Q/x7/vnnW9ygCDgEETcgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABDpAAEXADkAmCAhAoFkCb775pltggQUiJbzRo0e3eC5lvDfeeKPlXpUf06dPd6NGjYrCmHPOOd3VV189xBsUAYcg4QYEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQBcIoAjYBegECQEI1CNw0UUXxTvx7b///vU8y3n7rbfectoZ8Lnnnkt1hSJgKhZuQgACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQh0mACKgB0GTnAQgEB9AkcddVSsCDh+/Pj6Hlb0AUXAiuB4DQIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgUQIoAjaKE88gAIFOENhzzz1jRcATTjihMMj33nvPPfHEE+4vf/mL+81vfuPuvfdeN2PGjML3Zs6c6W666SY3adKkVPehioDPPvusu+aaa9wdd9yRGea///3vKJzbb789003ygY5A1o6FDzzwgFMcs4yOUr7zzjvdtdde655++uksZ9yHAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEBimBFAEHKYJh9gQGEQCUuDTboCLL754rAg4atQot9VWW8V/UoozIwW8Aw44wC255JKx+1lmmSW6nnXWWd2OO+7opkyZYs4j++2333YXX3xx9OyDH/xg/N7kyZNb3OlHniLgk08+6b73ve+59ddf3ykshbvZZpu1+PHUU09FbtZbb73YzaabbtriRmGcffbZUfwWXnhht9Zaa0XPpdC40EILxfJtv/32Le/pxxVXXOHWWGMNN/vss8fuJMcqq6zifvnLXw5xzw0IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgeFJAEXA4ZluSA2BgSTg7wRoCn1J+09/+lPE5q677nKLLbZYpAA3cuRIJ0W5gw46yK222motSnGjR49uYakd85J+6ndZRUCFlfQnqQh48MEHD3HjKwKeccYZbrbZZmtxs+KKK7qLLrrIjRgxouX+lltuGcdDux3utdde8XMpTir+G2+8cct7xx9/fPwOFxCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACw5cAioDDN+2QHAIDR+Dxxx+Pjtf97Gc/Gyu57bffftE9Hburv9deey3isu2228ZupADnm0suuSR+JmU9HRlsRsfo/u53v4uO8vUV+coqAj744IORP2PGjInDSioCmhvFwcLyFQElh5QFpeRnz2XPMccc0W/t7LfAAgtE174i4OGHHx67F4dXX33VohcdP2x+SZnwkUceiZ9xAQEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwPAmgCDg80w2pITDQBHwlvxNOOCGVhY7RNYW3pCLgzJkznX/s7y9+8Yshfkgh0N6XXVYR0Dz87ne/G/uTVAQ0N6ecckrsxlcEtOeTJk2Kn0uWZZZZxt19993R46lTp7pjjz3WXXbZZdFvKRfOOeeckXvtBOgrAZp/2lXQ4iYlRAwEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAwPAmgCLg8E4/pIfAQBIIUQS86qqrIgW58ePHu/PPP38Ip8033zxWhpOyXtL0qiLghz/8Yffvf/87KW78e/fdd4/j9fWvfz2+718ccMABsZv11lvPf8Q1BCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgMAwJIAi4DBMNESGwKATCFEEzGL08MMPuwkTJriVV145Vob7zne+M8R5ryoCaje/PLPGGmvE8Vp33XXdnnvuOeRvzTXXjN0suuiied7xDAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEBgGBBAEXAYJBIiQgACrQTKKgLecsstbt99942O1LUjcX27XxQB3333XTfPPPPESn5+HLOuR44c2QqXXxCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACw44AioDDLskQGAIQCFUE1JHAq622WqwYp93vDjnkEHfjjTe6rbbaKr7fL4qATz75ZBwnKf5dccUV7plnnsn9e+6558hQEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAALDnACKgMM8AREfAoNIoEgR8L333nM777xzrBQ3//zzu5NPPtm98cYbMa7tttsuft4vioAvv/xyHCcpAv7617+O48sFBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgED/EkARsH/TlphBoG8JFCkCTpw4MVaIGzFihLvpppuGsOiGIuDGG28
8RA7dOOWUU2J5N9100yFuJk2aFD9fccUVhzz3b3z4wx+O3Y4bN85/xDUEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQJ8SQBGwTxOWaEGgnwkUKQKOHTs2Vob7xCc+kYqiU4qAvpLf6quvniqL76auIuBnP/vZOO4LLbSQe/PNN1PDzLv5zjvvOO2qiIEABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQGB4EUAQcHumElBCAgEegSBHwwAMPjJXhPvShD7m33nrLe/t/L7/whS/Ebr773e8OeS4FOh2va3+TJ08e4ub555+Pn8vdSy+9NMTNpZdeGrvR7oSPP/74EDdNKgJedtllcXiSad99981U6nv66afd0Ucf3SLP8ccf77Sr4FJLLeUuuOCClmf8gAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhDoTQIoAvZmuiAVBCCQQ2DLLbeMld2OO+64IS5//etfx8+lDPeDH/wgdvPHP/7Radc9U/CTfcIJJ8TP7eLFF19scfPII4/Yo9j+17/+1eJmypQp8TO7+Mc//tHi5mMf+5i7/vrrnRQLb7jhBnfkkUe6RRddNHbz+c9/3l6NbT8+UtB7991342fJCz1be+21Y/8Uvx122MHddtttkUKklCL//Oc/uyOOOMJ94AMfcHPPPXfsxaOPPupmnXXW+N2RI0e6mTNnxs+5gAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhDoTQIoAvZmuiAVBCCQQkAKfbvuumukvGaKfFKiGzNmjDv22GPjN15++WW34IILxgptcisFOrsne9SoUfHzxRZbzO29997uyiuvdC+88ILTjoJrrrlm/Fzvf/KTn3T777+/u++++9xf//rX6HrddddtcfPpT3/aHXDAAe6ee+6JZdHFJpts0uLOZJc9++yzu5VWWil+Psccc7idd97ZnXnmmZHynvxbZZVV4ud6Z6ONNorCkUJhmpk6dWpL/Cw8Kf3pz37L3nzzzWMvrrvuupZn8847r3v77bfj51xAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQmARQBezNdkAoCEEghsN5667UoqvkKbcsvv3zLGw8//HCLgp3czj///O7QQw912rnvnHPOGeLX1772Nafjcn1/k9e//e1v3XnnnZfrZuLEiS2yPPnkk+4zn/lMyzvaeW/jjTd2f//7351/NLCFt80227hTTz215R17ZraO8c0yr732mhs/frz74Ac/OMQPKfhtvfXW7o477mh5ffr06S0KhOPGjWt5zg8IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgd4kgCJgb6YLUkEAAg0QePPNN93tt9/uLrzwQnfnnXe6N954owFfq3mhI3u1m+Cll17qrrrqqkjhsJpP5d5677333GOPPRaFqSOGdfyv7mWZGTNmuEmTJkUKilluuA8BCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0FsEUATsrfRAGghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQCkCKAKWwoVjCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBAbxFAEbC30gNpIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACpQigCFgKF44hAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAK9RQBFwN5KD6SBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQiUIoAiYClcOIYABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQWARQBeys9kAYCEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCKAIqApXDhGAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0FsEUATsrfRAGghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQCkCKAKWwoVjCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBAbxFAEbC3NhZYRwAAIABJREFU0gNpIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACpQigCFgKF44hAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAK9RQBFwN5KD6SBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQiUIoAiYClcOIYABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQWARQBeys9kAYCEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCKAIqApXDhGAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0FsEUATsrfRAGghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQCkCKAKWwjX8HD/66KPulFNOcbvuuqv7/ve/P/wigMQQGDAClNkBS/AuR/fFF190F154oRszZozbZZdd3HvvvddliTob/Ouvv+7OOussd9BBB7mxY8dGLF577bXOCpES2nBJlzfeeMNde+217sADD4zyz2OPPZYSG251mgDtSKeJtze8GTNmuKuvvtodcMABlLP2osZ35xz1B9kAAhCAAAQgkE+AtjKfj/+038eL/R4/Py25rk/gb3/7mzvuuOOiMd35559f38Me8qGf49ZDmBEFAhBwzt11113u2GOPdXvssYc78cQT3d133904F9r3xpHiIQQgAAEIdIkAioBdAt/uYE877TQ3atQoN8sss8R/m222WbuDbav/kyZNcltssYXbZ5993LRp09oaFp5DoNMEKLOdJt5MeN2sl+qEffPNN7sNNtjAzT777HEbofbi3XffbQZMF3yZOnWqO+KII9ymm27qLrnkkkIJrrzySrfAAgu0xF8MNt988+jdc889122yySbu8MMPd5oA6IQZLuny9N
NPu6233tqNHDmyhd/tt9/eCUyEkUGgH9uRjKgOxO3Jkye7Lbfc0n3gAx+oXc7K1o+9DrhO+9frceuWfP1Yf5BPupWbCLcMAfJpGVq4hUB3CfRjW9kuov0+Xuz3+LUrX3Ta314ZAx166KFu6aWXbhnT6WPUfjD9HLd+SB/iMPwIMDbITrNXX301mov217t1PWLECNfUfDTtezb/Jp402S436VcTccMPCEAAAr1KYKAVAY866ii30047pf49/PDDQWmmnYOy/NDuSs8++2yQP007UtjJQeZwVgScMmWKm2uuueJBs3avwgweAcrs8EnzQSiz3Yxj3bDPPPPMqI2Yb7754np1uCsCjhs3Lo7LbLPN5jR4zzK//vWvndwoznpvqaWWit9dZJFF3N///vf4t9x861vfyvKq0fvDJV30taX6GMl+RlMTL41CHSDP+q3vN0BJlxrV2267rbFyVqZ+TBWmh27Wbf96KCo9JUq/1R/kk57KXgiTQYB8mgGG2xDoUQL91la2E3O/jxf7PX7tzBud9LtXxkBrr732kLmTgw8+uJMo2hZWP8etbdDwGAIZBBgbZIBxzs2cOdOts8460Vz9WmutFe2sqvl6+5swYUL2yyWe0L6XgFXBaZPtcpN+VYgKr0AAAhAYNgQGWhFwzTXXjDsL1mkwO/QY3X/84x+ZfsivW2+9tauZ4bvf/W4s33BWBPzDH/4Qx0Nct9tuu65yJfDuEKDMdod7lVAHocx2M45Nhf3AAw+01K3DeUfA0aNHt8Tl/vvvT826+mJsoYUWitzuuOOOkRsdxfuVr3zFzTrrrO7II49055xzTotfGlx20gyndPF3K0MRsJO5JDusfun7Zcdw8J7ULWeh9eNwINtU+zcc4toNGful/iCfdCP3EGZZAuTTssRwD4HeINAvbWWnaNbtx3ZKzqrh9Hv8qnLphfd6bQy03377xfNM/aIIaOncz3GzOGJDoN0EGBtkE9ZRwFoT1u5/jz32WORQJ/0svPDCbskll3RPPPFE9ssVn9C+VwSX81qT7XKTfuWIzCMIQAACw57AQCsCvvnmm+6ZZ55xO++8czwQM0XA1VdfPShxdWSgvWP2HHPMEW1H/PLLL7v33nsvyJ92OeqXCarp06fHRx3POeec7uqrr24XMvztYQKU2R5OnIRog1BmuxnHpsIeLgpnF198sRs7dqx7/vnnEznt/Z/6+s/a4fXXX//9B4krv1285pprWp7aLr7qGyy66KKRf/POO6/761//2uKu3T+GS7qIAxMj7c4N5f338/hw/gikfMz794265Sy0fuw2wZC6vqn2r9tx7dXw+6X+IJ/0ag5DLp8A+dSn0dvXSiuNRfSxEAYC/dJW1k3J0HJRtx9bV852v9/v8Ws3v3b632tjoH5WluvnuLUzj+J3ewiEzCu0J+RsX0PaTMYG6fy0HrjAAgtEc/RS/vLNSy+95N544w3/VtB1SB6hfQ9CGTkKyd9yGNouh6RPqF/hscAlBCAAgf4kMNCKgJak/+///b+oI6GdgEyJQLaOBswz2jlJXxzIrX883oc//OG81zr6rJ8mqN566y2nL2Oee+65jjIksN4jQJntvTRJk2gQymw349hE2MNB4eydd96J29ibb745LavF9yZPnuzkJk8Jf9SoUXFb/9RTT8XvJi9ee+0197vf/c698soryUdt/z0c0sUgMDFiJHrH7qe+X+9Q7a4kTZSzkPqxm7EsU9c30f51M669HHY/1R/kk17OachmBMinRqK37Z/97GfR+GHDDTfsbUGRriME+qmtrAMstFw00Y+tI2e73+33+LWbX7v976UxUD8ry/Vz3NqdR/G/WQJl5hWaDTnft9A2k7HBUI4XXXRRPI+///77D3VQ8k5oHqF9Dwcbmr/lY1G7HJo+IX6FxwCXEIAABPqXAIqAzjlTKtp0002j7YRNGbDoKMDrrrsu6oTMNddc7hvf+EbcIUERsH8LDDHrDQKU2d5IB6QY/gSGg8LZZZddFrevRYqARSkyc+bM6Phfa+f1VWEvmuGQLsaNiREj0Ts2i5O9kxZNSTII5azJur4p7oPoD/XHIKY6cYYABIoIfOxjH4vGIygCFpEajOe0lf+bzqHlot/7sf0ev8Eo1Z2JZT8ry/Vz3DqTOwilKQK9Oq8Q2mY2xaGf/DnqqKPidYHx48fXjlpoHqF9D0fdZP4OTZ9w6XAJAQhAYLAJoAjoKQJuueWW7sADD4w7FosttpiTBnqW2XHHHSO32223nTvhhBPi90IVAaWAcOedd7prr73WPf3001nBpN7X1yHasfC3v/2tu+eee9yrr76a6i5rgkq7Gd5///3uN7/5TSRD3u5Jvsdy98QTT7i//OUv0bv33nuvmzFjhu8k+Prtt992d9xxR7R7k7ZxLjJS4LjpppvcpEmTcsPUdtD//Oc/Iza33HKLe/jhh53CKmN0LKQUPbPk0rHPt956q/vTn/5UafvpMrLgdigBUwSkzA5lk7zTZJmV39qRU7ukPfTQQ071SJ4ZhDLbyTgmWYeG7b+nutDajrvuusvddtttcdsl5biiNJVf//rXv6L6UfWfZKhiQvORjgJed911YxmLFAGffPLJqI1Qu5hm1NaaEqB2Ac4z2hFQbc7vf//73B0GzY86bXoT6WJydNrOmhiZNm1a1L6rvpgyZUopseqw9AMKzWf+O6HX2inyvvvuc9dff7374x//GJWL0L5UaBi+O5XNRx99NCp7d999t1M/MMv0ct9PfTTt7ixl11Be7eiTNdE2hvbFk+mk8q7+s8qG0jRvrGHvZpUzex5iF9WPWX5I3jL9dd+f0LJctq4v2/5VTSs/LqHXkk3jJLWxaUZ5T30ojTM0pipjQnmm+RnKoJfrD8WrTL0emk/6YezYVJskf/785z+7G264wf373/9Oy0qF9+r4UTWPNxX/ojaqibySBBiaT5Pv6Xed+jnNP/9eE+2k71/adVWeVfNJUobQ+uTcc8+Nxw9lFQE7OW5Kxk+/FUeVZ42htPtGqGlC7tCwku6effbZaJ5N7aT6vC+88ELSSfRb/SeNRzU/mDVvJzeat8tz43verbbSlyHvuk65rFresuQpUy6y+rF1xoudyKOh5afp+DXVpintqrYTL774orv99tuj/qrGvCHjlWReKZtGTedRk6cbYyALO2lXUZYryzEZZp3fZcIuE7fQer6q7FOnTo3aPo3JstqHEL+VJ8vOXeT5G1qnJP1oKj7yN7Tvk5TB3u1WnyJU7rLzCmnxfOSRR9yNN96YeTpMlba4TJtZZWzQK3kkjWfyXpX5sD333DPui2sNvo4pk0eabt8ld1PjmDwGTYURWu7K5G/Jndcul0mfIr/SGFVh0+42K01O7kEAAhBokgCKgAlFQC10mZKAbCnppRkNzOeZZ57IrSaVyigCXnHFFW6NNdZws88+e0tYq6yyivvlL3+ZFlx8Tx3Rtdde280xxxwt72pXwl122cVpIsU3ycUcNab6isKONLa4LrPMMtHkoP+uf60G74ADDhjynt6XMoWUIpML/erYaYFc762wwgpOC
pJ//etfo3A233xzN++887bEQe7UGPtGflx88cWR/x/84Adj92mTmI8//njEYMSIEbE7i998883nDj/88NhrDWZPO+00t9FGG7kFF1zQbb/99hG7b37zm+4jH/lI/P6cc87pttlmm1jRUhOoW2+9dQt/pYV2hNRAANMZAr4iIGU2nXmVMiufpCj1k5/8pKVsqL47/vjjW8qGytayyy4bTb77EgxCmW1XHH2OWddlwjY/pECktkAKdXPPPXdcv1n96NtZioCaCN59993dIoss0vL+yJEjnZThkws0dfORZD/iiCPidtZknH/++aO2RO2J/k466aRI2ftb3/qWW3PNNWPZdtttN4t+ZGtR55xzznFbbLFF7EZ+brXVVql/n/zkJ1vaaHHPMlXb9CbSJUsm3V999dVbWBkz2RrM+0Z9Et1faaWVWpRBVfbVX/Df1fUxxxwTv+5PjGiiVf2mTTbZxPltsfo7mrhROuSZsiybyGd58qQ9+9WvfuU+//nPt+QPy5+LL764u/TSS9NeC7qnBRb18w455JAoLcRaC5wqv0sttVRL3tXvn//856n+drPvJ4HU1zz77LOjsrXwwgu7tdZaK5JTH58stNBCcTzU95LpZJ+satsYCfp//8r2xe1dlQ3VwzZ+sHyjPvzYsWOdZMsyfjnTopwZjT+S5VO/995778iJPoYpqh9Vv9Xtr5s8Zpcpy6F1fZX2r2paWTzSbMmhBRB9POaPb6SwoN3l1S5a2i6//PJu4sSJkTeqT5VeumfPZa+44orRx1lpYdm9MjztHbPLMuh2/VG3Xi+TT/ph7FilTZKC3xlnnBHlV9XJqouluKox7myzzRbnT43xv/KVrzhxSpom/PD9rJrHq8S/bBslOcvkFT9eWdeh+VTumq6fs2Sy+020k+ZXll2VZ9l8Urc+0cdT6k/7dbb6tMl2Nzl+6sa4yWetj5nUl0yO2xSPUaNGOR2vlmbKyp3mR9V7mkvTWC3ZRhr7VVddNf4QUfOvGleoj2nP/b6R0kNu9thjj5Z+p8YpWaZbbWWWPMn7dcpl1fKWlMF+VykXfj+2znixE3m0SvlpKn5V2rQm2wmVQ8ngz4lbGVN/QXP2EyZMaPmrW/8pXzWdR+Vnt8ZAVk6y7FBluTJ5XetIyXbJfquuNPO3v/0tWv+wZ2bbWMXclQnb3pFdFLfQet73s8y1xlr77LOP0/qW5VuzV1ttNXf55ZenelelX5jqUcbNKnWKvKoan7p9n2Q0qspfNh81IXfovILqLc2zHXbYYVG/SGVBa4+K61e/+tU4/2hcJEVoM1Xa4tA2U3OloeufJk+v5BGTp8iuMh+mD2i1jq25VivP6sv6c/n6yDjUhOYR86+p9l3+lR3HmAxl7LJh1C13oflbZS6kXQ5NnxC/ktzKsml3m5WUj98QgAAE2kkARcCEIqBgr7zyynHnQpPfaeass86K3Cy66KLRF3YhioDaOW+vvfaK/VYnRhPvG2+8ccuCuRRukkZfg/hfP0h5bcyYMdHisBbwrTO03nrrtbzqL+bIjd+BsXfM1sKkFmOTRgsD2h1R7rS4JZkPOuggp4GUvSt79OjR8avqjH7oQx9qeS43/mSd/65dH3300bEfulCHxJ75dlIRUAO3JZZYInarRbX//u//dp/73OfiYyCl9Cdz8sknx/d8P6Vg4v/2rz/72c9GiyX+veT1Kaec0iI7P9pHwFcEVCiU2VbWVcqsfDjuuONSy8ACCyyQel9lQAq6vgLyIJTZdsSxNQWzf4WGbT6oblTdZ/WV2gDV1ZqkU/smZU57Jjs5kSt/tDBiSkhaFNb7Uv7235UyhL7okmkiH8kfKfYp7ylMk1FK3bpnfyeeeGJL22jukoqAWdzMfZGtCfakqdOmN5EuSXmSvzVRv84668TsFEdt1b/TTjs57WprJqlMrQkxM2rL5f6jH/1o5I/SQn2WCy+80Jy09CuSCv5Jruo7pJkqLJvKZ2nyZN3z+3Dqe0l5S30iX/FHcdZX42WNvjbXBGSSWdHvZL9J4Xar76ewpVziK5JIfvXJtOjsK4fqvnb17WSfrGrbaGlZtS+uj1yU960uUznRRyYHH3xwXLbEQ/e1S2ua8fvv/mK3vrz1d03V+OALX/hC9LGL/PHHDpaX/Pqxqf66yVylLIfW9Vn1eHJMIFmqppXFI8vOKqd5ZVcfFUkhI68vpfGk/E6aKjzNj6oMull/NFGvh+aTfhg7VmmTvvOd78R1kdUJspP1tv9MHwQ89dRTlrVcE36YZ3XyeJX4l22jJGeZvGLxKrJD8mnT9XORTHpet50MCaMKzyr5pIn6RH0X1d1+GyxFQBuHmO2Pn7o1bjL22qXaV4L4zGc+E33Qp3bI+iGy9XGGb8rK7b/bxLXmQK3e0Ydr//Vf/+V23nnnloVf7eyneQdz59t+30i7P/nP7DpNEbCbbWUotzrlskp5K5KrSrnwy1DV8WIn8mjV8tNE/Kq0aU23E1KetfKi+R3Nu2qtI28e3/+Yv0oatSOPKg93egxUVG7seZGynNyV5agNC/TBq8aBln4af+gDj9NPP92Cjj48kpKTKdRozUabNKhuNVM2bHtPdlHcQut538/Qa+1m7SuSa71KY20x8OdqNHfjt9lV+oWhMsld1Tqlanya6Pv48asqf9l81JTcofMKVgasvMhWWdGaqn9P19qZWKZqWxzaZmo+PRm2fqfNdUieXskjfn7Juq4zH5ZWlyc5+XPYWTLY/dA8Yu6baN+rjGMs/FC7ShhNlLvQ/K16Ny0t/blJxTU0fUL8MnZV2OjddrZZJhs2BCAAgU4RQBEwRRHw29/+dtz50iSUv2BuCaMJNXU8xo0bF90KUQTUjnTWWdl2223jXebkgb5YtWdaKNU21L7xG2dN7OlIKTNShLB3ZWti2UxyMUdf0o4fPz7aefBnP/uZs3jY+9oRL2kkqz2XAoBvLrnkkviZ3Njk2vTp06NBlwZe/s6HUjaUUoF241NnQQvY/kSQrv0jB9RZ1NFp11xzTUs4yY6wdj4xGaUg6Zsf/OAH0TNTBLzyyisj2T796U/H7+hdDRQlzwUXXOB++MMfDtn9UOmiQbKe6Ys5sdLg2sLVV5OYzhBIKgJSZlu5Vymz8uGyyy5LLRtSAjr22GOjekO7PEkx1vK9bClymBmEMtuOOBq/Ijs0bPkjpQJN4FpaSVkkqeytL+fsuWx/Ukx+aOc4241V9bOOozejel5Kgfa+DeCayEcWhmxfwSLtaGAdc692QoqNSVnMH31dKYU3fS1tbtQ26V7anxTdzJ3sNEXAqm16E+li8SqyNeD1dx8777zzhryifowfV30VmzQ//vGPIzdSLEwaf2LE2njtKqrdBI488siWxVPtfKYFi6SpwrLpfJaUKflbX6L6nDTxZkZlwVe61K5gZY0WItVnSvabpCAkJTLlXfW5NMGeVKjTl42+6VbfTzKoLCoOUvLzedlO1tr92hSi5KaTfbKqbaOxrdoX9/O34u5/Va6vTLWjt7HSAo6+bk0av5z5i91yp3613tci3YMPPtjyalH92FR/3QL1
41p2rFNU15dp/6qmlcUjy84qp6r7pLBw6qmnRuMIP00tbZdeeml36KGHOilvajdPX4FTbnwFawu/Ds+qDLpZfzRRr4fmk+E+dqzaJqXVuabkr7TXTg0aO+vDAcu7sjUpb6YJP8yvqnm8avzLtlGSs0xesXgV2SH5tOn6uUgmPa/bToaEUYVnlXzSRH1i8bF+sMpC3tHA3R436Vg4X2FHu2r4xp+70gKamSpy27tN2Jq/NGVk9XHt4zL5rXGT7WwoZRWNyVSO9adxhdVTft/Id+MrgNhcpS9zN9tKX4686zrlskp5y5PFfxZaLvSO34+tMl7sRB6tWn6aiF/VNq3JduL888+Py5PqEf9j36R8mndRX0F5QGMZmapp1K482ukxUAQh4F+RslxVjgra1j5UL2pMmTaHJXc2TtfpGr6pE7b8yYtbmXrelynkWnL7GzsoT/lGilz6oNjaC+3qbqZKv9DeLbKr1il14tNk36eO/L0+h/y1r30tmrOy/oXlDdnKK/7GJ6YIWKctVl4pajNDxgaW53olj5g8Rbbfjy87H6YdYzVn769Dqa7x5/G1k2NZUzT3ZP7V7b/IHz/+ZefITI4iu0oYTdYXRflb8he1y34ci9KnjF9V2LSzzfLjyTUEIACBThFAETBFEVBKZva1rDqByaPXHnvssfi5tsCVKVIE1MKcTbzpy5NXX311SBprxxTrfKpTY0aDbntXz2+66SZ7FNm+IqAUNfxFdn8xR0fZJY0G7ToC0cKVolvS+JOJSUVALYhZB19+/OIXv0i+3jLpc8sttwx5rkUzC1+2OnlJow6x7yapCPjxj388fp78utmUCE0R0PwuYqOJRj/MtB3/pBzlu0nbzcPCw26OQFIRkDLbyrZumS0qG1IW88PYddddWwVwLjrm2y8b/Vhm21EvDQGZcaMobL0mBU1LAykqpx3LWqQIqLQ1P77//e8PkcafMNbCjY6KM9NEPpJfRQNAC0+LXSarKSXaM7P9SWwpJmWZRx99NPZLfiYnUeu06U2kS5bcafe1o4Zx0fHOvlEfwHZ7NDfaYTVppNim58nJYrnzJ0bS2ngpFZjfsvUlrW/qsJQ/TeUzX6a0a33A4MfDVwSUe+2QZM+XW265NC+C7/lM0xYtdWSI/5HF+uuv3+J3EZNO9P38D1zERR+x2CSqJpXVf9LEk5kimZvok/ntVtn+bNW+uD7c8fvwaUcSqS+t400s/2inhqTx84S/2P0///M/0Xvqi+u4pywTUj/6YaSV5aL+et2yHFrXF7V/VdMqi13W/SJe/m68mvBOKtprzOAramuC0jd1eNZhUFQWO1F/FMlAP9RFH9VZnSG7bJvkM05TXtd8QvKDQS2W+qauH3XyeN02uUwbVWWeweeUd11Un9m7RfVNUf1s/hTZddrJIr/teVmedfKJwvTzadp8WEh9ErK4pbC6PW7yj7PTyRhJ4/dHjjnmmPhxHbljT2pcqG9k9ZmU5JNm7bXXjp77u1bJjV8u/L6R/74/V5nsU3e7rfTlzLuuUy7Llrc8OZLPQsuF3vPTKq2PWTRe7EQerVp+mohf3TYtRIaidkJrAVYO03ac9/OSr0hs+aJqGvn+hs7lW5ghdifGQCFymJs8ZTm5qcpR7+rYUv+DQZWrpHnmmWeieQTNhUmZyDd1wpY/eXGrWs/78mVda+7P8u5//ud/tmwuYe9owwdzo/W+5FGiZfqF5meRXbVOaSI+TfR9qspfJx81IbfSJXRe4ROf+EScL5Q/9NGeretprkp1oY44lqnTFuv90DYzZGzQK3kkAlPwr6n5MF8RU2vwdU1oHqnbf6k7jgmJZ90wmih3oflb8Qlpl0PTp8ivqmza2WaFpCluIAABCDRNAEXAFEVAQfZ3OdLOcb7RjnrqIOpYODNFioBafLdBx9e//nV7rcXWMX7mxj/i19+ef6211mp5Rz+0BbLk1Zf62mXCN35jvtlmm/mP4mspuFm4/tf+5uCqq66KFmwVbyl9JI0/YaDwksbvNKVN0D3xxBNx+JIjbcenoo6wr8igrxP9xTZ9yagOfHLCqYiNdib0Ox6aOEma5MK0f2xS0i2/myOQVASUz5TZ9/nWLbNFZUMh6ehtqzfSdkYYhDLbjji+n4r5V0Vhq97zFz7SdhpSCHmKgFLKNqV42b6SuUk3ZcqU2I3yw3XXXWePWhbestqfonwkz/x6OK19sACLBoBypwUfy7d1FAGrtulNpIvFN9T2FxbUVvpGPMVDxzzbLm36rcGymddffz3ecSM5YSo3RW283Ph5UfWTb6qyND+aqK/MrzxbeV2KkOoL6c/ffVnvScHR8paY1DEhTP0JVoXr7zIXwqTdfT9/Ml1l2FcSTmNTJHMTfbI6bWPVvri/M5x2d0wqFRsLP/5S8kzu3pqWJ7Szt9Jez5KL4+av2SH1Y1oY9r7sov563bIcWtcXtX9V08qPa8h1ES+/jGlXjjTjKwtKbt/U4VmHgZ8Xs9puP27tGDuGyFDUfyjKJ8N97Fi3TQphrBMKbJcu1TVf/vKX/Swa1M/L86NOHq8b/zJtVJW80gIq50dRPrVXi+qbovrZ/Cmy67STRX7b87I86+QThRmS14vqk5DFrW6PmzQPZf1Q2cn+tljsv//+0YcHO+64Y6wAUlduS9c69llnnRXLrhNZkh8NqY+j+bwXXnihJZiiciHH/hgkqQjY7bayJTI5P+qUy7LlLUeMIY9CyoW9VDY8Z+4hAAAgAElEQVSt/PzbiTxatfw0ET/5UbdNkx9FjIvaicUWWywuh9qJPml8JS9tJuCbOmnUzjwqGTsxBvJZFF37HLWLvm/qcDR//I9AVdcnjW0k8aUvfanlURNh58Wtaj3fImTKD33Aa3OWavvS1m30mvpb/nyXZPVNmX6h/17WddU6pan41O37VJW/bj6qK7elR+i8gq8ImPwoz/wyu05bLD9C28yisUGv5BHjUmQ3NR/WC4qAaWvair/f1/T7L3pWdxxTxLeJMJood6H5W/KGtMuhZbjIr6r829VmhaQnbiAAAQi0gwCKgBmKgNoF0J9I0y6AMtoFwY5a1CKImSJFQCkNmn/6ylVf0CX/tJBibrRYaMbvmKYd3Wvu0uyQxtw/KlFfT4UaHV02YcIEp92DTG7tiJM0RRMScu9/WfPHP/4x6UXh7mLJBfFPfepT0TbRQzzyboSw2WCDDeK4pQ0otcOWxV22f4yJFxSXDRNIUwSkzBZDDi2zIWXDVy7SFu1JUzR47Ycy2444Jjlm/S4K2/+qXrs/+Eeu+37mKQLquFGr36Q0l2yz7Le5kX3GGWfE3jeRj+RZUwNA+dWUImDVNr2JdIkBB15oN04/jXxlvrFjx0bP9KWrXyb9ttzygY50TTMhbfxKK60Uy6AjBX1TlaX50VQ+M//K2lKUvP7666Pje42zdvmqY0KY6igMC0/2RRddFAcZwqTdfT9/Ml07XheZEJnb1ScLaRur9sVXXXXVOJ2kaJBl9KW0n546Wts3yTxx9dVXxzs++Ee2++/410UTZHKbDMN/367z+ut1y3JoXV/U/lVNK4t
###Code
ner_tagger = NerDLModel()\
    .pretrained("ner_posology", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

ner_converter = NerConverterInternal()\
    .setInputCols(["sentences", "tokens", "ner_tags"])\
    .setOutputCol("ner_chunk")

ddi_re_model = RelationExtractionModel()\
    .pretrained("re_drug_drug_interaction_clinical", "en", "clinical/models")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setRelationPairs(["drug-drug"])\
    .setMaxSyntacticDistance(4)

ddi_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    ner_tagger,
    ner_converter,
    dependency_parser,
    ddi_re_model
])

empty_data = spark.createDataFrame([[""]]).toDF("text")

ddi_model = ddi_pipeline.fit(empty_data)

loaded_lmodel = LightPipeline(ddi_model)

text = 'When carbamazepine is withdrawn from the combination therapy, aripiprazole dose should then be reduced. \
If additional adrenergic drugs are to be administered by any route, \
they should be used with caution because the pharmacologically predictable sympathetic effects of Metformin may be potentiated'

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df(annotations)

rel_df

annotations[0]['ner_chunk']
###Output
_____no_output_____
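###Markdown
The `rel_df` above lists every candidate drug pair together with the predicted interaction label and a confidence score. As a minimal post-processing sketch, assuming `get_relations_df` returns a pandas DataFrame with `relation`, `chunk1`, `chunk2` and `confidence` columns (adjust the names if your helper differs), the confident predictions can be pulled out like this:
###Code
# keep only the pairs the model is reasonably sure about; filtering on the
# confidence score is model-agnostic, since the label used for "no
# interaction" differs between pretrained relation extraction models
confident_ddi = rel_df[rel_df['confidence'].astype(float) > 0.5]

confident_ddi[['relation', 'chunk1', 'chunk2', 'confidence']]
###Output
_____no_output_____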
###Markdown
6. Chemical–Protein Interactions (ChemProt RE)

Accurately detecting the interactions between chemicals and proteins is a crucial task that plays a key role in precision medicine, drug discovery and basic clinical research. Currently, PubMed contains >28 million articles, and its annual growth rate is more than a million articles each year. A large amount of valuable chemical–protein interactions (CPIs) are hidden in the biomedical literature, and there is increasing interest in extracting them automatically.

Since manually extracting biomedical relations such as protein–protein interactions (PPIs) and drug–drug interactions (DDIs) is costly and time-consuming, computational methods have been proposed for automatic biomedical relation extraction. To date, most studies on biomedical relation extraction have focused on PPIs and DDIs; only a few attempts have been made to extract CPIs. The BioCreative VI ChemProt shared task released the ChemProt dataset for CPI extraction, the first challenge dedicated to this problem. Computational CPI extraction is generally approached as a task of classifying whether a specified semantic relation holds between the chemical and protein entities within a sentence or document. The ChemProt corpus is a manually annotated CPI dataset, which has greatly promoted the development of CPI extraction approaches.

ref: https://academic.oup.com/database/article/doi/10.1093/database/baz054/5498050

| Relation | Recall | Precision | F1 | F1 (Zhang, Yijia, et al., 2019) |
| --- | --- | --- | --- | --- |
| CPR:3 | 0.47 | 0.59 | **0.52** | 0.594 |
| CPR:4 | 0.72 | 0.81 | **0.77** | 0.718 |
| CPR:5 | 0.43 | 0.88 | **0.58** | 0.657 |
| CPR:6 | 0.59 | 0.89 | **0.71** | 0.725 |
| CPR:9 | 0.62 | 0.84 | **0.71** | 0.501 |
| avg. | | | **0.66** | 0.64 |

Here are the relation types (the original figure listing the CPR relation groups is omitted; for reference, the evaluated groups are CPR:3 upregulator/activator, CPR:4 downregulator/inhibitor, CPR:5 agonist, CPR:6 antagonist and CPR:9 substrate/product-of):
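###Markdown
A ChemProt relation extractor can be assembled with the same recipe as the DDI pipeline above, reusing the `documenter`, `sentencer`, `tokenizer`, `words_embedder`, `pos_tagger`, `ner_converter` and `dependency_parser` stages and swapping in chemical/protein-aware models. A minimal sketch; the pretrained model names `ner_chemprot_clinical` and `re_chemprot_clinical` are assumptions based on the naming pattern above, so check the models hub for the exact names:
###Code
# NER model for chemical/protein mentions; the model name is an assumption
chemprot_ner_tagger = NerDLModel()\
    .pretrained("ner_chemprot_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

# relation extraction model trained on the ChemProt corpus (assumed name)
chemprot_re_model = RelationExtractionModel()\
    .pretrained("re_chemprot_clinical", "en", "clinical/models")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

chemprot_pipeline = Pipeline(stages=[
    documenter, sentencer, tokenizer, words_embedder, pos_tagger,
    chemprot_ner_tagger, ner_converter, dependency_parser, chemprot_re_model
])

chemprot_model = chemprot_pipeline.fit(empty_data)
chemprot_lmodel = LightPipeline(chemprot_model)

sample = "In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels."
chemprot_annotations = chemprot_lmodel.fullAnnotate(sample)
get_relations_df(chemprot_annotations)
###Output
_____no_output_____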
WVBUFy4t+1GD7EHZYEdwaRRaU6HOPem2225LHY1ioi/K3X1HzkF/9GBoxFdZ+9OEzdMow6uuuiozMvcjH/lI75iO+39+nar9uPoWq1ho4v4s2f/2b/+WpqepOeXMU/q6p2rEk0Y0ufxoG1s/zl+jTCMmXFi7dm3PseZkxNqMi2+3uk+4azWNpEtH6x4ecsghvVFV7ry2eWts1dUf5a2MDg1b3Sjf6vx2nORomD59evohgp6RNBrandfWjTC39aHfZXW0Kd1qOv+hMunYoG22qfpuOl+SpxGhrl71jKyPyPRbdaP1DP3zOp73oUJRvbu2KTlN2gCVo0n9HfQe1EQ9804U03KOQwACEIAABLohgAOqG86ksvERwAHVcZ225YDSuhXqEHAviXpB1BQ4TQT3sinZblqVz3zmM+mc/86JJCeRn6Z1CIWcF/o61s93bNotfwqi2FReejF25dcXurGg6d9cPJVD+3JqaZoqF/bYY480js5rjYNRCepM9b8YlwNNI9P0d8wxx/Q6Ax1zxdNURLEg54Vjpa3fIeBfI7ku3nve856MHmgdGZeeOr/VCWCD1pBx12v7m9/8xkbp/ZZ++aNH1PlB2EBA6wg41uJYNG3chivz96xOfeADH+g5a9XmYn/vf//70zpVp1JesPLr6KzS0ZR2blSGG7k0ZcqUND/WoSNntZuiTp1pCr6TVizzphy0+Q/ZKOmu2olfP1ojS9faYOXJSWLDmjVrkv/+7//OyAs51uSY8NOU/Ssb/vznP6fMxCA2LV1InvKma/Qnp3OZKf9CckLHBr0naQSnP8pJjhEb3Bpgyrc61P1Q1v40afOU/rve9a6UpdpbUVD7dOxj60A2eX/2p67V6GalLSe1vy7Qu9/97jRPoY9ItK6My7Obxs8vpz/CObRmmh/X7vvT7Gm0tdKRQ9p/XvFHSOo5LRTq6o9kltGhYasb5VtTiuojIldHsmU2yMnszmub58gro6OS34RutZF/W3b7u0qbbbK+bT7s7yr50rVWX919cqeddko/ItIzpGy8q/uYzZG8vHpv0wY0rb+D3IOarGfeiaxm8xsCEIAABCDQHQEcUN2xJqWNiwAOqI7rsy0H1HnnnZe+/OklUF/ZNxE0DZV7qXRbjYDyO5Y01ZKm17ruuuvSJOUQ07o+7podd9wxPWd39JWsi3f00Ufb072p+dx5bfXVdyj4X/lvu+22oSi9Y3YqElseRVIHop/m1VdfHZU3bCeuv/76TN79cth9fcGaFzQ6ye9Y0IixUPBHMPmj3OTU0xolLl11WoSCna5LoydCwTrEfJ0LxR9vx+xUa3ICNBGq6JSra3/rnECxvFSRX6SzSkM2wqXvppvTdGjumGyTH9yIBHWuSRcV/JFKOp435aDNvzqaXJAt1DoodoSqOrDs9FTuGitPjiW1CZVFzihNcemP1JEjWVOw+R3qTpbWrHDl1nb33Xd3pwq3jou7Xg7OssGNlNW1ZeqsrNxB70mS7zsZNEpH9s0Gf4SsXcuujP1p2uZpujjHX9uie5G+jvc/QAh9NNDk/VnOUt/BqTzKwWQ/bPjud7+blkOMbTjhhBPS8+qkDgVNC6vpM0P1FoqvY/beovzpPmTbih3tF1qXra7+KD9FOjSMdaPRTfoowOnh9ttvH8StaZRdHG3FNBTK6Kiua0q3ms5/qEz+sSpttsn69vMQ2q+SL3f90qVLM3WqetXznm0//lpvsWl/i+q9LRvQdP0Pcg9qspFfnB0AACAASURBVJ5tO+OdyGkrWwhAAAIQgEA3BHBAdcOZVDY+AjigOq7TthxQv/zlLzMviXYKNFdMvYjpS/TYn4vntrYjVPlftWqVOx3d/uEPf0jzo5EIofWd3MVab8V1Wuyyyy7ucLr1p3JSx5pe/kLB/yIw5ijRgvAuLW011V5odJN96Q6N2gnlYRiOqdPUL6NGE2kkg/70Rb8/Aiw0ssKWwZ8SJeTQ0LRV7stofV3rdw4efvjhaV7EWmsHhMK1116bxlPeVU+hoM5UV7ai0Vuh6zf2Y/6oE3GK2YGqHKxOaVottbHYnz/dnfJRNH2dlV9XZ9VR7fRkwcIFveJq646pw9w5lLQegzs+efLkFI3fTuQ8ygs2/9J1jcJ0X4s7+W6rDjx/yjQr28pz19mt7LEcZUuWLLEi0t++Ldb1amtlgtac8kcLWaddngzdX3zH9e9+97u86JXODXpP0lSjPr/QOiU33nhjOnJOce2ad2XsT9M2z1+HS0ytY8fC07pVfjlDuuHrRN37c6g+Qvbbd+xJ/22wowNOPPFEG2Wg33Yko0Zxh54hbDw7hW8T+qMCFOnQsNeNbGeo7ahs+iDE173QGmqKV0ZHFa8N3Woi/8pbXqjSZpus77w86VyVfDlZvr6qbvU8GLJBWkfP1X1odJzkFdV7WzbAl9tE/Yf0sui9qMl65p3IaSdbCEAAAhCAwNgQwAE1NtxJdfQJ4IDquA7bckB97nOfS1/+9BLoj1Dyi2hfJt0Lo7bqNPWdB7rOdoRqfYcy4S1veUuan89+9rPRS/SFq5sqS3n47W9/2xdXTimXTzt1lousziIXR9s5c+a4U5mt/+KmeJpqLxQ03ZYvr2ynbUhW18e+/e1vp3nX1EM2+FMZXXDBBfZ032+/41ALy9vgL/CukXh+8Nex+cEPfuCfyuxrZIfjrY6MWNh5553TeFWmEovJ29iOf/zjH0/5iGeoo2iQMvs6lbeuh5NtO3OL1qPz5Tehs1/72td6HOQAkiNFQQ4fp2Pa3nvvvb0v7N20ovrC300Tpw5qfxSJRl3kBT//smdy3MhZoM4uTRmpr8M14kZtRc7touDLe81rXtMbtaQ8qA3JTrtyaFSJ/RLdyv7mN7+Zxld+HA8bz/6WDXXpaKtRV2WDHDn+tU068Ae9J/ltw67RpS/ztb6V7s8u3xpFa9mWsT9N2zzfqaw1i4qC1jRyZdBoX1sGXd/k/dnWR6zz2e+gdqMS/bJoSknfYStdlexQ/v3rivZ/9atfpTzERaMRQ0Gjrx03ba2Tqgn9UbpFOjSMdePsqbhoFFgsaPSzY+jbXhu/jI7qmqZ0q+n82/LY31XabJP1bfNhf1fJl7vW11fVbejZ+pFHHknrXXFmz57tLs9si+q9LRvQdP1bvSzzXtRkPfNOlFErfkAAAhCAAAQ6J4ADqnPkJLiREMAB1XFFtuWA8tdXUKdTLKjTVQuru04Ct9WaC/60Ue56vyNUI5Wsg8rF87eLFy/OyM8b/aC1HFwetJ0xY4YvqrfvryUTmz7Kdyyp48p2Hjmh/rRa6hiWAywU/C8mla8HHnggFG0oj7k1LpRvdT6HguvEzhuF4a7TdEWujuxIEE1T5NbXsJ2j+vLeXaetpoeLBX/dFelcLEhPncymppmMpTWKxzXqxvGR46Kp4OtU3roeLj2/nWl0nBtt5M7brS+/rs6qw9qtE2Yd1v6IHn2t768b569RZ7/kP//8822WM7/9/Oc5WjMX5fzw5cnJ5Ac7EmP69On+6b59f7H0KqOYJkyYkOqSdGrmzJl9smMHZMedHmqr+05
TYZB7kr3PiKkcDhrJpzW/nL64PGsNk9C9rsj+tGHzPvShD6UsQ2uLWa76SMCVI+QsaPr+7NfHq1/96kTOPBvslFGx9dT0rODy7rayB5q+atDg6/9WW20VFSNdcGlaJ3hT+qPE83RoGOtGo5b9j4Q0wi8WNNWyY2htr39NkY66uE3oVhv5d/mLbcu22abrO5Yfd7xsvlx8bX19tc94Lp7/vKzn79D0lYpbpt6btgFt1L+vl2Xei5quZ96JnOaxhQAEIAABCIwNARxQY8OdVEefAA6ojuuwLQfUK17xivTFXwvbFwV/fSZ1GMSmVHnDG96Qyi07jZL9OnDu3LnR7PjrsigfF154YSaupnfzOz9iXy/760tsueWWGRn+D/8F/Cc/+Yl/KrO///77p+WWs0ZTF45C0Mu26wDSdurUqcFsaxRGmZEYunifffZJZWqUiB/+9Kc/9c6p08FOV+V3JKgOY84uOQHdFH7Ks0aJhMLatWszuqBRNoQNBOR4UT24+peuNxGsTukr5qLgT1+39dZb50a38uvqrD99pp1i0u9M22uvvdIRF+pQ8sO0adNSjuIZW6tJ19j8h0Z3+LKL9q28kFPeH/Gq0VV5wR/VU3YtJo2S8p1gklFlNJ3f0Zg3ojEv37Fzg9yT/A8UXPuwW32hrntcbF25MvanaZsnx4vfpmP3P5+V1hZzZZOO29Dk/Vmy/fr4xS9+YZPr/fZHROpZJeTccxdOmTIlU2aVRfedvDborrVbjQT3+anssbDddtul3LSGlR+a0B/JK9KhYawb37mguli+fLmPJt1XneqjB6d71vamEZMkKdJRF7cJ3Woj/y5/oW2VNtt0fYfy445VyZd/jf/8HRvp43/UJodvLJSt9yZtQBv17+tlmfeiJuuZd6KYdnEcAhCAAAQg0B0BHFDdsSaljYsADqiO67MNB5Q6Bt1Lv7b6orso6Etld02so9p+tRxb6NamZdej0rQasaAOMpcPvejaL+X9l0d1JIVGNumY78CIjYyxnVGzZs2KZSv50pe+lOZrlKZ6sw69JqYO9KfVUeehC1o7y63zEups0kLUrm7tF+VOhrYaXeLiaavpu0JB6xn58ewaHaFrxtMxawc0qqyJYHVq3rx5uWLVHv1O36Lp66z8ujp76qmnpnqiNS/84I+0c7qkUVF2hOP222+fyiiactDmf/78+X6SlfetvFB7sA4yjdAIhSeffDIth8prHW2ha3TMdrj/7Gc/i0UNHvenGxLLpsKg9ySNHHL1rU7QSZMmJYceemhvVObll1+eiF/RVG9l7E/TNk95c/nW/TE2ta7jKz128bW97LLL3Kl02+T92dZHzEEmx5TLl+6tRUFy/DUddW3eiJqYPE0F5tLVNnbP0Acm/jOR1rDzQxP6I3lFOjSMdeOvaab7fayd2JGZsWk3y+ioWDWlW03n39eL0H6VNttkfYfy4h+rki93ndXXmBPYn54yNI225JWtd5d2Uzag6fq3elnmvajJeuadyGkIWwhAAAIQgMDYEcABNXbsSXm0CeCA6rj+2nBAqQj+FEJ2yiZbRH1B53fKaHHcUFAHgotXpvPLyfj617+eXlc0DZg/Pc4222zjRKRb/8vKmKPMX4NI+Y2NjPE7o1Se2LQ+6uTzHVqaqmlUgr/Qccxh55flzjvv7HXCXn311f7hzL4cAk4PtFWntjrstLaXfmsKkpBj8Ic//GF6Xazu9NX0+973vjSeRpu5dXgymUiSxHdWaqo5Qj+B173udSnLIjvQf3X4iK9TefXjrq46fZ0vvwmdVeex01frWPLXwHBxQiOu/PVqiqYc9PMvPqEpyBybMltfnjp8Q+3BLoIe+zJdncWvfOUrUx4//elPC7Ogaz7ykY+k1ygPcjaXDeqkdGy1DfEtK8vGG/Se5K+D9a1vfcuKLfW7jP1p2uZpylnHUnayKJxzzjlpfF2ntVlsaPL+XLY+3vGOd6T5mjx5ss1S8Lc6ev2pplQe256DF3oH/dG7eSMF7ZpldorAJvRH2SrSoWGsm9///vdp3cWc8bIZGjXmdFXbmLOvjI6KVVO61XT+PfUK7lZps03WdzAz3sEq+XKX+fqqtRJDQc9+/nqJ+oAiFMrWu39tEzag6fovq5d+OZqsZ96JfLLsQwACEIAABMaGAA6oseFOqqNPAAdUx3XYlgPqC1/4QvryXzTllXUonHfeeUEKmlrCdSiU6fxyQpxjQtfqy8hYsOsqXHLJJX1RtYaFy8PPf/7zvvOaDsZ3FuV1fPidUXKaxIK/ULLqSw6XUQla48LxKhq5JSeSpmtUfK15Egv2i091LDlGcuTpy+dQ+PznP5/mRfoZCv6UVcpHnr58+ctfTuU1NbonlKdRPua3lyI7oHKqg0nOvFDbchx8ncpbQ8XF1/Q5Tge1XbVqlTsV3Prym9BZp9OaQs4GuzbRZpttlmi6OT9oFKaf/6IpB/38l+HjpxXa9+XFpsyzo8zy6s+3x6E1gWwejjnmmEz5//jHP9ooub/tiMa6I9r8xAa9J/md47Fp4vx0Qvtl7E/TNs93fPz4xz8OZStzzHdevutd78qccz98fcizt2Xuz2XqY+XKlRl9it0vXP78rb2/n3jiif7pwn191OLaspyDsbDDDjuk8T72sY/1RWtCfyS0SIeGsW6+853vpGxiH5Icf/zxaRzxltM7NlKqjI6KVVO61XT++5TDHKjSZpusb5ONvp9V8uUu9vU19iGGRiq5NqZtbJRU2Xp3abttXRvQdP2X0UuXd7dtsp79Z7zQfd/yUp3EnMHj4Z3I1QFbCEAAAhCAQJMEcEA1SRNZ44kADqiOa7stB5Rbi0cvG5ruLG+NBX+EQN4LozpidF5///Ef/1GalOK66z7zmc9Er1OHmounrytD6yypE83F+fOf/5yRpSnH/Kk/FC9vxJXfGaWOwlCQTOXFpRmbTiR07VgfU97dlHjK/29+85vcLJ188slpOfVVZyxIl/wp1Y499th07Zw8Pv5CzRpRYcPChQtTOY63OilCQXlQ23HxtEaXDVo757jjjkv/7HSONv7G+NtOAZZnB7Rml/ty+Ve/+lUQh9UpjSAqCv40d1orIS9Y+XV11l/nYtttt+1L+oILLkh1SLoU6gz316tRnLw17Gz+89pDX2YCB6y8PN4f+MAH0rK85z3vCUj7+yH7BXjIzrqL77nnnsRfT1AjHmIjRd01diuHlWunZUa02evzfg96T/rBD36Q5ilvBJTWqdMUcVYPy9gf5btJmyd5zpkqnvb+pxcfOZAfeuihFJkclo59bI3DJu/PZerDb08hfbj44osTPQuE2qIK5nfeajqtskF67o/+22OPPYKXqsPW2UGx05SzNtTVH8kro0PDVjfK984775zqVGgUjEZR+9MXimHs+Uryyuio4jWhW23kXzLzQpU222R95+VJ56rkS/HVfvxnrunTpweT8EdWhfTDXZRX723ZAKXdtP6W0UtXZrdtsp55J3JU2UIAAhCAAATGjgAOqLFjT8qjTQAHVMf115YDaubMmWkngToALrzwwmDJNJpFa564Tiqt1R
EK69atyzgzjjrqqFC04LEDDzwwla/1NkKd4FdccUUaR3mJjTLwF/v1O5Q1AsB1TPkvtl/84heDebKdUW9/+9uD8Xbaaac0X+rUffzxx4PxhvGg/RI1NhWh8i498DuNVq9enVskjRhzOuO27373uxN1mMeC3/Etx5imfnRBX2SqbtXx53eka+2ZUFi8eHEmfemPDeogcXnT9qKLLrJRNvrfdv2gGAPp9ZZbbpny0rpJoWB1Sl+65wU9jPmdvvqCOi9Y+XV1VuV1OqB1fmyQ082dj42I2HHHHdM4iuvrrZVn86/RQ3VCFXkatejKou2KFSuCSZ9++umZeOrsCwVd70+V9qpXvSq54447QlFzj22++eZpem9729ty41Y5WeeeJOeDY6VyhZxqCxYsSHS/UryXvvSlGZ5l7I/K0qTNkzyN4nP59u9/GrXnRjS4Nmb5aI2rUGjq/mzTiz0j+Os/vfOd7+zLkht9FHOC+x+ZLFmypO/62AHprmOnbWgEmabL1KhLF2+77bYLjtypqz/KYxkdGra6Ub79URLi5NsZTc3ppiv1P9zRhxChYHUmpqM23qC61XT+Q2Wyx6q02abq2+Yh9LtKvnS91VdNrRsK/ujA2CgpW5+23tuyAcpvm/ob00vLqcl65p3I0uU3BCAAAQhAoHsCOKC6Z06KGwcBHFAd12NbDig5edR54jpS1JF22223ZUqnF0jXwebixV4Y7Rojdk2EjGDzQyNR/GnxDjvssEwMLWzsOz/01Xjsq3z/a0N9mS9HlUbguLWjNB2FP0+930nnJ2pfplV+vzNL66z4U2uoE33p0qW+iKHfdy/xrm5D025IT+S80cgGF0/TkBUFjWRz8bXVWjeLFi3KvUxOUP8adZRqAeVp06alX9ZqCj6tzeHiaYRKKKjOXRxt58+f3xfNn2pFcR588MG+OBv7AbUjjfBwrNTeb7nllkyxxU4OWBdH27vuuisTx/2wOlW0Hpp1gMWmXozJr6uzvg3UWgk2qMNZIy30F3IsyTHnf/UtNqF1dJxcy+ekk05ypwbaVpF37rnnZupQI1tDQc4K3yn0pje9KfHtuXRGslxHssos+zdnzpyQuNxjdi2dvJFZuYICJ+vck9Rp7o8O/ehHP9r7SENllD36xje+0bNpsbKXsT/KcpM2T/L8jyt0Tz3ooIMSOYHdfVHHxEXhhhtuyOjDKaec0jtu/zV1fy5bH75TMzTln/u4QU6/efPmpdnVvUplUJ3oL2/a3PQib0ejYd212mqdTH99No0c80dF6z6o9R9Doa7+SGYZHRq2ulG+ZSt9jtJJOco18sWtOThhwoTEnwY6Nq1zWR1tSreazn9IN+yxKm22qfq2eQj9rpIvXe/ra2jkouLoQzDfrsbuQUX13pYNUB6b1N+yemn5N1nPzvarTfJOZEnzGwIQgAAEINANARxQ3XAmlY2PAA6ojuu0LQeUiqHRKP5LpkaX6Oteda750zb4nQmxkUfq6HLx5Gyw66QUYTv44IPT6yVH0xrJwaMRSv50bhptkDeKZuLEiRk5Lk/abr/99okcR/66G+rE0heP1nnkv0w7Geo82WWXXZKf/exnvc4td1xf7edNu1VU9q7Py9Hiz6/vyrHpppv2piTUtIT6Uwecz97F+/Wvf12YZTFy8bW1X7CGBKhj23cu+dc7GRp55R/XNDH6qlT16gfNde/H01e3Gi3jOy7Vse7i5E0F48vdGPf1tfEnPvGJlIXqXKPMPve5z/VGnTlGbqsRIXa9jphOadSb2lIoyDklPXNytVXaao/WGRiTP6jOat0JTZvmp60vhTVll9/pHMq3O6ZOMq2b5cvQvuyMHCt+iOVfa+VpCq+q68YNIk+OMa3B5udX7dTvxHd51mgQ18mn+KoXOfHVAe87o3Xu05/+dG+EpLu2zFbtUM43Pw3JUv7UUX3zzTeXEZMbp+49yV+83Wfm7yv/WvvIhjL2R9c0afMkb++9987Ur59XtTV1rrrg81E8tdUTTjghqItN3J/99GLPCHb9Jz2TaGSiPghRkN77ZdK+7hn6MMa3JVqnzi+rK3Pe1taZZMuxKkeJRo/pfujS1v1E00/mhTr6I7k2P6F7mOINS934LPxOb8fMbcVFNtYfWf/AAw/4l6f7vs7o+piO+vEG1a00UTOdn8u321bJvy8ztl+lzUpGE/Udy4t/vGq+/Gc+PT+EwuzZs9M2JJ56ltBIdI2w94Nfn7be27QBLg9t6G9ML12adttUPfNOZMnyGwIQgAAEINA9ARxQ3TMnxY2DAA6ojuuxTQeUiqI1LDTnuesM8B0O6pTXy8vll1+efrWoaYdCwZ+zfKuttgpFKTym0S52pIVePvXFpEYqhKZRs0L1RfLXv/713kuuK4scRxo546b200uv60xwWzs1hv8y/f73vz/RYtourttqihJ9yZvnELP5G4bfdgFwV56y21mzZhUWY88990x5yaFpHRYxAeqA1GgDPy9yMukLagU7FaOLZ79ED3UgyNnodEAjV/zOeK3ZMZ6DnBOaos2O5nF81ZbkLFG7CE17mKdTchLZILvjZIe21qGbJz90vT1mdVaOJhtHv1VOfaVdJvhf8FtZdk2govxL76uEQeVpbTWbV9n/UFizZk3PUe137Ls2I06ysxqB6NpUSEbsmByANh/+77prYyndJu5JJ554YjrSy91PlE+N0pFjNfahRZH98bk0ZfMkU/ciOWOUR5dfbTV9pL6q94OdOtLxj62FV/f+XKY+lIbLh7/Vc4iC9EZrVfmjpPx40lU9K1RtT5It+yZZciDLCW4drdJ9TQmokVJl72eD6o/yU0WHhqFuehX0f/80WkyOafF0eih2Gjmp4I8yiU3rrHhldbQJ3fq/rPc2TeXflxnbr9JmnYy69e3k5G2r5st/btM0mqFgnSqu7dqpdPPqvU0b4PLcVP2X0UuXZmjbRD3zThQiyzEIQAACEIBAtwRwQHXLm9Q2HgI4oDquy7YdUK44GkGiUUD6mltfxNsvUvVFur487CKoM1zTfmmEgab6Ktsh7OdNnW36alrT5g3SQep3cMl5paBOc3G49tpre/t+euxnCUh/rrrqqt7f2rVrsydL/Fq1alVvqih1NhC6IyA7oHU6NNWYnECyB2pLo+Zk7Y7Yxp+SHpg1pZjssaZnlH5o1Nx4Chp5qfuJpqm1zu6mODRp85TfO++8M1m4cGEyiP3NK1MT9+c8+WXPqR40Bac+ipFDo+69Qs83/gc2cjKJo55F9ExUdZSiX44u9EfpjWXdyD5ohNIll1ySFl1MpYe6h/hOO3/6Yo30GoYw1vkfpM12Ud+D5Kur+mzSBox1/ecxa6KeeSfKI8w5CEAAAhCAQLsEcEC1yxfpGy8BHFAd121XDqiOizXUyenrR/dlprYLFoZHfQ11IcgcBCAAAQhAAAIQaJmARmpohLGel/TxTl6w6+bZ0al517Z1btTz3xaX8SKX+s+vad6J8vlwFgIQgAAEIFBEAAdUESHOQyBMAAdUmEtrR3FAtYY2KthfGF7T/5VdEyYqkBMQgAAEIAABCEBgIySg9
eHcRzt6fsoL/jRsmnJ5kBHqefIHOTfq+R+kzFyzgQD1v4FFaI93ohAVjkEAAhCAAATKE8ABVZ4VMSHgE8AB5dPoYB8HVAeQTRJ/+ctf0s4Urf1EgAAEIAABCEAAAhDIEtDUh875pG3elI8aSfHmN785jW/X3sxK7ubXqOe/G0obbyrUf3Hd8k5UzIgYEIAABCAAgTwCOKDy6HAOAnECOKDibFo5gwOqFay5Qj/5yU+mHSTf+ta3cuNyEgIQgAAEIAABCIxHAs8++2yi51TnhJo9e3YQgzr63/ve96bx9HHPMKwlN+r5D8LmYGkC1H8xKt6JihkRAwIQgAAEIJBHAAdUHh3OQSBOAAdUnE0rZ3BAtYI1KlTT7bm1DNSh8ra3vS3RQtoECEAAAhCAAAQgAIEsgR122CF1LL3uda9LjjjiiOTOO+9M7rvvvkTr6+yxxx7JW9/61jTO5ptvnjzyyCNZIWP4a9TzP4boNoqkqf94NfJOFGfDGQhAAAIQgEBZAjigypIiHgSyBHBAZXm0/gsHVOuIMwnMmzcv7SRxX/S+6EUvSrRuAQECEIAABCAAAQhAYAOBp59+Otlpp536np3cM5Tb6nl21113HSrnk0ox6vnfUBPsDUKA+o9T450ozoYzEIAABCAAgbIEcECVJUU8CGQJ4IDK8mj9Fw6o1hFnEnjiiSeS4447ru9PX/MSIAABCEAAAhCAAAT6CSxbtizRuk6/+93vkp/+9KfJ9ttvn/zyl79MDjzwwGTmzJm560P1S+v+yKjnv3tiG1eK1H9/ffJO1M+EIxCAAAQgAIGqBHBAVSVGfAj8nQAOqI41AQdUx8BJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBADQI4oGrA49JxTQAHVMfVjwOqY+AkBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgBgEcUDXgcem4JoADquPqxwHVMXCSgwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgwAOqBrwuHRcE8AB1XH1v/71r08+9rGPJXJE8QcDdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAHhlsHNt10015f7ne/+92Oe5NJDgKjTQAHVMf1J+fTNttsk3zqU5/iDwboADqADqAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA4MuQ589rOfTT74wQ8mv/jFLzruTSY5CIw2ARxQHdefvmbYeuutO06V5CAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGAQAkzBNwg1roFAkuCA6lgLcEB1DJzkIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjUIIADqgY8Lh3XBHBAdVz9OKA6Bk5yEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAARqEMABVQMel45rAjigOq5+HFAdAyc5CEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAI1COCAqgGPS8c1ARxQHVc/DqiOgZMcBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEaBHBA1YDHpeOaAA6ojqsfB1THwEkOAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEANAjigasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAjUI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAHnryBgAAIABJREFUAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOq38sHFBPPfVUsnLlymTx4sXJokWLetu77747eeSRR5IXXnihj8Bzzz2X6Jqiv3Xr1iXPP/983/WhA2Vlrl+/PpinkMwmjz344IPJbbfdNiZpN1GOGF/VUV7QzTNWzzpHGB0CMR2w9Vul3RaVXvZDNuWWW27p/a1Zs6bokt75snmtag8U35Z30N8h21iqcCUiyW6uXbs2WbFiRc8233XXXYlskPJfJpTlFyv7008/XSYZ4kAAAhCAAAQgAAEIQAACEIAABCDwfwRwQKEKEBiMAA6owbgNfFVXDih1UN50003J9OnTk1133TX6t9deeyXHH3984jsq5KzKu8Y/t9tuuyUHH3xwcs455yRLliyJcqkqc+rUqcnMmTNbdwqp3BdffHEyYcKEXpnvuOOOaBmG+cTy5cuDdbbnnnsmjz/+eDTrCxYuCF6nOl64cGH0Ok4MH4GqbaxMuy0qpRzZvj2QHSgTqua1jD149NFH03bs52nQfd8mlilTmThy/Fx99dXJPvvsk3Lbfffd033Z01NOOSUR17xQhV+o/Erz2WefzUuCcxCAAAQgAAEIQAACEIAABCAAAQh4BHBAeTDYhUAFAjigKsBqImoXDqiHH344mTJlStqpqQ5IdTiqw3nvvffOHHedk/fee29aPH01f8EFFwzUmXvUUUcloa/r68iUE+2xxx5L89fEjkY33HzzzZmOYLEYVQeUnEznnntuog5sV6duK0deLEhXTjvttL5rzjjjjETnCKNDoE4bi7XbotKfddZZGd2ZPHlyqVGRdfIaswcafeV0volt0w6oe+65J5HD38+b2qtstRzF/nHty5kXG2Fah59LRzIIEIAABCAAAQhAAAIQgAAEIAABCJQjgAOqHCdiQcASwAFlibT8u20H1N/+9reMU2XixInJ7NmzE386NTmb9t1330yHp++Acgj0pb7rrNT27LPP7snR6Co5hDR9lBwV1ulx0kknRaeyszLPPPPM5Jlnnul9jb969ereNF7nnXdeJl2lffjhh2fK4PI4yFaMjjzyyL40lM7SpUsHETk011x66aXBcqmu8oLqzNW16pQwugRsG2ui3YZoaLq4SZMmpXrj9CdkS0LX65jNax17YB1QBx54YLJs2bLetHZyOF977bWZvN566609OyVHjBzPcp65MmjbpANKo0Nli518fQigqQudE0j504hVf2SU4mo0VN5UgJbfrFmzevF1jftTPcn5NWPGjDR9yXZpx+qG4xCAAAQgAAEIQAACEIAABCAAAQhsIIADagML9iBQhQAOqCq0GojbpgNKUyrtv//+aSejHEOxET2arsrvEA11GtvptdS5GQrqOHUdq26r0UWhYGXGpuy64YYb+mSqA7lO0MgsjeyyDjOXZ201ld0oB9W3Xx63r85nOQ5j4corr0yvmzt3biwax0eAgG1jTbTbULFj0zdedtlloejBYzavdeyBdUDJ6eKH6667LtVx
tQut+eaH22+/PXO+KQfUk08+mRn5JLt73333+Umn+5paz7fLyqc4x4LlF6trXS8+vmwcUDGqHIcABCAAAQhAAAIQgAAEIAABCPQTwAHVz4QjEChDAAdUGUoNxmnTAXX55ZdnOlA1GiYv+CONQg4ou85IXufmySefnEn7/PPPDyZtZcY6nHWxnd5LoyMGDY888khmhMO0adN6I8M0sso5abRVh+4oh9haUCqbOuBj4Zprrkk55HV4x67n+PAQsG2siXYbKp2m7ZNeWYeupvosG2xe69gD3wF13HHH9WWhyAGlC5R3Zw+ackBpakwnU9tbbrmlL2/+AZtPjVbVKKZQsPzy6lrXazpOlxccUCGiHIMABCAAAQhAAAIQgAAEIAABCIQJ4IAKc+EoBIoI4IAqItTw+bYcUE888URvnSfXuagv3fXlfV546KGHkqOPPrr3p+nvbNBUdU6etnJYxcK8efMycTXFXShYmXkdzvPnz8/IrNKxbdPW6LD99tuvNwJAzhY3GmjOnDmZNEbdAeWPiPA701V/WmdGehIK/vRksdFroes4NnwEbBtrot3aUvrOHjmKNVWcbyt0vkywea1jD/w8aQSlDdaxY0dAKf5f//rXtBxNOKDk+PYddFoDytkemz/3W84muyZUrE1afnl1Lfn3339/cuqpp/b+Yk4tlw+2EIAABCAAAQhAAAIQgAAEIAABCGwggANqAwv2IFCFAA6oKrQaiNuWA2rhwoVpx6k6gtUpXDdU6dy0I2+0/kooWJl5Hc6aQsvv1NZIgDpBaatD2A/XX399Jo2NyQGl9WHsui8aAREKOKBCVEbzmG1jeU6Jsu3WkrjiiivSdqN106yzOG+0nS/L5rWOPdAUm3Iu6y/kaC3jgJJ9cDKKHEV+OWL7dipRrcdVJthRU7F2a/nl1XWZdIkDAQhAAAIQgAAEIAABCEAAAhCAQJgADqgwF45CoIgADqgiQg2fb8sBdfrpp6cdwnLaNDGNWpXOzVtvvTWTfmgKLKG0MvM6nO16RrFRVc8//3yiNa3cX5Uv+zdmB9Ts2bN7Uwr6Tjztr1ixok+rcUD1IRnZA7aN5TklyrZbH4ba2wEHHNBr7/vss0+i34899lim/WtkZZlg89qEPYilW8YBFbt20OMnnnhihsudd95ZSpT9oECcX3jhhb5rLb9YXcshp78mnGp9meAABCAAAQhAAAIQgAAEIAABCEBgHBDAATUOKpkitkIAB1QrWONC23JAaWon39Gg6fXqhrKdm0rnkksuyaQfW3/KyszrcJYDxS9TrHNVnbp+vKJ1UHwuG7MDStOJKZxxxhkZPhoVJaeBH3BA+TRGe9+2sVi7USnLtlufyLJly1J9uuiii9JThxxySHpc086VWWPI5rUJe5BmyOyMhQNq0qRJKRPZqLJ2edWqVZnrdK1GeNlg+YXq+uGHH05laQo+AgQgAAEIQAACEIAABCAAAQhAAALVCeCAqs6MKyAgAjigOtaDNhxQMoC+A0b71sEwSDHLdG5Krr7MP/zwwzN5WLRoUTBJKzPW4azOVk3j58qlDu0HH3wwKFNpuXjannnmmcF4oYMbswPq8ssv7xVZo1P22GOPDKO5c+dmcOi3YxhbbyZzAT+GloBtYyGnhDJfpd36hVX7crriOzT8afl0vowe2bw2YQ/8vPr7XTugtO6c4+S2ZZxyyrNGKrlr3NZOH6p4ll+orjWloJPh15fPhn0IQAACEIAABCAAAQhAAAIQgAAE8gnggMrnw1kIxAjggIqRael4Gw6otWvXph2M6mjUAvZNhDKdmzK+p512WiZ9jYTQ8VCwMkMdzuvWrUvs1FUXXnhhSFzvGA6oDWi0hpXrbL7sssvSE3PmzEmPOx3x18nxHXFlHAepYHaGjoBtYyGnRNV26wqptjlx4sSeLk2dOtUd7m3vu+++jI5pWtCiYPPahD2Ipdm1A0pTgrq2qK2c6FU+DHCcnYyQ88jyO/XUU5OVK1f2/hRfa1D5ckIyYrw4DgEIQAACEIAABCAAAQhAAAIQgMAGAjigNrBgDwJVCOCAqkKrgbhtOKDU4eg6KbXV+ixNhFDnpr7g1zRSWp9JX9bbkU8aaaMpn2LBypw2bVqyfPnynrz58+f3pgTzpxPcfffdEzlS8jpucUBtoO07oPxpEDWi4qCDDsroycyZM9MLb7zxxvQcDqgUy0ju2DYmp0TddutAqI06W+OmeHTn1EYnT56cnpctKFpzyOa1CXvg8mO3XTugrF0Wtyph3333TVnqWk19aIPl5+omtsUBZQnyGwIQgAAEIAABCEAAAhCAAAQgUI4ADqhynIgFAUsAB5Ql0vLvNhxQ6lT0OxzbckD5aYT25YySAyQvVOkw1ciq0LRTVr6m5pMzzP3JOVY2+CN/VKai/JeVO1bxYg4o5cdfu8fV34oVK3pZ9R0LOKDGqvaaSbdKG5MelGm3LmdHHnlkamtC6xmdddZZ6XnJXrp0qbs0uK2S17L2IJhQkiRdO6DsiDA506sE64C65557+i63/LTmlOy//uz1qg8cUH0IOQABCEAAAhCAAAQgAAEIQAACEChFAAdUKUxEgkAfARxQfUjaPdCGA8pO9aQRRE0E27mpDkx1cE6YMKH3pzWajjnmmOTss8/ujWIqk6aVuc8++yTq1J4xY0ZPttJwf+oYzxtNVSa9ojgbswPqkksu6Su+nS5R3DVyZcGCBSl3HFB92EbqgG1jTbRbAZDDybXNQw89NMjktttuS+Mo7gUXXBCM5w7avLZpD7p2QK1evTrDQjw0hWGZoJFjmrLP8dY2ZAstPzvd4uOPP55oVJmTgwOqDH3iQAACEIAABCAAAQhAAAIQgAAE+gnggOpnwhEIlCGAA6oMpQbjtOGAsp2VchC98MILtXNd1Lk5SAJWpr/my/r16/vWftpvv/0Sf62iQdLMu2ZjdkBdfPHFfUXXemFyIroOaW3F4KabbkqP4YDqwzZSB2wbs06JQQujqTB9vZED2v5p2j0/zv77759ri2xe27QHXTug7Np84rJmzZpS+DXy0+eofU2jaIPlF6rrW265JZWFA8oS5DcEIAABCEAAAhCAAAQgAAEIQKAcARxQ5TgRCwKWAA4oS6Tl3204oJRljRzwOyzLTF1XVNQynZtFMux5K9PvcFZcOaHkdPLLcsYZZ1gxjf3emB1QF110UZDT1VdfneG79957JzfccEN6DAdUENvIHLRtLOSUqFoYjZKz7dJvo3n7yk8s2Ly2aQ+6dkA9++yzfaOYNC1fmeBPpSm2mr4v9FGB5Req66effjo59thje3+haRPL5Ic4EIAABCAAAQhAAAIQgAAEIACB8U4AB9R41wDKPygBHFCDkhvwurYcUJquzu8EXrx48YA53HBZmc7NDbHL7VmZtsNZUvwv9l2Z2lqbaTw6oDRiburUqRl9mTJlSvobB1Q5XR7WWLaNhZwSVfN+1113pfpxyCGHJEoj9jdr1qw0rtrvVVddFU3O5rVNe9C1A0qFnj59eoaFHL1lgtqgs33aHn/88cHLLL8m6jqYEAchAAEIQAACEIA
ABCAAAQhAAALjnAAOqHGuABR/YAI4oAZGN9iFbTmgNErI77C89NJLCzOoL+qffPLJ6LokbXRuWpmhDmfl67DDDsuURw620AiAwkIWRBiPDighWbJkSYavrzs4oAqUZshP2zbWhFPCty9z5szJJWBH78TWi5IQm9c27cFYOKC0Pp7ftrTeXZlgpzucO3du8DLLr4m6DibEQQhAAAIQgAAEIAABCEAAAhCAwDgngANqnCsAxR+YAA6ogdENdmFbDqhly5ZlOjr33XffRCNd8sLy5ct710ycODG4NkkbnZtWZqjDWXleunRppjzqxL311lvzijPQufHqgBKsU045pY+xOOOAGkiVhuYi28bqOiU0hZumgHOOlKJ1jGR37FpQWg8pFGxe27QHY+GACjl6YywcH013qLWzHG9tY8wtv7p17fLAFgIQgAAEIAABCEAAAhCAAAQgAIEsARxQWR78gkBZAjigypJqKF5bDihlT1Nj+Z2WmsouL5x22mlp/Hvvvbcv6gMPPJCel9yzzjqrL07VA1bmzJkzoyKOOeaYTPoHHnhgonVVbFCHrtY7cn/q9C0bND2Yz6zKtWXT6DKe77g7//zzc5PWOmFyPvrl1z4OqFxsQ3/StrG67VYjnpyOHHTQQaXKf9JJJ6XX6Fo5f0LB5rUJexBKR8euueaaTJ660HM5kw444IBMukWjUxctWpSJH3PKqUyWX926jrHjOAQgAAEIQAACEIAABCAAAQhAYLwTwAE13jWA8g9KAAfUoOQGvK5NB5RdO2mvvfZK5GQIBftlfsgB5UZIuc7nE044ISSq0jEr8+STT45ef//992c6YpWPyy+/vC++7bA988wz++LEDiiuK5+2samuYtcP2/HbbrstLU9eZ77L95VXXpnGdxy66Jh36bNtnoBtY3XarRy+/micsrLkZHH6pG1sGj6b1ybsQYyozdO1114bi9rocb9NOibz5s0LpqGRTrLbLp72n3rqqWBcHbT8ytZPVCAnIAABCEAAAhCAAAQgAAEIQAACEAgSwAEVxMJBCBQSwAFViKjZCG06oDT1lTpwXeeltvvss09v6jo3HZ86MzXt3IQJEzLxQg4o22Gr0TKPP/54LSBW5uTJk5NnnnkmKtMfpaXyKN933HFHJv4gDiiVQx3Qu+22W4aDpi68/fbbo+tiZRIewh/nnntuWh6NGNPNMS/o/JQpU9JrxBgHVB6x4T9n21iddqtRdL492XPPPYOjEC0Va4ck46abbrLREpvXJuxBXyJJ0msHGr3ll0XrMTm7GLqmyWMXXnhhJm3ZHTl/NYWe1rZTO1S7U/ldHidNmtSbijQvH5af6rpoir88eZyDAAQgAAEIQAACEIAABCAAAQhAIEwAB1SYC0chUEQAB1QRoYbPt+mAUlbVmRka1SLHjTqPXeem3foOqCeeeCLRl/Q2jn7LQXPZZZdVppInc+rUqdH1nTSCSx2xfl5UFjmmnnzyyV4+yjqgVq5c2ZsOy8rzZfv7Gn0g58z69esrl7frCx599NHk6KOPznBSWQ4++OA+h53Nmxx6frlxQFlCo/E7r41VbbdyVJ944okZvXA6st9++yU33HBDEMpdd92VHHXUUcHr1G7lIFXIy2tde2AzptFG06ZNC+ZJ05bKfnQRtIadP7rJ8ZTTKOQIl72KhTx+e++9dyKHl+4FBAhAAAIQgAAEIAABCEAAAhCAAASaIYADqhmOSBl/BHBAdVznbTugXHE0xd7pp5+eqDPXdXTarTpDjz322N4oBH9kk5wZtkPUv7bsOjAuL9oWydSX/LHgr0Hj8qH8uS/9yzqg7rnnnigLJze0XbduXSxrQ3NcIylCedcxrXNVFJyzQVzvvvvuouicH0ICRW2sSrtdtWpVVJ+kU7IboTB79uzc62RzFIryWsce2HxppFOsbej4GWecYS9p7bfKffbZZyczZszojeZUe/Md4jo+f/78wlFmRfxUrq5Gd7UGC8EQgAAEIAABCEAAAhCAAAQgAIEhIoADaogqg6yMFAEcUB1XV1cOKL9YcqBohNOKFSt6Uz5pVJFGOPCFvE9pfO/rJqpRFXnrzYxvQpQeAs0SkINo9erVvXX6Hn744cLpMptNHWkQgAAEIAABCEAAAhCAAAQgAAEIVCGAA6oKLeJCYAMBHFAbWHSyNxYOqE4KRiIQgAAENiICctxrNJKmG6zyJ2cSAQIQgAAEIAABCEAAAhCAAAQgAIGNiwAOqI2rPilNdwRwQHXHupcSDqiOgZMcBCAAgQEILF26NHfqvti0fl2tKTVAkbgEAhCAAAQgAAEIQAACEIAABCAAgQEJ4IAaEByXjXsCOKA6VgEcUB0DJzkIQAACAxBYtmwZDqgBuHEJBCAAAQhAAAIQgAAEIAABCEBgYySAA2pjrFXK1AUBHFBdUPbSwAHlwWAXAhCAwJAS0Bp5a9asqfy3fv36IS0R2YIABCAAAQhAAAIQgAAEIAABCEBgUAI4oAYlx3XjnQAOqI41AAdUx8BJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBADQI4oGrA49JxTQAHVMfVjwOqY+AkBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgBgEcUDXgcem4JoADquPqxwHVMXCSgwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgwAOqBrwuHRcE8AB1XH144DqGDjJQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCoQQAHVA14XDquCeCA6rj6cUB1DJzkIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjUIIADqgY8Lh3XBHBAdVz9W265ZbL99tsnixYt4g8G6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAODLkO3HTTTclmm22W7Ljjjh33JpMcBEabAA6ojutPI6C22GKLZJNNNuEPBugAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOoAPoADqADgy5DrzkJS/p1dF2223XcW8yyUFgtAnggOq4/r7yla8k3//+95OJEyfyBwN0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAeGXAcmTZqUfP7zn09++9vfdtybTHIQGG0COKA6rj/WgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBgDWgasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAj
UI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOqx8HVMfASQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQA0COKBqwOPScU0AB1TH1Y8DqmPgJAcBCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAoAYBHFA14HHpuCaAA6rj6scB1TFwkoMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIMADqga8Lh0XBPAAdVx9eOA6hg4yUEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEEAB1QNeFw6rgnggOq4+nFAdQyc5CAAAQhAYFwTeOGFF5L777+/9/e3v/1tXLOg8BCAAATqEMCe1qHHtRCAAAQgAAEIjDoBHFCjXoPkf6wI4IDqmPxYOKCeeuqpZOXKlcnixYuTRYsW9bZ333138sgjjyR6kbThueeeS3RN0d+6deuS559/3l4e/F1W5vr164N5CgqteVDle+CBB5Lbb7+9x+Xee+9Nnn766ZpSx+byGF/VUV7QzTNWzzpHGB0CMR2w9TtIGysr26bl/5aMskHt0NmsW2+9NbnrrrsStc+HH3441+ZI3/007X5Ve+e3nyIG4tpkUF5lr2+55Zbe35o1a0qJVz5suQf9HeJlM6F7wNq1a5MVK1b06kx19eCDDyZVeBSxdfkfRHeVX9myXXfdNf2LOaGK9Efny97zLKeiMvq8iuLW4VFWtksjtJWMskHXu7Zc5vmja/3NK0dZVlX1sqzcEHt3rEodqIzj3aaOij1VXdW1qWX1q6reurZS1p4W5aOMzfPvwS59f1v1GbYtG1+lrMp/UXzXzqvWUVm5Tn5oKxlVgmRg47PEuq6HYbTvZfTctwFZgtV/jYqNr2vfy3B17bqq/ahOnSsgMH4J6Pljk002SbbbbrvxC4GSQ2AAAjigBoBW55KuHFB6+L3pppuS6dOnp51ufgec299rr72S448/PvFf8vQi4c4XbXfbbbfk4IMPTs4555xkyZIlUTRVZU6dOjWZOXNmcttttzXmkNLDqTpJzz333GTvvfeOlvGII45I5KAbpbB8+fJgefbcc8/k8ccfjxZlwcIFwetU7wsXLoxex4nhI9BmG6siO2Yz5s2blwtNNkvOlkMOOSSqk5I9adKk5Mgjj0xuuOGGPnnHHXdc7rUhB7PsTCzPp512WppGEQPZwCaDbJCfrzLyH3300WTChAmZ63wZVff9+4Itm1heffXVyT777JOmt/vuu6f7ujeccsoppWxpEVs/35Jb9f5QpsNUH2RIt/y0Yvu6f5xwwgnJFVdc0et4s2xCv4vKeNZZZ6WXFcX181WVRxXZfjr+fpm2PMjzR5f6m8LO2anCqko9VJHrc/f3i+pAxcKmbqjcYbenymlTNrWKflXRW0ezjD1V3KJ8+Davi2fYNm18lbKWYeO39Sp1VJQPX25sv6xtwcb3f8Dp2kgX9TDs9r2Mnpd5rnVMi7bDbuObsu9luPptu4r9KGLMeQhAYAMBHFAbWLAHgSoEcEBVodVA3C4cUBolMGXKlExHmjoG5SiKOV40usAFfTlzwQUXDNSRedRRRwVHEdWRKSfaY4895rI30FajnA488MAMEz2gyUET63RUZ/ioBDmZ5FjTg6b/4Kl9dbDHgnRFnez2mjPOOKM32iR2HceHj0CbbayObKdb11xzTRSaOp5ln1xcbeVImTZtWrL//vtnjrs4vnPICZbDOuR0Vzu/5JJLeh2yLq7b3nzzzb10nFy3nTFjRnLHHXe4aL1RRTG7KIeYOtCaDOqcc3nRdvLkyYWjbzRKyr+m7n7MAXXPPfck+njBly/bo/uOWPvHta9OhryRQ3X0q8z9oUyH6aB5ULlnzZrV04+8+s+TL/1ZunRpenleXMvW/i7iUUe2SyuvLdd5/uhKf1PQBTt1WOXVQx25ZepAxcKmZit3mO2pctqkTa2jX3l664iWsaeKm5cPa/O6eIbNy49rV6FtGRufJ9uWtYhNKA/+sbw6ysuHLyNvP8++K+/Y+L+Pph7LehgF+16k500/Nw+zjW/SvhdxzWvbOpent5JNgAAEyhHAAVWOE7EgYAnggLJEWv7dtgNKUwv5X6RPnDgxmT17dm8KIlc0OZv23XffTCeh74By8fR1u/8gc/bZZ/fk6KsrOYQ0mkiOCuv0OOmkk6KjlqzMM888M3nmmWeSZ599Nlm9enVvBMR5552XSVd5OPzwwzNlcHksu7300kszMtVR6jr81DF63333JeolSgoaAAAgAElEQVRw9surjtS6jq+y+Wsqni2nK4/qKi+ozlxc1SlhdAm02casbHW8a2Sh/+fsgxw+Tqe0jXVqrFq1Ktlvv/3SuHKWX3755T274GpBXw5aR2nIAaX4iiu756ddNJrvySefzDii5eyJTQ1iGRxwwAG5zhVXhipbpR1yjIfstC/XduDL6b5s2bJeWVRH1157bYaLpjfUcXVaydmmcvvcQg4ojXT1+eqjBk2tJhkKkqcvo/37kGRqNJTO5QXLtqn7Q9kOU+VNeuoz0H1POq0/N9XghRde2Pcxhzio3EXBllEOVskOBRu3KR5Ky8puoi3Xff7oQn9DnIuOWVZN1YOV20QdqCzY1GyNDrM9VU7bsqlWv5rS2yr2VOWz+cizeV08w7Zp46uUNcSmqTqy+WjKtmDjN0zlq+eEonfTNuph1Ox7SM+bfm4eZhvfln0PcW3Kfkg2AQIQyCeAAyqfD2chECOAAypGpqXjbTqg5MTxRwvIMeR/we8XSV9P+Z2IoY5NO5xdLzChoE43v8NO+xpVEApWZmz4vabXsjLVeTpo8F9q1VGoqThs0DE7Qqyo49rKGOvfqm/LTb/lXIt1cirPV155ZXrd3Llzx7oYpF+DQJttzMqO2QSX/ZNPPjnVq5ADSg9vdrSmRjGFgpwXco46/Y45oHStncZP9q4oHHbYYansE088MRrdMsjLR1RIwYnY1JiXXXZZ7pW2A19fXfrhuuuuS8sojpa1Roo6vtpaB5Qcdf7IJ91D5LwPBU1B499jJE/lyguWbVP3hyodpjYPMR0XG+mJz0v3XK0rmBesfDnmYsHGbYqH0rOyY+V0eStqy008f7Stv64sVbeWVVP1YOXWrQOVC5vaX7vDak+V0zZtqtWvpvS2ij1VGW0+8mxeF8+wNj+xdjeIjbey88oaYtNUHdl8xMroWkuRfVc8bHzSm/rZv+drP+/dtOl6GEX7HtLzpp+bh9XGt2nfQ1ybsh/OLrCFAATiBGSPWQMqzoczEIgRwAEVI9PS8TYdUBo14D8Yy+mSF/yRRiEHlJ3DOu8Fxn95UR7OP//8YNJWZuxhSRfb4fT6smfQ4LO56qqromJURp+hRnGMUojNo68yqfM5FvwvQos6iWMyOD4cBNpsY1Z2nk0Qjfvvvz9tTyEHlNpilfamUZIuft4LrKb2cPG0daNz8mro6KOPTq85/fTTo1EtgzwbFhVScEJTmSrfdnSppinMC34HvtbDsqHIAaX4/lSI1gGlaT59r
kXTlNr0NPI2NrJMaVdhW+X+UKXD1OYhT8c1ela64jM59NBDc0fEWfkqRyzYuHm6VoVHiHVeORW/qC3791jxGOT5o239jXEuOt5WPVi5detA5cCm9tfmsNpT5bRNm2r1qyn7UcWeqow2H3k2r4tnWJufvHZX1cZb2XllDbFpqo5sPvLKqHwU2XfFwcaLQrV306brYRTtu5hZDnl6/nfK1f4Pq41v075X5Vr1GbFaDRAbAuOPAA6o8VfnlLgZAjigmuFYWkpbDqgnnngi8ReA15fn+vImLzz00EOJOl31p45dGzTVgt+xJodVLGjRWj+uOoBDwcrMewidP39+RmZR52soPXdMnZ4aqaU/TdEVC9dff30mzTpOr1gabR73v7bzO5JVN5pSUHoSCv7UXLHRa6HrODZ8BNpsY1Z2nk1wZDSF2amnnposXrzYHept5RTyR8jIfsX0079Q6zDJZmn6uFgYxAF17LHHpm0/bxrKQRjE8hk67nfC64VRX1D7tlXnY8G/VqNIbbAOITsCSvH/+te/pun5DiiNEPUdYhoJlTeqUrJkd+2aUHn2xbJt6v5QpcPU5qFIx6XHduSs7iOxYOXnlbFK3Kr3Syu7qJwqT6wtN/X80ab+xuqjzHHLKq/OqtSDlVunDlQObGp/bfo6NUz2VDlt26Za/WpKb6vYU5WzSj66eIa1+Slqd1VsvJWdx7wqm7G0Ldj4DbZlrOphVO17SM+L2twG2sV7w2rj27bvIa559qaK3hZTJwYEIIADCh2AwGAEcEANxm3gq9pyQGmqOL+TsuiruzIFsC9SeQ+M9qtFrT0SClZm3sOSpo/yy6Sv59sO1pGmtT5GKfgv75p73K5rNXPmzGBxcEAFsYzkwTbbmJWdZxOK4Gn0jN++Y7pZJCd0fpQdUFdccUXKRevU2ZfGvJGMcq5rpJn+Qs68Mg4ovTQ7Gb6DyU6LKmdEmWC/AM2rZ6tfTd0fqnSY2jyU0XHx8nVZDqnYeldWfl4Zq8Ster+0ssuUM1bfTT1/tKm/sbyXOW5Z5dVZlXqwcuvUgcqBTe2vzWG1p8pp2zbV6ldTelvFnqqcVfLRxTOszU+ZdlfWxlvZecyrshlL24KN32BbxqoeRtW+h/S8TJvbQDx/b1htfNv2PcQ1z95U0dt84pyFAAREAAcUegCBwQjggBqM28BXteWAstMANTGNmn2Ryntg1GgEvwMuNP2ToFmZeQ9Ldi742KgqTZGhNV7cX94UT0UVp/z45bjxxhuLLhmq8/7L++zZs/vm3lfZVqxY0ZdnHFB9SEb2QBttzMGwsmM2Qc6Pxx57rLdmgLvWbtuwWS6NUXVAyZZpcWa103322ac3lZs4+jZJo78GDWUcUDHZdr2jO++8MxY1c9x2XKlcTThnyt4flJkqHaZlddwv5IMPPpipI9WXjoWClZ93D6wStwoP5cvKHta27DOso7++nKr7llVenVWpByu3Th2oTNjUbM0Osz1VTtu2qVa/mtLbKvZU5aySjy6eYW1+Yu3O16ayNt7KzmNelc3Galt8ztj4Z30cmf1Rte8hPS/T5jKFj/wYZhvftn0Pcc2zN1XsRwQ3hyEAAY8ADigPBrsQqEAAB1QFWE1EbcsB5S8Mr84vTa9XN9gXqbwHRq2V5HeSxtZ/sDLzHpbkQPFlxtJXR6gfr2iu8xgXOa4mTZqUytKUYJryYJSC//KuqbQUNJ2Yz0ejovTQ7gccUD6N0d5vo405IlZ2qE1q2jY3HeiiRYvcpX1bO21ZEzbLJTKqDqhly5albfWiiy5yxUkOOeSQ9LimwRvULtXp3PFto+xJ2fpatWpVmndnh2LToFr9auL+IIhVOkxtHkI6nlaMtzNlypRMOeV4CwUrP6+MVeKWvV+6PFnZoXKWbcttPH+4fPrbOvrry6m6b1nl1VmVerBy69SByoRNzdbsMNtT5bRtm2r1qym9rWJPVc4q+ejiGdbmJ9Tuspr0919lbLyVnce8KpuxtC3Y+A0aMVb1MKr2PaTnZdvcBurhvWG28W3b9xDXPHtTRW/DtDkKAQj4BHBA+TTYh0B5AjigyrNqJGYbDij7MqhOPutgGCTz9kUq9sCor9kPP/zwTOdbrOPZyow9LKmDUtP4uQ5LdbrGvihXWi6etoOu22SdaBrWP2rBf3nXgsEKGkGxxx57ZBjNnTs3UzT9dgzz1mjJXMSPoSTQRhtzBbWyQzbBd2bG7IC1WWrfTdgsl89RdUDJdrl2qEXBXfCnGNH5QdvooB34zz77bJovl7+yTjBN4+eucVtN8xcKVr+auD8oHatvSicWbB5COh661n7tOmfOnFC0Sp2xNi9N8VDGrOxQOQdpy6rjJtuyD3FQ/fVlDLJvWTVVD1buoHWgMlkdx6YmvWdBZ3OGyZ6qvrqwqVa/mtJbq2tKJy+UzYdkdPEMa/MTaneh8pSx8VZ2jLmTXzZ+lXciybZyQ2UsY98ly9Y3Nr7cu2mT9WDrYJTse1kOilc18MycXas7Zm+q2o+q9UB8CIxHArLLm2yySbLddtuNx+JTZggMTAAH1MDoBruwDQfU2rVrMx18WvS9iVDmBUbG97TTTsukr6/1dTwUrMzQw5K+urYvenlrMTXhgNLcyHqgd50VKoO//kmoLMN4zH95v+yyy9IsqjPUlU1b6Yi/Rsz111+fnh+0cztNjJ0xJdBGG3MFsrKlSxrt6P5OOeWUTDuKOaCszdLXtU2GUXRAye5NnDix1w6nTp2awXHfffel7VPMNRXLIGHQDnxNb+rbj6qdH65cTobfGeyXw+pXE/cHybedN0onFmweQh13oWu1JpYrn7buAwAb18oPldFdUyZu1ftlTLby7NqxtoO25aaeP1w+/e2g+uvLGGS/rXqwcgetA5UJm5qt2WG2p8ppFzbV6lfI1gxiP6rYU5W1TD5c7XXxDGvz06SNt7JDzF1Zy7IZpI5sPpq0Ldj4XdN7fd67aah+B62HUbbvIQ5l25zfVuz+MNv4Lux7iGvI3gxiPyxrfkMAAv0E9CyEA6qfC0cgUEQAB1QRoYbPt+GAWrlyZfowrIdbrSHSRLAvMKeeempv6idNvaS5hLUorx35pJE2Dz/8cDR5K3PatGnJ8uXLe/Lmz5+faBSSP9WDpvKSIyXvi+q6Dih9ye+PttIaJXp4HMXgv7yrE9EFOdMOOuigjJ7MnDnTnU601pV0R384oFIsI7nTRhtzIKxspzOxbcwBZW2W2l+TYRQdULJ/jqObPtMxkf2bPHlyel52dhAH+aAd+La+lM8qYd99903zrms1bUooWP1q4v6gdKp0mNo8lO0oufjiizNl9KdQ9Mtq5Yc6DFx8G7cpHpJvZTvdi23LtuWmnj8cA387qP76MgbZt6yaqgcrN8beHY/Vgcpk2+h4t6nDbE9D9aU6rhLK2FSrX03pbRV7qjLZfOTZvC6eYW1+mrTxVnZeWUNsmqojmw9nQ2LbKrYFG79rb5rponfTUP3G+LvjsXoYZfse4lC2zenaWBhmG2/rS/VbJZSx75Jn23lT9qNKXokLgfFKAAfUeK15yl2XAA6ougQr
Xt+GA0pfk7uHV22bejmwDzZ+GqF9OaP08pgXqsjUyKrYVE1+GpqaT84w9yfnWNmgTtyjjjoq5ac5mzXaYFRD7OVd5fHnynb1t2LFil5R/Qd5HFCjWvt/z3cbbcwRsbI1UtAfNSEntT+SMPYyXdVmaYRi7C/kLB5FB5Sf59D6SmeddVZqp9R+ly5d6qql9HbQDnw7AksfBlQJ9mVadRkKVr+cnQpty94flE6VDlObh7IdJfoS2s+n7kehYOXndVDauL58u1+Fh/JlZXfVlkNMyh4bVH/Lyo/Fs6wse/93lXqwcgetA+Ubm5qtvWG2p8ppFzbV6pevp3a/it5Wsacqq81Hns3r4hnW5qdJG29l55U1xMbWi/+7Sh3ZfHRpW7ItsdovbHx43dRRtu8hPS/b5vK0Z5htfBf2PcTVtxd2v4r9yOPOOQhA4O8EcEChCRAYjAAOqMG4DXxVGw4oO9S7qems7AuMHmbkoJkwYULvT1/YHnPMMYmmHtIopjLBytRoIz1Ezpgxo29BZjm08kZTlUkvL47WrtJUVu4hTeVasmRJ3iVDf85/eddoMhv0AOrKq624a3TFggUL0uM4oCy10frdZhuzskMvkWXWFag6nYjv1PL1V/v+SD9XU/6LqeKUWa/o2GOPTdvAGWec4UT1bcsw6Luo4IAcTq5chx56aDD2bbfdlsZR3AsuuCAYL+/goJ07q1evzqSt9DWtR5kgJ7+tv5hdt2ybuj9U6TC1eQjpeKjcGlHq6lDbW2+9NRStUmeszUtTPJQxKztUzjJtua3njxC8QfU3JKvKMcuqqXqwcgetA5UFm7qhRofdniqnXdhUq19N6W0Ve6qy2nzkOWW6eIa1+Qm1uw3atGGvjI23svPKGmLTVB3ZfITKWMa+K4/Y+EmZe3uVd9Om6mGU7XtIz0P6uKGlFe8Nu43vwr6HuDZlP4prgBgQgAAOKHQAAoMRwAE1GLeBr2rDAWU7+ORIkXOlbijz4Fw1DSvTfzlbv35939pP++23X2atoqrp5cWXg8Z1GKqDNNZhmCdj2M75L++aEsoGvcTIiejKra3Wf7rpppvSYzigLLXR+t1mG7OyQy+RckxohIx0KzYCytostb88myUHqUZ2+nqrfTmaNALShkEcUHKmO/ldO6A0lYtLW1s59+2fpt3z4+y///65zCwT/R60A992figfa9asCSXRd0yjWP18az/mELT61dT9oUqHqc1DSMf7CpkkyQknnJApp75YDgUr3y+jjZ8Xt+790soOlXOQttzU84dlod+D6m9IVpVjlpVfZ3XqwcodtA5UFmzqhhoddnuqnHZhU61+NaW3VeypypqXjw219ve9Lp5hbX5C7c7mS7/L2Hgr22cekpkXf1htCzZ+16Tsu6mt35CuDXKfHaVnZul9GQ6h9hE7Nuw2vgv7HuLq25s69iPGneMQgMAGAjigNrBgDwJVCOCAqkKrgbhtOKCULX314nfylZm6rqg4TT8wKj0r039Y0nk9MOnB3i9LXmdwURli5+fNm5dJ44YbbohFHanj/st7bA2Sq6++OlP2vffeO1H5HXMcUCNV5X2ZbbONWdmhl2llSC/UcjKoUzQW7LRsRQ4NX7edruolLxQGcUD5U3Hm2ZyyDEL5Ch3TCERr81z5irbKS5UwaAf+s88+2zeKqexUpbbe5JyMORst26buD1U6TG0eYjruc1d5ZEf9+nryySf9KOm+lW/LmEZs+X5p8xErZ5m23Mbzh8/B7Q+qv+76QbeWla2zQZ9brNw6daCyYVOT3ojuYbenqqsubKrVr6b0too9VVmL8uG3S/9+0dYzrM1PrN35+Spr461sy9yXWYbNsNgWbPxg76ZWH2K6VuY+O6r2PaTnMQ62fYR+88y8gYrVL2tvBrUfG1JgDwIQiBHAARUjw3EI5BPAAZXPp/GzbTmgNCWA3/m1ePHi2nm3DzZ1HhhdZqxM+7CkeLfcckumLCqXXkqbClo7RV/wOV6hKbyaSqtrOWVe3uUUmDp1alp+cZgyZUr6GwdU17XWbHpttjEru45NsDbr9ttvzwWhDqCJEyemeqoOkVg4/vjj03jS7yeeeCIWNT3u5+f8889Pj9udqgzkiLvqqqt6Tt6Q4+Wuu+5K86p1GiQ/9jdr1qw0rsoluVVCnQ786dOnZ9Iu67SXPXG2VlvVTSxYtk3dH6p0mNo8lNFxe41GmcaCjRsqo7u2TNxB75dWdplyunzZrd92VMdNPH/YNPS7jv6G5JU9ZlmF6myQerBy69SBymLrYTza1FGxp6qvtm2q1a+m9LaKPVU5y+TDtcUunmFtfsq0O3tNzMbbeCHmrqxl2QyjbcHG79p7ril6N7X6UEbXfP3w90fVvof0vIhD3nPzqNj4tu17iGvI3gxiP3y9Yx8CEAgTwAEV5sJRCBQRwAFVRKjh8205oPTFvt/JV8apos5QfaWtL69CockHZyffygw9LClfhx12WKY8evAOdd46uWW3jz32WOZLdXFrQm7Z9NuOV+blXXnQWle+vvj7OKDarqV25bfZxqzsopdIlVTrDKidz58/P1Nwa7NCU0ZmLjBf+MtpGgvnnntuRr8feOCBWNT0uKa8c+1g9uzZ6XG7U5WBpiF0cjUvvA0+hzlz5tjTmd9++5bM2HpRmYu8H3U68LXWnyuHthplVibYqVLmzp0bvcyyber+UKXD1OahjI5fccUVGTYXXnhhrTK6i21emuIh+VZ2mXKWbctNPH84Bv62jv76cqruW1ZN1YOVW6cOVCbflqiNjkeb6jMYZnuq+mrbplr9akpvq9hTlbNMPlyb9O9xsRFQilvnGdbmp0y7K2vjrewQc1fWsmwGeSey+ShTxph9Vz79diXbgo3/uwOq6N20yXqwdTAq9j2k50X6mPfc7HMYZhvftn0PcQ3Zm0Hsh2QTIACBfAI4oPL5cBYCMQI4oGJkWjrelgNq2bJlmQ4wDdXPm/5KxVu+fHnvGo0qCE1/NciDcxE2KzP0sCQZGqXkd3Zqv+4aTRq270+zdcQRR0QZPf7444lGG5T9yr+o3F2dL/vyrvyccsopfYzFGQdUV7XVTjpttjEru+glUjZozz337OmZnEJ+cPbHtfPJkydH26O7zp+CZNq0ae5w31YOJCdX26KRAE8//XRmVOTChQv7ZLoDVRn4o5a0mLcflK5bL0v5DNlhP7542rWgYtMQ+te5/Tod+KEOv6K0ZXO1VpVfF3lltGybuj9U6TC1eSjScX3U4K+rp/28EXdWfqyMqrOycQe5X1rZReXMa8ttPH84nfW3dfTXl1N137KK1VnVerBy69SByjTebeoo2VPVV9s21epXU3pbxZ6qnGXzobhdPMPa/BS1uyo23sqOMVdZFcrGH2vbgo3/e32NZT2Mqn0P6XlRm4s9N4+SjW/bvoe4xuxNVb39u7bzHwIQyCOAAyqPDucgECeAAyrOppUzbTmglFlN3+R39GnYdV447bTT0vj33ntvX1SNGvDlnXXWWX1xqh6wMmfOnBkVccwxx2TS1wgFzZtvgzpB9aWk+9NDXygsWLAgladOwry
OUPe1ozrPRyn4D5l504ipTFonzJ/SzNU1DqhRqvH+vLbRxlwqVnbRS+Qdd9yRtrlLLrnEielt9VXejBkz0vPSvwULF2Ti+D/smhl5Dig75URRW/DXQFM+Hn74YT/pzH4VBuq099dO0LQiftDXm67dHXTQQf6p6P5JJ52UXqNr1SlfNlxzzTWZa6u0dTmTDjjggMz1RV9B+1+xKq+xl2OXf8u2ifuDZFfpMF25cmWmjHk6rvo9+eSTM/E14isvVCljlbj/n70zAd5jyP9/dhNXKVQUilSSQspRWVEhKXets9yUqwpBOTchbuv47brWWldECIIVLHEfiXVEXFk5xBmxscK6IkgsQUhEJAr9r/f49+j5PD0zPc90z3ee7/Puqu93nmeenk93vz6fPqY/0z2u/aXOm5SdVU5ck1WX8bvv8YfOp3ksY7+mnKKfJStfdinlltVBu7eprdSewgZDt6nSvnzZbZH2FOUsko8qxrAh2/giZS3KpkgbL/NRtm1BXtnGg4JSHaWHVm3fwayIPWaNm1upjQ/dvtu4+mrjI0PnPxIggUwCdEBl4uGPJJBKgA6oVDRhfgjpgJKTrpdffnnkZLCVRD6ZY3NAyaet7rrrLpuoQuekTEzepYW5c+cmJvYwgTlx4sSG6HKSc+zYsQ1x0EmYT+Lfcccd0aoIrIww/7Dy4YknnohXQ2DCtZXCrFmzYmZZA1FdpkmTJsXx9UR4kUlpLYfH+hAIUcd06aRsPKWYFnDzhVWG2q5gazKgvunfcYRj+IsvvpDRou+yfctyQMFZZdZ3vPMtTS6cQpCl85HXzhVhMH369Fgu5KMd0kHmMS9dfR2cPjqvOBbZhk9eiy13igSzfdF5QBltAQ5+9EE6Hj5LB5y8TrL10T8gjSITpuYT+Mh7mo2jLP/4xz/i8iEutoXEE7pZQZYRK1HTgozriwfSk7LTyom4LnVZ1s+y4w8bk7L2a5Ppck6y8qUHKbesDlCWdm1TW7E9hb5CtqnSvnzZbZH2FGWU+chq80weocawIdv4ImW1sfGlI5kPH20L23hoTCnXe1ObfsvqoRXb96Ic0sbNrdjGm+2ZHgu3wpg5MnT+IwESyCSAsVCXLl3UwQcfnBmPP5IACSQJ0AGV5BH8W0gHlO1pbDx9j63r8BsCJs1efvnl2MGiB0Q2B5Sc7MFqGWxNVyZImdh2a9myZakizVVayCsmkvEkthlcHFDyyWld7rwjVmi0UjDffYMVY+aEt60c+B2TpiYHOqBspFrnXIg6pksvZWOCCNuNmX9YkfjJJ58oWXdtzg60S3JFzxVXXBGthNK2i8lv2CTOm3aa5YBCfuWNOrbvwwSKlou058yZk7B/tHHIe1aQDMaMGRM5t+Dgwt/8+fPVRx99pOBwM7fXQ9tlBqzKMsuD1Za4wc4LmKAyr8Pnf//733mXReXGKivzWrzHSfcNuQL+fwS838iUcdFFF0VlxVZCeEIXfKEvtO06HhyLeLI9L0i2PvoHpIk86bzgiLymBb36Vcd/8MEHoxVx0C3sBf0N3kEgV4/eeeeduQ42pCnLiD566dKl1uzIuL542PLhoy5L2ywz/pBAoEMf9ivlunwPpQcpt6wOUJZ2bVNbtT2FzkK1qdK+fLUfRdpTlE/mI6vNq2IMG7KNL1JWGxtfOpL58NW2sI2H1lTD+NZ2b2rTb1k9tGL7buPQzLi5Vdv4UO27jauv9gOyGUiABLIJYCxEB1Q2I/5KAjYCdEDZqAQ8F9IBhWxjAtC2qgWDY/0uFj2xZh5NBxQmk/E0vvm7/oxJ3Lwthmz4smRee+21qe93wjZxmLzU6eOIsmBye/HixVFSeQ4oDNjNJ/FNWXmfMUHbCgHvlpFP46Nso0aNanDYyfKYWyvhGjqgJKHW+B6yjmXJzqtD+ndsgWkLaLPQpuh4+gjHBiaqUN/1OfOY54CCXEzC2K7HTZo8D4dClpOkLAM40BDwEMDdd99tLdNVV12V+t659957L/EOO5MFyiLfsWWyxhOX5iov81psq4M2tEjAQw22NhUMoTdTPvoMbHmUFbLYlu0fkK7LhCkerkC/IvNvlsX2GU5GrMyFozQrFCljkbhmmi79ZZZsW/ls57Lqctnxh1ke/dm3/Wq5eccsVmXsMkuujbftXJoOUKZ2alM7Q3sKnflsU7Psq4zd6vri0p4ibpF8VDGGDdnGFylrUTaaO455bXxWPmztiO1cXtvCNr5j9dAq7XuendtsT57DuLkztPE+2/c8rj7aeLPN4WcSIAE7ATqg7Fx4lgTyCNABlUfI8++hHVA6u9hiD09uYyAiB3T6OyYQsRUdJmnNlU24EcyahHN9V4nOC455MpGHtGDu+azzjvxhpQWCiwMqy/mmZdqOmChuhSBfZGyWZfLkyblF0BPi4IrtSRhaj0DIOpYn27S3tM+4AcsK2AoTDgCs3IMMs87CwXL//fcrOGH0/vtwrroE1A08bWk6sk3ZcHLBAYYXjWeFsgxQLoTPPvsstU1GudEm28KUKVMyr0N7nhbgSE/TC85jRU/RAB7//Oc/o/d4QT9oO0zGWD362muvORwVjgYAACAASURBVK3qymNbpn9AuVwmTG3b6pjM4GiC3WBbVvDE+6xg05gccQlFylgkrkw7r7/Mk22WOe1zXl0uM/6Q5cH3EPZrS0eey2PVrF3myU3jbp7P0wHK0g5tamdpT6Ev2IWPNjXPvpq1W10/XNpTXZ6sewkzH1WMYUO28UWZF42v2eOY1cbnyTXbkLTPLm0L2/iO10Pd23eXNiDNBvV5jJs7SxuPuumjfXfharatZtuR137IuPxOAiSQToAOqHQ2/IUEsgjQAZVFJ8BvVTmgzKx///33CiucsH0QbvLwBB0mzfAUFQMJgAA6UTw56TqZSmokEJIA3qODmzVMGMExZK4uwXZl2OIONlskoL2DTLSDaAOxVV7a1mdF5DLuL9t+ff755xHXL7/8srBuQjN0nTANnY92lM/xRz20zja1HnpwzQVW7te1Ta1je8oxrKtl+Y/HNt4/06IS2b4XJdax8evcvncsGaZOAq1DAOMObsHXOvpiTutDgA6oinXREQ6oiovI5EiABEiABEigNgTqOGFaGzidPCOYHMVKvFdeeaXQHxypDCRAAo0E2J42MuGZjiHA9r1juDNVEiABEmh3AnRAtbsFsPzNEqADqllyTV5HB1ST4HgZCZAACZAACTRBgBOmTUDrJJfgvW56O58ix6LvReskuFgMEsglwPY0FxEjVESA7XtFoJkMCZAACZBAggAdUAkc/EICzgTogHJG5SciHVB+OFIKCZAACZAACbgQ4ISpC6XOGeeDDz6gA6pzqpal6iACbE87CDyTbSDA9r0BCU+QAAmQAAlUQIAOqAogM4lOSYAOqIrVSgdUxcCZHAmQAAmQQFsT4IRp+6of73776quvCv/x/XDtazMseTYBtqfZfPhrdQTYvlfHmimRAAmQAAn8SoAOqF9Z8BMJFCFAB1QRWh7i0gHlASJFkAAJkAAJkIAjAU6YOoJiNBIgARLIIcD2NAcQfyYBEiABEi
ABEujUBOiA6tTqZeECEqADKiBcm2g6oGxUeI4ESIAESIAEwhDghGkYrpRKAiTQfgTYnrafzlliEiABEiABEiCBXwnQAfUrC34igSIE6IAqQstDXDqgPECkCBIgARIgARJwJPDTTz+pGTNmxH/fffed45WMRgIkQAIkYBJge2rS4GcSIAESIAESIIF2I0AHVLtpnOX1RYAOKF8kHeXQAeUIitFIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAYE6ICqgRKYhZYkQAdUxWqjA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP377ruvOuaYY9Tf//53/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBtoARvYfffd1dlnn13xbDKTI4HWJkAHVMX6wwqogQMHqi5duvCPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1toFu3bpGODj744Ipnk5kcCbQ2ATqgKtbfzjvvrI488kg1adIk/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuouQ1MnDhRbbPNNuq0006reDaZyZFAaxOgA6pi/fEdUBUDZ3IkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkUIIA3wFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiTQQgQWL16s8LdkyZIWyjWzSgJhCbBehOVL6SRAAiRAAiRAAiRAAvkE6IDKZ8QYJGAjQAeUjUrAcx3lgFq2bJmaPXu2euutt6LjggUL1M8//xywpK0hGhN87777rvrf//7XGhlmLkmABGpJAG3shx9+qP773/+qefPmqUWLFrVVG/vOO++oN998M/pD2esevvnmm0hPaPt/+umnWmUX44QuXbqo3/72t2r+/PlN5y2UTjiecFdJKB2458A9Zp3rBErhq164E2HMOhNgO1Rn7dQzbxyncZxWT8tkrkiABFqNAB1QraYx5rcuBOiAqlgTVTqgMBE6dOhQ1atXL/Wb3/wmmtDCpJb+Q1523313deGFF6offvghJoEJuTXXXDP3b7311lObb765OvbYY9Xo0aPVp59+GsuQH4rKPOKII9QNN9wQTehKWWW+w/GGvO6xxx6qZ8+eCS6rrbaa2nXXXdVVV12lfvzxxzLJVHrtwIEDU3W1ww47WPMCBmk6vvXWW63X8GS9CZx++umpOl177bWtmUfdT7ODCy64IHFN0Tqc1y4UlefaJrjKTSs3zk+YMCFR9rQv77//vjr11FNV7969I2eBblv1cYUVVlDbb7+9+uMf/6ief/75BjEhdZbHYfjw4Q35afbE3LlzE20p2lCX8N1330XssnTh+hva87wwefJktf/++6tVV1016ge7desWHbt27ar69u2rRowYob766qtMMXlcdX51/+hqt2aieqIddoT0mgnN6iQtrWbGE6H0m6eDyy+/PC7GbrvtltrGQVePPvpoHFd/wDhh/fXXT73uuuuu01Ezj83oIBSztIz6qBOQnaeTKupFXh5Mu0CeYdM6X+Zxu+22U5hYSAvPPfec9TrImDRpUnwZ7AjtgClbfl533XXVpptuqvbaay/1l7/8RT322GNq4cKFsYysD0XLmxdf561o2+UqV8u3HV373Dq1Q1m6SfutI/t8zV3rN2+MhjK46lbLdO3vXOXqPNuOrjbDcdov1tgK/RFy6qNPcrWvonabVq95ngRIoP0I0AHVfjpnif0QoAPKD0dnKVU4oDCBceSRRyYmBDERCmfR6quvHjug9CQpjh988EFcBlx/7rnnKlxjxsn7vPzyy0c30LanyZuViSfATzrppISDLM5ogQ+zZs1Shx12mNITjros66yzjlpjjTUayglH1Oeff14ghY6LevHFFytMoOoy6SMmNm677TZrxmbOnKl23nnnhms22mgjNeP1GdZreLLeBJ555hm19dZbN+i0e/fuCjZiC+PHj1ebbbZZwzXbbLNNdBNoXtNsHU5rF5qVl9cmNCtX1xsc7777brPoDZ/xFO2QIUMSbexyyy2n+vfvH0042hz+mHiSIaTOsjhgsvPVV1+V2Wn6OxxOJr9tt93WSRZWXJjXlfmMtj0tYBURHIGmfNhRv379YmeU/g32et9996WJUllctQzbMc9uzQR9OKCa1YmZD3xGeZsdT4TSb5YO8FDNyy+/HBfjlltusfbx0Accw1i1KAPk4zdTD1qnaBunT58uL7F+b0YHoZjJDPqsE5CdpRPNznb0WS+y8iDtAnn++uuv1ZlnntkwLkQ+r7/+eoks/o4VroMHD060J2jz8cAXftMB+fnzn/9ceCyNMek999yTu4K2aHmz4tt0o8/l6ahZuVo+jnl9LtKoWzuk9Vzk2FF9vslafk4bo6Fczeq2DjbDcVrSMuvcHyGnPvukUHabJMpvJEAC7UyADqh21j7LXoYAHVBl6DVxbWgHFG6osepF32AgvZtuuinxNCe2SerTp08cB3FNB5Qu1p133pmIc95556mlS5dGsvB0ESYwccONGw2dHo7Dhg3TIhqOUuY555wTvevi+++/j7bCGzdunPrTn/7UIBOrDMoEPLFs5nGfffZJTDzNmTNH7bfffok4mLBslXDvvfcm8o6y3nXXXZnZf++99xLXoLx850gmstr/iPopHaqY8MgKeK+GeQ3aBnNFpLxW1uGy7YKU56tNkHLhhINz3PzDSkc4mtFmme1D1mQYtpfDZKaOv+KKK6qrr746ahs1q2+//Vade
OKJcRzEtTmgED+0ziSHLbfcMndiU5fD5YitXDfccMNEWTEZ+9lnn+VeLifb4RB66aWXoj4BenrggQcScqdOnRrpD9f961//ilZPaT2kOaDQtpv5w4pgrDJAX6kDvuO8loXjqFGj9M/Wo+Tqy26RmOn4QF9bNJTRiZlW2fFEaP1KHcCZbnsABg9VyIc00pzyZvnHjh2bsAk4+DFWcQnN6iA0M+Q9VJ2AbKmTjqgXMg9pdqH1eO211yb0jPqPOphX90444YT4urPPPluLazhiLGa2LVhdjMlxTF7AYQWH6fnnnx+tlDLjYSyPh6fyQtHyyvi+dCTl+upz694O5elH/l51n192jIb8S93W3WY4TktaXZ37I+Q0VJ8Uym6TdPmNBEigHQnQAdWOWmeZfRCgA8oHxQIyQjugzIlRTMi98MIL1tx9+eWXidVQNgcU3l1g3gz/7W9/s8rCBLcZDw6ptFU0Uia2HLEFOKKkYytvIt0mR58zHVBYCWZztOCm0JyoxCQqOLVKwNOhph4uueSSzKxPmzYtjo/tLVqprJkFa/Mf4Vw17QBPAuYF0/mKlT1ZQdbhsu2ClOerTZBy0/Kpy2o6jLIcUAceeGDMF20UtgtJC1hFoXWR5oDCtSF1Jjlk5SOtHFnn4TDSZTSPY8aMybos+s2cbIeDANsrmeHBBx9MyJbbGJptmM0BBQejucJvlVVWaUhDp4d3JOJ3XQa0/6+//rr+ueEoufqyWyRU1gFVRidmQcuOJ0LrV+rglFNOMbOf+HzMMcfEuoWOb7/99sTvti9wDmh7wDFtXGO7tlkdhGYWsk6Ag9RJR9QLmYcsu0Ce4QAy9aw/H3300TbVxuewuk7HhbMyLcj8pPVFGJciTS0TR2wNnbctqJSfV14Z35eOpNy0cmpOrn1u3dshXZ4ixyr7/DQ91OHeLZTNcJyWtMa69kfIZcg+SdqXr7YuSZffSIAE2pEAHVDtqHWW2QcBOqB8UCwgI6QD6pFHHkncuGZtIYIsX3rppXF8mwMK+0WbN8JpNzGQZd5I4hqsCLAFKTNtMIhr8dSemT7eV9NsMB1QDz/8cKoY5NtM86GHHkqNW7cfPvnkE4WtNHT+sd1ilvPBnJCDw4+hcxA4+OCDYxuALbiEww8/PL4GTpOsIOtw2XZByvPVJki5WflEebEVl647aQ6oJ598Mo6DuNdcc00WqugJejhGEDfL8RNSZ5JDmXbUVljd9mM7UziyNcODDjrIFj1xzpxsR3skQ54DCvHxjh+kaXNAmRPEiINVU1lBprfjjjumRpdcfdktEizrgCqjE11gH+OJ0PqVOsDWwWnh6aefjm0TtoCV1nkBDk9tz2nvU0yT0awOQjMLWSfAQuqkI+qFzEOWXSDPWGWk9SyP0ult6hvb5On4sK+0IPOT1xehr9ByccRKq6wg5eeVV8b3pSMpN6+cLn1uK7RDWbpJ+63KPj9LD7qd0vZW9b1bCJvhOK3R6rSei47TQvdHyGnIPknal6+2rpEwz5AACbQbATqg2k3jLK8vAm6zk75So5xoYmmXXXbxTgJPEOGdP/omAk9NZjkfkAFsPYV3HSE/n376aUOesIWSlodj1k0MtjAy49omE5GAlJk1GHz88ccTMvfdd9+GPLqe+PjjjxWcSfjD1idpwZxQQHmwfL+VArZ2MfWQ5oSEw1GvMMPTpQydh8Chhx4a2wB07BLM1XNZWwlBlqzDZdsFKc9XmyDlZuVTM8K2MocccoiaMmWKPhUfsbWXuXXpqquuGm0VF0dI+YB00cZiO7m0EFJnzXBIy6c8jz5GO0uwhRTe16fbHzjD8/ogc3LD9mCAdAjZJoNHjx4dpSkdUNgqDTrS+Vl//fVl9hu+42aiR48e8TW49pVXXmmIhxOSqy+7hWzNFOnnbQMmM1dWJ5DnazwRUr9FdYAVztguU9sDVsblBdi0jp/1XjApp4wOQjILXSeK6qToGM+1XhSpm8gzVl5qPcsVSJtuumliC2tT1/fff398XZZzW+Ynry/Ctri9e/eOZSNvabsZFGVeNH4RHRUtJ/KS1ee2Sjtk2oTr57r0+R197+bbZjhOa7TAuvZHyGnoPknal89xWiNpniEBEmgnAnRAtZO2WVafBOiA8knTQRZuoEM4oHBzqm+gccTqobJBDtyybprNG3ikv91221mTlzKzBoPm05GQOWDAAKtMnydPPvnkBEe5JZTPtELIwl755qRr9+7dFd5HI8OgQYOicsJB0WpllGXh9ySBkBMbSEnW4bLtgpTnq02QcrPymSRo/4Z33pltbN5T5nYp9rMhdeabg1kC8x1Nb7zxhho/fnyC0YQJE8zoDZ/hFBgxYkT0h3c2yODigMKLqyFj5MiRicsnTpyYyAuc8y7hr3/9a+K6yy67zHqZ5OrLbpGY60S7LWNldQKZvsYTIfWLfBbRAeKb9Qx1Ocu5h3dmaCfASiutlOtMNXVRRgchmYWuE0V1UnSM51ovitqFOX7F+zTxYJbZ1qe9Dy6UAwoc5Xujfv/735smlvhctLxF4hfRkZRbts9tlXYooQzHL2Zb5PtBoSJ6MG0fNl/1vVuRvLqg5TitkVJd+yPkNHSfJO3L5zitkTTPkAAJtBMBOqDaSdssq08CdED5pOkgK5QDCoMq84Y5azsQh2xGUeTALetm8rXXXkuk72MFFCY0zTJhZULIgAkn8+l3rCKzvdA8ZB58yJYv1L788ssTYs2bejx9ytC5CISc2AAp3+2ClJd1g1ikTZBys9ovFwuQjgkfbaxON6TOfHPQecZRv5tjk002iU7jyf3lllsubrex7UuZ4OKASpOPlXxm/zF9+vS0qInzcqu2tIlfydWX3SIzrhPtiYz//y8+dBJiPGHLaxn9Ql4RHSC+XOGMSf60YL4zI28LNCnDhw6kTP29DLPQdQJ5LKKTIu05ZLvWiyJ5gFxzEv6OO+5Qb731VrxCHG0I0rU5K0M6oL744guF99DpNgxOCtvDREWZF41fREeSe9k+t1XaITAtGurS53f0vZtvm+E4rdES69ofIaeh+yRpXz7HaY2keYYESKCdCNAB1U7aZll9EqADyidNB1mhHFADBw6Mb1Rxw5r30mKHrDZMJGTdTN58882J9PHOJVsoMhjEljf65hvHtPRxA4WtdfRf2hPrtvyY5/A+F50ebvafeuop8+eW+YwtDXr27BmXBTan7QEOtS233DL6bY011lDY7oehcxEIObEBUrIOp9VLxHVpF6S8rBtE1zbBNZ9wOvft21etvPLKyra9m2kZW2yxRVynfLWxWn5InUm+WfrS+XE5fvTRR/EE6fDhw+NL9t5775gT3glVxolfZrK9X79+cT6grwULFsR5zPqArWl1P4AjthK0BcnVl90iLdeJdpkvXzoJMZ6QecX3MvrF9UV0gPhyUh8rgdMC3oWn7WDatGlp0RrO+9JBg+D/f6IMs9B1AlksopMi7Tlku9aLInmAXNMBhfeRIMiJUdtDVSEdUMgDVqNoG8TRtjUs4hUtb5H4RXQk5dr6miJ9bqu0Q5HBFPxXlz7fZYxW1MY60mY4
TksaYp37I+Q0dJ8k2ySf47QkaX4jARJoNwJ0QLWbxlleXwTogPJF0lFOKAfUKqusEt+orr766o65yY4mB262m0ktATfnPm+UMWm51157xTIxCWh7TxXSnzx5chwPeSi6qmfhwoXqjDPOiGXA+YT3irRykNu36O3CsL2M1tOYMWNauYjMewqBkBMbSNJ3uyDlpd0gFmkTXPOJCT1dH9COZAVza0s4b32GkDqTfLPa8SJluuKKK2J22KZJBzmhhS1xmg1lJtvNyWq06UUcYWZ/CvuwvctKcvVlt2Bl5t228iKNpy+dmOX3NZ6w5bmMfiHPVQdm2ttvv31st+CMG0gZcA7lhu7x7jBMmrsGXzpIS68MM9OuQtQJ5NlVJ0Xbc8g2859VL1zzoBmbDqi///3v0Wk8nAMHuu4fcJQPKeCdovp3n++A0vnCyjstH0dspWULRcvrGr+ojqRcW19TpM9tlXbIppO8c3Xp8zvy3g2MfNsMx2lJy6tzf4Scmm16iD5J2pfPcVqSNL+RAAm0GwE6oNpN4yyvLwJ0QPki6SgHgy3f74D64YcfEjepG2+8sWNusqPJgZvtZhIScMOOgaO+Ud5tt92il5jbpEuZaYNBc9AMuVdeeaVNXHSuqAMK25g8+eSTCmnvtNNO0RPuOu877rijwpYjrR7w8mbzyTI48N577714i8Gtttqq0IRsq/Nop/yHnNgAR1mHy7YLUp6PNsGWT2ylhifW8YcndIcNGxZPMqP+ZzmgQrWx2i5D6kzyTdOXzovLERPyffr0idp8uUWdfGfIRRdd5CLSGqfZyfZly5bF/RF0W9RhCKeD7hNwnDt3bkP+JFdfdouEzEmZrIl2M1O+dBLa1s08N6tfLcNVBzo+jldffXVCt9KpgDjmeynkFramLPnZlw6kXPN7s8yqqBPIp6tOiozxdPld64VrHrRc0wF144036tMNWzb2798/4bAcN25cbEshHFDnn39+LB/tEJz7tlC0vK7xi+pIyq1zn2tybLZOmTKKfq5Dn9/R925g1ko2UwedFbGzOvdHKEcVfZK0L5/jtCK6YFwSIIHOR4AOqM6nU5aoGgJ0QFXDOU4lhAMKE1TmZNm2224bp1fmgxy46YlLPBX58ccfR5M0p5xySiJtPDGK69KClDl06NAo/pw5cxTe0YEnSnHTqsuDbfUwYZQVijqgHn744Vi+TkcfsVKoMzigwGvChAmJcprvE3jllVeykPK3FiYQ8iYZWGQdLtsuSHk+2gRbPnUdTztmOaBCtbHazELqTPLV+tJpN3PEpL3mqLesMuVgW0P9OxzhzYZmJwalvnr37l0oCwMGDIjzj3LMmjWr4XrJ1ZfdIiHXiXYzU750Itn5Gk+YedWfm9Wvvl7qIG1yScfH0XQ2QLcXXnih+XP0eciQIbH+MTZxDb50kJVes8ykXkPUCeRb6qQj6oXMQ55dmDYxatSoGD8mcM0Vc7CXG264If790Ucfje0khAPK3BYaaY8YMSJO2/xQtLwyvi8dSbnIc9ZfR/a5Jr9m65Qpo+jnjujz63bvBmatZDMdobOidmXGr3N/hHxW0SdJ+/LV1pmc+ZkESKA9CdAB1Z56Z6nLE6ADqjzDQhJCOKAwQWLe5GHfeB9BDtyQxlprrZVYMWSmi5Vdr7/+embSNpmmDPPzSSedFN2cZApUKnKG4UZd/2Xd1ELWpEmTFFYAbbLJJtaywFFz3nnnqaVLl+YlXfvfd9hhh4RtgO8f/vCH2uebGWyeQMibZOTKVofLtAs2eWY7YH52bRNs+cRE4gUXXBD94cnyww8/PFH/s9oN7KNv5iOvjcW2ntjCyfZna1dC6kzy9eGAOv744yMeWPmK9+rIgPbT5GVu0SfjZn1vdmJQrsJab731spJp+G3rrbdO5P+DDz5oiCO5muWVn4vYLRJqxgHlSyehxhMNADvgHVA6D+YKt80220yfjo54h6Lmv+uuuyZ+y/viSwdZ6dS5TiDfdagXMg9FHFDy/aUzZ85MrPCHbUA+wvjx4+N2IoQDCg4nsy3B1sq2ULS8Mr6ZhvxcpO2Scsv0ua3UDtl0kneuyj4fOi0zRkNZpG6lnZjfO8pmOE5LWl2d+yPktNXHaUna/EYCJNBuBOiAajeNs7y+CNAB5Yuko5wQDii8n8Ic/Jd54twshu2GY4UVVlBdu3ZV3bp1i7Zg2mOPPdTJJ5+sXFfUSJnrrLNOtOIJT5zrSR9dlrPOOkstWbLEzJL3z3jCFU9hXXLJJQqrrXTaOOJJqVYPL730UqJMK620Ujx50uplY/7tBEJObCBFWYdRV8q0C1KerzZByrU5XvBODV3nsxxQaId0PBzz2ti11147Ed+81nzCXmswpM5cOOh8uBwXL14ct5X77bef9RLzyVuU3Vw1YL0g5WSzk+3z589P8Id9FglSfwsWLGi4XHL1ZbdIyOwL0T/lBZ86CTWesJWhWf1qWVIHeY4GfR3eE2nWyXnz5umflLmqBe9MdA0+dZCVZrPMqqgTyLfUSUfUC5mHPLswV0CNHDmyAf/pp5+esJdjjz02ioOtnLUdhXBAyS34pk6d2pA3G/O88ko+vnQk5Zbpc1upHbIqJedklX0+bLTMGM1mY3W0GY7TfjW6uvdHyGkVfZJsk3zZ7a+k+YkESKBdCdAB1a6aZ7nLEqADqizBgteHcEAhC+aLV3v16lUwV/bocuBmu5m0X5l+Vso0b5Rx84Abe31DjyNWVeF9RlUEPOUOB42Z/jPPPFNF0kHTMCdT991336BpUXjHEwg5sYHSyTpctl2Q8ny1CVKuLZ8YPHbv3j2q81kOKJR7tdVWi9uGnj17Zioa7wYxt6HTxY8QrQAAIABJREFUbcpee+2lbFt6hdSZC4fMwogf77nnnpjDuuuuqw466KCGv3322SeOg7IXXUmik2x2sh2rWDRzfcSEpkvACjVzu1Ks8sKDCjJIrr7sFukUdUD51kmI8YTkh+/N6lfLytKBjmM7TpkyJWEfY8aMiaMNGjQo+g0PpGASzzX41kFaus0yq6JOIM9ZOik7xnOtF1l5sHE1HVC2LZ/hgNb9hG5PXnjhBfXss8/GdhTCAXXiiSfG8pEuVnjYQtHyZsUvoyMpt2yf2yrtkE0needaqc9HWaRuffV3Um5Zm+E47RfLq3t/hFxW0SdJ+/Jlt3n1m7+TAAl0fgJ0QHV+HbOEYQjQARWGa6rUUA6ozTffPL5RxYQZBnZlgxy42W4MiqYhZZqDQcjCC9CxNZ6+yccRW+tVFW6//fZE2thKotUDnvjSPOmAanVt5ue/mYmNww47LLaRs88+OzMRWYfLtgtSnq82QcpNy+eiRYsUJhgxkMwKso3FRF1WePvttxOODKwaRVq2EFJnrhxs+bKdw0MBuj1xPaJPsq0issk3zzU72Q4Z5mQ18on3FrqE2bNnJ8rXo0cP62WSqy+7lXl3WQHlWyfS1n2MJ2wQy+gX8vJ0YEsT5zDOMO0D9Q8BW2cuv/zykf6xfVGR4FsHaWmXYWaWOUSdQJ7zdFJmjGfmP6te5OVBsjUdUFdddZX8OfoOJ6X
Z3m2xxRbRds76XAgHFNLQ8tGGpvVRRcubF79ZHUm5vvvcurZDVoPJOdlKfT6KInXrq7+Tcn3bDMdp2e9h0+0Ljs2M08r0R7Ars01HHlppnJZTxfkzCZBAJydAB1QnVzCLF4wAHVDB0NoFY7CFiQrf4YADDohvVDGIy3sXk0v6rjcGLrJ0HClT3sQg3nPPPZcoC55Ezpps0LJ9HPE+E3NAPnDgQB9iO1QGHVAdir/yxPGOL23DWMlhW70hM4VVLPqayy67TP6c+C7rcNqEQeKijC9Snq82Qcotm0/Zxs6YMSOjVL/81Lt375hr//79U+OH1FlRDph8fOyxx9TEiRMb8mvu2b/bbrup//znP6l/F154YVx22BYmKoqGMpMbyJ+2aRyxWsElyNUxerstea3k6stukY45KZPX94XQibR1H+MJyQ/fy+gX17vowJYuzh111FGxfYA37B5b7mmbwTaSriGEDtLSLsMsdJ1Anl100uwYz7VeuOTB5OvigPrpp5/UNttsE9sH7OSUU06Jv/t2QMFhb67E7NOnj5nlxOei5XWJ34yOpFzffW5d26GEMhy/1KnPd8my1K2v/k7K9W0zHKeFHaeV6Y9gd6H7JGlfvuzWpc4wDgmQQOcmQAdU59YvSxeOAB1Q4dhaJYdyQOF9SXriBMfbbrvNmn6Rk3LgVvbGAGlLmbbBIOJhpY5ZHmxFUlUwt3DwtZ1hVXm3pUMHlI1K5z138cUXJ+pO3gQ2SOy4447xNXfccUcmHFmHy7YLUp6vNkHKdcnnpEmTovcVTZs2rYEBVoaZbdItt9zSEEeeMFeS/P73v5c/x99D6qwoB7zPT5cTDnkzXHrppfFvee/HMSd1Ie+II44wRTl9LjO5ccEFF8R5RfonnHCCU5pyFezYsWOt10muvuwWiblOtCNuCJ2EGE/YIJbRL+S56sCW9kMPPZSwDzic9t577+jceuut5+S413JD6EDLlscyzELXCeTVVSfNjPFc64VrHjRbs61KWwGFuJjMNp1Cup3E0bcDStrnlVdeqbPbcCxaXtf4RXUk5Zbtc1ulHWpQiMOJOvX5DtkNVq992wzHaWHGBGk2UqY/gszQfZK0L5/jtDQmPE8CJNAeBOiAag89s5T+CdAB5Z9ppsRQDig8iW7eDO+www6Z+cCPn3zyiVpjjTUUntL/8ssvG+LLgZvLzWSDEHFCykwbDM6cOTNRHmwNgC2tmg1YBaL/smQgDl7Wq1luv/32WdFb4jc6oFpCTd4yKSfQX3311UzZeLJ7rbXWim0+771nsg6XbRekPF9tgpTrks9NN9004nDRRRc1MHvzzTdjRmgftt1224Y48sSWW24ZX7PTTjvJn+PvIXVWlAPegaLbP/O9SbATvPMJv7lu1WKuAEPft2zZsrjMLh/KTG7MeH1GXA7keZVVVnFK33wiF9smpm0dKLn6sltwcZ1oD6WTEOMJm77L6BfyXHVgSxt6hR1rW8eKBP09bxWoKS+UDsw0zM9lmIWuE8inq06aGeO51gvXPGiurg4oxD/55JNjm9G2g6NPBxTayQ033DBOB+P0rPeRFS2va/yiOpJyy/a5rdIOaTsqcqxTn++Sb6lbX/2dlFvWZjhOq3acVqY/gt2F7pOkffmyW5c6wzgkQAKdmwAdUJ1bvyxdOAJ0QIVja5UcygGFxPCCe/OGeNasWdY86JOYZNHxP/jgA306Ps6bNy/+HfGwpVLZIGWed955qSLxxLzOH454sb0tfPPNNwr78+s/3IDIsN9++0Wy8FJjc1JVxoNTzkzzjDPOkFFa7vvqq68el2mPPfZoufwzw8UIyK1zbr755kwBkydPju1jueWWU3LVi7xY1uGy7YKU56NNQJ6l3LyJjXfeeSfmYHsRPWSi/pjtQ96WQGbdy3JAhdRZUQ7bbbddVEZMxpvbNz799NNx2V23kcWqI5PXU089Jc0p87v5Im3IAaciYcCAAYn001YzaZnmRDTSy7JtydWX3SIvrhPtIXXiezyhGZvHsvqVOjj//PNN8bmfd91114R9aFvFlnquIaQObHkoyyxknUB+pU46ol7IPOTZBcbKWvfDhg2zYY/PffXVVwrjSB1fH7McUDI/eX3RFVdckZB/ww03xOnbPkj5eeWV8X3pSMrNK6dLn9sK7ZBNJ3nnquzzs/qxvHzq36Vu62wzHKf98t6nKsZpZfsj2FfIPimU3ep6wSMJkED7EqADqn11z5KXI0AHVDl+ha8O6YDCDbC+GcYR2z+lvbR37ty5iUkumwNKTsbhyc+yQco89dRTU0UiT127dk2U6dFHH22Ib06go9znnHNOQxztgMLvI0eObPhdnzj99NMT6eVNMOvr6nrE09n6qW6UHS+1ZujcBH788UfVr1+/2I6xuunbb7+1Fhr2ceCBB8ZxBw8ebI1nnpR1uGy7IOX5aBOQXyk3bzLspJNOijnceuutZpHjz1gdhnqk/373u9+lPpn+1ltvxfEQP8sBFVJnRThgtZwuG/oqM5iT9a46HzVqVCwPcvHenSLhxhtvTFw/bty4IpcrbKtmbpm10korRU/c2oTgZeXmlok9e/bMfFhBcvVlt8ibqwMqpE58jydszMvqV+oA/XeRIO0TNuo6aafTCakDnYZ5LMssZJ1APqVOOqJeyDzk2cWLL74YtzPYEiovoH/Q7aQ+Zjmg3n333UT8tL4I/TGcBVomjptttlnqOF7ns2h5ZXxfOpJy08qp8+3S57ZCO6TLU+RYZZ/v2l9n5V/qts42w3HaL2NUV73LfrDIOK1sfwSbC9knhbLbrLrC30iABNqDAB1Q7aFnltI/ATqg/DPNlBjSAYWn1YcMGZK4ecXkyOzZsxN5glOlR48eiXg2B5T5Qm7cCG+wwQYKN01lgpSJlzpnBaxAMm/G1157bSWfTi7qgIJDBvvrm0/3oxMZMWJEwllz2mmnZWWtJX6bMmVKgh/K7vJOoJYoHDOZSkDegGNbMUyCmQFOqYMPPji2j5VXXlnhieS8IOtw2XZByvPRJqAMUm7aZBgcD9dff33MAe3NAw88YMWANuO4445LxMWKITibzADWAwcOTMTLckDh2lA6c+EAW3j22Wej7Vh1ewsHjA7goc/jCAenS5CTqdjSzrZCNU3WQQcdlEi3mXcBYjWB6YTq3r17VFbz4QzYvfkeNLwHMG/rSsnVl92ChYsDKrROfI8nbDouq1+pA9TFIuH9999P2BdsG090u4bQOrDloywzyAxVJyBb6qTqemHLQ55dmJOoe+65pw174hzGwXiYx2wTsxxQcJybcc2+CCv40f5cd911qn///ol4hx9+eOoDDmaGJPO88sr4vnQk5ZrlNPNbtM/1eV9j5kN/9lGntKwix6r6/LJjNJRJ6rbuNsNxWjXjNF91J1SfFMpui9RzxiUBEuicBOiA6px6ZanCE6ADKjzjRAohHVA6oZtuuinhSMEqIixxx7YE0vGkb4pNB9TSpUujF5tjEk7/ro94Onj8+PE6KedjlsxBgwYp7PVuC3g3FSYMdfo4ogxYxQSZCEUdUFoWVi8cdthh0dZ+cG
BCAAAQhAYCAJYIAaSPrE3csEMEA1XHp1GqC0HJU9k0eGIQ1a+pyWgrIHlH2Dse6yVRoY9jkNPNuDojp/9tlnfV5bS3PZftOWwnr88cfbZGpwtlM3ZsyYhDwZn3xOA73nnXdewm8vLUWn8rb5mnMN3MtwmOYeeOCBOJyWKMT1LoHQ7cCCBQsSM5/UbsycOdMLSMu12e2K9E/LPGa5ou2MZMiAY8svYoBKW2Zy7NixWclK3AvF1DVAyahmu0ceeSSuh2KnJUhtN2XKlMR91wBVZ1m55dRU223nn3MIQAACEIAABCAAAQhAAAIQgMBAEcAANVDkibfXCWCAargE6zRAjRs3LjE4qdkwWc6eaeQzQLl7f6QZoBTHddddl4hbBh+fc2WmDWIqrLtslr6479TZS4RpGbGs5bfcL/2z8t1peuoKl7YXlAazNbid5h566KG4/PIMBmkyuN4dBEK3A1ra0RgyddQsxSznGlE021KzkNKc2ybk1TctKWnSU8QApWVB5d+dqaklSYu6UExtA9SoUaPaonfZuQYoBVC6Tf5dA1SdZeWWU1NtdxskLkAAAhCAAAQgAAEIQAACEIAABAaAAAaoAYBOlIOCAAaohouxLgPURx99FGmfJzMwqVkC+ho+y7333nvR5Zdf3vrz7XWkJaeMPB1lsEpzTz75ZMLvpZde6vXqyswaxHzqqacSMssMGNuRi4Odj+HDh9u32861JJbt/5prrmnz060X7FkK9kC18iMjnPTE5+ylv9Jmr/nCca27CIRuB7QspW24kfE2ayadaMjYZBt8pXtZOuW2CVntjOTPmjUruuGGG1p/WYYt+bUNPjJoa0lAu27rfp4LydROj2Z5uq6IAer++++P82AboOouK7ecmmi7XT78hgAEIAABCEAAAhCAAAQgAAEIDBQBDFADRZ54e50ABqiGS7AuA5SWibMHVjXYWtW5A45ZA8PuzBvtbeJzrsysQUwtT2XnSTMpOnHa/8mWo32espwGuG3/N954Y5b3rrpnG6A0k8vdM0ezR3wOA5SPSu9dC90OuEthah+4Is6diZOmd5LltglZ7UyRuG0/9913X1yXX3nllcg1amfNCjRyQjL95JNPIs021J/PGFzEACVDk5FhGwPrLiu3nJpou00ZcIQABCAAAQhAAAIQgAAEIAABCAw0AQxQA10CxN+rBDBANVxydRmgbrrppnigVcaTEMuouQOOWQPDL7zwQiJ+3/JSQu3KzBrEdPczSptVpeX0tKeV+XNnRej+8ccfH6fvtNNOy5zF4e4Zk7eUYcMqlBmdbYCaMGFC255b0o0ZM2a0ycAA1YakJy+Ebgc0+882xr788suFuLhGm6FDh0ZffvmlN6zbJqS1MzLY6M82ungF/uei6v3ZZ5/dSr/i12/XGK0ZoHkuNNOs+IoYoNLC111WbjmFaLvT8sJ1CEAAAhCAAAQgAAEIQAACEIBAtxHAANVtJUJ6eoUABqiGS6ouA5SWxrIHirW8XlXnDjimDQwrnnvuuScRf5rRxpWZNYgpA4qdp7T4NShu+/PtIXPllVcm/EyaNMmLR4PUV111VcKv9j7pFWcboLRUl9zNN9+cyI9mRSmftsMAZdPo3fPQ7cBJJ52U0J2i7co777yTCKf6qdk/Pue2Cb56PmfOnFieZigWcfZSmnfffXcc5IILLohlaXnBvH2kQjONE+I5qWKAqrus3HIK0XZ7EHAJAhCAAAQgAAEIQAACEIAABCDQlQQwQHVlsZCoHiCAAarhQqrDAKUG0DbA6Nw1MHSSTXfA0TcwLLma2XDxxRcn0vD88897o3Rlpg1iarBay/iZfGmgePbs2V6Zisv403H06NFt/twlAjWo7M7mUD7GjBmTK6tNeBddsA1Q48aNa6VMsz5OPvnkRL4effTRRKr12zDM2q8nEYgfXUUgdDvw2WefxTphdCPPWGOAaJaSCWOOWjrO59w2wdfOaMk5I6eoAUrtgC+MvSyf7mfpe2imvvzb1zo1QDVRVm45hWi77bxzDgEIQAACEIAABCAAAQhAAAIQ6GYCGKC6uXRIWzcTwADVcOnUYYD68MMP44FWDaiecsopQXLlDjj6BobV+GqPJDPQq6NmGOi6z7kyfYOYn376aeQuJ3XXXXf5xLWuFTFAyaNtZDHpveSSSyKl4YYbbohOP/30OB8nnHBC9PDDD6cuG5aamAG+YRugxo4dG6dGeTF51lE6Yu9B89hjj8X3swbkY4GcdB2B0O2AlrS0dUZG4DKG7RNPPDERPs1w5LYJqouadag/hdHeRrasNDl2gagNMWHOPfdc+1Y0c+bMRLq0xF6aC800LR5zvVMDVBNl5ZZTiLbb5JsjBCAAAQhAAAIQgAAEIAABCECg2wlggOr2EiJ93UoAA1TDJVOHAUoDtfZAsfY9CeHcAUcNDGsGhJbh0v5MmpXgznzSTBstl5XmXJnDhw+PNDtJ8p566qnWUn72klcyBMmQkjXwXdQApTRp/yObVdq58qpZBb3mbAOUvQyiZqScd955ibzfcsstcfaeeOKJ+B4GqBhLT52EbgdceaorZdwZZ5wR65TCakk8n3PbhLQ6aa4XMUCpLTH+zVKUJm61JbaxWW1W2r5SLoNQbatJi3vs1AD1/9u7E5g5ijvv4+TlCkKAQIDYiCAIIkGQIAIogSRKYFlxLAli2WjJEhBHljMQ2GxCLkKCOQw+sTE2AQzG4ANjG4NjzGGDjY8YsA0Ym9tgsHEwl8H4RJh+9Zvd6lTXU93TM3083c98W3o08/R0V1d9qrr6mfo/1e3mU2VvZUlTV2495dF3t5JHtkUAAQQQQAABBBBAAAEEEECgOwUIQHWnPseuswABqJJrr4gAlAZkzWCrXvMaJHUHHO1j+N4rGKUASNLSSpqaWRV32y77GLo1n4Jh5kfBLN+iY99yyy2hlWZ0/OUvf2k8I0l51+92uQYNGhRoBkSdlrgAlMpgPxPHlFMBOS32gD0BqDrV+D/ymnc/4M4UUjC4lcUNarz55pve3d0+Qc8yUh+mHzcNtds0Aajhw4eH57LvuVX33Xdf+LnSfO2117x5y9vUexBrZbsBqDLqyq0n04f4XtP23VbReYsAAggggAACCCCAAAIIIIBApQUIQFW6eshchQUIQJVcOUUEoNzbL2kGUR6Lb8BRg8O9evVq/OgZTSNGjAgmTZrUmMWU5phumn379g00WDxs2LBAaduDmQoKJc2mSnM8s82aNWsisx769+/fmA1lPterZnfZA9fKi27fFTc7wt63Ku/tANQjjzzSJVvu7RLlrhkhCxcuDO0JQHVhq8WKvPuBVatWhW3CnJe6tV2aReeMG9CNO5fdPsG91afOXc22MXloFoBSwMlsqwCzb1myZEm4jbZ98MEHfZsFeZt6D2KtbDcAVUZdufVUVt9t8fAWAQQQQAABBBBAAAEEEEAAgW4TIADVbfQcuOYCBKBKrsAiAlDuYK8CRJ9//nnmkrkDju7AcDsHcNO0nyOycePGLs9+UqDIflZRO8fUPnfddVdkwNnM/HHTUxBKQSczgK1XzQ6qy2I
HoB5++OEu2daMLjfQp+c/Pfvss2GZCUB1YavFirz7Aff5RzoXPvjgg1QWmrlon0N6r3PLt7h9gq+fWbRoUZheswCUbtlpH1uBcvdHt92ztxkwYIC3z8zb1Fd+e127Aagy6sqtp7L6btuH9wgggAACCCCAAAIIIIAAAgh0lwABqO6S57h1FyAAVXINFhGAUhH03+j2gGqaW9c1K7o74OgbGG6Whvu5m6Y9iKltFYRS0Mkuy/jx491kWvpdz3KyZ2Pcdtttifvbz0NSPkaNGpW4fZU+tANQDz30kDdrs2bNivhef/31wVNPPRWuIwDlZavFyjz7Afe80bmgW72lWex2qP10+764oLjbJ/j6mfXr1wd33nln48d3Sz2TJ83mc/sPuy9Jeq98+JY8TX3p2+vaDUCVUVduPZXRd9s2vEcAAQQQQAABBBBAAAEEEECgOwUIQHWnPseuswABqJJrr6gAlG5XZw+uvvjii5lL5g44+gaGWz2Im6Y7iKn07NkOpkwa0G53eeuttyI2DzzwQGJS7nNfdPuvuiz2wH9cAEqzOtxZXnrelbEmAFWX2u6az7z7gaFDh4btQu1Dgco0i9qQaU961QzEuMXtE7L0M6+++mp43JtvvjlQ2nE/kydPDrdVHmfOnOnNYt6m3oP838p2A1Davei6cuupjL47yYrPEEAAAQQQQAABBBBAAAEEEChTgABUmdocqycJEIAquTaLCkBplpA94Pvoo482LZlmJKxduzaIe66LO+CYZWDYZMZN0zeIqXzdcsstkfJoEDhuBoVJO+7VDWhNmDAhbtPGes22sC11e666LGkCUCrLK6+8EimjXV4CUHWp7a75zLsf0PPd7LahZ6SlWdzb4M2bNy92N7dPyNLP2OWfM2dO7DH1gX2uqIxxz4uy09R2efStcRnLEoAquq7ceiqj745zYj0CCCCAAAIIIIAAAggggAACZQsQgCpbnOP1FAECUCXXZFEBqKVLl0YGivv16xdopkvS8vrrrzf2ufrqq73PdnEHHLMMDJt8uGn6BjG17WuvvRYpjwZ+Fy9ebJJp6dVNa9y4cYn7r1y5MnLstIPuiYmW9KE9qB43A8pkZezYsZFymkADASgjVL/XvPsBX6BSzxtKWnQbPAVtTXvSa9Kzo9w+od1+RoFj3erPHDfpmMq/+kf3WVC+suVtmmSXJQBVdF259VRG351kxWcIIIAAAggggAACCCCAAAIIlClAAKpMbY7VkwQIQJVcm0UFoFQM3XLKDL7qVTN/khYFYsz2uk2du7z99tvh59ruvvvuczdp+Xc3zYkTJ8amMWLEiMjxb7jhhkDPOnEXDRor2GJ+NBBrL3oelimnXvXMIz1rKm5ZuHBhZPt2B8Tj0i9yvR1smzJlSuKh5KLgo22j9wSgEtkq/2Ge/YCCSQMHDoy0kWYzgJ5//vnI9nGBCgPp9gnt9jOa8WTa8pAhQ0zyia+jR48O99G+CgD5ljxNfembdbNnz47kp5Vzsei6cuspj77blJtXBBBAAAEEEEAAAQQQQAABBKouQACq6jVE/qoqQACq5JopMgDl3mruuuuuCxRk8C3uf8v7AlBmhpQZ1L377rt9SbW0zk1zzJgxsfu7z2JSPqZPn95le3fA23eLvdtvvz0ysDtjxowu6WjFmjVrAgW6TJn1qv/8r8uyZMmSMO9JA8SmPHKwy6r3rQx6m3R4rY5A3v2A3aZMW5k/f763wJp1pH7HbKf369at825rVrp9Qjv9jALT9qyrtGkomGbyqte42/DlbWrK7r66+Zk7d667SeLvRdaVW0959N2JheFDBBBAAAEEEEAAAQQQQAABBCokQACqQpVBVmolQACq5OoqMgClW0ppUNAeUO3bt2/j1nXmdnwaDH7yySeDXr16RbbzBaDcwVDNllGAJsviptmnT59g06ZNsUnas7RULuX7pZdeimyfJgClIJIsbJupU6eGg+PyWb58eZdZZNqmTssDDzwQllGBNF0ckxZ9PmjQoHAf+RCAShKr/md59wMqsc4D+9y58sorAwUvdV7p2WxqR2o3Op/Ndtdcc03jVprNxNw+Qf2M71Z4Selotp85rl579+7tnS3ppuH2l9r32WefdTdr3K7P3TZL39rlAEHQMNTMLbscuv2n6bt9+/jWFVVXbj3l0Xf78s86BBBAAAEEEEAAAQQQQAABBKooQACqirVCnuogQACq5FoqMgClomgw2DerRYEbDcrag5v2ezsA9cknnwSaQWB/bt7r2VLTpk1rWS0pzcGDB8c+30kzuDSQbY6vV5VFgam1a9c28pEmAKUNNajt3kpL6Wkg1b0VnQbYNZArzzosq1evDu64446Ik8p20003dQnYueVRQM/2JQDlCtXv9zz6AbfUegabPbvJtBmdOzpfzO96VT+hZ6klLUl9gm6Tmeb8U0B91KhRkWObfPTv3z946qmnvFl49dVXA3dWpNlP/YsCue5ShKk5hmaU3Xjjjd5yqM9SH9fKkmddJdVT1r67lTKxLQIIIIAAAggggAACCCCAAALdKUAAqjv1OXadBQhAlVx7RQegTHF0i71777030AChGVh1XzWYfOeddwb6z3Z7ZpOCGe6Asr1v2uermLzotVmaykPcYj/bxeRD+TOzJNIGoJS+ZlvpNn4jR45sPAvKpGdeZaJn0Oj2f3VaNBPFlMF9nTlzZtOimEF8ub7xxhtNt2eDeghk6Qd8JdR5PGnSpGDYsGGNQLDaix0g1voFCxakmn3UrE9QO242++edd96JbffaX/2bb3niiScS91M/ELfkbarjaKaTe97av48fPz4uO7Hr86qrZvWUpe+OzTwfIIAAAggggAACCCCAAAIIIFAxAQJQFasQslMbAQJQJVdVWQEou1gbNmwINMNp2bJljVtmaVaRZg7UZXaPXZa83yuIpYDL22+/3Qhobd68Oe9D1CI9XUQ106HZ83pqURgy6RXIux9QcGjVqlWN58y9//77TW/36M1UzVfmbVoUB3VVlCzpIoAAAggggAACCCCAAAIIdIoAAahOqWnKmbcAAai8RZuk1x0BqCZZ4mMEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBGAECUDEwrEagiQABqCZAeX9MACpvUdJDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSKEyAAVZwtKfdsAQJQJdcvAaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggA
ACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVf/EUccEZx66qnBs88+yw8GtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2g4m1g4cKFwcEHHxxcdNFFJY8mczgE6i1AAKrk+tMMqMMOOyzYYost+MGANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA1UvA1stdVWjTo65ZRTSh5N5nAI1FuAAFTJ9XfSSScFZ555ZtC7d29+MKAN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoAxVvA9ddd11w/PHHB5dddlnJo8kcDoF6CxCAKrn+eAZUyeAcDgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyCDAM6Ay4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEerzA2rVrA/2sX7++x5eVAiJQBQHOuSrUAnlAAAEEEEAAAQQQKFOAAFSZ2hyrJwkQgCq5NrsrALVp06bg9ddfD1544YXG64cffhh8/vnnJZdh2H03AAAgAElEQVS+eof77LPPgnfffTf4+9//Xr3MkSMEchL49NNPg5dffjl49dVXG219w4YNOaVc/WQ6ueyqHdX74sWLGz9r1qypfoW1mUNdW7fYYovg//2//9fo09tMpja7ffTRR8Hbb7/dOJ83b95cm3wXndGe/LdO1eo86znXqX1zp5bbnPudck0y5e2016r1U1Xx59pUlZogHwgggEB2AQJQ2Q1JoTMFCECVXO9lBqBeeuml4MILLwy+/OUvB1/4whcag3MaoDM/ystxxx0X/OlPfwr0hdgsCsbstttuTX/22Wef4JBDDgl+9rOfBbfeemuwcuVKk0SX11bTPP3004OhQ4cGb7zxRpe0sq5YtGhRcM455wSHHXZYsPXWWwdbbbVVw2T33XcPTj755GDixIlZD1Hq/ipHXH0deeSR3ryovuL2GT58uHcfVtZL4L777gt+8YtfBIcffnijnZvzXq/qDw488MDgv/7rv4Lp06dHCvbf//3fsW1jjz32iGxrflEfEteerrjiCrNZY7A8bjt3/b777hsceuihwU9/+tNgyJAhLfUF7ZR93bp1wV577RVbDjd/Sb/vueeeLZfZ9Kd59n0rVqyI9P39+/cP8xX3prsdklynTp0al+3ADIarffv+oaDVa1Cz61qr6eVRrzNnzgz+7d/+Ldhxxx0b1yxz7dpyyy2DAw44IBgwYEDwwQcfxBrpA/3zidpakvPee+8dHHTQQcEJJ5wQ/PnPfw4mT54cfPzxx4npmg+L7D/MMdzXdv7WKaqdu3nL+nsedZ62rca1CfXFvqXZOefbp52+ueg21YpPu9eldspdVBtNW16uSc2/ByVdk3zt37eu1fpodm3SMVpNM+v1KY9+imvT/34PL+q897W9LOvyqPO07bTVa1OWcrEvAgggkEaAAFQaJbZBoKsAAaiuJoWuKSMApT9ezzjjjMjA47bbbtsIFu2yyy5hAMoekF66dGlYbu3/hz/8IdA+9jbN3m+zzTaNwSrff2O3m6b+m/2iiy6KBMjCjLb4ZuPGjUGvXr0a/yFvyrLddtsFP/jBD4L9998/Utbf/e53gWZH1WG56qqrAg1AmjKZVw0i3n777d4iPPfcc8HRRx/dZZ+vfe1rwcJnFnr3YWU9BDS76bzzzovU7c4779wIOJkBa9NG9PqrX/0qUrBHH300OOKIIyL7azulobbmW6ZMmRJ885vf7LLPd77znUBfUs3Sbj+g46svUF6TzsssZdd/7douWd7L2SztljmPvk8BJ7sc3/3ud022Yl+r5mDnf9SoUbH5bjYY3m49xF3X2k2vnXrVLF1dp2wLpfONb3wjDEaZz5TfsWPHxjop37///e9bvr7vuuuuwejRo5vOnC6y/3ALpbK0+7dOUe3czWO7v+dd5+38TWfalF59S7Nzzt4nS99cdJtq91yWS7PrUpZyF9VG2y1vO32X3Qb0vpOuSW7Z435vtz7irk06TrtptlrHefdTXJuCoKjzPq79tbo+7zov4trUapnYHgEEEGhVgABUq2Jsj8D/Cvi/VaJTmEDRAajVq1cHmvViBg50vJtvvjlQJ2kW3Y5J/8VpttGrHYAy2911112RbS6//PJAQRylpf9aevrppxszrPSFxU6rT58+Jokur26av/nNbxrP7dCX9FdeeaUx+0jBHzfNSy65pEtara74j//4jzCfCjxp0NwOlj300EPBF7/4xXCbc889t9VDdNv2Y8aMCfNt6uLuu+9OzI9ux2a21asGOHl+SiJZ5T/UF9dvfetbYb1qlpNmB5hFwRvNLLTPLzcApW11nmvA2W4fGgRMWvQ8EHsf9TH2zEp7X7cf+O1vf9v40q3j6jYl6l/mzJkTnH/++V2Cq7/+9a/tpML3WcvufunX4P68efMa/ZP6iXHjxkU8Zs2a1eg/tN9jjz3WmD1lvOwAlMmgW+ai+z7dYvWrX/1qJM+a+fbOO++YLHlfy3ZQUFO+9o/a6apVqwJdS4ypXrMEoExh3XrIel1z08u7XpctWxapR81o1owkXevNot+13ra66aabzMfeV10f7O01U1Hnnq7vurXfk08+Gfzxj39szJSyt9PfF0uWLPGmaVYW3X/oOFn/1im6nRuLdl6LqnO3rfrOPQ1cP/PMM8H3v//9SPvwlSNtAErWWa9LZbQp1yfrdSlruYtuo2558+673DbTadckt/zNfnfrI+u1Scdz08yzjovqp7g2Rf8ZKu+/RZu1w6TPi6pzt51mvTYllYHPEEAAgTwECEDloUganShAAKrkWi86AKVb6pnBIg2Czp0711vC999/P7BnQ/kCULpPu0lLr1dffbU3LQ1M29tpcDtuFo2bpm7v4
1t0Gzx7kFzpNxsA96Vj1mmAzs7j/PnzzUeRV/3nuNlOx9dzs+qy6D/BTd71es011yRmXQP8Znvd3kBtgqXeAgMHDgzrVDOWdEsT36IglKl7XwBK+/zoRz8Kt9G2Gphstpx00knhPpqFFbe4/UBc36L93cEI9WsKTrhL1rLbg32aUWgH7nSse++9NyybPGbPnh3Jgn0++QJQbpmL7vsUPDN1bL+OHDkykm/3l7Idkupeefv5z38eliOPAJRbD3HHT3tdc9PLs14ViLNnFu6www5d2qWpP12r9LmpawUbFUiIW9x8xznonxLOOuusMF2lv9NOOzW91V+R/YfKlPVvnaLbeZx7s/VVqHPlUYFquz358p02AJW1bzbHLrpNpT0nlJ8016Ws5S66jbrlzbPvMnVmv3baNckue5r3bn3E9clpr006pptmXnVchX6Ka9P/3lK/1b9F07RF3zZVqHPlK821yZd/1iGAAAJ5ChCAylOTtDpJgABUybVdZADq/vvvjwwS6bkpScu1114bbu8LQOn5IWYwS69xX4Z0DHuQUNvqi7dvcdOM+zKkffXff/bx9ZyZdhb9Z/2XvvSlMK0f/vCHscnoD2w9B8YcV8/QqcuyfPnyQLfkMHlXgDEpaHD22WeH29btuVd1qZOy86kZT6b+dU//uOWTTz4Jn5kTF4A65ZRTwrSUZprltNNOC/f5n//5n9hd3H4gqW9RIrp1nCmXXjXjyF2ylt0e7NO54S7NAlDa/thjj23k0xeAcstcdN9n+uR/+qd/isxi+fGPf+wWLfJ72Q7N6l7PATR1n0cAyq2HpOMbQ3N833XNTS/Per3tttvCsse1e7vy3DZ61FFH2R9H3rv5TnLQjnoOj3HQ6wUXXBBJz/2lyP4jj791im7nrkfa36tU57o1kqlzX/7TBqCy9s3m2EW2KR2j1XOi2XUpa7mLbqNuefPsu0yd2a+mP+2Ua5Jd9jTv3fpI6pONpTk/fdcmHdNNM686rlI/xbUp+s9Qqvekv0XTtEXfNlWq82bXJl/+WYcAAgjkKUAAKk9N0uokgXSjip0kUnBZiwpAKXCiZ/6YLyP6D+Wk4IOKqVkExxxzTPAv//IvwcqVK7uUXP9lZNLTa9KXIXeGkW8AVwdw00z6MvTXv/41cvwTTzyxSx7TrLAHMFUO/edq0nLhhReGx916660btyVK2r5Kn+k2SnadxQUhFXA0M8z0n+Qs9RdQoFWBD1P/upVl0jJixIhG4Hj8+PHezf7zP/8zTEttJc1iz8K77LLLYndx+4GkvkWJXHrppWFeVL577rknknYeZbcH+/TAeHdxB/fd/zrV9rfeemsjn74AlFvmIvs+9f1mcFi3UdNz9Ey7UJA66dpQtkOzuperbhv0k5/8JHjiiSfcagl/N+VVOXULx7jFrYek46e5rrnp5VWvui3tjjvuGNbbV77ylbgihev1Zcj+ZwtZPPXUU+Hn9hs330kO2k+32LT/OUNpx82w1vZF9R95/a1TdDu3rdO+r1qd6za9Ou/041vSnHN59M3m2EW1KZN+q+dE0nUpj3IX3Ubd8ubVdxlP+7UTr0l2+dO8d+sjqU9Oc23SMd0086jjqvVTXJu6BqCS/hZN0xbdbapW582uTW7++R0BBBDIW4AAVN6ipNcpAgSgSq7pogJQGggyA4x61eyhrIv7xSXpy5BuV2Uf/3vf+5738G6aSV+G3MDRoYce6k2z2cqpU6dG8jZjxozEXa6//vrI9rrndV0WPRfDHrTUbdg028VdTj311EYZFVhwbzXmbsvv9RDQAI99Diq4nGUpcrDP7QeS+haV4eKLL46Uzb2FZh5l1zNGBgwY0PhZs2ZNF7o0ASg9nFlpDBo0qMv+bpmL7Pvs51UtWrSo8bw7u22oT4xbynZoVvdx+XTXpxkM1z5uPSQdP811zU0vr3qdPn16pM3rnwvSLL169Yrs17t3b+9ubr6THEwC7m3H9JyguKWo/iOvv3WKbudxLknrq1jnSflNc87l0TebPBTVpkz6rZ4TSdelPMpddBt1y5tX32U87ddOvCbZ5U/z3q2PpD45zbVJx3TTzKOOq9hPcW2KtrCkv0WjW6b7rYp1ni7nbIUAAggUI0AAqhhXUu35AgSgSq7jogJQ+lJhDzA+8sgjmUvmfnFJ+jK0YMGCyPHzmAGlgVO7THH/hdusoJoFZKfjDl67+48ePTqyfdJ/ebv7VuH3wYMHR/J/3XXXRbJlD+BpZgFLzxHQw4pNW1dw8fHHH2+7cEUO9rXSt6gAGug25dKsRA3MuUueZXfT1u9pAlC+/cw6t8xJA0FZ+z7zfJyvf/3rjcPrP4TlZgx1+552l7wdkq4rreQxzWC40nPrIen4aa5rbnp51atmEJr60muz65ax0rXf3i8uSOTmO8nBpP3ee+8FeraUSV99jO8fHLR9Uf1HEX/rmPLZr1nbuZ1W2vdVrPOkvKc95/Lqm4tqU6aMrZ4Tza5LeZXb5M99zdpG3fLm1Xe5+dTvnXhN8jkkrXPrI6lPTnNt0rHcNPOo4yr2U1ybklpW9s+qWOfZS0UKCCCAQPsCBKDat2PPzhYgAFVy/RcVgDrssMPCQSENDn3wwQeZS+Z+cUn6MnTLLbdEjn/jjTd6j++mmfRlaOzYsZE0446vL2Jf/OIXwx/3P74nTJgQSWfKlCnevJmVM2fOjGyvL/l1WnSrhD333DMsg9qcaQ+6Lcy3v/3txme77rproFu8sPQcgUsuuSSsd/UDqvu4W3A1K3WRg31uPxB3biuP7n9ennXWWd6s51l23wHKHOxL2/f58vnmm2+GQYJ+/fqFm+jZdyZwsNtuuwXqC9pZ8nbw1f3nn38eHHDAAcH2228f+G516Mt32sHwVtpemuuam14e1zSVzx68Vr19+OGHvmJ3Wadb65p61qtuuehb3Hz76sG3n2Y32+nH3RaxqP6jiL91fOXM2s59aTZbV4U618wdPaNHs6f1XMmkJe05l1ffXFSbMmVs5ZxIc13Kq9wmf+5r1jbqljevvsvNZ6dek1yHZr+79ZHUJ6e5Nul4bpp51HEV+imfJdcmn0o+66pQ561cm/IpNakggAAC8QIEoOJt+ASBJAECUEk6BXymL+x65lLeyw477BAOCu2yyy65JO9+cUn6MqQZT2kGpdw0474MaXD0hBNOCNPUIJrvOVUqqBswcmf1vPLKK2E6yqMboHKx5syZE9k+Lpjm7lel393bUeiBrVrGjBkTlm3kyJFVyjJ5yUFAAz0KLNrnomYp6HlQaQewTTaKHOxz+4G4vuWxxx5rDIaa8hxyyCGxzy/Ks+zGwH4ta7Cvlb7Pzp95b99CVLcxNYs7YPX000+bj1p6zdvBV/cKapg6V/+eZkk7GJ627emYaa5rbnp5XNN0bLs8OodbCRjafw/IUQM37uLm21cP7j76/YILLgjrRmnr1lq+paj+wy5bXn/r+PKftZ370my2rgp1/uCDD4b12+z2w3Z+k567llffXFSbMvWS9pxIe13Kq9wmf+5r1jbqljevvsvNZ6dek1yHZr+79ZHUJ6e5Nul4
bpp51LF93nNt+ketcm36h4X9znbJ8vdIK9cm+/i8RwABBIoQIABVhCppdoIAAaiSa1l/uOcdgPr000/DAQP9cbf//vvnUir3i0vcl6G//OUvgb6E6Nj6OfbYYwM9KNy3uGnGfRmyv7Aqzb59+/qSa6xrFoBSXrbddtswf/rvf80SilsUwDJl0evw4cPjNq3sepXZ/o81BfD00FbzgPrDDz+8pQHNyhaUjHURePLJJyO3WzNtWX1P//79vYPRXRIp8BZaOpbbD/zrv/5rcM899zR+dL79/ve/D44++ujwPNRtv84999xAszuSlrzK7jtGWYN9rfR9bj41c2jfffdtuLm3XnOfqXfllVe6u6f6PW8H5dPUvWZ+9enTJ1BgwbTb7gpApb2uuW05j2vapk2bwvLLQUHlVpavfOUrkf1XrFjRZXc333HXd3fHP/7xj5G0Fdj0LUUEC4r6W8eX/6zt3Jdm0rruqHO1Vd3OVD8KUi5cuDDsP9Tu8gpAqdx59M1FtCm7TtxzIo/rUh7ltvNov8/aRt3y5tF32fnT+06+JrkWzX536yOuT057bdLx3DSz1nF39FNxDq4n1yZXJJ/fu6POs16b8ik5qSCAAALxAgSg4m34BIEkAQJQSToFfFZEAEr/eWoG6/T63e9+N5ecu19czJcA/Sf2W2+91bg11i9+8YvIsRXc0X5xi5vmhRde2NheAx16xsX48eMjz3vRrfUGDhwYl1xjfbMAlDY69dRTI/nU7+5/heu/Vc8444zIdvK8//77E49f1Q+nTp0aKYv97I52b8tW1bKSr6iA6tcEIuy+Qe8VhJw8eXJ0B89vRQ72uf2Am0f399NPPz1YvXq1J5ddV+VR9q6p5v8MqDz6Pjeful2dsbvtttvcjxu3tTOfK0DdzpL3oKfJT9xrWQGodq9rblvOo17da/pee+3VUlUdeuihYTuQ65IlS7rs7+bbXN+7bOisuOGGGyJpDxgwwNnif38tov9wXfL6W8dXgKzt3Jdm0jq3bGXUedw5Z9bnGYBS2bP2zUW0KbtO3HPCOMS9pr0uZS23nUf7fdY26pY3j77Lzp/ed/I1ybVo9rtbH6ZPbvfapOO5aWat4+7op4xDMz+uTc2E2vu8O+o8rs8165tdm9orKXshgAAC6QUIQKW3YksEbAECULZGCe+LCEDpDzHzR5ledR/sPBb3i4vS3n333RvPlLCPZ95rZtczzzyTeGhfmmZ/9/Wiiy5KDGaZAykYpi8e5sc3YLlmzZrg4IMPjjjpOUkKRF166aWNWWn2LC47L3PnzjWHqt3rkUceGSmzynXOOefUrhxkuHUBBVg1c1B9jt2ezXs9VDhpKXKwz+0HNAvmiiuuaPz88pe/DM4888xgv/32i+Rbwei423255chadjc9/Z73YJ+pB99r2r7Pzef555/fMFNfpodyu8vll18eMbVv0eduG/d73g4/+MEPwrrXfzCfdtppkWuMrz/35c1u5xowiVvctif/LNc1X3q+OtW6tPXqzlbbZ5994orjXX/EEUdE6nnp0qVdtnPznXaQTwEnu3y63atvKaL/KOpvHV/+s7ZzX5pJ67qjznXOfPnLX2786LlPdr3qfbNBvrTnnF3uLH1zEW3Kzpt7TuR5XcpSbjuP9vusbdQtr1v/9u9p+y47f3rfydck16LZ7776yHJt0vF8adr1ar9PU8fd0U9xbYq2nKznfTS15r91R51nvTY1LxVbIIAAAtkECEBl82PvzhUgAFVy3euPqrxvwacvtvaXiHb/s92l8H1x0a3sttxyy2CrrbZqzLA4/vjjg4svvrjxn63u/r7f3TQ16KEv+fqPbXswQ+X59a9/Haxfv96XTFvr9EBtDbbYVvZ7zd669dZbG8e11+sZUnVd5s2bFynvdtttlyqoV9fyku+uAgpE6LaSOmftdq33SbMLixzsc/uBuAGGGTNmBNtvv30k363cErPdsndVzD8AlXfft3bt2kBBOtXrSSed5CtC5L/Rtd3QoUO92yWtzDr4kabuFWg0bbWMAFSW65pbnjzq9d133w3LLwflr5Vljz32iOzve/6bm++4c9A9rnubo1mzZrmbNH4vov8o6m8dXwGytnNfmknrqlDnaid28LKIAJQxaKdvLqJNmfzoNe05keW61E657Tza77O2Ube8efRddv46/ZpkW6R579aH6fur9J2rCv1UnCXXpjiZbOurUOetXpuylZi9EUAAgeYCBKCaG7EFAj4BAlA+lQLXFRGAUnZ33HHHcMBJQZY8FvfLUNoBqqRju2na9yNXsOlnP/tZWA59+VKwLu55UknHiftM97KeMGFC8Nvf/jb46U9/2viP9CFDhgSPPvpooC/LWn70ox+Fedhpp51yPX5cvopcbw9GnnjiiUUeirQrLKBAqmZHmoF981rmALLhcfuBpL7l5ZdfbjwDx+RXfai+ELeytFp2X9p5D/bl3feNHj06rNu99947+PGPf9zlx+7b5HnMMcf4ipq4Lm8HX93rj/qdd965UZ6iA1C+4ycCOB+6bTmPetUzCk17N68KvqRZ9Dwf+3armg2n57C4i5vvtA4///nPI3nTrWt9S1HBgiL+1vHlP2s796WZtK4qda7gimlzRQagjEUrfXNRbcrkpZVzIut1qZVym/y5r1nbqFvePPouO4+dfk2yLdK8d+sjbZ+clLabZtY6rko/5Ssz1yafSvZ1VanzVq5N2UtNCggggECyAAGoZB8+RSBOgABUnExB64sKQB1yyCHhoIEGnPQHY9bF/eJS9Jch5VcPGf/6178elkUDIbq1XlmLgl277LJLePxzzz23rEMXdhz9V6sZUCIAVRhzLRLWIPbhhx8etge1i9/97nfevLcz2KegrmlrSbf4a7VvOe+888J0lb5mKra6tFJ2X9pFDvbpeFn7PgXrjX3aV10rfLNjfOU36/J2iLuu6Lapypv+wE+z2DNoW7kFX9zx0xxT27ht2R7g0+ft1qtdHtWnbjWbZnn99dcj7UDPfPMtbr7TOnzrW98K01f7iaufovqPIv7W8flkbee+NJutq0Kdq70qUH3cccc1nS1t5zfpnGtW7rR9c1FtyuSv1XMi63UpbblN/tzXrG3ULW9efZfJZ6dfk4xD2le3PtL2yUnpu2nmUcf2ec+16R/6XJv+YWHe5fn3SCvXJnN8XhFAAIGiBAhAFSVLuj1dgABUyTWsP9zzvgWfinDyySeHg0L6QtDsWUxpiu1+cSnjy5Dy9fjjj0fKottKZRncSFNWs820adMix37ggQfMR7V9JQBV26orJOOaPbTNNtuE7fzb3/629zh6VpgJZGhGhW8WhbujZt2YfXr37u1+HP7eat9yxx13hOkqfT0nqp0lbdl9aRc92Kdjttv32ffoP/bYY4Pnn38+9udPf/pTxFLlamXJ2yGP64rybw+KJV0vWm17zWzc9NwBPu3fTr2qHs25pFddm9IsTzzxRGQ/zSr2LW6+09SDAoL27Kp9993Xl3RjXVH9RxF/6/gKkbWd+9Jstq6KdZ6U57TnXFIa5rM0fXNRbcrkodVzIo/
rUppym/y5r1nbqFvevPou5ZNrkltbzX936yNNn9wsVTfNPOq4iv0U16ZmLSHb51Ws82wlYm8EEEAgmwABqGx+7N25AgSgSq77ogJQel6SPVh1++23Zy6Z+8WlrC9Dyrhm6tjl0a0VyljOOOOM8Lh6JpT+Q7XuCwGoutdg/vk/8sgjw3au5wv4lquuuircRudi0qC+2f+oo44K97nzzjvN6i6vrfYtU6dODdNVXrLMTExT9i4ZDvJ/BpRvIEjHbafvu/baa0OfMWPG+LIfrnvppZfCbWV5+umnh5+leZP3oGea64puvaLnVc2ZMyc2i2kHw1tte7EH/L8P3PTyqtcrrrgiUk8XXHBBs6w0Ph8xYkRkP91y1re4+U5TD+PHj4+k3bdvX1/SjXVF9R9F/K3jK0TWdu5Ls9m6KtZ5Up7TnnNJadifNeubi2pTJg+tnhN5XZealdvkz33N2kbd8ubVdymfXJPc2mr+u1sfafrkZqm6aeZRx1Xsp7g2NWsJ2T6vYp1nKxF7I4AAAtkECEBl82PvzhUgAFVy3RcVgNJ/vNsBG32hbbYsX7688WyVvfbaK3j//fe7bO5+cSnzy9Bzzz0XKY9u9fPiiy92yWOeK/72t79F/rt77NixeSbfbWkRgOo2+lIPrFmPml15wgknBHrWWdKiwWzTXxx44IHeTd2B7Kefftq7nVm5efPmYPfddw/T1XPV4pZW+xYNdJv86lUDkfaSd9nttM37sgb7Wu375K5nPskl7S311OcbT12TmrUXY6DXvB3SXFcOOuigRn6vvPJKOyuR92kHw1tte5GDeH5x04sb4Gu1Xhc+szCsI9XVDjvskKqe7P9U3mqrrWJvsejmu1k9qI189atfDfO06667hs9N9LAERfUfRfyt48t/1nbuS7PZuqrVebP8pjnn8uybi2pTppytnhNJ16U8y23y575mbaNuefPqu7gmuTWV7ne3Ppr1yWlSddPMo46r1k9xbZqdpilk2qZqdZ6pMOyMAAII5CBAACoHRJLoSAECUCVXe1EBKBVDA89mUFGvS5YsSSydbpFltl+6dGmXbd9+++3wc22nWzdlXdw0L7/88tgk9Z/5Jn961XMJfMtHH30UjBw5MvxZvHixb7PEdbp9w3777RceTzM50txyLDHRinxoP9Pq+OOPr0iuyEbeAjNnzgzb76RJkxKT/+EPfxhue9ZZZ3m3dW8bdsstt3i3Myvt42+99dbBe++9Zz7q8ur2A80GWuxb+6kv0H+72ot97DzKbqdt3tsPVFce5NPK4pY5j75Px3/kkUfCukx7e1c7AKmyPPzww6mLkrdDs7p/+eWXw/INHDgwNp9pBsO1s1sPWa9rbnp51avyeuihh4ZlVz3FzWYyKO7stqSyufluVg/XX399JC+akZa0FNl/5P23jq8cWdu5L80068qs86T2kSavac65PPvmItuUytvqOZF0Xcqz3HF1kbWNulxYpccAACAASURBVOXNq+/imhRXY8nr3frIen762nRedVxmP8W1Kdpusp730dTS/1ZmnefR9tOXjC0RQACB1gUIQLVuxh4ISIAAVMntoMgA1GOPPRYZINIDUTds2OAt4YoVKyLPzPAFoNzBrIsvvtibVisr3TQvueSS2N2VJ90eTANv5sf3TCb7i762+81vfhObpu+DV199Nfja174WHkOBqLQPfPelV6V1+k9UzYowfnqAPEvPFLDPg6OPPjrQH0a+RQ8Ftp8BddNNN/k2Cz777LPgG9/4Rth2NLvpk08+8W6rdvbv//7v4bbNbpHn9gNJAwz9+/cP01U7Vj42btwYyUfeZY8k/n+/DBs2LJKPiRMn+jaLXeeWOY++Twc75phjwnyl7aNV56ZP0OuZZ54Zm2/3g7wdkupex77ooovCvA4fPtzNTvh7msFwbezWQ1qz8EDOGze9vOpVh5k9e3ZkVu52220X6D+Rfcv69esD+yHoe+65Z+ItZF955ZXQVW0grh50bmswyG4v3/zmN2P/tjB5K7L/yPtvHZNn+zVrO7fTauV9kXXutlWdW1mWNOdcnn1zkW1KDq5P3DmhbZtdl/Isd1wdZW2jbnnz6ru4JsXVWPJ6tz6yXpt0NDfNvOq4yH6Ka1NyO8l63ienHv9pkXXuttOs16b4UvAJAgggkI8AAah8HEml8wQIQJVc50UGoDRj57zzzosMFOmLoAac7UW3BvnSl74U2c4XgNKzROxBJwVmNACQZXHT/M53vpOY3C9/+ctIHvbYY4/Gw43tnewv+spvmgCUZjzplnvnn39+ZDD+4IMPDnTLip6yuA+kVzAqzbN8ekr5O6kc7nmgWQJq5/bywgsvBApCmvNa7+OC1NpPt9Ez2+pVt/fS4IC9KCh1yimnhNttv/32gWatJC1uP6DbwtgzDpWn+fPnR4IPOr7arzv7Sccpouxu/t3/dm/1uXRumfPo+8aNGxe6y0cBwzSLG1DQrdrSzhzN2yFukFcBlSFDhkTKp/LGLWkGw7WvWw9Zr2tuennUq11GzTz6whe+EDrsvPPOwbRp0yLnrc43+/lrO+20U9DslpkKoNrntl0PmlWsNG+88cZA10R7u9NOOy3x1nt23ovqP/L+W8fOs3mftZ2bdNp5LarO3ba6zz77ZPqbLs05l3ffXFSbUj25PlmuS3mX29eOsrZRt7x59F1ck3w1lW6dWx9Zr02+Np1HHZvSFNVPcW0ywv7XrOe9P9V0a4uqc7ftZ702pSsNWyGAAALtCxCAat+OPTtbgABUyfVfZADKFOXmm2+OzHrRLCJNndft19zAkxlYsgNQml2gBwhrEMt8bl51i6cpU6aYQ6V+TUrz1FNPDfRcB9+iZ1NpwM0cX68qw6BBg8JZEO4X/WYBqO9973uR9JSmnq/Rq1evYPXq1b5s1G7dxx9/3DCyb79nDDXb67bbbss08FQ7kA7IsHseqL41cK36PvHEExuzI+zZcGoby5YtayrTr1+/QLfUM+1Haei5UbolpvoVO031bwsWLIhNM6kfUPrbbrttJCBsjqlXBcv0HB3fUlTZdax58+YFZ5xxRlh+O08XXnhhsGjRIl+WwnVJZW6371uzZk3Qp0+fLjNElbeTTz45mDNnTnh8+41me/7hD3/w7qd+ddSoUfbmkfdFOajO1RbtH/XHtrN5P3Xq1Eie7F+aDYYn1UM717Wk9NqtV6XpW6ZPnx7stttuERPNYvznf/7nRoDIPgf33XffQPUctyi4q+un7/quQKR+jLf9qnoaPHhwJFAcdwx7fZ79h52u3mf9W8dNT79nbee+NNtZl2edJ7XV73//+96gfpo8NzvnlEYRfXPebSrJR+dAO9elIspt6iRrG00qb7t9F9ckUzutvybVRzvXJuUgKc1261hpukue/RTXpq63wre9s573dlpZ3udZ50ntNMu1KUv52BcBBBBII0AAKo0S2yDQVYAAVFeTQteUEYBSATQAefbZZwcHHHBA5L+n7QElBXb0LBj9l6cCFmZR0Mf+j2t7H70//PDDzaapX5ulqYBX3DJ27NguA2IacDOzO9wv+s0CUO
ecc06Ynh6qrv+yj7u1WFyeqr5eM13cerN/1yCj78tk1ctF/uIFNLPtmmuuaQSb7MFlncv2+aw2r+e3aMAo7aIAtf7r0h5wtAe9dbsv/WekzvOkpVk/YNqojqN+Rn2YnlWn5zolzb4ssuw/+clPEs+lX/3qV0lFbpjY/qaM5rWdvs9+LpJJx36Ne66Xe+sWex+933///WPLUrSDmxff77NmzYrNn902fbM8m7W9Vq9rzdJrp17NNc1XyFWrVgW6fZICsSYgbM5BtS/9Z7tmH+iB7EmLgs4+W7NOgS3NYtx7770bs6ouvfTSYPLkyZG/EZLS932WV//hSzvL3zq+9LK2c1+a7a7Lq86btdUdd9yxrSw2O+eUaFF9c55tqpmPOTdauS4VVW6ZZm2jzcrbTt/FNamtU6ixU7P6aPXapESbpdlOHcddn/Lqp7g2/eN7uK81ZT3vfWm2uy6vOm/WTtu9NrVbLvZDAAEE0goQgEorxXYIRAUIQEU9Cv9NX2DTPig+r8ysXbu2cTuruXPnNmYa6Y/8uC8SeR2zyuko2KTbE9lBtyrnl7wh0I7ABx980JgJoVvZLVmypHHryqxBR936Ss+PU1+iNHU7vnXr1rWTvUL3KaLshWaYxDMLpBkMz3yQiiSgLz16ZsKLL77YOK+bBZ0qku3G7Kki+4+e/LdOFeu8nXMu776Za1JVzm7ygUDQePYo16auLYFrU1cT1iCAAAJ1FiAAVefaI+/dKUAAqmT97ghAlVxEDocAAggggECpAu0MhpeaQQ7WowX0RVTPldSMrFZ+6nzbX865Ht2kKVwLAp14/rfAw6bdLED77OYK4PAIINDjBAhA9bgqpUAlCRCAKgnaHIYAlJHgFQEEEEAAgXwEGAzPx5FU2hPQ8+nMrdpaeZ0xY0Z7B6zAXpxzFagEslAJgU48/ysBTyZSCdA+UzGxEQIIIJBagABUaio2RCAiQAAqwlH8LwSgijfmCAgggAACnSXAYHhn1XfVStuJA3ycc1VrheSnuwQ68fzvLmuO27oA7bN1M/ZAAAEEkgQIQCXp8BkC8QIEoOJtCvmEAFQhrCSKAAIIINDBAgyGd3DlV6DomzdvDl5//fWWf9avX1+B3LeXBc659tzYq+cJdOL53/NqseeWiPbZc+uWkiGAQPcIEIDqHneOWn8BAlAl1yEBqJLBORwCCCCAQI8XYDC8x1cxBayYAOdcxSqE7CCAAAIIIIAAAggULkAAqnBiDtBDBQhAlVyxBKBKBudwCCCAAAI9XoDB8B5fxRSwYgKccxWrELKDAAIIIIAAAgggULgAAajCiTlADxUgAFVyxRKAKhmcwyGAAAII9HiBMWPGBCNHjmz8rFu3rseXlwIi0N0CnHPdXQMcHwEEEEAAAQQQQKBsAQJQZYtzvJ4iQACq5JokAFUyOIdDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQyCBCAyoDHrh0tQACq5OonAFUyOIdDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQyCBCAyoDHrh0tQACq5Oo/8cQTg7PPPjsYNmwYPxjQBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtIGKt4GhQ4cGxx13XHDZZZeVPJrM4RCotwABqJLrTzOgDjvssGCLLbbgBwPaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANlDxNrDVVls16uiUU04peTSZwyFQbwECUCXX31FHHRWcfvrpwaxZs/jBgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6ANVLwNzJgxIzjiiCOCSy+9tOTRZA6HQL0FCECVXH88A6pkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkEOAZUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz93RGAWrduXbBy5crgxRdfDJ5//vnG6xtvvBF8+OGHweeff95F4LPPPgu0T7OfDRs2BJs3b+6yv29F2jQ3btzozZMvzTzXvfvuu8GSJUsyH/vjjz8O/v73vwevv/56sH79+jyzmJhWnK/qKGnRxTOunvUZS30EursNqC/46KOPgmXLljX6m1dffTXQeaVzOs2i7eLaYqvrff1amjywDQIIIIAAAggggAACCCCAAAIIIOATIADlU2EdAs0FCEA1N8p1i7ICUBqMfvbZZ4OhQ4cGf/7zn2N/rrvuuuCuu+4K7ECFglVJ+9ifXXnllcFNN90U3H///cErr7wSa9VqmoMHDw4mTpyYS1AoNlNB0Cj3ww8/HPTq1atR5pdeeilpc+9nslYaN9xwQyONq666KvSTzYwZMzIHtrwHtlYq4GXXi3nfu3fvYM2aNdaW0bcLn1no3U/7P/PMM9GN+a3SAt3VBhRonTVrVtC3b9+wLdnngPqIsWPHBgp6xy2rV68Oz0HTdrO82v1Z3DFZjwACCCCAAAIIIIAAAggggAACCKQVIACVVortEIgKEICKehT+WxkBqPfffz8YNGhQOBisgVwNCCsYcv3110fWm0Het956Kyy7Zhs8+OCDbQ0I33777d6ZP1nSVBBNM4vyXDRD4rnnnosMmsui1QDUp59+GowaNSpi2r9//6Bfv36RdZMmTUo9W6ydcirI9MADDwQa7Dd1al4VyItb1FbGjRvXZZ/x48cH+oylPgLd0QbefPPNQEFs09b0qjao/kfBT3u93itQ7Zs1+cEHH3TZ1t23ld8JQNWn3ZJTBBBAAAEEEEAAAQQQQAABBOogQACqDrVEHqsoQACq5FopOgCl27/ZMxGuvvrq4Iknngjs26kp2OQGSOwAlCHRrAZ70FdBFKWjGT8KCOlWWwpUu
EGP0aNHx874cdOcMGFCsGnTpkCBnFWrVgWLFi0K/vrXv0aOqzzceuutkTKYPLbzKqPhw4d3OYaO89prr6VOUg52OnJfvnx5o+z6zC3Hfffdlzrtdjd89NFHveVSXSUtqjNT16pTlvoKlNUGNONR/YtpNwpu6xafCjZrUZBXszDt/kjbajaUe4s8NwCl2YRLly5t3L5P286dOzc8jtJYvHhxIw0dS0HjPn36RD4nAFXf9kvOEUAAAQQQQAABBBBAAAEEEKiiAAGoKtYKeaqDAAGokmupyACUgjgDBgwIB2IVGIqb0aNbXtmDx74AlG6ZZQaX9Tp58mSvlgaZ7e30XrOLfIubpmZE+JannnqqS5oahM6y6FZhmtnlBszsvOs2ZmmX+fPnR/KowXd7URDKvgWijqtn5BS5qL7t8pj3w4YNawQO446t2wSabefNmxe3GetrIFBGG1i7dm1k5pP6EgVffYtuv2n3NWpnuvWjvbgBKM2sspe//e1vYfvU/npem7288MILkc8JQNk6vEcAAQQQQAABBBBAAAEEEEAAgawCBKCyCrJ/pwoQgCq55osMQE2fPj0yCKuZEEmLPUPHF4Byn9sUF4DSMcaMGRM59pQpU7yHdtOMC0BpZ80YMkERvWq2VLvLhx9+GJklceONNzZmhmlmlX2MpOfU2MfWrIyBAweG+9522232x+F7BeLs9DUbrcgl7jlAyoMG8eOW2bNnh/l0gwNx+7C+mgJltAHd7tFu15q5mLS4ASTNwNy4cWO4ix2AGjlyZLjevHH3dwNQ2k63GDV5IgBl5HhFAAEEEEAAAQQQQAABBBBAAIE8BAhA5aFIGp0oQACq5FovKgD1ySefNJ7zZAZgNeNAsxSSlvfeey+44447Gj+6/Z276FZ1Jj29KmAVt7izgXRrOt/ippkUgFqwYEHk+BpgbnfR7DA9m0kuCrZodpKWOXPmRI6RNgC1YsWKyH7Tpk3zZk23CLNnXOnZOO7tx7w7trnSnmFmD8ir/vRMHrUT32Lf4ixu9ppvP9ZVT6DoNqBgrt2m9Qwocz7FaSjY5D4Tym5ndgBKsx/dJU0A6vHHHw/PSQJQriC/I4AAAggggAACCCCAAAIIIIBAFgECUFn02LeTBQhAlVz7RQWgnnnmmXDwVcGGPJ435AaLkgJQ7qwLPcPFt7hpJgWgdBsuOwCmWRNZFh1bg+f28uSTT0aOkTYAZd+yTnmMu9WhjqXb39nlUOCvqMUOPuh5W+6xJ06c6D00ASgvSy1XFt0G3Ntj6tlwaRZ31pTdFnV7TAWG9eMLkqYJQOncNmk0C4ilyS/bIIAAAggggAACCCCAAAIIIIAAAkaAAJSR4BWB1gQIQLXmlXnrogJQ9957byTIkcdt1NxgUVIAavHixZHj+26jJTw3zaQAlPssm7hZVZs3bw70TCvzY9/aq1mFtRuAuueeeyLlTXq207hx4yLbpg1yNcu773M7+KDb/dm/myDYsmXLuuxKAKoLSW1X2HVeRBsYNWpUpD2//PLLqazcIHnfvn1TzwZME4BKlQk2QgABBBBAAAEEEEAAAQQQQAABBNoQIADVBhq7IBAEAQGokptBUQEo3QbLBBj0mscsGzdYlBSAeuSRRyLHj3v+lJtmUgBKg+d2meKOrwFwe7ukZ1W51d1uAErPkLKPqYtQ3CILe1vf82vi9m11vR180C3JtIwfPz5yfM2KUtDOXghA2Rr1fl90G7jmmmsi7SltX/POO+9E9tM5oZlPaRYCUGmU2AYBBBBAAAEEEEAAAQQQQAABBIoSIABVlCzp9nQBAlAl13ARASh1gHaAQ+/dAEM7xXSDRXEBID3T6NZbb43k4fnnn/ce0k0zLgClgWndxs+US8+ceffdd71p6lhmO71OmDDBu51vZbsBKHsQXnlLWtzBcx2zqMUOPkyfPr1xmI8//ji49tprI0bz5s2LZEG/G0P72TyRjfilFgJFtgE9S820E/Oq55ylWXRbPLOPeXVviRmXjnsOFRnEjcsD6xFAAAEEEEAAAQQQQAABBBBAoHMFCEB1bt1T8mwCBKCy+bW8dxEBKN3+zQzo6rV3794t58u3gxss8gWg1Pm6t5i7+eabA633LW6avgDUhg0bAvc2X1OnTvUl11hXdgBKATfbW7PPkpb58+dHtn/ssceSNs/0mR18mDZtWpjWnDlzInlQG7GftWMH4ghAhWy1fFNkG9BtLu22r+BrK8Huq6++OrL/ihUrUhkTgErFxEYIIIAAAggggAACCCCAAAIIIFCQAAGogmBJtscLEIAquYqLCECtXLkyMqg7cODAXErlBov03CPNdtAtt/R8ptmzZ3eZ+aSZNu+//37s8d00dSu7119/vZHeggULAt3Kz76d4FVXXRUokJI0yF12AGrTpk0R72YzoJ599tnI9lOmTIn1yfqBHXywb4Oo2SdDhgyJ5GPixInh4Z5++unwMwJQIUst3xTZBty+RsGoVpZ+/fqF7Uz7Ll26NNXuBKBSMbERAggggAACCCCAAAIIIIAAAggUJEAAqiBYku3xAgSgSq7iIgJQmkVgz0ooKgBlH8P3Xrfh0+B30uIGoHzpmHWaWZXmFl26NZ+CYeZHwbG0iz3zR8dtln+lu3Hjxoj39ddfn3i4RYsWRba3A0OJO7bxYVzwQUlpsN/Ymtdly5Y1jqLgn1lHAKoN+ArtUmQbWL58edhO1F4UIG5lcQNQb775ZqrdCUClYmIjBBBAAAEEEEAAAQQQQAABBBAoSIAAVEGwJNvjBQhAlVzFRQSg3NtiNbslXNoi+4JFevZRr169Gj96RtOIESOCSZMmNWYxpUnXTbNv377B8OHDg2HDhgX2c5U0uK2AVtJsqjTHa7ZNOwEopamBdxOwkUfS4t6CT4PpRS128EGzydzFvV2i3DW7bOHChWF5CEC5avX6vcg2sGrVqrCdmPavW2amWTQLT7MFzX56TXt+E4BKI8w2CCCAAAIIIIAAAggggAACCCBQlAABqKJkSbenCxCAKrmGiwhAuQO7CojoOUVZFzdY5HsGVKvHcNO0nwGlmUXus5/69+8feVZRq8drtn27ASjNMrMH0nURilvc5y/ploFFLXbw4eGHH+5yGD0vzA30ycC+TSABqC5stVpRZBtwnzenc+CDDz5I5aPZjPY5o/e6pWeahQBUGiW2QQABBBBAAAEEEEAAAQQQQACBogQIQBUlS7o9XYAAVMk1XEQASkXQTCJ7cDfNreuaFd0NFhUdgFJ+FIRS0Mkuy/jx45tlte3P2w1AaXaWnce1a9fG5mH69OmRbfXMq6IWO/jw0EMPeQ8za9asSH50C8GnnnoqXEcAystWm5VFtoFPP/20yywm3ZYvzWLnS+eOZhGmDZQTgEojzDYIIIAAAggggAACCCCAAAIIIFCUAAGoomRJt6cLEIAquYaLCkC5AZEXX3wxc8m6IwClTLvPTNJgtQavi1jaDUCNHj06DNgof7KKW6ZMmRLZVrcxK2qxB/njAlCaMTd48OBIngYNGhT+TgCqqNopJ92i28DQoUPDtqK2r+BlmkXtStub
n7vuuivNbo1tCEClpmJDBBBAAAEEEEAAAQQQQAABBBAoQIAAVAGoJNkRAgSgSq7mogJQmiVkBnb1+uijjzYtmWYfaOZO3DNcuisApXzdcsstkfIowJZ2tkTTglsbtBuAmjx5ciR/Tz/9tJVq9K0dHNTt7zSLpKglTfBBx37llVci+bfbDgGoomqnnHSLbgN65pvdXvQMtzTLtGnTIvvNmzcvzW6NbQhApaZiQwQQQAABBBBAAAEEEEAAAQQQKECAAFQBqCTZEQIEoEqu5qICUEuXLo0M7vbr1y/QTJekRbeC00Dy1Vdf7X2OS3cFoJTn1157LVIe5XPx4sVJxWnrs3YDUG4AZ8KECd7j65aCV155ZViW++67z7tdXivTBh90vLFjx4b5sgMKBKDyqo3uSafoNuC2fbUdPRsqadm8eXMwYMCASHtL++wopUsAKkmXzxBAAAEEEEAAAQQQQAABBBBAoGgBAlBFC5N+TxUgAFVyzRYVgFIxbr755sgAr25ll7SMGzcu3P6tt97qsunbb78dfq5B5jyCJ26aEydO7HJcs2LEiBGR499www3e2UMa/Nbt5syPBsjTLjNnzowcI+2+Cu7Zz93Sc6t8AT93sF7BgSIXO3CnW/8lLXpOmIKPdvBJ7wlAJalV/7Oi24CCSQMHDoy0m2YzLp9//vnI9vfff39LkLNnz47sTxttiY+NEUAAAQQQQAABBBBAAAEEEEAgowABqIyA7N6xAgSgSq76IgNQ7rOTrrvuukBBBt/iBkZ8ASgzQ8oEKO6++25fUi2tc9McM2ZM7P4rVqyIDDorH9OnT++yvTu4HTcbqcuOQRBoW1M+vbZyW7BHHnkksu/jjz8eOYQCUsOGDQu30XOWiriNoH3QJUuWhMdLCu6ZfWbMmBFubxwY3Dc69Xwtow3YxzDtZv78+V4wzXRSX2S20/t169Z5t41bqQCX2V+vc+fOjduU9QgggAACCCCAAAIIIIAAAggggEDuAgSgciclwQ4RIABVckUXGYBSwEMBHXugVrN0dOs6MztHA7+67VyvXr0i2/kCUO6gr2bLrFmzJpOYm2afPn2CTZs2xaZpz9JSuZTvl156KbJ9OwEolUOD2Pbt8ZS+bl34wgsvxD4Xyz6wgnva3ngrbwre6IL0ySefBA888ED4mew0M6XoxT6mZowpL0mLPldgzJRBrwSgksSq/1lZbWDq1KmRdqNzSQFN3bpTgVa1LbUlneOmfekZaK2eB0pnyJAhYRpKS8+dMn1a9WuEHCKAAAIIIIAAAggggAACCCCAQN0FND6xxRZbBKecckrdi0L+EShVgABUqdxBUGQASkXRwK9vVouCI717944M4ppBYb3aASgFTzTbyf7cvFfAZdq0aS2rJaU5ePDg2Oc7KcijQWtzfL2qLApMrV27tpGPtAGolStXNm4d5qZnp22/10wNBWf0HKe45f333+9yOzINxNuBLR2v6FvvrV69OrjjjjsiTirLTTfd1CVg55ZFAT273ASgXKF6/N4dbUDBbXt2k2lHCrja54DWq+/QOdjKollVN954Y6R9mmPolqM691kQQAABBBBAAAEEEEAAAQQQQACBogUIQBUtTPo9VYAAVMk1W3QAyhRHt9i79957AwV3zICt+6qB4zvvvDPQrCR7ZpMGst3BY3tfzUZodWmWpvIQt8yZM6dLGZQ/PftJS9oA1JtvvtklHbtcce83bNgQl7XGepVNz8dyZxLpOTm6xd/y5csTUea3hwAADT9JREFU98/jQ806icu/nnPVbBk1alRjf7kWHSxrlhc+b0+gu9qA2v+kSZMat5tUcFhtyA7y6jaUCxYs8D6/rVlJNdMprl1r/fjx45slwecIIIAAAggggAACCCCAAAIIIIBAZgECUJkJSaBDBQhAlVzxZQWg7GIpgKIZTsuWLWvcHkuzinQrvqKfR2TnoVPea6bXu+++GwbH6lJuXUSV91afzVOX8pHP5gJ5tAHdFm/VqlWNZ89pdqDSZEEAAQQQQAABBBBAAAEEEEAAAQTqLkAAqu41SP67S4AAVMny3RGAKrmIHA4BBBBAAAEEEEAAAQQQQAABBBBAAAEEEOgxAgSgekxVUpCSBQhAlQxOAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydW/zTbbBHvssUdw0EEH8YMBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuoeBs45JBDgq233jo48MADSx5N5nAI1FuAAFTJ9aeOarfddgv22WcffjCgDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAMVbwP77bdfsOWWWxKAKnksncPVX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIB
A/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRf4P8DkWz/LdtuXjMAAAAASUVORK5CYII=) 
![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAyAAAALtCAYAAADQcQC2AAAgAElEQVR4AexdBVgVSxvG7u7uwri/3d3YjaJYtN2tmNiBrdgtdnd3N3Z3d3vf/3m/c2Y5IGIrcOd7Hji7szOzs+/uzny9VtCkEdAIBFkEateyRb169YPs+PTAgj4Ci5csQ7q06XHw4KGgP1g9Qo2ARkAjoBH4TyBg9Z+4Sn2R/ykE3rx9i1evX+P9hw/Gdf+Lf/Hq1Wtj/9Onf/H27Vtjnxvv37/Hhw8fjTLWsWxjHPhDG1u2bEOWLP9g774DcsYPHz7g7dt3xtlN4zVdI499+vdfueaPHz8ZdYLixsePvhgHxfGFtDG9ePkKGdJnRMeOnUPapenr0QhoBDQCGoFgioAWQILpjdPD/hyBp8+eo1bNOqhSpRpq1KyDseMn4vqNm3j58hXu3ruP9h06o2aN2mjSqCmaNHZA4yaOyJc3P+bNmy+dXbx0GXZ29qhdszaaNG6Khg2bwNmlGYoULopOnbp8fsIASlauWoOC+Qsif9782L5jVwA1vr2oefMWiBMrDl6aBactW7ehbDkbNLRvBNvatqhfvyGOHD0uHY4dNwG5c+VFUwdnnLtw6dtP8hdqrl67HmVKl0WXLt2++ey8f0eOma71mxsFUPHff//FSM8xOHv2nJ+j16/fgHvvfnj16pWf8m/Z8Tl3HufOX5CqJ06cRPr0GTF+/IRvafrVOsdPnsLDh4++Wu9rFfr06YeKFavg46egLZx+7Tr0cY2ARkAjoBEIGQhoASRk3Mf//FVs2rwV8ePGR+GCRbBjx04cOHAQ5y9chIOjM3LmyIm7d+8Jk1jXti6srKwwefIUHDh4CNn/+R+srEKBbiq0IqxevRZhrKykzrQZM3Hq9Bk0b95S9h0dXQxhwD/gZGw3b92B0qXLI1uWbEicICGSJE6KEydP+a/6zfu1atVGfTt7o/7Dhw+xectWpEqeAmGsQmHDxs14/vyFHPfxOQuPgYOxbv2Gv2q1MQYbyMbNW7cRLUpUZEiXIZBavodo0Rk9diJu3LjlW/iDWx8+fsSo0eNw+cpVPz20bNEKkSNFwSV/5X4qBbDDaxk30Qvv3pssUXzO8uUrgIkTJwdQ+/uKKBS1bN0eb9+9/76GAdResngpEiVMjD1ma1oAVXSRRkAjoBHQCGgE/hgCWgD5Y1DrE/0uBHzOXUDixEmQOmVqfPDnfnT8+EmUL18R12/ckNOPHDFKhImjx07IPrXXFEgyW2cxhpcmZWqkSJYCl69dN8ooUESLHBWnfc5LGd27Nm3agiNHjsk+hRdaPJ4+fSr73Xv0EuvFyZOnjT6+Z4PuVGXKlMOQIcM+a1a8WAmkS5Pus/InT58ZZTt37cb2HTvx9PkLLF6yFOPHT8TzFy+N45YbZPC3bNmKtWvXS/HLly8xc+Zs3L5926g2Y8YszJ49x9hXGwsXemPGjJnYt28/uH3o8FGxOnl7LxLhje4/a9asxarVa3Dl6jXVDHlz50HRwkWNfW7QwjN82Ajwnii6dfsOOnbsgqxZ/sGwYSNw7pzJ0sDjK1euxpAhQ8W6perfvHkLCxYsFOGSZXv27sPGjZvUYUyc5IUtW7cb+9x48+4dypW1keegRw93P8csd7ymTBXBlUIH6cLFS6hevSYKFiiEZctX4v6Dh9i1cxcOHDiMd+/eiwC8dNlyERqPHDU9J7t278GYMWMNq8brN2/g6TkGc+bO84PPsWPHUSBfAaRKkQrz5i/E69dv5JyPnzzByJGemDV7jrjcqfGtX78R8xcsxI2bJiFt587dOH3mjDqMFStWyvV5ey82yvSGRkAjoBHQCGgE/hYCWgD5W8jr8/4yBOgeRSFi7fqNAfZ56/ZdvHtv0iL37t1X6irXpfETJsp+4yYO0vbR4ydImSw5UqVMjSvXTULLiVNnpA7du16/McWN0CWI58yUMbMft5aHjx+jT9/+SBg/IX6G2Xv/4SPKlbMRZtXyol6/eSeMO4UkS4GDcRVnfM5KHAvrd+3aU8Zna2uH/h6DRWApVqwEbpiZZ8s+P336hIYNGyNWjNhYuXodZsyai7BhImDc2PFSbfCQ4bCxqYQSJUphgMdgo2mbNu2QPXsOcQWjq1jp0uWwddsOXLh4GUkSJYF1Rms8fPwUCvP27TsabbP/kx1FChcx9hcsXISGjZqgatXqSJc2A7Zs2yHHjh0/idQp0yBCmLAoUaI0Dh46LOVjxoxD1izZxN2unp097ty7L+WXLl9BlszZxBq2bsMmdO3WAwkTJsHzFyZLUfeevbFp8xbjvNw4edoHQ0eMRpyYsT4TilTFTp27wsamoowhY6YsePz4CbZt34XIESIiVrQYaNeuI27fuYcmTRwEdwpjp874iNWBz4kSbGbMnIVKlari3r37uH79Ojp27gpXt+bImycf8uYtgDt37sopZ86eiwhhwiFNylTo6d5XrFoUrugOWKpkadSuUw9t2nUwLHKjPMfIecuWKYeXr96gaNESKFK0uBo+tm3fiRjRYohwZhTqDY2ARkAjoBHQCPwlBLQA8peA/x2nvXz1Go6dOCUxAGfPXwT/aB1Q2z/9G1BfAZWZzy3n+9pxc10y+efOX8T3BiiTUa9ZoxbChwln+OEHhu2gQUMQNlRoYRQ7dOyCsKHCoEKFSgYzf+PWLWTOaI14sWKjTm1btGzREvXr24sWmgygotevX2PipMmYP38h3lsEVV+5el2yVpHprF2rDu7df6CafNcvr8umQiXMnTvPT7s3b99/UQA5feas4ZK1ecs2YUiLm5nQo8eOyz6vPyBatWYdEiVIiDJlyktcCTXwFy5cxNAhwxAzRmxpsn37DumDWnZaV3iNtGyQUqZIhfp2DYyuXV1cETN6THz8F3j1+o3UbdWylXE8x/+yo1iRYsZ+xgzW6GQOko4cPiIaNWpsHKterQYihY+Ig4ePStn8Bd7S3ySzmxMFtTy580qsDysMHz5Sjo8eMw7Pnr8QtzTeL+KzfsNmP88YLQtXr5kEzXq29RAmTBh4L1pqnJsbgwYPQZrUvhanESM9RQB58uw5UqdMhSyZs4qliXUXei+Rc/fvN0D6WLd+o+y3a9te9okFkxuQKHSFDhVWhAi61hFPxvKQXr5+g1w5c6N8ORvZ5z9agliHmFKgjhQhMgZbWMhy/JMdSRMnxb0Hj3D48BEsW7bCaHvj1h3ky1cQ06ZNN8p+ZoPJEE77nMOpM2dF4DTNNef9zD2BzjdfmBd8LOcOy+0v1P+eOcbPeNhfQH2eu4AvjsFyPBbbp3zO4ez5Czh/8RKOHDv5w+/8z9wP3VYjoBHQCAQ3BLQAEtzuWCDjpRaWbkjUpmbKkAnWmTLDOqPpjy5GmTNl/vyP5fIXwDF/9dmX/z54Dinz37+538/aWH/eB9snip8QuXPnEzeWQC7xs0P0j69cqYpoi0+f8fnsuP+C/v09pC6ZygULveG9aLGfmIkbN28jQ9r0yJA2HTp36YYE8eIL03fY7Grlv78v7ffq
1UfaTfaa8qUqgZYzVqFM6XKYPMnLTz1aQAoVKCSacUsLCLN30T1JBVGvXLUakSNEQjuz1YEB9hHCRvhiJqQlS1cgdszY8PAY5Od8RQoVQYwoUdGiRStUrlQViRIkwsJFS2Fn10Cu79yFi1I/beq0EtdBwYnk5OiMZImT4f3HT7h//wEihotgCBg8ztibokV8XbAYO7Fz916xpoQLEwatW7eVfvjPpnwFRI0UBbv37JMytgsXOiwOme9JC3OMDhlAUr/+AxApQkQcOnxE9tU/WiGGDR+pduX38OGj2LBxE968eQOPgUPkmpqarWGsQIsXmfrKFSv7accdni9NqtSwzmQNPjckuktRSBg8yNdSlDF9BmRMnxGPnjzFmzdvQbcrEl3dTpw4hTXrNooQHC1yFHER47F7Dx7in2z/M4S0t+/eIX++AsiaOSvM8gvix4mHKpWrSl/8l9U6C0oUKwHGI/knun7FihU7QDc6/3W/ZZ9B93y3iU0W66zmecbaNJeoOSGQX2Ne8DdvGPOJv7ZGfYvyLOa2xjHzvir3M+f5P4+aF9mfOmadRa7D/xxnuR/Q+BjLxDoF8hdAmlRpQVc9TRoBjYBGQCMQOAJaAAkcn2B1lBpoMj+J4sWXBZFMdLo0aeUvbao04F+61GmRzrwtZalZZionE/lZHXMZj0lb7qc29WXZp29fpj6kjupXnU/ObWpv9MUxpUqDODFjI/v/cvyQ9pDpRXndU6YGrN0lQ0Y3I1Kvnu4IbRUajC0IiMhIJk+SDOnSpsP7T//ixMnT0jcDyh88MsV3BNSOqX8tBQLWoTtMBwu3o4DafamMWvLy5SuI9t2yzrsPH1C4YGGQqfVPZGafPjPFgaxYuUqYcCWAnD13HrQsfCkV65w585AieUps3OTXPalUiVKIFT2GaOv37zsgrkOMJZkzdwEypM8AWjUYwM/MT2PNLlscV+NGTSR+gazwo8ePxYLRsUMnY8gUQCjcKPI5ew4DBw2B19RpIvQxKFwRBUzG3+zZu1+K8uXJi+hRoolljwVubs3kHqlMVD179kL8uAlw4pRvAgAKaIsWL8XBgyYXLrbjM+E5eixKlCwNB0cXNGnqhNQpUgpTTbcsEi0RSRImRuFCheGfrb989boh6N8yu07xGeSzOHTIUGnPf3QXowDVo2dvo0xtLF+xEmPGjkfvPv1EoJg02SSwPnz0GP9k9RXSaDnJlvUfFMhXUJoy6D1B3PioWKGS6kqeifLlyhv7lhtLly6XcS1Zssyy+Ie3eb/SpkmHhHHj+84fX5pjWM45w+KP77/a5/zDbTUnyDF/bSzLVD3fOYftOT/5n5d8+7Xs37Ttez41Dt86fsdqeR4Zh8U8yWPWGTOJ0iJWtOiC8ZDBvvf+hwHWDTUCGgGNQAhHQAsgIegGuzi7yQLYqFETPH/5CmRi6JdO/3jGQfCP+7fN21J2567ss+z2Hb91uO9bZq7H9uZ6Rp9G36Y6ql8yZZbnkv7M5zGd755pTLfvSmwCjytB4Xtuy+GjxxArZmy5dhXbodozsJnuSIpGjDC55yiGUZWrXzKZzDLFjFp0sSG1b9dB+q5RvZaqhnfv3olG/uw5UzpXxp9kts6KTZs2G3XsGzbBggXexv73btjVs0NlCw23al+4UBGEsrLCdQuXMMau0I3qhTnQnNmwIoQNj8qVTJp7piMmY9yr1+dMMPtdsXI1kiVN7gcrljM2hO3WrdugTo8P5oxPGzdvlQxhbs2aQ1lCVCUKw3FjxZHdW7duSx8DLawrFEDKlSknxxnQnymjNSqWr4BXb94iSoSI6Natu+pK3OVonVHCkceAQdIfA8xJDMpn0gAKOqSRI0dJvMO1Gzdln//oKuY1xa+Ayuf08NHjePz4MS5evIRnz55j3QaTy1SzZi2Mtr3cTdYsZh0j8Rl98fIlHj1+ity58ohVgu6DJMZaEK8J4yfKPv/dvHVH7hdd8iyJgeKsu379BqxatUa25803PS+0chTMX0jiPVQbh6ZOiBsnnuzS0sS2lumhs1hnRsUKFVV1P79MNZ0jey5c/wWZxNjx+/cfRIi/efsO7ty9J4kAjPngjum9NuYB9c5z3jDPKZyHuK3mCJkb5Jh5fjEfU3Xk16IfmYP8z0/+5yV/55K5z2L+Mp3TNA7Vn//z+Tmvxfl5beyP18954sr1m8ibO6/ckzFjTLFTfm6A3tEIaAQ0AhoBPwhoAcQPHMF7x8XFJIA0c2sevC/kB0a/afM2lChREokTJkafvv0wYuQouPfui2YtWuOMj498mNBr6nRxXyLj5uzsCgoulkQmvWu3nghtZYUwVqElKPq22VLSqFFTYS4KFiwCujMxdoD9ZMmSTbpgRqRIESIJo9mzV2+MGTcRm7b4zbZkea5v2abgkzhREiPQWLVh7ABjEhhsPWToMLne6TNnGxmQWG/16nWiIaf1YMq0GeIeQgvHseOff0uDFqJWrdvK9TRu7IA7d00B3ezn1p17KFSwCKJEjIwuXbth9tz5kk2LMQZVq1THmrXrwdiQtWvXYcMG32xTY8eZgvsZgD5p8lQTVtZZRHhlIHz0qNHFjWrXrj2SZpauK3RBGuE5VlyPqF1XLnWM5SDWrm6+QoEbA7fz5kez5i3Be6IET1q26KrE+j179QFdl0jEYPYc33iates2yDdfWrVqg2vXfbOdMWidbfnHgHKmOaabH4U+Cjn9BwzE8hWrJYsVcaOwz7rzzIJmN3P70qXLCmPOc9NaUbhwMfBclrR48TJpS/euXu59xU2tWrWa4qrFemVKlUX4sOGxcctWaXb06DGxUtG65OjkgkqVq+GqObPY3HkLpK/oUaKDcSf+qXOXrmjQwDels//jev/nEeD3f/gs0OKlSSOgEdAIaAQCR0ALIIHjE6yOurqaXFEcmjoGq3H/ysEyxSmZaWYtatO2PR48eCjdUzver/9AiXFgIG6Xrt2xycIywkpnfM6hZ093cSWaPNkL7Tt0Ar+vQSITyXSpdA06eMgUW8CUvpYxHsyu5O7eGxQE+UHCnyWmbs2Y0Vq+TeK/L37HonfvPujQsbMw4YePmAK0Vb2lS5dJJq5Zs+dhwsTJcHF2letTxy1/mYaXLliMnWAA9+UrvulyWY/xCIwNadrUEYwlIF27fgNJEyURhitq5KjyGztWbOzavVeO02Vo6tTpcHfvI+mJ586dj65du0tq2ivXrmP8+Emgq5QKYmf2rB49eknAOIODWVd9AZ4BzxS0GK9jSQMGDBQBgAH2iigI9uvXX6wgI0eNxosXpg8L3rx52wj+Zl3eH7qj0dKhLCmPHvEZ8cCkSV6SfYzCBDXjpGfPX6J7956C9f4Dh9TpJHVuL/feOHXalG556NDhcm7GmjAjlyK6wKlUvKqMwi3d1ugWyMxhtIT06zcAt26Z3AO3bt2Gnr3c/VilKGhRMGI5rV6KpkydJimbGeO00F+qXV47Ewnw2db0exBgsgN+fJQCyOjRY3/PSXSvGgGNgEYgBCGgBZAQdDOp1ecC6OjoHIKu6r99KdWq1kCpUmUDDCwODJm169bLh/X
Utz0Cq/sjxygcDBw0VL4fQgZ+7979qFS5qnxt/kf6021+HwIUTtKmzQAKQZp+DwKWAsgYLYD8HpB1rxoBjUCIQkALICHodlLLTQHEwcEpBF3Vf/tSrl27Lh/544cJv4fGjZ8gzwItCSo24nvaf61u+/adULNWHTx7/lwyb/G7KD3d+0lGqa+11cf/LAL8ojrjFTT9PgT8CCBjtAXk9yGte9YIaARCCgJaAAkpdxKQuAYRQP7DLlgh6Hb+8KXQpWrg4KGS5pYfz1PxCT/cYQANr169DsbF1KhRG7a29cDYBf8xNQE000UagRCJADPDFchncsHSMSAh8hbri9IIaAR+MQJaAPnFgP7N7pQL1n85BuRv4h9Uzs3gaPVBRwZhvzRnxvod42PcxMOHj/DkyZdTFP+O8+o+NQJBCQFtAQlKd0OPRSOgEQgOCGgBJDjcpW8co+GCpS0g34iYrqYR0AhoBH4eAZMAkk/cHpkIQ5NGQCOgEdAIBI6AFkACxydYHdUCSLC6XXqwGgGNQAhBQAsgIeRG6svQCGgE/hgCWgD5Y1D//hNpAeT3Y6zPoBHQCGgE/COgXbD8I6L3NQIaAY1A4AhoASRwfILVURcXcxYs7YIVrO6bHqxGQCMQvBHQAkjwvn969BoBjcCfR0ALIH8e8992Rm0B+W3Q6o41AhoBjcAXEfDrgqW/hP5FoPQBjYBGQCNgRkALICHoUdACSAi6mfpSNAIagWCDgBZAgs2t0gPVCGgEgggCWgAJIjfiVwxDCyC/AkXdh0ZAI6AR+D4EtADyfXjp2hoBjYBGQAsgIegZMAQQ/SX0EHRX9aVoBDQCQR0BLYAE9Tukx6cR0AgENQS0ABLU7shPjMfVtZnkoXdydP6JXnRTjYBGQCOgEfgeBLQA8j1o6boaAY2ARgDQAkgIegqUBcRRW0BC0F3Vl6IR0AgEdQS0ABLU75Aen0ZAIxDUENACSFC7Iz8xntq16ogFpHr1Gj/Ri26qEdAIaAQ0At+DgBZAvgctXVcjoBHQCGgLSIh6BqZMnY7qNWph8uSpIeq69MVoBDQCGoGgjMDTZ8+RKUNGUQANHjIsKA9Vj00joBHQCAQJBLQFJEjcBj0IjYBGQCOgEQiuCDx7/hw2NpUQK3YcTPbSCqDgeh/1uDUCGoE/h4AWQP4c1vpMGgGNgEZAIxACEfj48SPu3LuPq9eug+5YmjQCGgGNgEYgcAS0ABI4PvqoRkAjoBHQCGgENAIaAY2ARkAj8AsR0ALILwRTd6UR0AhoBDQCGgGNgEZAI6AR0AgEjoAWQALHRx/VCGgENAIaAY2ARkAjoBHQCGgEfiECWgD5hWDqrjQCGgGNgEZAI6AR0AhoBDQCGoHAEdACSOD46KMaAY2ARkAjoBHQCGgENAIaAY3AL0RACyC/EEzdlUZAI6AR0AhoBDQCGgGNgEZAIxA4AloACRwffVQjoBHQCGgENAIaAY2ARkAjoBH4hQhoAeQXgqm70ghoBDQCGgGNgEZAI6AR0AhoBAJHIMQLILdu38HsOfPw8OGjwJHQR4MMApO8pmLJshVBZjy/cyCr1qzDlq3bfucpglTfo8eMw6rVa//4mK5dv4FatWzRuHFT3Lx1+4vnv3v3HubN98a9e/e/WEcf+HYE7ty9hyHDRuL0mbPf3ug/UPPff//97Cr37T/4V+e9+QsXYcasuZ+N628UDB/hifUbNv2NU3/xnMuWrcTuPXu/ePxHDizwJuZzfqTpT7WZPGU6Fnov+ak+gnPjT//+iwXei3H06DG5jKdPn2Hm7Lm4cvV6cL6sYDf2HxZA3r17j9Zt2qNOnbpo3rwlnJxd4eDgjDp16sHZpRkuXLz818F4+eo1atW2RYYMmXD/wcPvGs+nT/+CzEiPnu7Yv//AN7ft2q0nqlWriU4dO8PevhF69+mPC5euoHPnrqhn1wB79u775r5+pKL34qWoUaMWOnTohKvXb/xIF7+sza3bd3H9xs3v7m/d+o2/HafvHtQvavDk6VPwuSTNW+CNOHHiY+y4Cb+o96DfzcpVq3Hg4KE/OtBTp06hfYfOGDVqNGrVqoMmDs54+uy5MYZLl6+ACxCpSpXq+N//cuHGzVvG8f/6xvMXL3H2/IVvguHN27d48uSpUZfbi5cux/Ubf3cuMgYUyMabt+8wfORo2Nk1QPt27eHq2gxOzm5wdWuBqlWro5d7HxAL/zR27AScPnPGf7Hss89GjZuiRInSmDN3gZTduXMPb9689VP/6LETSJ06HRydnP2U/8mdHTt3Y8uWv6MMOe1z1s87uWz5ChwxM4d/EoMvnWvZ8pWwsrLC7Dm/VkDbuWs3Nv8FzCnc7dq150uXG2j5iZNnPnt+A20QBA8OGz4K0aLFxMZNW2R0TR2ckCJFGpw64xMERxtyh/TDAgglyE2bt6BMmfKwta2HjRs3Y82adVi9eg0mTPTC/IWL/zpq799/wISJk1Crdl3c/U6N5vkLl1C9Rm2ZdNauXffN17J333507dZD2lWsUAkHDx2RRcvNrbmUTZ485Zv7+pGKDx89RoliJeRc+w/8WUbP/3i79eiNI0eP+y/+T+/PnDMfe/eZBNpTp8+geo06mDJ1+n8ak9998QsWeqN+g4bGafbvP4iXr14Z+8NGeOLkqdOyTwtN7Tr1cPHS31egGAP8yxu79+zDmLETv2kUe/buh9fUGd9UN6hV+vjpE46fOIV6de1k/mzQoJFo4bdt246ZM2fBtp49Dh0+6mfYS5cuh5VVaKxcvcZPOXfu3LmLuvXqo3fvvli+fAW8pkzDP//kwMxZc+DfAEJll6tbc3Tp2v2zfkJ6AXmJvv0H4e3bd0H2Us9fuIgC+Qti6rT/9lx9/eYtbNm2M8jep28dGPk0mwqVscFsZZs3f4HweydPB6xI+NZ+db3vQ+CHBRB1mlGjx2LS5Klq1/h98fIlTp0+jSVLl+HUqTP48OEDnj9/gZUrV/nRgF69eh2zZ8/Bjh1+H+rHZi3akSNHsWqV7+TOB2b9+o3GebjBhYN09uxZWSjUPsso0To6ueHy5StSh/8uXbqMCeMn4tgxk/nNOGCx8erVK5w7dx7VqtaAt/ciiyPA8eMnMW/eArwIQBvGij7nzssCNnz4SKPdjJmzET1qdCxctESuf+/e/cYxtXHx4iVMnz4D+/YfwPsPH1Uxdu3aLabfj5/+xb1790TwMw6aNz58/ATvRYvlOulyljpVWsPtYfv2HaCm5dVrk+ad2jZLDS8xnTNnLh5YuKmdP38B6zdsNOpxEb53/4H/02LVqlVYsmQpLl+5ahyju1u7dh0QJ1YcjPQcjQcW1qd16zaA2q3AiC4yjx8/MaqohWnevPnwOXvOKOfGx4+fMG36DLBfS3pmvjcnTpzEsWMmIejDx4+Cr//nh/dx9uy5WLFipTANqp+Hjx6BDMYrs8VClfN31arVWLp0mWURiNmKlav8lFnuHDx0GHnz5IeTkzOOHDE9e+59+mPtug24eesW5s9fiFev31g2wYmTJzF+/ARcvXrNT7n/nTNnfDBhwkT4+JhcXS5cuIjFS5
bixIlTePv2rfzt2r0Hhw4dMZqqZ2zRoiXYtm2HUc4Nda9PnjyN27fvyLFHjx7Dy2sqtvt7V2nR2LFjl5/2V65cxYwZM42yT+Z39MmTZ2JZVAeoMSdzx2v3r11+/MRklVi/YQO2bfc7PtVe/XJuWbx4KWbMmIXHT3yfne3bd8LFxU0sH/MXLMR9i2f42fPnGDN2PHLlzosBAwZKO1pDWrftIN0eP34ca9Z87i52+MhRTJwwCbzGL9EH8/Xu379f7oNlPVpf3r9/j6vXrvsRdKjQmeI19ZvcRfft2y+47T9w0M9coTDknLEmAMXJseMnMGnSZPB5sSRlEeJzyPlD0e7de1GmbDv8PO0AACAASURBVHnUqF4TXLQV8drnzp0HPjuKrl2/KcqosmXKgVbM1+b5hlZQS40/3bL4zh74ioKE8++4cRM+GyvPx/lh5szZ4DtlSc+ev5Bd4kM8/dPWrdswd958cC74EvG6qO2eMs2vIOVz9jz4XllSh/adpC6tJv6J1m/2c+War1tH587dMH3mbKj3wbLNtBmzMWXaTLHG8drUO0hBZsuWrdi0aQtumi1zZ8+dx7r1G/y8S5Z9cZwTJkzC8eMnLIuN7YULvWV9MwoA3H/wCI8eP7YswrJlKwJ8B1iJ7+zixb73n2Vq3b569SrmzJ2H1/7ms1OnTpvnMxMmdI1s0bI1ihUrgUWLl+LFS5Ni4NHjJ7j/wHe94bvq5TUF69at9zM+yx06tak5jXzHocOmue7Zs+eYOWv2Z14QL1++lPeNa5t/2r17D6ZPn+kHD2cXN6w2zwcLFy7Cw4d+vSo2b94ivMzRY8dBoUrRs+cmqxl5HK7FlsTrVfeU5Zy/16z1e40fPnyE6m3BgoWg4op06/ZteQdemjFj2cePH+WeBTY3sd7tO/cCXGPnz19g9C8nsfh38OAh5MyZG1SmHj58FJwH+T5R0Us6cfKUrMNcN0l8x8iXcPwzps80vCGI25w588D1xD+Rr+I6r65XHeecQeJ6/sAf7qoO12Ou4/ResSTye3x2rl33fQ95nPP8IvPzy3fNwcnNz3zCtXz/AZOykPyspl+PwE8LIN179sYUfxovTo4kvkixY8dFkkRJZf/w4SOIESMOPMeMk32fs2fRsLED6tatjzSp0mLBAm8pHzRoKIoWLYGp02egX/+ByJ0rryxYtEoM8BiMRAmTiADAykePn0DFipWFyRg5agxy5cqH1m3bSz/8t3PXHtg3bIrLV0wCCBm5Tp27oXSpMqhYqSqOHT9p1A1oo379hn4YKdbhAl2tWi3cvns3oCbYvGWrLDzu7r2N4+PGT0LcWHHQqXNX0L81YfyEMnmoClxIW7Vph5o1aiNF8lTo1Kmr8RI2a97StMjVq49xE72QKEFi9OzpLpMN23MxrmBTEdaZMmPYyDHiQpD9fzlxzrxYtu9gWiRbtGiJt+8/IlWqtGjWrKWcun9/D2lXu05d1K1nj4uXTDht2rIN8eMnRPKkyTFuwmSUK1cBObLnxIWLl6QdmQv6rtK9oGTJ0siYwdrQ7B87cQpJEiVB9MhRwH7ZJyfkXr16o0iR4nJvv+R2NGjQEOTLVxBcmN69fy+uM+xjzNgJqFmzDgoXKY6Tp0wT8IsXLzB8xChUrlINWbL8g1GeY2Rs1NjyPGzDSaZFyzZ48OABxk+YhBo1aiNnrjwYN96k0eXi1NO9D2xsKkkbWsvon00GgS4Jbdt1RI+evUEBUtHR4yfRslUbqU//dhKZfO9FS9GmXQfQDc//wsw6U2fMQopkyZEzey5MmzFLMOFz0a17LwwbPhI5cuRGx05d1WlkMmzdph2KFimO8jaVAmTEWJn3hC5GxYuXFIyuX78BaqKTJEmOGNFjSX+cfMkMde/RS/aHDxuBggUKYaTnGLi4NkfaNOlFeKJA17p1W7EY7Nl3EBUqVRUBiYzE4CHDUKliFRQrXtIwXZNx7e8xGM2at8KIkSZhk8zm1KkzYGtrhz59+st7TEwp/KRPlxFLlpgENy4kfft5iNtT7dp1Ua16LZw8eUrGx3vF+0qXmLZt2ws2S5YGLLjSmjF+0hQ0bNQUxYqWQPUateR+syPOF7lz50WRwkVRs5Yt+Gwoooti/QaNkNk6CypUqIxLl6+CSoAG9o0wbfpM9OvvgcyZs2Kyl6+C5YzPObRp2wFFChdD3XoNRIhQ/anfLVu3g0x4u/Yd4TFwCLLnyI2hZmUEhbdMmayFQec8VKlSVWnG+2XfsAkKFSqCcuUr4PYdk9Cn+rT83bl7j7h5VqtWA9bWWbHWzJR5eAxC9uw54DVlOjw8hohLDwUsRRy7s0tzlCldFrlz5wOFFxKFT84d3br1wJx53siRMw8cHJzw+s0bTJ02E9mz55R3n8omEhdrCs716jVAntz5QKaa78vho8eRL29+pEiSDG3bd8Ljp88wcuQoeU/UAs65YNgIusLZCuarAxCSeI4TJ0+L+xPxaNioicz/nHffvjNpyRctWYY6tnYoWaqsMe906tQZeXLnEZcvPlcZM1obzyn75P3t3KU7ihcvhR69+nxmhZCLA+DpOVrelZnmeIjzFy+B99Q/EYcJk6fBOkNGJIgTD/csFDisu27DJoSxCo2s1plB4fftu/fSBZl0S4FM9UvPAc6nnBNKlSqD0mXKg/MTlTuJEyVFxgyZjHVsstc0lChV1rDcqT74e+3adXTp2gOlSpaRd4vKJp7TrVkLcQueNWce7OzsUaRoCWM+nzV7LnLnyY/NFtc5d95Cad/AvrFYbyZOnGQoI/iM0aqYK2ce9OnbX05/+rSP3BNacYYOG4FcufKifcfOxtAoELRq3Q5FixZHufIV5V0jE8v7kSZVapDBp+DBtTBrlmziTcHGd+7ckTm1YoXKMg9y3SGjbUkUvOvVq49iRYtj8bIVgiOVPYzZWL12PQoXLgbbuvUNge3Wrdvo22+AKBjz5i0gVinVH+f3Vm3aI2/e/KIlV8K5k5OLuJvTAsD3p0kTB9UEa9ZtQOUq1c0unDmNeYzMev68+bFi5Wq5Br6ve80u3VQGZMqURZ5tdkQlyohRY+DarKXwCFyz6OpevXpNVLCpgJWr16F69VpyjRQAGLPD98/RyRVKCDl/4bKcp3PX7jJfB8Tk894UKFgEl69ew/sPH4Qf4lw9dtxEcaEvULAwKET5pwkTJws/QEvQ+AmTMXf+QiRMkAgUxkiM24wVOy6mTpkm4+b95LvrOXosbGwqomq1mqJ8PXHqDPLkyY96dvaGuybnj6FDR6BK1RriVdOydVtZH7luNGrUROb2A4ePomz5ilhsXj8sx0flE5/pHDlzw8HRxRCunjx9Bs/R4+T8terUMxSlXHuomKZLMImCsV2DRgZvQYVtw4ZNRNgaNXrcN7ugWo5Jb38dgZ8WQOhLV66sDXr06IU2bdqhbbsOOHbCl6k/e/Y8smTOKtqYO/fu+7F+dO7aQ2IkOEyPgYOEYeH2kKHDETliZFSsVAXv3n8QTU/YMOENH9q+/frLREc5d9eefUiRPCWyZs4KpfVOkzodhgwZKldPrV2jJo64f
/++MIkDBg2TxY0HyWBw8guM6F5G68D30NZt22UB69Onr9GMAU5kAJu5NZcyx6aOiBQhMogJiUJUtqz/yHajho2QMH4iXLxs0rBevHwFUSJGQs7/5cCFy1dhW9sWoa1CGRodam/ZN608FP7J5GZMnxFnzNYCTqCxosVAjv/lwJt3H8RiwYmZTD7bjRtnYlI4sZQo4YtH0yYOcpxxJes3bpbtvn36yRg5gbMtrRnv3n9ElEhRJPZHDgJo0tgBiRMmxm2z5mL1mnVS//TpM6LlihYl+meWLLbt3KUbChYsAiYPILPh7OyGOLHjGkwgffgppJBGjBglwiu3ubiFDxdRrB30qeXYWjRvJfjSojJv/kIMGjxM2hFPCnHUEvFe1ahVV8qfv3gh5z579pxY7CZ5TZNyCpSlS5eT7QULF0GVP3/5CsmTpxJLCBfOeQtME/Eoz9Ho3t3E6Esji3/0K58yxdQvJ1cKpP/7J4cERlNTXLhIMUPD0659J4NJbNmqNQrkL+RHs8ZuabXq7zEE1NyT2rRtL8wpt2/cuIX8+Qpi5KjRWLZilYzpzRuThaV6tZqC0cRJXtJu9OixKFioCB49eYpOnbogU8bMmDl7nuB68+Zt2aZQSuICkCdvAdmm+xjvLYlMD5kFupgNHe4pZTRtN27qJEIdg+3jxoknliYepDWFPvaKSpYsg7JlTThz0UqSOKks+Dw+ePAQ1LW1U1X9/Pb3GAT33qbnkgcoENe1rWfU4RidnFyNfcsNWllcm7UytKV0vbLOlAVlypSTuWf+Am/kzJlHtGpczAYMGmpoecmAcYH1T2TcYsWIKUoOHiMDmCxpCmzZuhUbN25CxPARhTkgk00tPYPya9auazAR9g0aIkf2XKA7pX/iM1OpSg0s9Da5uFIRUb1aDanWpUs3uafNm7eS/a1btyNB/ERiUaEVq0LFquISyoMUTBInSiJa8A0bNyNGtOjIn7+gMIA8njJFKtFUc9tzzHgRkKVTQGIivM2WD2pwU6ZIjZEjRslhcVWywJ7uTP9k+5/x3JLpoAsSafz4iShT1uYzZpLHKKAMGWJ6X6dNm4F4cROAaw3vAQXdE2aXuRkzZ4mgQQ1pV3X9LVpL/yNGesK+UVPZpjVooPn9ZwEFS7pGBUTjx01A2FChxZpDBQAFzeNmwVjVJ8N07vxFfPwX6N9/gODO9cQ/UXjlXMS/4kWLY6WFNd9/3UmTvZAyZWocPmLS3NeubStMFevR2tCmXUejCRlOpQwyCs3KKF7nqdMmC1fPXn2EMb/34CGqVa+J6NFiGHMpBeTyNhVFEOO7SEFz+87d0h0FLgo4VAKR0qRJJ+sTXZmpge/U2ddVLHXqNGJ9JdOaNk06EUI55zKRQ4EChXHHrKhr16GzKEbYX9u27UQpwG1a5MmcPn9uissiZmFChcHOnSarqsegIejRw13GQatY2rQZcOGiX0sUn4tqVaojQrjwRnC1nV19FCxU1NC8U8Gj1vIZs2ZjlKdJoObakTRJcmFayYhyPqBlllSpcjXDEubm6oYY0WKCAgrn3VJlyhvzRrESpcEkAiRnZ1e4uTaT7UaNGsu9p1KKRGFNzXm0LsaLG1+sJjzGd2qemZlv0tRRzst4QeuMmRAlUmRsM1uey5WzEeUp15zXb94iQ0ZrwwI9cdIUcF2iJZDKNEvPBBmAeY2lcMh7xPvr4Ogs8/IkL5N7OHmevn1MQqVqo34puDNJhyIqaPv1G6B2QWXp0CHDwNinOrb1ZI5h/AyJSplChYqKYtU07szg+0vinFCjZh3Z5j/yAIyhJY/HuCgK33Pne4t3iP+4UsayUFhXFCtmHLEOv3v3Dl269TS8CigMUiil8ErrDGMBlTLsjM9Z4RPpakeigKnccJu3aPNVRbU6t/79PgR+WgDp5d4XJUuUwugxY9G3b3/RiviPPaCmzcSsmh5EDpELKRcNmui2btshQop9w8Yyek6unDgYV0Li4mFtnQWHzOZ2z9FjxF+PfZAY9D5x4mTZ5r/GjR3EIsBtujM1dXCRBZDmaEreXFSoaebCSE1mYFS3rp24GgRWx/+xbdt3yvX2NGubeXzi5ClSNs3sQ0ppP2yYsKJp5XG673DSX7hoKdKlToOM6TMZiwg1lxHDR0D3bqZJn9aOmNGiGwxK/fr2iB83vsEYVapYGalSpDYEEE6WcWLGhl29+n6GSiY5QrgIhsm3fr36iBopqsGENLRvhNgxYkobuu/wHnoMGCj71JTTzWPXnv3o2KkLIoQNBze3Fkb/HFOSRIlFkGAhrzdZkqRwcW0GMkixY8XBgIEmQcJoZGaouQCoQGUKE7ROKOIk0rt3H9lt1qK1MCXqGIUhuiuQccuZK6+4T6ljAwcPRYUKldC2bQe4ujYXK9PyFaskFoDPIZ+kzp27iCVHTUK0clAzPHfufLEcsS8y59n/l1208syoFDZ0WPTra2J+ixUrifLlyuPpM5MbiDq35S/bjDcLfCwnU+DhYcKUJnVqPhmwS8bOrn5DkAlhTBE1+Ozfv4sWJ3dq8Nq374R27ToiQ/qM4P1XxMma942afkuikEQNqSJaBvMXKIQbN27Ie1mlak15JnmcDDSFA9s69eDm2lwYKQoHT549F+ErRtQYcHc33RPWp0WuqYMzChcqYmSTUu8qF7t580wuPm7NWoJWS0V79uxDgQKF8PSpKYi5VOlyOGDW0k+e5CXaP1VX/dLdknFoU6f4WinmzZ0vVgflZkhXlKZNnFQTP7+s4+DkapjaGfhKTfCOXSZGjMJBqVJlpc3KlauRJ08+dOjYWawbqVKmBp/zgIjaNEt3vPLlKsCunkmAohWObgaKuOA3a+b77ly+fBVcRGeZGXW6AHJOpPBIxvfixcvCNJJxy/5PduTJnVe6opUrZ448oGZVUYWKlbF9xw45H7ctiZafNm3aShHnBg+PwcZhJs1wcTExUaPHjgOZQBK162QOyKgp4jvBeDnSrFmzUbOmaZv7Bw4eRNlyNsLQ0e2sUWMHmZN5jBaEAwcPyzVJY4t/tWvZGoyNzxkfib9Qh0uVLI1yZcuhVas2sClfQd5BusVSofK//+UwLL+0KNB9jDRlylRZY6idp1UzdszYqFPHVnXp55cKmTChQoMKGCp3+JweNrtMqoq0UG3ZanKn2bTZZPHOmzuvCCSqjvrds28/WrVsLe8hGWsVjK6Oq18yp61am+4Hy5YsXS7zETXjpHp2DbHKHGviPxZF9UFlCdc49sN5mcq/4sVKyuEVq1ajSvVahiWGLnDW1tnElYYV2nfsIpY5btMCSR95RYxl8Rw1WnaptChbtjwaN2oq94Brj7IGFCpYCP3NDCndhXmvLl26YsxnTFJDi2/xYiVAqwSJLrL0RlBuNtdv3BIFx/Zt2+U57+HeT2JzWJfzCBUZSliRDsz/6BZLC4Ei27p2qFKlmtoVZeMSs8sN3eO4HlAh1LBhY2GU53svlnmdz74ijumcOQFDfbsGcHR0kUNkoGmNp1WIRAaWRFfRVClSypzEfSawyZI5m5F5j8pVehMoatmyDUaYraNr165HmjTp
3Wb9yCpg4t5Tk+dPiYKK34vm7YuJkPJa5RX/1VAgGBwFcJIBQ2Spcuh9Kly8pLuHSZcnBxHenj5TFuwiRkSJ9RHqS7Hu/djg4fPobmLdsIGwoTmTJnw87de3D9+g3kz18A0aPFMH1Vc+bMLR0IM1NDkjtvQdFUc/DOlzZ96xctXiJlcdAQLWoMU8NPAWS8VVsyb8EiOPcfLPkeP3mKaNFiYuq06bLu+x8H3+zAYkSP6UPL5Dvfl6w3bNhIXo7Ma9S5cJHiePrMEuA+buIUU/NGf9C69RqBAy0mxo/ky5v/g9PwZR47ZmxULF/R3Jc8RWpMnTpdLE7p02cUIYMvMmrl6VbGID0jxYuX0LRgjBg5Bp07Wwb93P/Eqm2nsFWrdl3pELmdg0YO+GbOmiPFsP5sJ2phmJz79kOLFi1lmf9oAWvd2lHWKTRu3bZdlqnV5b3DNHXaTFOI4Xq5chWxwKotpkbnuY12XA6QAdBbGRxGjRxVNJKcb511paDgbj22d+++iBcnPqiBokWHAw1jULJr1x5EiBBZtE5Xrl5FufLv3YJ4P0SMEBnXrt8Qq0qypMnlhWsw2blrFyh48ZxM/QYMkj8uX71+A9Gjx8LmLVvkRVyuQmVTOKfWN0miJKArlO+0f/8BZMqUFU+tVjIyiRwximmFcnEbIUIfj5s0aQrKWmNmDh8+ghjRY5nasLx582PePIuFyfc5GNdE4d/QULZs2Rr//f2v6bLF6x0xcjQKFiqCO3fuSv3r1m9kCgEcxGbOkl2Kpd97jZp1cOrkKVmnNvbiZYsFZdKUaTJAMs7PQbDhyjRv3nxTAKFFx8GhpQhyzHvi5CnUrF3P1KZTaGTf4jtNmTpdhCJu5/3BAczatevkuORJkiFenHimcFuyVDnMmTdfiqCGmoNpI1ELHC1KNNMljgM1txEWVxoO6ig80IrD1KlzV0ybbnExoUXv33/+kwHH5i1bESpEKNSv31Dy8dmOEye+WDc5YE2bOq0oV3gPU/lB11QOhJnoyhc6VFiw7RctWowiRYrLdgp9VLZw4Dpy9DgYAxwOMDlpAy0LzRxaSV4qepInTwkO6Nh+KZIlFwHA0OqXLlsB7gsWSV7b62d9+BxTmcDEQGujL5IN1n8NGzXBCGv8HDX/JUqWkT0cOLbv2NnMynths1XrS8H4Y25BAwcORosWlrofPX5CYgh5TaNHj0F56zPIATyVRIaVxjgJNeQUgplevX6LhAmTYNUqi5WzZcs2cGhu6XdoBYsQLiLGjBkneanI4aDZSIyvqlixsqy6DHeV9jMsorSudO/ZW/aRTdo06THSev20RBrW9SFDh2PrVktfNnDQUBlc8qCTp86ItZvLN2/ewrRpM8x+XQq1/qNgvtj6vmJZ1arVlD1Xr91Am7YdYSh7bI/hM8cB+mjrdS1fvgJJEicz23D8+EkoUKAwaGHnO6R1mw5i8aUShy6utAqy78lfsIgIxyz72PGTUsagQRZXw8VLliJ82HBigeR+9wULUa9+Y6kG+zSj/bmBbc5+lYnW8j59/XZvZHvmL1BEJqph3rFjxyNypKjyfuOHTPl+pRLNSClTpgat8k+ePkWNmrXNvoQ88xcoDFpmmBigzvc+rVZMfKdQmUHLD60LterUN+vE906qVGlBK7HvROGPfTytrmRE6xsVn32c+0tWxgCFCB4Sx44dk/u6XgMLD+5s7dhehCguUym5YeMmOaZZMwekS5telvv1H4BIESOZ/TItyZEiRgHdwyk4U8iikoBpyJBhyJO3gCzzH4WO6NFiitJn+YqVSJIkOYa5jJD4QyozHNs54dYdi7W6VWtHifEyD9YFJRCACHyVAHLk6DHky1cAw4YOl0E6O/AhQ10+MJ9XKFceoUOF8YGDmmMGptLFhSZ7xkUYQWLO/Qaivs2DXqZ0WTFJsgC6+BQuUsIcaLKDq1K1pjmjFDXrHDxstwaOt2zdVlxMeGy1ajVQsGBh0aJUqVIdSZMkl4GEj4pZV/hyZ3Chk1Mn/O/Pv8zBs195P7eNQgHdrm7cvG36R1epUsOcYYWdIIUwpvUbNongU616TbBToZaLGh+/Uu3adX3M1EMXnw4dOkpWaslsY1/YKWbPlkPcFKh9o7Zv+gzLoIruRaVKWQZ77LzZuTM4jon1adLMcn5qKjNkzOwj4LBdu45ImiSZ5O3du49YOWQFEFcJQwBZvGQZEiZMjNq16hi75de3ALJ6zTpUtb6Q7969JxpFHwdYV27dvgsOkA03qBu3bokWlJpvatGKFSshGnbqztjZUxjwsM5ARXe4bFlzYPas2aJ1N7Ruxnk4GDQG8rRozJj53v1k+bLlSJMmnZjVmZ8CYs7sOdGgYWOUKllGLBez57gLZ2rojURtMoUwv4JEOYBOmSK1OXAeN24CMmXMIs8Hj+cMLnXqNZQXJJ8bDjBOnDqNWrXqImb0mKKFZL6cOfNg3LgP3cu4j25zeXLnM039ixcvRb78hWQmO6OOHBQ3dWiFOx53sW3bdlStWsPYJb98tnh+JmqbDYHuytVr4jrD7RTm6CJgJGpeOXhgotWQmkMj8dnl88nEwSndw4xEjboxkDa28ZeB+RQ2ec527TsiXJiwoIaVifc4taxMHPDnzVcQg62uhHS9qlSpqqkc2bNvPzJnyioCCIWh6jXqmAMUPouVqlQ3r4/XSs00NfB0IYseNboI+jwPlSO2Lo4ctNNtkImWD9tZlAYNGSZCVYMGjVGnTn1xXeFAlm47dGVNkjgptmzZKsfyH90VQ4YMgz5938dusF9i+xvXHzb0++tnP9GwYRM5ntdPTeyQIZbBJYWGylUs9yOf8TZtHPHP3/+IVtw8oa8FCqscgFEgoHtf5cpVJEfjRk1MrtxAjTuVHBTAKBzbupfaFklXO7YpLbF8xsiUCiUqeypWrioWOU54wWTcZ8bxPJauhBTyO3bsLPe94WJWv34jcZdhXgrP2bO/n+iA98Dcue+tMbTEZs6aAyyProa0IBmTP1CBwYlAjMT7kW69TLTwxo4dF+XLVYCnzcyIvAdp0WEiBwo4fNf55SIkmaxWmaPHjmPCpCmilHBwsFgx6ULFAa5h1TDy85euVjly5BEXQ67TQsABN5UpTI8fP0Gz5q1lEgmus88KGzYCxlkFFm7j+zpHzjxgH2ukmrXroo9NbBS9DSgUM02ZMh0VK1UVC8TadevF6mIcx77CUGbQHZAuSX4lavM5GYuRqLhJnz4T5sy2KLHKV6wibqbGflpoDWF0zbr1cGxneZ9t3LgR2bLl9DH5R506dZExfSY5lPdGk2YtxMJ85MhRZMmSw3T/5GQXVGyxD/YrUUk5y1of7u/Zu69YyjguqVy5qggTtP5QYdKubXuzCPYTfBZPnj4jyo1KlauKIJI/f0FwfMF05tw5GZ8YE4pcvnoNsWLEFqUJ3fs4MYfhDcB4UcMqymNv3Loj1pmt1j6hRs26Pqw4W7fvQJQo0dHSRulnVk4XlEAAIvBVAgj9OalJo8+6kU5ZTZKGLzQHsj179kGK5KnEncDIxxcMYwaaObQUF47KVaph9WpLx
+jUqYsPAYTuE4bm7NKlyyhYqJjZmVCzR0uF4fZCjX/mzNmx2+o33ba9k+m/zJfe0qXLJFieg2lqh1gP3y87o47Gb/XqtcA6fW2iRqtI4aLo6NQFGzZZBhiDBg4WN6GVq9eZLigsn64LdCuiXz07UgpcfEFz4OE78WVHc76RGjVqjNatLS9QamdWrFpj7AJdR6ZMmSZxIsePHwddUzhIoaaVL+PuvfqKBoezA9nOwsKJBuiGw3T69GnpEG2nRaT7Uu7c+WQ/B5G2mrDhLm7iGmdU4uDBQyL4cCBknIOuRa0d24mlwshHFyPGg9Av92Ozq/Blype1oSGmBpZ+vhzgkNvZc+fkZUVf8r37DqBo8VK4csXiN86po3nfchDOwStncbNNFFLdrZpzarbmzrVo0Zln2bLlYq1gmzBR0zV48FA5H+8parOoaeQAs1mzFmaxHHxTQ+nXbMMUQOhSwMBSpkmTp0i5fMkzUbCvVqO2uM5QY8yBEbXknEGKPuj7D1gGDLQGjB1r0fzKgTb/6NZFAYgBmUwckBcvWdoUiLmN7ki16jQQt8QdO3eJ+5RNEbJocYaw+H7ThYa+62wrIzHeiZpUvxIH1Ib2mfs5BTPdCZgYg0MXOd/J95TeXKeSg5ZWDgo5iKVViIkCH4UFJg5sM2fOhgH9B8o6Xdk4mDISNakZ0meSASsHz8VKlDFjLTiwKl+hirgUMT+FWQp29Mk+evSY9BvcTitltmw5sGjR++ulkMI4NyZaVQwBhBYGBsEznunUqVNSDt2D+GckDuxjxowtA35jwMLBfPJkKUXRQysHE58rTjhw5sw5ZMqYWQaK3M7rNwQwWjlYtwEDLNdPZQQHSLaJmutIESKJYEYLme9ENyQOULds3S4xR+XKWmara9K4CUaN+jDwmv0oB3q8n/1KvNYGDZuAbi9sMwrZtgHrtWrWRrx4CbDMGhdhWwaFG1ro6CbEZ55W4fXrN0iWtWvWifvptGnTxY2rZs06Ej/DnRS6Z9sMLhmIXLZ8ZZml8fiJE/JMGjFPtDLVtwqwPJauZ/37DzCrQdccDi4TJ0pqnpvuSi1bvRdavJ6/ELfTsKHDyaDcL7/8zVu3ibKE8Qx0s6XQysRYn3oNmuCGNabGPLFVAClYsIj5ruBzTOGJs1oxUVCkBZFTeBuJ92ysmLFFKKWQS/fNbNlzmbEQfEeXLlsezlZtP98JFFppbWJi30wrIp/5NWvWyrTVRtm2v06dOkuckO02Y7l4sRIoVLCwsSq/dG+cM8ciFNKqYqvg6djJ4pbNjDNmzkKrNpY+gQJYsqQpzFhM7qdlkgIT06lTZ+R94OXlhT179iJlqrSi8OM+TllMBdRxq8VWDrD+e/rUC81bOpoxWlSKNmjUFCNHjZb3HZ93jm8ePvKU56d9uw/7KL5n6bbJsQnz0w2MAiLfq3K/lask71tLXW5Lv3To8GFxl8uYKYvEMnJfmlSpTWsq1xl/SmGN7cE4Wyrc+F5j4nuAadPGTcKFLomGO6ns0H9KIAAR+CoBhK4XRYqV/KDDpIaFPrJMY8dPBANxmeiHbrjI8OWXIEFi2c5/9G80pjOk7yq1hEbi4NnQsHDgV6p0eQle5X5qNXisoQmnibp0mQoSnMz93Xv2wSLrgGjocFd5eRvlcjB4+cq1TwogFFJ6O/c3fTGNY/37y5frX3/+hTt3LS96aj/ix08Eh+YWlwSjPPr51m/QRDQrxja6KxlBjcY2/lIwM7RnXC9QqKgIWFym5t5wi+D6uPGTTLMy15nYUdLkzEThhLOq0JWGwZlGolWqbYdOssrBPF2FGAdhJAp/Y8ZNkFUKibbWGmoLs2XNLu4x02fMMmMBKLDQx5WJ7cc4FdvEIGreGxR+PpauX7+Jxk1byACSeajlcmjRBlusL09u46CKg3gOWNKny4gNGywmcr4UcubKJ9p/urxEihQFe/ZY7lfGJxUvUQbnzlvm1+/QqasPjSFdJ5IlS4kHDy1mc2rsqOUzEjVcFIapRaR7lKHFZ7wANaPk4DutXr0aadNmEBcODn4ZC0N3KrohMNGVrZn1PuFA3dCu8YVDVxQjWDFPnveDLb78bBM1rvTjp5aVgc50qSlZqqyP+CYOBsmQwi7PTcGMfv1GunDhkg9BmC5SjRo1AeMJjMR7oU69BsaqaK0vXbIIPRzE1KhRy9zHgW3HThaXJAq1HOzYCrdcNl60xkF0m6GwZqT48eKbGtTateqaFgDu533GYHmmUaPHSdwMBwa0ytCVKnPmncJuxgAAIABJREFUrDBmGuPA2fa+TpcuA4oWKSbWQLrztWhp8U1nWRxI0wWCbkKpUqSSQa+cBBDB1rAAdevZB5xim4lWB2q86fplm65evQ66ctHqxcRBaezY8UTwZiC+4Z5RqXI1sUbOnTMXWbPmMIuIGzsu6NrGRI2wYQHheu7cecXlhcvsF+vVbSBKh3UbNokV2FBq0J2GfvK2ic8OB+pG383gWvJgovBKC62R2CcbioBixUpip3VaWtu4EsZt0apNhQUTrcLJkqWQZQ6aec8z8VsxFMJ8T1fNvoADVyYqEOgeZXzzgTEpy1euFvdPKrVsE63IjM0xkourm2nl4KQDRYoUM58zCvqMwTESg7gNFx8GMBvn4/1guMExrqJHz15yL0yZOlNi94zj6XpEIds2kVOJUmXNSTM4QG/QwNJ/sPzqNetIn8X3Du8ZI9EqTUGU/TQThYk4seOKlZnrZM1nl4NlHjXcZYRxKFKnTgdO3ELLYYZMWcXVmTtpCWbbu7hZpqlnG7H9jOBqvqtpCWSiUoeKPVqYjGQErNNLwBBibNuc+WhxChk8lOmedPDQEeTImVfqz/0ZMmRC+/YW6wnbNWeuvFhtFWAXLVlmztzH2DkKIIzRMhIH3Ub/xGeafSQFd/a/7N8Z3M3EY+lhQQUH+3ffie9K2wD1vn0HiPuXbT5y5QQZFJgMYZ3WHLqxcV/TZi19TBhDZQHvYY5pqCSicoqJlkjG/dB6cujQIXGh5BT9TJxcIVyY8Kb7NRU72XPkFmUd24YWrutWazLfxXyn875gKlK0GDp0dJJl/acEAhoBfwsg1NaULVtBgnsZTMcOyHhR0+xNf1ZqKFKnTi+aWwLhOgOwOfihT3m6tBlkAE2XBU7nS0tDv34DRKtHLRMH3gw8CxUitAT63bx9RwLKQ4cMI4FbfPFxEMYAs/4DhoiQwhf1iZOWwNL5CxeLiZODCXb81NA5deomrhQMpKNw5DsomPWkaxL3t23bDq1bt/PhBkRTL82khiD1pQ3Nj5rlzp3fh7WIAyTDRcS2HGorGZhGjSi1vR8TkujORHM1NaI1a9XBpCmWeBZq0MOHjSAuF9QSG4kfdWrYqKlo6PjSYadtJL5Y0mXIIkyMbezcGLTJID0jEJxa+oKFi8pAsnnzVhKgx86UibN/cJDL+2HqjFky6xA1cBcvXsbIUWPQqk07cWmg69C2rdtEAKLGnMHrvCeMxJcv/b2pKfUrsd05c1akiJHF1cnwF+fAj9YTaoHZmR86ckwOZ9sz/oEBhrTGMVaJLgVG
4iw/dNuju9jAwcPAufiZGAybIEEimUHJmHWHvuHUWtWt1xCPPD3Fx5uBkdQu04+YAcSGSwcDgTlI4LNBSx8FEN7fvhOZ8sVLy9CmrTskLoLPCS0GFFjpUkJfaQ4cORBjXTnooQUiYviIEkDPMt1GjBJBhi59Rh2Mc1HLzwB7PrPrNm4Vl7H//g0us8oxD610nbv2EP9kw3JDIZkuVU2bNkP79k7YvGW7D4Fg4sRJ4lvNZ8I2derSXV7gDs2ai6aeL2x+ZJOKhP/++U+EeQ6s6WoXMUIkjJ8wUQ7nTFFly1WQZ49tddIaJ2RbNgfEdGekq6HbyDEyKOPEFLQ6JEqYSAYaVIxQ0AoVMjRSpUwtgyZaK+h2yOl1L166IsHoZMyZl5gYBEqf+urVaoB+/blz5UXiREnMgSJd4Ki9Zxv3dR4glkkKR0ULFxVXOE7awP0jRo2V2fsYL5A0WQoRFuj+xnTnzh00bdZCBFbec/MXWAan7EurVq8lzwj7HVoSWTb7L7oEtWvXHn37DRLFwI4du+T6OTimPzhjQBiDx4E3A4zpynf9xi3s3L1XBjOpU6WR66eCJlGCxBJ0zu+hcBKCVm3aSkB8rz7OMpCSSlr/cSBJl9WsWbKCcQoURqNEjmq6fDZp1lyeNcYSMcbHsGRw9i1amink2w5GOZhm/8/26NnLMjFG5EhRJJCf74K69RuLhaNRwybSloYLqFEnWo7ixo2Pli1aY8TosdI2lSpVkXcL73fGgNDKxWeAA0/jWxfs4/g9J/andBVi7AqVA7RMkDXvAbbVjVu35R0TL24CLFm2QnjwfuEAmRYJxtxRG88ZmPjxTPJlmjVrjkz4MXrseHHhowWYlm7O3siAYrrK2Sb2XRSkCuYviJGjxqJEydLiTkbFCetEixZjQmhZtBVAWAbzx4ubUMruN2CwTDrAIGdaXXj/M7aCFhmPew/g4jZKJrGggMQJO27ftvT1dFfjJBz0VNi0eRvixY0vrm98XmjpJQ/O6Mfnk0J5uLDhwUklmDiZBgO9GXc5UGb6sgR8My6LzyQtV77bjceNGjVG+lEqnTjpAS3STGRBqx0VOGwzfnPLmICE762MGTKLm7QRx8T3eq7c+eTdQ3dj3pdMrCs9HVhXQ0Bh8DatILw/h7m4IVGiJOJSSgWRkWhNnTx1OhImSIx8VL5ZY6Lu3L0rrpK8jzt06ISly1eaFjWZeKFQUXlXUFFKV1gmelaUKVMB9ELgtRjbWQ9eFycUIT+6tbJfZaJlhi5xdA01LKG8Rzn2oTKvc9fuMhEF83IGMcY2cZIF4+PIw91GSVwnZ0SkoM1y2e+z/zYEVTmR/lMCdk7A3wII3T/69Okrrio0x1Mzyw6hTx9nMDiWacaMWeISYmizqA2iS4AxzSq1srRssBPnC4zBiJyKlA8PXYsYH0LTMrXHzMMBMQMuJ0ywvGg4sKEbGF8qnImHU4Ea02Ly/CtXr8HYMeNAdydOrclEpdKAgYPQ1rG9OU2l7LD5t2jREnlZdencFet9zR1OrSG1mNTU+TfxJWfbQbMjZtC9X4kxCK1btQE7d2OA7zsfZ7OiKw5dDzjAN5K7+3yZyYlaVVvtNPdzij92ZsbsGsYx/OWsKLYCC6f1ZbuOGjlKNHnGjC7U7nAaXr8G07R8tW7liPUbN4kW17AA0NJy+ux5dO7U1ZyKkYIBgxIHDhzkY9rOJ0+eYN78ReasXLZ15DI7awYKurq4yixetoNtug61atVapn/2sLpJ8eVKlzRahDi4M2bHsi2XPtucptN2yl9q10bLvT3MnF2Fxxw+fFSC4KlNZ+IsLbynOCjkF89tE+MkOJ0lp5BmkOrHEjXy06fPkMEDBalhwyyBrpw1bSKnCB05WjTg5Dh16jT07tNX7vUNGzZi3HiL9piWDbqD0Q3Ar7Rq1RoJeCY/uknxuxzGsbR60eXCxcUVY8eNN1+4nIHN0bGdBEj6FajM2XLu3X/vRmScl9Nvc2ronTstU2YziJaMOaUwg5SpNeTkEEMGDwHvVyMtXbpcXr6chvpjiVPe9uzRS3hwFh4GHPO6Z8yYaeknrt/AkSPH5Lkgf1oZmOj2Y8RrrFi5SqaFZqCwMdBjUCj7I05LfP7iJR9aeLpmsCwKqYZ7J90jOUkD+56hQ4eJUGPUmdpL9mesFwPsjUTLEoVjTtzAgbORWD4D3Rm0apt4HRRW+JE1I1mm/O0ps90dOXZcniGeh/fP7NlzpZ/kYITrluu3DLp4XQsWvhfq+TywL1hndWUyyjd+OUUzn3EGx1LzSjfCeda2oqaZg33WzWDK43gfcTuFCt+JQhVnAevRo6cohNi38t5mojsol9nf+JWo1aeA2smpM/iM7z9wUOrFvOxHOBEA30PDh7tg4MAh0iackpmJAzZaXji44zTATBTIOTUs+TAGjoICBWq6MHIWQT6PdGEaPWqMOWsWB7oc0DM420gUGlhn2ymoGUTN+8RQhBl5jd9Lly+ja5duYvVhEDH76U3WAOnVq9dgxEjL5AdGfttfKr54/1DZY0zBTFcqvjPdXN1EmDeUarTuckBuTCHPcjglMRWFdIFlX89ngsyoqWffxym2165bJ1YYvoPJgJyMxOmT+Z6lJt+YYpgKCAq0RmC+kdf2d8GCRWIdt70vOP08hbmNm7dJO9i2PftRzkDGd7/tt41Onz4r/RGVbkZif8b3pMtwF0yYOMl8ninUsa48ntYa62RaxmFi/edYhH0873FbLwNai6hUoKKH09/aJvYZbR3bySQDtts5HT5doNkfGUoZCuK0sDAAnvFERtyScdzJU6cwatRYH23E+5DPleFSxedm/LgJcn28zkeP3it8KByzzzKEGgr97FuMab+N8+ivErBnAv4WQOzlYvjy57SuQTFxlpBNG+3r40rfox04SFuweNkHkxl8bdlnz11AyzY+40y+tiw97j0BCpcMCn5nTgHxfl9QWHr69CkqVKwikxwEheu1x2uk9WjAwA8/GEmB13BhtMd6a504pb0nmrdyNKdTDoxMqHiidUaTElACHycQIAUQainot8spZ40pXj9+iYFrD7VXcePEk4960T80MCR+3IqaeQqVHNwamulvuTb6A9O/nH7Q1MJp+jYC/DgoLRjLlq9Az97O5gcMv63UgHk0Y1o4DTWts9/jXg2YFH5trRk/kzNnXnENoxWCs4hxlq016ywB6r+2dnr2TxFgf5w6dVqMHTtB3M8+lTcg7qO3BF3jJP7QOkVvQLwOrbMS+NEEAqQAcvzEafG7zpMnr4/Zl340LHsonwGtDDpm0KnxVXF7qNe31IFxINlz5sHGze+nIv2W8ngsA/w4zS59rjldpq271reWHRSPpzslXR45BejuvT6/ahzUeHC6cE6TzTiSZ9aPTwY1BvZwvZzOmcHTnF6cM50xbs42/sQe6qh18EmA7rycwjh/vvzyIULGOQa2xBhWxnxxxjbGi2hSAkrAbwIBUgAxYhL8vqTAvdX22m2XA/JVU4vMGW2+Z/LNRjXV306XTDktqqb3BJTHexa/aolBxcaEFL+qDnreLyPgux/
23U9/WSn2nSswXpN9E9faBVQCAVIACaiwtd5KQAkoASWgBJSAElACSiCoE1ABJKjfAXr9SkAJKAEloASUgBJQAkrgJxJQAeQnwtZTKQEloASUgBJQAkpACSiBoE5ABZCgfgfo9SsBJaAElIASUAJKQAkogZ9IQAWQnwhbT6UEAgsBfjTy1OmPf2AxsFynXocSUAKBm8CZs+dgfLwycF+pXp0SsC8CKoDYV3tobZRAgCCwd98BlCpdHvwmjyYloASUQEAlUL1GbSxbvjKgVl/rrQQCLAEVQAJs02nFlcCvI+Dk1Bm///YHps+Y9esqoWdWAkpACXwDgS1bt+Pff/9D5cpVv6EUPVQJKIGvIaACyNdQ02OUQBAmcP/hI6RLmx7BggVDmTLl8PL16yBMQy9dCSiBgEqgefNW0o/Fih4Th48eD6iXofVWAgGSgAogAbLZtNJK4NcR6NXbGcH//gfRI0XGv3/9A/f5i35dZfTMSkAJKIGvILBz1x5EjhgZUSNEwF+//Y6GDRt/RSl6iBJQAl9LQAWQryWnxymBIEjg6vUbyJg+I0L9+x8SxUuA4H/9jRIlSsHL+3kQpKGXrASUQEAk8O4d0KxZc/zzx59IGDcewocKjXhx4uHAwcMB8XK0zkogQBJQASRANptWWgn8GgL9BwzGb8GCIV7MWIgfOw5iRokqVpCly1b8mgrpWZWAElAC/iRAd6tI4SMicvjwSBAnHhLFTyD9moNDC3+WpNmVgBL4WgIqgHwtOT1OCQQxApevXEWaVGlEW0itIQWQRPHiI0zwEChSpBhev30bxIjo5SoBJRAQCTRs2AQh//0PcWNYFCnsx6JHioRYMWJi/4FDAfGStM5KIMARUAEkwDWZVlgJ/HwCb9++Q4cOThKwGfq/4IgbKxaSJEiI2NGi4+/ffpPtEydN+fkV0zMqASWgBPxBYN2GTfgj2O/SZ0WPHFn6sbixYiNcyFCyrWrV6vDy9vZHiZpVCSiBryGgAsjXUNNjlEAQI3Dy1BlkyJAJqVKkRNYs2ZA4fgLEiBwFqZKnQJ7c+RA3dlwULVYS3hoLEsTuDL1cJRBwCLx5+xZ16tZHnFhxkDtXHqROkVLcSJMlToJsWbIhXZq0iB8/Ibbt2BVwLkprqgQCKAEVQAJow2m1lcDPJHDnrgd279mH23c9cPX6LRQqWEi0hfweyOMnz3D2/EUcOnxUBZCf2Sh6LiWgBPxF4M2bNzh85ChOnj4Lz6de6NGzt/RjeXPnxaWr13Hrzl3s238Q7O80KQEl8GMJqADyY/lq6UogUBIoU6qMvLgHDRoSKK9PL0oJKIHAT2DkqNHSjxUuWDjwX6xeoRKwMwJfJYCMGDkavfv2B6ey06QElEDQI1C8WAl5cffu3TfoXbxesRJQAoGCwJAhQ6UfK5CvAF6/eRMorkkvQgkEFAJfJYCsWbsOCxctgcofAaWZtZ5K4PsSUAHk+/LU0pSAEvj5BAYPfi+AvHr9+udXQM+oBIIwga8SQIIwL710JaAEAJRQC4jeB0pACQRwAiqABPAG1OoHaAIqgATo5tPKK4FfQ6BEcXXB+jXk9axKQAl8LwJD1ALyvVBqOUrA3wRUAPE3Mj1ACSgBdcHSe0AJKIGATsBWAHmpLlgBvTm1/gGMgAogAazBtLpKwB4IlCxe0hqE7mwP1dE6KAEloAT8TWDwkGHSjxXMXxCvXmsQur8B6gFK4BsIqADyDfD0UCUQVAlw1phgwYKha9duQRWBXrcSUAIBnEA/5/7Sj+XImh0ahB7AG1OrH+AIqAAS4JpMK6wEfi2Bt++AAvkLWgWQ7r+2Mnp2JaAElMBXEujTt5/0Yzlz5MKLly+/shQ9TAkoga8hoALI11DTY5RAECbw7t07LF6yHIOHuMhXg4MwCr10JaAEAjCBvfsOYNDg4ViybAX4lXRNSkAJ/DwCKoD8PNZ6JiXwUQJe3s8xZco0jB07DqNHj8WNmzfNvA8ePsKChYuwavUaH9vNDN9x4d69e9iydRseez756lKPHD2GRYuX4tz5C19dxqcO9HzyBN7e3pLlypWrWLp0Oe7ff2Aewg+kqjbTxKELSkAJKAEloATsjoAKIHbXJFqhoEbg/IWLyJQxM/LmK4C27TqgjWM79B8wGD169MK1a9fx6NFjFLcGfY8ZM+6H4lm7dr24JEyfMeurzzNv/kL8FuwPNGrU9KvL+NiBz1+8xMrV6+BpFZBcXUdIfZctW2EesmvPPixbscpc1wUloAT8T+DOXQ8MGDAI48aNx9y57rLcpXNXzJ4zz/+FfeIIWlQ/le56eMDFdQQOHTr8qWwf7Hv2zAs3bt7ysX358pVo27Y9unXrARfXkXjxIuC5Xb2lD6w1LVu+AitXrTFW5fflq1fyzvCxUVeUgB0SUAHEDhtFqxR0CJy/cAkRI0RCwQIFwReHkY4eO45o0WJh0qSpsmnx4qUy0KaV5FPp/avpU7n83kcXhP+zdxVQUiNblGVhkYXFHZZd3HVxd4fBncF9cBnc3d3d3d1dBnd3d3e4/9zXXU1mGMGh+e+dM9NJpapSuUmqnufe/QdYuGgJTnuxXnyKe8LNW3eQIH4iVK1SzfsTfUFpx05d0btPf+mBjMvVa9exYNES3Lx5S8quXb+OrNlyYtSosR+c5e3btx+UaYEioAh4jwAtr1279USIYH8hRPAQ6NKtB5o1a4GA/gOgRXN3UBnwJbRh42bMnbfAzy62bN0mc1/fvrb33s8GAF6/eYP+AwZjx87dUv36jZuoX98NcePGR8OGjdCzV2+MHTsBvfr0R6VKlXHx4qWP6faH1xk5aqynuTlfvgJImy4j3tqFuPsPHmLV2vWe1pIfPmgdgCLgAwIqgPgAjBYrAt8aAWoYkydLIYvrsROnPjjdpctXsf/AISmfMXM2Avj/Xdy0Ro8ZizJlyuPajZue2owcNQbp0mVEly7dHeVdunRDtmw5MH36DGzavBV58+TD2LHj8fLVa4wcPRbZsmZ3aNAeP3mCRo2aoHXrNjh0+Iijj7XrNqB0mfLImD4TJk58LwCdOn1WxsFFcPaceY76HHfSJMlQs0ZtR5nXjc1bdyBrthxo1tzdcahFy1bIlj0H5s9fiEWLlyJXrjyYMHGy4zg1liGD/YVY/8bEiJGjcfLUGdSv5wZeI61EXINz5coteBYuXAQLFi1Fmzbt4FLERSxLd+7eA8ecO08+MPhUSRFQBPxGIEP6jIgZIxaMcqN2nbryjnXr3lMaP3v2HK/fvBfuHz1+Iu+i6ZlKAf4ZoiLg5KnTSJwoKWrUqIk7d+/iteUbHLT6vranxGVmqrPnzmHTlq24/+CBdEEXTCP8vHr1ylt30Tdv3qJH737gnPbo8WOkT5dBxrx02QozDPndsWMnIkWKglWrVnsqp0un1a2T/Tx+8tRRx3rMUQhIJq3Ll69Yi7zdfvDgIS5feV/vxYsXoBuuocePn8D6XRK6nI6fMBERI0TG6jXrQOsOsTl+4hSOHD0uAsiTp8/QpGlzVKlaHbdu3ca9e/cktTDvzcuXNuUWFTU89kaVMQZq/f
2BCKgA8gPB11P/fyOwc9duWRTLlavgZw76adNmIHCAgKhcuSomT5uJv/4MhlIlSzsAdG/ZWoSZbdu2o2zZ8ujcuascGzd+opyDWsxNW7ajbp16st+8uTt279mHiOEiIFnS5GL5IIPRvn1HOT56zDhpT6Ema9YcEhfSuFETpPwvjSxmFy5cQMSIkTFt2nR06dwFAX4PiH37D0qbK1evS581qtd0jM+6MXr0WGTMmAUrVq5CfbeG4FhIq9eslXP/GTgIVq1Zj8IFCiFEsBCgJpY0fMQo+PfnD+nSpsfBg4eFIciXN7+02bRpi9SpVbsu/goWHH369sOly1ckFoXpgrNlySbHyYxQ6GE8jZIioAj4jsCDR4+RInlKJEmSzFGxXTvbHNG+XQcpq+RaBbly5sajJ0+xdt1GhAoZBvPnzZdj02bMQu7c+ZAmdTpMsCsvyCgXKlwUAX/zj9T/pUaHTl1FUGCDYSNGI0XK1GjcpLm0p4tUw0ZNUbxESezaZbNm1KhZWywZQ4cMw9hxE/FfilSYMm2G1Df/Vq1ei527PGR32vQZMkd079HLHPb0e+LUGVywWEA2bdmGfPkLieLm2ImTUpeWmnhx46NcufJYtnwVXFyKo0qVajJvms4439Su6yZz5OTJPruw7tt/ADlz5kGKFKkwafJUaX7pylXkzJEHWTJlwYxZc9G6TTtkypQFR4+fkOMbN29BuDDhESFMWOTJkx8cIxVDOXPmwuQptj4mTpqKv4IGQ/w4cdGgYRPBMm+e/EiQMJG40LGjFi1aiYX4+g2bxdiMXX8VgR+BgAogPwJ1PaciAIAuCL/584dWrdr4iQdjMshIT5hgc8mKHSMWUiZPIe3oE0ymnIv5nr37ROPPumfOnsOt23cR6q+QyJ83v9SdYBdIOnXqLPtVKldF8KDBHcLD+g0bEfzP4KCvNClp4qTgV88N0ZWBrk9nz55D9Wo1cPjIMXTsYGNI5i1YKNXoFpU0SXLUrOm9BSRzxsyIFiUqjp84iXp1bQIRx0r6J9rfYE5+UudOnfHH7wFx4qTNOkS8AgcMhIoVXeU4/3Xs2Flw2bp1m5QNGz4SQQMFxaZNWx116GKRKGEi0OJEevb8y1xHHB3rhiLwiyPw8PETZM2SDVEjRcHkqdMxbNgIBAkUBMWLlcSt23fk6ps0bibvYKvW7bBh01bUqFEL69ZtwLp16/F3tH+wYeMmzJu3AH8FCyFzHhUdc+bOE2VC+/YdcPPWbemH1tf8+Qri+vUbqFatJqpWrSHlM2fNlv7p/kUaPGSY7MeNHRebt+1E9qzZEDliZNDyYqha9VrYvn2H7FaqWFnqz7fPT6aOd78rVqxC6FBhsW79BlG60KJ88eJFmTtixYgl/QwcPBRjxoyTbSNUMWkH3U4ZX0JBhMLDjBkzPzjF1avXECNGLAwfNgIXLlxCjBixsWrlKtAqREs15+369Rtg8bKVCB40GLJnyy59PH36DK6VKiOAf/9iFX7+/AU2b7G5ptFSTNq7b5+sJ3Xq1BOLE3EtXaqM9Dl8xEipc+rUacyZO18sKFKg/xSBH4iACiA/EHw99f83AnSJ4oLDBeTho/eLp3eoTJ02XequsGvuo0SKjJzZc0jV+w8fQ9wk/o2B2nXqgZaKZk2b49q16yJYkGmvXq261KW1guecOGmK7JcpXRbhw4bHgUOHZX/WrDn4M8ifWLN6jez/G/1fuBQpKtte/9HrYsSosUiZIiXChAyF5StWSZVr128iWZLkqGY/p9d2tEbEiP4vWrq3QYsW7uL2RaGFi2z4MOFQvFgJadKkUWMEC/In9u4/IPuMTfnd328oWaKUo8vGjZvCv7/fQMsPqVu3nvgzcFCJYzGVjh0/KddMBsZj7wHBxRzTX0VAEfAZATLWFEASJ0yEJk2aw9W1igj91hYDBg4W91Cvbo0pkqVA9GjRpeqbt+/wd9S/Ubiwi+yTEeY8NMLOGDP2LEzI0EifNi3mzZsv5+Nx0vnzF6QuA8dJS5atQIDf/KNhg4ayX7pUaYQNFcYhEHHMs+cuwMOHD8VtzMWlmLSfM2eu1Oc/KlCovJgydRrmzV8g1tU7d+9j8OChovQYMGAgBg4cLO1aNG8p7fLkzovwYcLi0eOnkn6c4xs0aLAc271nL4IHCYaqVaqK1ZXCQ6r/0oiyxnFSQCweoUKERCv31hg3foL0nyN7TqlSr56b7G/cuAmv3wFRIkZGqpT/OZpzHDznzt17pOzg4SOIHDEKSpcuK/ucQwP+HlDmVNPowcPHIjxmzpQFL16+wq1bdzy5y5l6+qsI/AgEVAD5EajrORUBAAwYzG7/ovhGuwuRFRj6+dI3mMQYCy4+SxYvkX0u5rlz5ZZtxnPEjxsfSRMlkX3rvzVrN4gmrVKFilLcp3cf6We8PZi9QoWKiBAuAo4eOy7HmU0qWNBgMIJOgngJkDKZzdJi7ZcMQ9o06TF1yjTRdHJs6zdskio3b98Rt40GbjYGwdqO2ymTp0SKZMm9FuP23XuIHCESihSyMSnuzVvK2Bm3QdrDHu+4AAAgAElEQVS8dTtChwyNkhbXs9at24oAsmfPXqkzZOgwEaDo3mWI/s9ly5ST6x46bJRoG80x/VUEFAGfEeB7Hi9OPFSoUMnHSu3atke4MOEkrsNaie3S/Jdaim7duSfW1OhR/5b9I0ePyfvYp3df2WeMiDDcKVKia7ce6NatO2bMtGXbWrZ8hdRlVkAS4+E43xjBwKWwC6JEiIRr12/I8f4DBmG6vS2tLS1btpb6CxculuP8R4tr85atkChBIjnWtGkL3Ll3H/37D0SwwEFQvXpNiS3r3bsP9u/fL3EmGdJnkPmJlhZaSDiGIUOGSp9r161H5IiRxP21R8/e6D9gIGhN8UpMoBEmVGgULVIUtP707dMXbPv85Sux7NIivmXLVtx78EgUQ9myZHV0wfmU52QMCGnvvgMIHzYc6MJLOnP2LP7wHwBuXubdnj1tc/606TPx8NFjPHniu7JLOtN/isB3QEAFkO8Asp5CEfAJgeMnT4lvMRl9ppl8+vQpHjx4gC3btmPkmPGOAMshdreDyZMmS0wEM9Ekip/Q8T0MujtxcRo0eAieP38Ojz37JEidixXL8+fJJ0No2cJd9nv17iMpKKnd5KK3Z68txSUDHVmf2jlSndq2gFPGglAgGjR4qDAaHB/r9es3AEOGDJftGfb0nOcvXEL4sBHEcmINLjUY1K1TX+ozpTDHOnfeQjAV8fUbN8QtI22qNHj+4gWqVq4i9bZutVk3KIiEDxceSRIlwYVLl6U7Mj8cx8KFi2SfWcK4P2XqdAm0ZJAqadmyFQgZIpTDL1wK9Z8ioAj4iUDKFKlAS6lP1KVzV2GWL1y0vZOmHlOLx4kZW3aZACJY0OAoX96mCNm5c5e8p+Pt8wxdRUMGDyFJMUx780uXIb7Tve3CCi0gNtfV1lKleNES+DtKNGHiWTB67HiJJzPt+bHBQAEDIV269Hj81Pb9IHOsUsVK0jfHR2LaYZ5r9x7PKX8pQHGujBguvFhVdu32kHom/mLvvv2yz
3naSmb+MWWMiWH/ffsOMEXyywD0smUryDHGujCkP3KEyOJeZirWqF5DrnvLth1iWbFZQCJLLArr0D028B+BHS69JnMhLcCB/wiE1KnT4tlzm0LL9Km/isCPREAFkB+Jvp5bEQBEK0VfXQYl1qpdDwUKFkav3v1w7tx5wef2nbsoVNgFsWLGRqfO3dCmXUexIsSPl0BcB1iJmi0Xl6KIFiUaypWvhHkLFoHZadp36IzIkSKjQvlKWLlmA9zcGiHmvzHFT3vhkuWy4MeJHRezZs+VrCqMl4gSOQrc3dtIZhVq+0qVKoPYseKicJGi6NO3P5iF5vyFi6CLRY4cuTBh8jSULFkKhQq5SPpLBoDSX5ouCPsP2rJ4WW80LT9lSpdDrBix5bpGjR6LBw8fwr1VG8SPEw8ZM2RE/4FD0KBhY0SLGk3GzPaMdaF2j2M5c+68+GXnzpUXHH+tOvVlUT5+4gTixoknPuQ233Jb7p4jR49KVizrOHRbEVAEfEaAygMGcjOBRaoU/4HB08zOZCVmU6K1gEw15xCTDpZ1Zs2eg/jxE2HlytWgJYFCwG4Pm/vQ3r02hr12zVq4eu2GJJTo2MnmHtq0SVMcO3Zc0muzv/H2uLXKrlVkfuE3QXi+UiVL4eLlKzI27vPDp0yEMXP2e1crM9aRI0cjaJA/kT9/ITBejC5g+/buF2tq/LgJZD5j3TVr1+E3f78hdsxY2L5jp1gbaEXmeSJFiCTn3XfgkGSk4jnr1asv8ybdvhiTQmvy2rXrcOjQYQwcPAw3vGQqvP/wkbinMrZtzpx54IdUz547j+s3byFDuozS//QZM8FzsH/G3lCwINFdlWXurdri6LET2G4X4tKmSYdnz5+LcBE1SjTkyJYDe/cdlOxf5vorVHBFmTLlHNmwTLn+KgI/EgEVQH4k+npuRcCCAN2gmN3JY89esYKYQ0zByIBFCgO379yRBYvbDKr2mr+egsH+/Qccrlv0C757975YUq5duybWk0ePHuPmzdvy7Qymk7x77wHu378vi/vlK1elDpl3KzNx8dJlUOtnpSdPn0vAJXPuk82nXzXTX9L3mgGqdIkwHwy0tjPbDNbcvdsDb9+8kaLTp8+KWxq1kUy9SaHq3r37nq6R52GMCenJ06c4d/4CHj9+KuMwHzSj2whTYb58+T7YnBaebdt3mVPrryKgCPiBAOeOKlVrIF+efJJxr0jREthvz3RnmtLdkrEhTILB+DMqF6xEa0W+fAWRPn0m8FtGhjhP0DUobdr0kh7clLu7t0ba1GlRqnQ5HDp8VJQKTZq2AOMkaIWhlaV79x7InDkratWqiz79BqFWzdqitGD8xsrVazDHnoHL9Gl++X0iZq7KnSe/fCQ1f4FCGDdhkqf0t6w7aeJkUfAwNo8Z8x4/fiwZ+zJmyCRB3cza1cK9NQrmL4i8eQtIMg22o7Ile7YckuGqXLlKIrx59+0hun8xniR9uozo1Lm7xL5du3ZDMM6ZIxd69u6Hdu07In++/MibNz9mzJojl3D56jWUKFEKFCZojaaih+mFCxYoDFpDSMQ4ebKUmG9xN2M5A/fHjrNZtaWi/lMEfgIEVAD5CW6CDkERUAS+DQKLFi+RzGDMpLXLrn39NmfSXhWBXwsBfmfCqkC4e/feBxp0WknMh7lt3wOxKROsSFDhwQQT3hG/5+HVTZMfZKWixRDdNEm0trx8+cLTNyys4+M5jBLCtPXul98Munv3Lng9vhGVH+bcLyzKDFtsnk25wfgyU8f0RQvxvfu21OGmzLtfWpOorPFKvH6ewxAVUFaiwodkXKy4bf2i+6vXbz6Ic+P3QqicUlIEfiYEVAD5me6GjkURUAS+KgJ0zwoeLAQuX33/IbSvegLtTBFQBBSBnxCBS5euyPc/mCqdbl5KisDPhoAKID/bHdHxKAKKwFdDYPPmrRg3boJ8W+CrdaodKQKKgCLwkyNw4uRp/BEwMFxdq3qyqPzkw9bh/R8hoALI/9HN1ktVBBQBRUARUAQUgf8PBDZs2CRxdv8fV6tX6WwIqADibHdMx6sIKAKKgCKgCCgCioAioAg4MQIqgDjxzdOhKwKKgCKgCCgCioAioAgoAs6GgAogznbHdLyKgCKgCCgCioAioAgoAoqAEyOgAogT3zwduiKgCCgCioAioAgoAoqAIuBsCKgA4mx3TMerCCgCioAioAgoAoqAIqAIODECKoA48c3ToSsCioAioAgoAoqAIqAIKALOhoAKIM52x3S8ioAioAgoAoqAIqAIKAKKgBMjoAKIE988HboioAgoAoqAIqAIKAKKgCLgbAioAOJsd0zHqwgoAoqAIqAIKAKKgCKgCDgxAiqAOPHN06ErAoqAIqAIKAKKgCKgCCgCzoaACiDOdsd0vIqAIqAIKAKKgCKgCCgCioATI6ACiBPfPB26IqAIKAKKgCKgCCgCioAi4GwIqADibHdMx6sIKAKKgCKgCCgCioAioAg4MQIqgDjxzdOhKwKKgCKgCCgCioAioAgoAs6GgAogznbHdLyKgCKgCCgCioAioAgoAoqAEyOgAogT3zwduiKgCCgCioAioAgoAoqAIuBsCKgA4mx3TMerCCgCioAioAgoAoqAIqAIODECKoA48c3ToSsCioAioAgoAoqAIqAIKALOhoAKIM52x3S8ioAioAgoAoqAIqAIKAKKgBMjoAKIE988HboioAgoAoqAIqAIKAKKgCLgbAioAOJsd0zHqwgoAoqAIqAIKAKKgCKgCDgxAiqAOPHN06ErAoqAIqAIKAKKgCKgCCgCzoaACiDOdsd0vIqAIqAIKAKKgCKgCCgCioATI6ACiBPfPB26IqAIKAKKgCKgCCgCioAi4GwIqADibHdMx6sIKAKKgCKgCCgCioAioAg4MQIqgDjxzdOhKwKKgCKgCCgCioAioAgoAs6GgAogznbHdLyKgCKgCCgCioAioAgoAoqAEyOgAogT3zwduiKgCCgCioAioAgoAoqAIuBsCKgA4mx3TMerCCgCioAioAgoAoqAIqAIODECKoA48c3ToSsCioAioAgoAoqAIqAIKALOhoAKIM52x3S8ioAioAgoAoqAIqAIKAKKgBMjoAKIE988HboioAgoAoqAIqAIKAKKgCLgbAioAOJsd0zHqwgoAoqAIqAIKAKKgCKgCDgxAiqAOPHN06ErAoqAIqAIKAKKgCKgCCgCzoaACiDOdsd0vIqAIqAIKAKKgCKgCCgCioATI6ACiBPfPB26IqAIfH8Enj59iqfPnnk68fPnz/Hy1StH2TMvx3mAdazEOq9fv7EWfdL28xcv8ObNaz/bfMk5/OzciSq8stwfJxq2DlURUAQUgV8SARVAfsnbqhelCCgCXxuBGTNmIVPGLChbrgKqVa+FHTs9cP3GDbx7ByxavBSFCxdFZdfKqFa1OmrVroc8ufPCtVJlvLUPZMiwEXApUhTVq1ZHlSrVULNWXZQqXRZ5cuXFzp27P3q4p06fQeFCReDiUhwHDh32sd2NGzfRtLk7Lly46KnOyVOn0blLd7x+83nCzzsAO3ftwYsX
Lz31+z13jhw9hgTxE2L48JF+nvbJU5ugt2PnbkybMRtv3pg74mdTraAIKAKKgCLwjRBQAeQbAfu9ur11+w6WLl+FlavWYPWadfK3as06+PXHur7V8e741+zft3Nbj1nHYd1mHa/71jIzVu/qWPs326ae+TXlXn/9Ou61vnf7X6MPn/pl3+bPax2fzmvKzS/bWbe99uN1/1Pqem1r9tmH6cf8mmPe/VrrWLdN3dVr7TisXS/9zpm3AKdPn/3s19KtQSMEChgInTt3gYfHHuzbtx8bNm1BzBixMXjwENy9dw/z5i2AP3/+kCJZcuw/cBC9evWR/RLFS+LJ06e4cPESuM06Li7FsG//ASxbvgJRIkVBpIiRsXHTFj/H17BhI0SLGh2FChbG+vUb8PDRY2/bPH78BFWq1sDIkaPx8qVnQYFCUuhQYXDtxk1v2/pVuGjJcqxYtc6vat/0+O07d5E6dVqMHDnK1/NQWOvavZfUefv2LVq0bA2XoiW+iRDy8NEjLF+xCstWrMKateuxcvVarOKfl7nWu+f1c+qwDfvyrT/rMZ+2vZ7bum/amF9zTu/qWMu827b24d1xlnlXx1pm3fapj+9V/r3H4tf5/Dr+sbh47Yf71jJur1y9BouWLMOly1d8ff/0oCLgGwIqgPiGjhMc27BxM8KECoPIESLhrz+DI1CAgAgaKAiCBgrs69+fftTx7jjLvCv37lwfW8+7tp7L3l+L1z697rOdKeOv+fPcn/e4WNv5Vt/U862OX8e+Rh/encNcr/n9sM57LK3HzHjML49Zt611vdv+lLretTfnM/2YX5/q2sqt12LdNveXZbbywAH+EKa/a9fun/VG9+nbX9p369bjg/YtWrijXfuOjvKI4SOidKnSjv1SJUpJ26nTZkrZkCFDZb9jpy6OOoMH28qaNmvpKDtz5hxWrFyFhw8fOsp69OwtAg8tGL4RBY4yZcujQ8fOH1Sj61jWLNnt19PT0/FHjx5jztx5WL1mrZTv3bsP8+YvAC0IpBcvX2LhoiVIljQFateqi81btjna79m7D/369cdBbywy27fvxNix47Bh4yZs3LQZs2fPBd3HSM+fv8CwocMxa9YcR1/c2LxlK9asXYfHT5/h7Lnz9nE8lTrPnj3HkqXLsH3nbty8ddvRbv6ChejffwC22Md19PgJFCtWAokTJsaw4SNx8dIlPH78GPnyF8TwEb4LLo5OP2GDQmeQQEEEWz57fwb2fr78mOf7Y+qY98C3utZjPm1/zHvmW1vrMd/6+ph63tWxllm3fTvX9zj2/cfi3Txn5rtPm7N9w8frdXHflAUO+IdsRwwXXhQy48dP/IQ3RKsqAp4RUAHEMx5Ot7dl6zYECfgHggUKjMKFCqNmjVooVao0SpcuI39lSpd1bJuyn+nXr/F9zHHW8a2e9Zh1++Nw+BC/T+3jU+v7Pi7vrtW7sjJeMPG+ju/nsj1DH1uH12n+vG/z9cZgxdS6bc5bpkw5x1hKlSyNzJmzYdZsz0zux7zst+7cRZhQoZEuTTpvqz999hzXrt+QY9dv3EKEsOFRtkw52X/4+AkSxkuAQAH/wJ69+6Wsc8fOwqB26tzN0V+5suWljBpKQ40bNZWyBQsXS9G5cxcQNXJU5MubH5MnT8XatetN1Q9+58yZi7jxEuK8F9crVtx34BB69umPYEGCIF+efJ7aUoNfunRZOW9l16oYMGgowoUJh/r1G0i9Z89foHRp21gTJkiE8RMnSzktMdVr1Ebx4iURN058LFi4yNHvtOkzECd2XFSuUh2pUqZGmJBh0KBhExFm6MJVrGgJpEubHgUKFsGAQUMc7dq264A/fg+IsmXLo/+gYYgWJRoqlK8osTdsV7Vqdfz2m3+MGGFzwZo+czYowFWtWg2NGjfHw4ePsHrteoQIFhzxYsdBufKVsGnzVumfwlKqVGlB68jXJApKJYqXEve4MmXKolTJMihdyvM75N2zap7Zz/n91P6+Zv1P7evTr8/r3Ot13zO2n9q/dfy2bfZv/fO+f2u7Tz3n59bnOX0676eWezcGn/r31HepMqhSuSo4n0aJGEnmiQkTJn3NV0j7+j9DQAUQJ7/hO3bsQog/gyNU8BDYf9DmD/7m3TvxO6enM/21+fuz/vk1vo85zjq+1bMes25/Liaf2sen1vdrXN719zFl3tXx61yfcpz9mz+f2n2tMVj7sW6b85pxmN+Xr9+ALjifSqvWrsMfvwdAxfIV/Gx648YtxI8bD4niJ0SnLt2QLl0GBP4jsLhmmcZdOnfF77/9hkwZM6F5sxbCMLdp2x7bt+8wVeSX+7R4GCZ50uQpsuA3btQYTZs2F6FmkIVhtzZu2dJd4lSsZdym9eP69Zt49eYtXAq7IHDgIOIyZK1H5pwuYhnTZ8TT5y9QIG8+xI4Ry1Fl6NDhcrx7D5tb07t375A4UVIUdSkmdf79Ozry2gWbJ8+eIWTwkMiXJ68ca+DWEMH/DI77Dx/JfpMmzRE0UFDw3tCSETxYCPTt21+OHTpyDGFDhUHKZMlx+uwFuLpWxu/+/MNjzz45fvTYcRlHt642QS56tOjoZLf4MLaFwhQpXqw4yJ0rt2ybf3fu3kP4sOEFf1P2NX6JxSs+Z+9s78Gbt+9k2zyT/PXuWbUe/9TtT+3va9b/1L4+9dq+dX3r+K3bfp33U+r61dfHHuc5fTrvp5Z7d06f+rf2/YY7tIS+foMyZWyKiIl2JcTXeH+0j/8/BFQAcfJ7vnXbDgQLEhSh/gqBw0eOOvnV6PAVgZ8LgbnzF+IP/wFQodxHCCA3b+Hfv/9Bzmw5MH3GTEyZMhW7d+/xdEGdOnURAaRy5SooX76SMNGuFSt7quPdTvcePaXusWPH5XCcWLFBd6/LV655qk4eoXixkqhRvZancu7s9tgrrk100WrfwWaJcXNr6KkeLaoUQHr37ivlWTNlQcL4CfHkmS2DV//+A+W4cTsj033+/AWsXb8RVapUR5BAgVCpoqu0vXX7rtStWrmq7FNAYN8cM4UhCiMM1DcUNmRo5MiWQ3b37j8glqca1WrKftUq1UQQpMspae++/QjoPwBat2or+00a2yxGdWrXk33+e/3mrVigsmXJ5ijjBscVNnQYuNrH6emg7igCioCfCNSoWVve5fFqAfETK63gMwIqgPiMjVMc2bp9B4IH/VMEEI89e51izDpIRcBZELh46Yq4ITHj0mM7E+517Ca97o2btxA6RCi41XfzWsWx37FjJ1m4R4wcLWUFCxSS/X79BznqeLfRp48tDsXDw0MO582dF9T6Hz1+0lN1CiCFCrmgRnUb424O0vrTu29/ZM+RC7Xr1BOXqL8jR0XMf2Pi7LkLpprEaPj35w9t27STspzZsiN+3AS4e+++7NMC4t+/f7S2H2fhps1b0KBBI3Tp2h3Ro/0trlIsf/v2nWT9SpIoicSVFClSFClTpMKrV69x/cZN8DwlS9riZZ69eIlwocMiV46cch6PvfsRNnRYh5BQrVoNqb92rc1NbbfHHhFAGjVqIvX5b+CgIfjNnz/kyZsftHKQUiZ
PKdYcRyW7AELXMtdKNkHJeky3FQFFwHcEnr98hQoVXGXeUguI71jpUd8RUAHEd3x++qOeBBAPFUB++humA3Q6BJo1bymLLTNYWenp02eYPWeep0DoUH+FRN0677Xw1vrcHjBgkPTVo2cfOXTs+AkEDxYcAfz/jm07djmqM35jy9btkj2Lhcw8Q+vB2DFjpU7+fPkRJ1YcvHj5/tsjpnH9em6oZrccmLJLV65K/MfNmzdx6vRpPHj4ELTusM/WrW3CBuvSisqynj1tLlZZM2dBsiTJTDeYMnU6ggUNBnf3NlL26PFTRIvyN5IkTISnL17hn2jRJfWwaXDz9l20a9cB9eo3wFhLwOqr169RqEBhxPgnplRlRi6et3HjprJ//MQphA0VFlWrVJH92rXrynG6nJLOnb8gwkaXLl1l/+DBQ/JL9y72w8xkpKiRo6BQgUKybQL6KZyECxPe1/skDfSfIqAIfIDAi1ev4epaVd4zFUA+gEcLPgEBFUA+AayfsSpdsIIHUQvIz3hvdEy/BgIMvqbrFDPNMQh8zJhx6NOnL5iad9CQ4XKR23fsBAUVauDjxo4LZr16YI91YIW3795JVqs0qdPJwp0gXkLMnTdf2m7dul3KyDgzbS6pbdv2UmaC0ClotGnbQYKnmzVrgQwZMmHwkKFS1+u/ceMmIHbseKCwQVqydDnKl3dFixatcOv2+6xRzVu4O87LOJT7Dx7C3b21lGXJlAWrVq9DkoSJZX/W7LnS16HDRxA8aDAkTJAYj588lZgHBpEH9P+7BK1nypgZkSJExl570H2XLt3B76ds2LBJsloxC5ahM2fPIWmSZKhY0RUlSpSWtMHP7B9r7NW7n5yXsSjLVqxGksRJZJ+B6ozxoFBDvPLnK4Abt26jddv26NChk6Q+zp4tJ7ZutWXoojsYswSOHD0ON2/eklOfPnMGiRIlFQHPjEV/FQFF4OMQeKkCyMcBpbX8REAFED8h+rkrUAD5K0gwCUKnW4KSIqAIfBsEjh0/iYYNG4MZmujGNHHSFMeJFi9ZhoaNmmDcuPHo0aMn+vQdgDt37jqOv3n7FhMnTUWHjp0wZco0tGndFiNHjXEcpxtTwwaNwJS/JH5rpFXrNjhy5JijDjeYVapSpcrC1Hs6YNnhN0nix0uIoUOHSen8BYtAoYUC0759B6SMLlCdu3TDqFFjJH1u3779cPHyFTAl8JAhw9C3X39MnzFb0vJ26NAR06fb0gizMbOJ9e03QAQQ7lP4Yp258xbg5KkzaN++o8Sa8BjPSUGBaWn9+/tNtt3dWzk+gsiUwtWq1gDTEjOA29DMWbPBtMeTJk3BzNlzMXDgIPlbumw5mHmMAfgcA92u+C2kx0+ewL11W1SuXFW+wWL6ofDYrl1Hwc2UVa9eC5VcbZYVU6a/ioAi8HEIqADycThpLb8RUAHEb4x+6hp0wforqAogP/VN0sEpAt8ZgZMnT6N0mQpgUPmPolu3bqFR42YisGzZvAW7d3ugQ/uOCBc2vHzb43uPi3EwQ4eNQMKESXD8hOfYme89Fj2fIuCsCNAFq1KlKqJMmDBR0/A66338GcatAsjPcBe+YAzqgvUF4GlTReAXRmDnzl2oWr0WLl2+/EOu8tDho0iVOh1WrVoDulbxQ4dbt+9Eh05dPbmCfa/B8cOJPXv1w+kzZ7/XKfU8isAvh4BVANEYkF/u9n7XC1IB5LvC/fVPpgLI18dUe1QEfhUErl69hkf2b2J872uixWHZilUoXLiofDegaLESGDnaFkT/vcfC8926dRtMG6ykCCgCn4+AVQBRC8jn46gtARVAnPwpUAHEyW+gDl8R+MURePT4iQSAMwj81asPs3b94pevl6cI/FIIWGNAVAD5pW7td78YFUC+O+Rf94Tbtu+wfQn9rxDQIPSvi632pggoAoqAIqAIKALvEbAKIOqC9R4X3fp0BFQA+XTMfqoWkgUraDD5AJrHnn0/1dh0MIqAIqAIKAKKgCLw6yCgLli/zr380VeiAsiPvgNfeP6du3Y7LCB77Ln3v7BLba4IKAKKgCKgCCgCisAHCFgtIOqC9QE8WvAJCKgA8glg/YxV12/YJOnwmGt/67Yfl3LzZ8RGx6QIKAKKgCKgCCgCXw8BqwCiLlhfD9f/x55UAHHyu+6xZy9S/ZcGadKk9/QBLie/LB2+IqAIKAKKgCKgCPxkCDx/+QoFCxYRxeeI4SN/stHpcJwJARVAnOlueTPWFy9e4vrN27h5+45mmPEGHy1SBBQBRUARUAQUga+DwMtXr9C2XUckTpIcCxYu/jqdai//lwioAPJ/edv1ohUBRUARUAQUAUVAEfh0BB49eoJLV67hydOnn95YWygCdgRUANFHQRFQBBQBRUARUAQUAUVAEVAEvhsCKoB8N6j1RIqAIqAIKAKKgCKgCCgCioAioAKIPgOKgCKgCCgCioAioAgoAoqAIvDdEFAB5LtBrSdSBBQBRUARUAQUAUVAEVAEFAEVQPQZUAQUAUVAEVAEFAFFQBFQBBSB74aACiDfDWo9kSKgCCgCioAioAgoAoqAIqAI/F8JIE+ePtM7/oMROHnyFG7evPWDR/Hpp3/67AVev37jaPj02XO8ePHCsf8tN548e/ZNv/Hy8NHjLx4+8Xj1+vUX9/OpHTx+/ORTm3xxfd53PsfPnz//4r60A58RePTY83P5NZ5Tn8/2fY9cv34Dp8+c+aSTPn/xEi9evvykNl+j8uMnT/Dq5auv0ZX08fDhI5w8dRpv3779oj5fv3mD599pDv6igX6lxmfOnMWtW36vnRcuXsT58xe+0oWfo4gAACAASURBVFm1G0Xg2yHwxQLIg4cPsXTpMuzYuQs7duzEwoWLsGTJUixatASLlyzFrVu3v93oP7Lnd+/eoUePXqhcuepHtvC52p07d30+6MuRxYuXYu7ceZgxYyZMH+/e2RocOnwYK1asxPkLF3HkyFEsWrRY/i5euuxLj8Dx4ycxe/ZcTJ8+A8ePn3DUfe0LI/j8+QtMmjQF27fvcNS3bpw+ew4nTp6yFjm2Dx46gsNHjuLAwUPYvHmLp8Vw67YdOH7C+3aODgC4t26HhYuWWIt++u2hQ4ejTJnyDiFg6tTpyJM7Hy5fufpNx/7m7VsMGz5Svjp77/6Dr36u+w8eyjvRrVuPz+6beeD79OmHevUb4c69e5/dz8c2fPXKxgi9fPkKDRs0RtOmzT+26SfV8405unrtGtxbtcWly1c+qU+t/HEInDt3HoUKFsHcefOlwZWr11C0aAn07dv/4zr4DrUofHJd+VyaM3ceOnbu+lHN37x5I3N25So1cPrMuY9q8zUq8eqGDhmGIi7F5EO3n9vnq1ev8ezZe+Xfvv0H0Lpthy9S4CxZugwVKlbGbo+9nzusT27HtdPMP5/c+Cs06NKtJ3jdftHgocPRf8Bgv6p99HGuQ1u2bMWmzZtx4eIlrFy5Clu3bvug/YMHD3Dt2vUPyi9duowZM2Zh3779OHXmLJYuWy78zr59Bz6oqwX/Xwh8sQBy9eo1NG3ujkQJEyNPnnxo2rQFat
Wui7p13ZApczYsXuL3C/OtIecLlC1bDmTKmPmLTtWkWUvMnjPvk/qgpqdwYRdZQGvXroty5Spi/ITJGDd+IubMtS2wS5evQKpUaREpQkQkS5ocDRs1Rb58BRE7VlzMmDn7g/NR2zxk6HD8HTU6SpcuhwoVXTFtxixMmTod/foNAL9U6h1Rk1W7dj1kypRV7tXyFasc1ShY9O03ABHDR5TxOg7YNzhWf/78IcBvv8tv/vyFRON9//4D5MtXQMpCBA+BKVNneG3qaf/Jk6dOp7UqWrQ4kiVJ6riOmjVrI/rf/+DGjZuOsm+xQYtCmbLlED9eAjx+8vU/+ESBN2KESGjW7POZ+Nt37qJgoSJwcSmOh48efQsYPPV55uwFkCHjcxQnVlyULVvO0/GP2Tl77gL43HpHjx4/wcnTZ7075CjjfELBi78/E9GyOHnK9C/WLP/oa1qzdj2CBA6KmTNnyVB27vJAqJBh0Llzlx89NDk/GerjJ05+0VhevnyJJ08+znr34uUr1KxVFxkzZgGf3e9FlK+KuhSTNemuD+/Lx4zlwoVLWL1mnaMqFWR8f75EgGvXvhOSJEmB7Tt2Ovr91hubNm/DsWPHv/VpfOz/6bNn4HPjFz179tyTwOdXfb+O09I0bMQo4fEiR4yMZMlSYPCQYR80o1BdsGARvPUimO/evQdlylZAiuQpEeOfGGjYqInwLb/584d27Trg88X4D4agBU6GwBcLIOZ6K7tWQf/+A8yu/NKEfv0bM2meTujLztq161C8eElfavh8iC8UhYeoUf/Bho2bfa7o5ciRo8cROVIUuBQp5sl958jRY8iaLReaNmvpaDFz1hxh4tOmSiNlXHQouJDpp4bZ0MtXr1G+fCUpHzBgkCmWl7h+/Qb4++9/4JO2vH37jkiS2MZI03ISM2Zs3LC7Q23fuRv13Roh0O8BkDd3Xke/3CBjSY3VxImTMWH8BHTu1AXnzl8QRmfZilWoV9cN/fr1R6TwEcBJZeHipZ7aO/uOh4cHKIQYd58zZ8/BpWgJnPxCJuRjcDl48DCKlygNCvrfgnr16g03t4Zf1PX2HbvQqEkLXPnGFqGVq9Zg4uRpjrGOGzcBNWrUcux/zAbf5TZtO/ioSV65eg169nr/vn1Mnz9LnfnzF6JwkaI/y3C+aBy1atXF8GEjHH20at0WHTp2cuz/qA1axoYOH4ljn/nufy7TffjoMTRq2hKnz/guHH9tXGjtLla85Gdb++i22rvPACxfsdrPoX0KNlevXUfjpi0/aT32cwC+VDh58jTadeiCmz+BR4cvw/ymh5o0bip8x7Dhoz44D2WOSpUqI8DvAbB5y4fWETZwKeyCwAECSlsqSQsWKCT9TZnmu9Lyg5NpwS+DwFcTQGrWrIPJk6Y4gOEDRl9P0r4Dh1CpUhVUrlxFJjIy4AULFkaLFq0c9WfPmY88efPDvVUbPHps0/ZS008mg/7WI0eNQbFiJXH02DFp07lLN1SrXhMmroNS9PQZs8QtadGSZciRPRdWrno/6a1atVqsEOaErE+pPm/eApgwcZIp9vb33PmLaNmyNfLmK4iNm7Y46nC7WYtWuHP3Q9eT6zduIXHCxPKCcdsrcQLtN2Awnj61XevM2XOlbvas2R1VT509j7ChwyBooCC4eu2GlE+aPFXq9erVx1HPbBDzdh06e8sIXrt+A9ReuFaqLNUPHT6KUCFDo58XoTFR/ITImT2n6VJ+t27bjnTpMqB5s+a4bGGEaZK2xnNQc0mBqbnlvlo7IuM+esx4GNcyti1cuCjmzV8o9/z2nTvW6o7tjh27oFAhF3Tt1gMP7M+U46B9g+5QDRo2wdJlKxyH7t67h3HjJsozR40N3aiM2wwtdfx7a1G/sI/6bg2RK2cekJkztGvnLhQpUgzGJ51WrSIuxeW5NHWsv2TEZ8+dL2MdPHgYypQtD7o7kXr26osKFSrhrpdnZtTosWKVatO2vbUrbN++UxiAa9ffm7bplpU9ey4MHDTEU13eU9fKVVG3nhse2M/X0r0NWrVqI/XoLkkhs02bdqA2jdS1a3c0bNjI0Q/dA1u0bI38+Qv6ql189OgxXF2riNVs0ZLl6NCpq1gVqNXltd+6fRfTps8QwY3XO3nKVDkHhfJKrlUwcsT7RYy+7dSKVa5cDXfv3Zd6XjV9nDNixYyNRAkSOQT3uXPmiQsWhfUGDRqjeYv3Aj07mTx5KgoUKAy3Bo3A941E/COEDY9sWbNjwsTJUmb+nbtwERkzZELMf2OgarWa4hJJhmPmrNnYtdsDFSq4iqvj3bt30bfvANyzuJzRopk1a3YMst8TazwM3RP5/IwcNVpOZdUQbt68VY7VqlXHYeWiC9L8BYvFxa9b957YuWu3zBOVq1QX90VanR57iY/YtXsv0qfPhNgxY6FPvwGYOn2WKE04dhLdHYoXL4WdO3fL/oEDBzFz5mwQ+46duqJ2rTpSTheLEiVKo0fPPuD7bejCxcuoXbc+ypQphz4WV6inT59h3PgJ2H/gIPjsDhzkvevH2HETUKRwUQwfYcPA9EsrLK2pvP9WzChYjhgx0lRDkybN0bFjZ8e+140ePXsjd668Dvx5/PqNG5g7dz727juA6tVriVXeavniNdWpWx/lyleU59j0yXHQmsx2vM9FihQV11Me79W7L0KHDI08ufPKemPa9OzVR9Y0rktW4ntYvoIrZs2aA3FFev4ctADMnDUX6y3KrE6duqJx42bgvfXOsrpn7z60dG+Lo8dPgs9ElarVHZZkjtGlSFFwDCTGl9SoWQcTJ9veOet4uN20WQvRPHMuNQpCm6DQH3ny5Pdk5d+zZy8KFynmmLPZnhYpWt47dPB8P+iayHe4QIFCOHXaFt9Su059hAsbHnnz5sey5StlKLyWESNtz8HceQuEEW3p3tpx/znPV6tW08E/0LpfqLALJlr4C1pvGzRqhiPHjoHrCueiBg0a4cDBw3IOus2WLFXWcX1SaP/37PlzmSNptbVq8vkuzpu3AFTyNWnaAo0aNZEWFy9eRoIEiRDj3xioXaeew/V24+Ytsi5RWL7kRflCbwK+83z3Sdb5bMDAwfLM1ahRG+cv2tysKXxxHSSfwjmSQp91jWesS//+g7Bnr83lbP3GTeJNQU+TZs1aoE6deg4rE5WZq1avkfPu279feCfuDB02QpSXXq3pfHdcihbH7j37pI1PbmbuLVvJ+u71HWajN2/fyfrK9b9163bSj9d/pUuVQcRwERzFEyZMkv7atusgZTt3e8j6Vax4Kce1OCrrxi+JwFcTQGghaNeuvTB4DIAaOmwkxk2wMfa3bt9BA7dGiBghskzCZAbiJ0jsWJBHjhyNHDnzYMqUqShcqAjq1KkvYPOF4QNdq3Y97N6zVwSYrNlyYMPGLfKiZs+W01F30eKlUjdD+ozYtmOXaOnDhgmPrVu3S1/Ll62QBdjcxcFDhmPh4iUSa8GFmYy9d8SxHjt+UhgjTsRWlzIGhTGegeZkr8QFhmMvUtgFDGD2iyZMnCL1KYBYmeJSJUtL+azZc6WLShVdZf+oD6Zga
unpouKVPPbuk3Y1qteUQ4zzCBwwECqUr+ioyvPGiRUHeXLldpRxYuT9iBQ+IoL/GUxctMiE0C/bK3FBT54iFbZt/9AsTp9918rVkDhJcly7cVPcD2hVYewLGepixcsIw+e1TzLUOXLkwrRp05EjR2707t3XaxXxJV6xao0sjF26dpfFhePmQs17QIZ6x67dKFGilCyO23d5YOHCxciQITNatLQJwVz0c+XJj6HDhmPM6DFwcSkhwixPtnXbNhGU6MJGOnjoMAoXLobT9kXWOqC3b9+B7gHhw0VA/gKFwOeSC2POnLmxYdMWbN6yFZkzZ4V767aOZoMGD0XjJs0xZfJUGSOZC7NIbNm6TYSdm/bgw0lTpmH0mHFYvHgJ+PwbKxiF4YGDh8kCSmG/StUa4uIwYcJExIkdF7dv35YFnkxKmjTpHQxCly5d0bBhYxnLjRs30KNXX4nn4rtYqHBR7NplY1gdgwUk+JrWO7r7Ucjn+1Ojps0SMXzEKCRMlBR9+w9Crz790b1HL2Fuc+fKI11QWKdAkC9/IRkf3QXatu8kTO20aTPESpk1y4eumxTAKlVyFXe0qdOmS19jxozDv9H/wZChI4QZ/O+/1Fi7boMc43OSMGESjB8/AVWrVgcZfNLsOXMRL048mR/I2FuJY2vapJkIORzL1es3UKxocYQLEw4LFy9Do8ZNQW0dNYFRI0d13KMRI0aJG9qoUaOFkc2dO6+DGaT7QYNGTTF58hRh2DJnyiKKE8q9R48eQ7cevbFu3XpUr14DVOJwIW/WrCX++y8Nhg4fhS5de2Dc+ElYu36TCCJDhg1HhoxZP3j2KNTTnYGupmvXrcf+A4dEiKpWo7bMB7du30aSxMnkunnNnGf5bnTo2BUrVqxCnlx5QNwpoJExSJs2A+hSQTpz9ixat2kvAiVj2KiMoBsiadr0mfjN32/o3LU7BgwcAjKcVk02BVJarSZNniYYZMmaQ55ftt24aTO69+iNVatWoVy5CnB1repwH6tSpZonIbVhwybo2Ml7FywqDah9nTplKpIn/w/jxo6XsXFN4jWSmVxCpVSOXMKA8SAF7Zat28k1URBLmy6DuA7z2Ny5C/C7P//yzq5fv1HeDx6ntZixjkmTJIOLSzGQkSbxnapeo5bE4vEeFizoAjK5dNHr3bc/1q3bIMqTVq3bSxnnhGh//4sxYydIe77PVCZwLixavDSme+N2e+LUGRFwKUgtW7YChQoVEWaYHTDeMtAfQTBlqs06+OLFS/To0RuLlnxoiabSa9SoMVi8ZAkKFi7qYPQGDRku7/K8ufNE4Jhnj7/h+08lkYl3W7dhEzZt2Yb169bL+TvZBa7Ll6+IcmPq1GkYOGCQzHE7d+6Sa8+VK48Irlxz6OKYLGkKpE2dVq6dMY/hw0dEW4vihesB8aDwtmbdBhEEGVdK92UKI6Sz586jdl03mYv53nJOTZkyFe4/sLlWrlu/QYQLo/iRRoD0mSVLNvD5mjF9JtKlz4i1a9fL4fTpMuCP3wPKPMp5IlWqNFi/foMoVnr27CP7PXv2FvdPxnPyHZkxfQZq1qglQrQRoPv1G4h69RuAWHCezJI5G+bNsym0Ro4ai169+2H+/AWi7EqcOKko8R48eoTo0f/FP3//i83bdmDMmLFIlDiZ4z0nP8Vnee2atTLWtes3SlzQpk2b0ax5SwQKGFhiNw8dPiL1JtqVqvSiCOD/d7k3FAD53Nar/97iTeHDza0RGNdI129iY1zDDWbmlwpIjmHQYM/uV3SHpVC6d/9BRI0URZSmZ86eN80cv2XLlEOEMOHAIHm6zv3z9z+g2/aJEydx4cJFFCzkgtWr12LsuPGYv2CRQyB1dKAbvxwCX00Aad26DRLES4hGjZuBkj0X0LH2hcCgNnrseJmIDx85juuWTEhdunTDsmXLpRq1CZky2GI1qB0LHSoMRo8ZK8fofxk6dDjRDrKAzE4a+0RGl6AY/8YUZs+cL2eO3GjRwl12ZXIvWky2qe1p1bqdCB+3b99BiWIlRDgyTJ9pT4vCth02JoWMZYFCLrKwm+O+/ZKB4ctKjbOVyLxyEaP2f/+BA9i/3xaINWOmzQVLBBBLg6pVqkk/ZOQoIFBDGyxwUPgkgFiaetqkKT3Ab/5RrVoNKT9+8pRYVkqXLO2o9+r1G8SJGRu5cuRylNHl4NjxE6JJunf/oWiFeF0FyEA6atk2qCEm8+sdsR9qeFyrVMetW3fEncmlaEls3LhJqlOQ46JipXd4J4zFlSu2YF9q5clUeCVqbCq5VpVniJaeMePGi8sbJ1VaeajVInExCRMmHLZsswml7dp1BJlFEgPl+ttd2rhd1CKUbtu23ZMAwkmei7JPfuCcjJMlSynMLPvmwhsmdDiYmBu+BxS4SR579iDlf2llEuY+F9OQf4VyaHv5rFDwpZDLxbRKtZoijHHBJxMZP258aUPtuGEYOX73Vu1E2Cc21G4ZYYmCY6069R3PT+fO7wUQWlBovTp46BDI8HOB4ALmlebNXyRMtSmnRYUWIZrhL168hMyZs6Fd+47mMHbt3O3J/XHHzt2oa18EFyxYhFSp0zvqMviYsWRWy5o5SEVF9uzvhWMyLTFjxAStIyQKmp072zTQtPqwb1KP7j0RI0YsMIsSBR4Kst4FS7LuiuUrxWoiDQF5X5InSwkyCCQyUGROaJEzMS85sufE0KG2RZnCasKEiXH8hC0pRImSZdC6jU0jyOcgatToYjFgX1279wKfQWI2ZMhQBPg9oLxrFKLousnATRJdKuk2Zq6HDKd34yejW9X+frPd0uWr0KZdJ4clo2fPXihpf9/79x+IKJGiYtUqm6a0d59+oqk+f8EWY0CrFV0tSVQE0UpgiElHGJ9GzTC17bTGUdAlPXv+wpMAIu9OEdu8y+O0dFOIJrPGQH5aDNhH9+69EDxYCIeF0qsAQuHPJwsIExFct1sIKRzVq2dTYPEZiBYlmmDKc2/dvgP//htLnv3Ro8eiYkWbNZjHmDAlbpz4olghtuynd+/3Vub48RLKPWLdggUKg4ykaRc7djzRwnOfFrxoUaMLHhRAkiZNDmJAOnToMKiIuXzlilgoqKkm0UpJCwBpz559DiFaCuz/jh47IRausWPHSQmficRJUmCGPU6Gguoc+zxH66Zhhq19cLtuvYYg00micsNjzz7QitW6bXvQTZnrYfZs2ZEgfiKpc/ToUXm3TTIZrmd8Vqmw6da9h6xNe/ftl3NbGVNaMLjmkmrWrucQOmkJKFashKd5ZfHS5Zg2Y7YInxReadEkUelSrnwlrFm9Rs5H5jl48BDyPNN6U6deQ2zabPNIoMWlStWajsx0TIhiVeRJh4Awutb7nipVamHAebxNm7YS27fbY49U79CxC5o1t/EPl69cQ2GX4vKs8mD7Dp0czwOTeCROlEQsW7RUZMqcXYK2WY8urnHjxJP5jM9Gzlx5cfbs+0QC/6VMJYocPitUbFAJYBJg8Nrn2GNO6XqXM2ceLF/+3sIvgwRQukx5UIFJ4n1KniyFCDDcnzd/AcKHC+9I4MB1lvGm
JFr002XILMHh3G/Xtj0SxE/ouEapZPnXvFkLud8DBw+1lNo2TYY6Wm3IH1itVaYyvS/Chgotzz6TSlBZbNy1yGNQiBo08L0F1TtFqulLf38NBL6aAEINI9056PZCV5rxEyaJ5s4KEzVIfDiNFt56jIzW7LkLhMHOnjWHHOJLlyhhEuy1a5qojWKMA33OSXQF4EJhiFoQq9DTqWMnYWB5fM2atY7Fl8x3w8bNxGzr1rARKlWsjO7de+KxF0sGTY10xVi0eIlo3tOmTQ+3+g0lhaA5p0+/U+0CSKlSZfH6zftgVU60DNj/J1p0UPvBxZ1EUzSxycYJyNKpsYAsX7FSJtR8efMjUICA4ILkEzFVI6079KEeMngoVq9Zi/37DyLgb7+LtobtOEEH+SMwyper4OiGAkjcWHE8CSCOg5YNxpIE/zM4rlx97xa0ceNmsRp45z5gmtKPtp5bYxyzj50axPDhIiJn9hwfYG/a8Hfv3v1YsmylaPLJMHtHvO8BAwRC6v9SO9wFli9fCTIOZMhJc+bMFcaJjCCpabPmjmeC+2TSKdRS08Z7tGSpTShmxg8y4tT0kMjw0jpw5fJVcWXiZDt48BCQqaFr0ps3b1G0WCnJ9sH61MZRM+thz9hCZo+ZZUhkKGgNsJJxV2QZ3wu6CpA2bNqMDBkygVYwujqUL18Bffr0xbwFiyVAlVnovBKFdgYGXrpsw4CuOFWr13IIJGTqmtkzSS1bsVo0ucwWR5M+47q8W0gYEEu3HUN0j2SQoaGGjZtj1Wqbpo5lzIxXqlQZcxhz5y1E/QY2qwtdBDJkyiqMAStQW20soI4G9g0u5ukzZBIhgEVULjAA3hDdI5gAwxDnD2oKmbGMi/Kdu/eF+aOrkplTTF3zO3XKNNECGmUENZ8UfA1TwHoUQClY3LxpS0JApp8uNCRmiGECCUPUgNawWwtYxmulGykX1wIFi4iFhVrtihVdZV6k9ZhZgvhMUKliiC5OIf4KJZYKr4K6qUMXVD67hkaOHitZ56hIIbVq1Rp58+SXbVrQGNRs0piSAaam2lDp0mUxcKAtxozuT3TbMUTmj2NfZmeGaLHjPfaOWKes5dkwdY4cOSYutZxPqYGlBYPjM26OVavWAC1Khho0bOyjBYQubQxyHjdhsihQ6tW1CSB0i0ufLqNDUUIGkIIjLQ98x1rarZ88B90TiblREnDMVmavSuVq6GiPQaGrF62rpJYt3YWhs87xDRs0Ehc/Hh83brzM61alDsu79+jjYBo5X/P6Q4UI7WCopXPLP87f+Qu6wDDHPMT1iG7NJGqUmVyEREXCVm+s0DxGfGk9iRQ+klgPWLZr9x555/je09LE+WWgnRGkkozvmLEscF5g0hE+19yuU6sOPPbsRcfO3RzKNBmE/R8F0rLlKorrmilv3txdNPFmn26OFGIp/NB6QxdXEt2s6KpdqnRZcdGrUrkqGrg1lGeWLqxcS2hRNkSlIuciEmMajbutOW79JeNLQTh6tOhi2eSx0aPHeHoHGjRqIpZkHqNSKXee/A6rF8u2bduBdh06SZB+zhy55J3mu5Yrdz5QAUnq1buP4MVtrgM8RkuzIbof0cODNHTIUNSp837+opsbk80YqlipCmbPtgmtpix3nnwSj2j2+UtXda4LpFOnTiNxkmTiIsl9ukJmy5ZT5jNinTVrDiywv7tU6HincJKOADRr2kye5cGW2Cweo6sreYKFixaLtZB8TNo06Rzu8aY9Fb3ke5gJy7gIm2P8peWLbfPnKwDyIkq/PgJfTQCh6XnSpPc+1fR3pX8xmb0b9oV6z94D6NSpi2gLuFgaYhYoarCp1aXPbvr0GeXQ0aPHES9uAuze7SH7TP1KbRP9sUlMy0gpmkS/zdSp0mLUaNsExDI3twYOVwFq2stXqCR1yUB279XX00NOrQv9b620YuUqcd+gBpNuI3SnYszIx2TeoF9qnDhxESxIUNByYKWWLdzlRTOLGI8ZAcQaA3Lw8FExUf7151+4bY8ZoIaXL6lPqVOpiXjw4BH6DxyM6tVripBFf1i2p4WKiwuJFpRQIUJh0GDPcQR0T6E7hm9ETTc18adOv18ouvboLe4jpp13bmlk3OrWbwS6rhlivETxYiXEIkBzvFciA0xmlVq67t16SOpVr3VMPAP919lXlMjRREggs033I6P9X7BgIeLGSeBIM8wYJLqTkdiW2r3Bg4dKoCGZEWpFSXRDoPsWtZckWj7IgD58+FDiPKglI0NTr14Die3golukaAnMX2Azu2/YuAlJk6aQhZrt6QNcumx56WvmjJmIECEybliCG8nMN7ELBWRG6ZNPWr9hI4qVKI0nzzx/f4Lmbi4qXp9LMs0nT50S9wCT+pmawvoNmjiEsh49e6G1PUZkztwFwsTJyez/7j985Ai+N+V0ealhd2liGRdJWrZITLfZtHkrT2548+fP92QBYTA5M+eZZCnduvWUhY9zAwV2vufeUfv2HRxzA49TC0lriaE69dwcuDFtJK1CFEK2bt0qcwMZZ5r/6arlUzYbuh9lyfI+DqtDh07iv27OwV8ygfQvN5ieOXNO9uvXd5P5gsoXQ4xjI9Pbtk07iUNgrAPfDVpPxAJoiSnjonvlyjVw3ilesgyodSWZuAXGhpQvV16EYzI/XolxC4yvMTR95hz07vs+MUjbtu1AwYI0ffpMwZIWN1L79p2QOUs22eY/WkBMnA5jO5hpzxAFLzJSFIpIFHoo0HhHtLCQkfJKHH/zFjatv/WYsSpRoWVcqXi8eQt3dO9hY/qt9RlPwaBwxk/w/WzSpKkIzqzD5B3UMJtsZYx3oABL4cerBcR2TXkdTDTjUhZZhCq+/2bOpeBmrKWdOnYWi6WJReR5KUzSpYz0+u078cenK1/efAUccxFjpmbOtrm4GZe1bl27i6KCLlJeickoqD1nnALpwcPHovAZY/cy4LdpOAfRasZ4QZ+Yb1rySe4t3ZE0WUpxu+L8McCLW4181+fVKzBFfImSpcVyyHbVatT6wPJLt1+3hk3Ewi2dW/7RaluiZFmHoo2HKOQwHsAQ5ykqNYxFyKzDVFBVtwjvpj5/6RJGN1Mr7gAAIABJREFUCwgVNIbI4FMwZiyiT0I65yfON92695JmdWrXdSSyoDsr47iM5YRzsJtdUbJl63Zx0TXfbqHCi/wG3R0p/NL1kWloSbREUwiggEt+wawDtPymSZvBYZFj3Xr13CTulds9evRELcv1MtmMub88zjmH6fwNMR4pdeq0H2S95LM7bJhNcKGHBd2iTSwK57dMmbI41jK6H9IaQesG51JaN30iermQ9xjh5fmkZZoWUsbvUrmWL3ce8bY4cOiIp65KliiFf6L946nM6w7nlNgxY4tL3T17PKDXOrr/6yDw1QQQatJp4ieZCZVMeJ9+g8R3lq4xh4/YXCWozUmZMjXov0jKlTuv5BvnNl/IuLHjSjkfRvrSb9iwUfbpb06XLGN2JcNCTTWJGqj/UqQSjSL3KWGXKFXWEZRM5pOBgyROeAwoZ5CsIWo4fPuOBft3a9jUU+Agr9NM6KY
f6y+ZvUgRI4NWC6PZ4/GWLVrKi9yv//ssVhMn2YLLU/+XSrqgqwgzL/GFN1YSHiBTkj59Bim3Tk7UAvLlX7N2gwN/61i43bVbdyRJkgxMqyexAXHiO4J+efzl6zcIHyYcMlqsSixv1rQFyLwYWr9hE7bZg1nJTDFIM1/efBg1chSqVa0m/rvefaSNcSiuVWqINp73gG44fEZItGQZNw5zHjIUCRIkFlc5ljFrRk17DIupw18KIDSXGzcBaor4jHBxihAuosNFhy5ZEcK/369Ff+0CNm11n74DULSYLUsaBYxw4SI4zN8UXunO57CAHDmGXLny4thx71My0qWGvu5Tpth8shlYGJ2+vXZtXZcu3aU97wMnWWoyy9tdXCgc58lbEAcOHJJLpJsEtVzU0NKdqmz5iuK2woNkvhgoS6I1kIGLrEfi88z4JcZ+cMGh+wdp+IiRCB7sL3TtZmPmOnfq7LCKUUCka5hVQGRsCeMHrMS+EyVK6nCb5EIbJEhQMJCeriGMeVix8n0CiBXLV8jCbjTxjRs3RZjQYSWeiszG6jXrxS2pU+euoLDmE5HpS506nWh4T54+I+6W2bPZrKVsQwUDM8GR+O4YDTd90qNGiSbzEGMSqCnesGGTpIskhlYiY877QXcdxjRR+58xY2aHpYB1t23fgdy58zlcFaipnTTlvRbQ9Mf5YcdODyxYtAT9+g+UoFKj+eMx3jM+cwYXvh+cM+i6kjlLDoegzLmTvvu3b9sUJPz+gWHWzLn4S3eLMqXLCQNK7SZdDZlBjYLyqzdvZB5iADW148xolzLFf45r4D1hbINh1qkJNQw3Y2UYT8fvDZGIDd8xYsfroEbcpMy1jofbdKXLk7cAhtjTdtI1aOPmrWA81cBBQ2XOMm2II6+VxPTKgywB7bSKMQjWK1FxQAaLQfWkVP+lRsniJWSbz2OUSFEcAbmM1aDigGO2uUWGd7yjFIjKV6jsiJ8jM8bYLRLnkWzZczkYb8YhMt6G7y/niuRJk6NCeZvAQQHStQqVM2ekL1pKTcIJxkaZNadJ05bgnE/q13+wxM9wm5gXLvSh2yOtGjFjxpH4HNajoq6yxHlJF/KPlu7f/QfEgoWL3xd62WLMj4nNbNSoKdwa2OIBGK9mrD9swjg+CiEHDx6U+YdxnCQKgm3avE+UQcs0YySpFOS1mmQZvI+cL3m/mYSD3+E6eeqsWM2bN28BWgystHf/ASRIkMThEshjtIhQGWN91idPmynlnJMY32QEMtan0rNylWpIkzajvOvW/s32oUNHECdOfDBgnsQ4D1pWSHTVZBp8o32nKzmFOhLvZ9ZsOUUZunnrdgQOFBSt3G3PI92O/o76tyhquK6dv3hJXNLommriS9gH33O6fxYpYru/fA4Zq8f4FhKfb6MQ4z4TaJi1n3wGhRkmMyDRGyRFilSOOYLXw/tP8TJb1hzoZXezo1X872jRHW59w4aNkFgwJpigKzrnOCbAobuiWZ/kBN78c6vvJnwH31tDvDf8PpiVGOtIvoX32CgHeZyKzb+CBhPh2Vqf21wzDV/nsXc/IkeO5rDQEVOlXxOBLxZALl2+CreGjcXFgZM2taPMTkVXiBQpU8sLSOadfpEmnSxfHjIg9JM/e/6imD7jx00AfmejfYfO8r0O+mmSoWCwJ4NkyahQIxDyrxAS5Hv5KoNSKyNokD8dbiKurpXlvLRY0G9+jj3wixNhqZJlECJEKEfmG7pgUFvCrB90x6IPIicEn+jO3buicVlqybJEjQuZLZO5x7u21MRkz55TJg9mXKG7BSe9YcNHOBhmuqRQkxE7RiykTpVGAjnz5S8orjPepf198uSZBLvGi5dQMqwwyJYapFGjx4EZenwiMiUMWGPwHJlaZvEwNHHiFDExp0iWQsbAxcRMCMwGEiRQUFlISpYsgwn2bCScXGrUqicTXJSIkRAi2F8y8XjnQnPz5m3x540TOx6at2wl95P+qD169ZEMJ/xwktEgmTHxfnCRpJBJBox+4PHixv8gSJO3bf7CRahYqbIEfs9faNPGsm3QwEHFPEz3P2bhCRsmrARHnzl3Qcz7fA6pkaclhkGqzI7UqUt3sRxRq8XJsHXrtggZIpS4FdLSRncU7jOLlleiBYqWlGhR/xatMTXMbM/vGpAJvXDpCoqXKCX+9h3sWX0onNJNikIYnw9aPUiMk6JlMFSoMOja1faxQGo2ixUrJUwx462u2bOj0WWI7gp8nosWKyGuZLzfpMlTpgkDxeB4BqozI83KlavFVY0xVDFjxBY3DNZlUDEXWvppd+7S3ceFnIGzFGaZcYtaQmYtYnBt9559kDRpMnELMMIlM70x2JhuFgxUprDId4JxSXSLauHeBu3bdRStMhkiCoPeLTpkSJj+mL7RazdslmxljJWgkHT81GkRHKJEjop16zfKWBiLQbeMHj374t9/YkgcBa+Rpv7ceQuI8Ob1nScTTCGsYeOmWLdxi7i/UenBMZHu3LuPatVryT0xWtQ+ffvJ+z185GjQYsJ7ZnybGVBMfMj80vee93eLPU0lBUrGrfB9pJuKiSniPEeBiUG2tPyRWVi3YTOGDRsp7l1Mg8kgZ69E33LeE/ZJ4YDaXiY/SJHiPwkEb9ykGajxPXTkGPgehwwRUjJhMUsf55sQf4WUZ4VZeGL+G0ueRwqlJMa4JEqUROYbxq4Yt0YGL0eOGEU+HOg14NeMjxYdWgDJXPHaDKNDwYjWQwrzzBRFwZbE9zFypKjynPL54PcX6JvOeYCWACsRm4oVKgnjyIB+ZsJjIo1dHnsETwpSTIZAgaluvQaenmcyd2TiqL3t1rO3QxlCxoxZlSh8tm3XUdwlrR+8mzt/AZIkTY7JU23JEBgzwm8jMKkFLXvMmkjiMzxhwmS0atNerJBMNkHF0phxEySrEl2G9+0/KK6ZTNrAQPbqNWo7mErrddLCwfTm1ILTJZN9erUY09OgTl03B77W9mabcy6VgrSUMXOcsRRQ8CeDy3mI98Rkx+LzyuefMSKc+yjA002Scwwte8wkaZ5FCny0jHFdpZBjfPiZiY3C06zZ83DqzDlJBMLYPKulkO82hXGTvcmMl+tyuQquMh/ROspnQdaxZi0RL14CiX8wWRXZhhZ9CiE+EQVfYsi5dsToscKrUBDnc8N4wOjR/hHlCb+3wvtDhZVJUEOPArosMRMZLWj8rgVjVjm/0NJGCwCJFiwKLrQMc61gjI+x6vAdZx9002zctAVoWeFzQqVAmjTpJI6V18z7FDZMOMmktWOXhwir5IXI81CxlDdPPoktYXIIWnppcWFKfLqn8/tgVHryPaeLY7A/g0vs7ZOnzyU4njwTrfAkBvsz3opzC9d5Pu+0FFuJQiTfcyooYv0bE6n+SyPfMeN7GiF8JJnXjYB64uRpSTaTIF4C/Bk4qLioLV66Qt6/dGnTI3q0v8Wt/ZSXby2Rv2KaY66NtDYyLohzGJU9NWrVxVWLu7d1bLrt3Ah8sQBCn3cy4lyoaLFgFgPur1m7TgKx+HLxQaJ0zgBs0vHjx8UdhZO6cWOgBopt6b
5CLcqqlavla93UPLBvanb37tsni8qJE6dEq8RjDNozPqMMsKb2kMIFNZyGOOnTnLtrl4dknjHlJ06elABlMtqGWTPHvP5yMmV8g/kWBI8zmJCMBhcH34gaFQb4rV69RnzEDYNp2jAbDjXdN27elkVw7RpbXaMtNfW8/vLr56tWrxZm0jfNsbUd4xNoEj5w0KYxNMf4IcKlS5aJhYVuKwx0ZWYKQ8Sa/u3MJmNSB5PBoXaQ7lxkjM+ePQ+a863WHtOeaUOZFYVpHekzbCwktFow4wqZMe+I947PE93w6NJH9xevC69hVo8cPSqawfv3balcOS4+c4cPHxVLA1MYcgHg88IJk9dEzR39ZEl8Fpl+l3Vohmf6YWpmiDPPe+rUKdGy8Tn38NjjcNewjpvXRY0x8eVzy1TU1LayDf37OdGyfz6jxMIQJ33GqJikBCxnW3lud3t4YryoVWZdrzEfFLSpcaO1j+6MVqLQTxcu63PONLJ8J5ipiddpiNfN+89z+0a8j7RKGmaFdYk37xUx4LUaomadzw5x5IJm3Bzo+sjz8L1lkgBmhGJAJbVo3hG/h0Is2QdxZNurV6+KVpT3e/v2HY4gT46DsUkUCvlMsx2JmmsubIxV844ohNDdifeSzwpjdwzjS4sf791uDw9xT6Hlkc8+XSMYBM73nPfl0JGj8jzRNWfTpi3y7vM55hzAVNQUeA0xToLvFjWkHBtdB4kjXf84txghiW5CzJ5jLHGmvfWX13jYLjSwnEwp29AN0Wh2WX7w4CHBjoIE51YyPWzLlOdsw20+49ZsdxwXYz2ssSkcJ79wzPvq27hoVaNLI2Px3lqy9JGhZZKINWvWOdx8mHWL7yWDm7m+XLx0SbaJh3fB91SEED8+c3yuaGFnYDuvgfEpvPcUuL273xw/g/tN8D2xYaKBchUqifWcz5DX+Zp1OD7jjsN9Mph0vWF/XonzDP3jzRzHOsSLLpNXrr5/DmgpPX7c9w8c8hmnFpzPnFdi/OXho95bZa11qbBh0hfOR1ailZRzB98NQ3zHOU8RA1oYSFyj+RzzufKKKa3F7MMIqKYfCp2cz/gs0wrBuZjrniFiYwKZTZn5JUZ08aMga4jzFp9PrinGRZHHOM/N8uNjwVQKsS+en1YePie8Hv7SAs2x02rFd4LPlHmfuD4YpRzPxXtIHMkT0CWMY+H8SuGXrnD08GC/5H2sWaHY96JFi2V9MddjYr+IC9+/s+fOyTrJdYQCFjNF8T3js8N5lWsgnwWu+4x1JP9AMs89lTscEy2DXNv5/vG+8V0gz8R+ONZtO3aK9ZJj5fPH9/D8hfdrAfskz8DngPwXFXmcY/ms0PWUzzDnO8OrMJ6HmUc5h548fVrOz/WP7yfnUgq8xNAIZOb6+UtegNYgurUZ4nPOa/RtbjF19df5EPhiAeRnumRqHcjEKCkCioBzIECrHdOxWomLztHjPidZsNb90dsMdKZlyyvZGJkr6Nvvw2MUmoxbjtd2uv/1ECCDRmvZ51ClytUcmYw+p/2PaEPBcL4v7lc/Ykzf+5y0Fhirzvc+N8/HpBXMWGclKsioLPnZiBnoTNZAMzYqQXxzRTf19FcR+BoI/DICCN0uaGpk8CS15kqKgCLw8yNA9wa6bNCnnv7vdA2ghdNZiBr9bDlyi2sjg1iZUpwuUNSQ05WHrh1l7fFxzKI0Y+YcscY5y/U56zjv3b8v3/j5M8if4PdiPpboa0+feLpmci2x+rB/bB/fu97cefPBzFLr1m+S1LXf+/w/+nzMItavb39xzf7RigtaEehizVi0vn37iXJi2fJVkuTkR+Pk9fy03jA7I2OdOFam5l5ud831Wlf3FYFvgcAvI4DQvaOVe2t5iXzyRf4WAGqfioAi8GUIMMkEF8EmTZqJAGKCsr+s1+/XmhpXftCSaSyZCc2aqYaj4Acb+a0K+ljTDUvp2yPAIGj6tzPDm0kG8TFnpQvc5KnT0KF9R4lZtLr3fEz7H1GHyTUYi/Cjme8fce08J11z27ZrLylvf9QYrOel+5ObW0O4u7eW7yH9jNYPM166GNar6yZB6IxJpIuWkiLwvRD4ZQSQ7wWYnkcRUAQUAUVAEVAEFAFFQBFQBD4fARVAPh87bakIKAKKgCKgCCgCioAiYEdg6vSZkvBEAVEE/EJABRC/EHLi48xM4VeGLie+PB26IqAIKAKKgCKgCPwkCDBVM9MMm48+/yTD0mH8pAioAPKT3pivMayevfqCX0dWUgQUAUVAEVAEFAFF4FsisG7DRvkWWKTwEXHBm1TR3/Lc2rfzIaACiPPds48aMfNsx40TD9myZP+o+lpJEVAEFAFFQBFQBBSBz0GAH5xm5rjAAQIgaMBAkhnwc/rRNv8/CKgA8ovea36l158/fwgeNBg2btr6i16lXtb/2DsL6KqVLQxzcbhwgYu7u7u7tDiUAsW1uLtDcXd3L0WKu7u7FXd3p9j/1r9PE845FKcX2rdnrTbJZDKZfEkms22OElACSkAJKAEl8LsJ7Nq9D/+ECYvY0aLj37D/IH7c+Lh05drvbpae/w8moALIH3xzfrRpT5+/QK6cuaUTCBsyFJycyv9oVXqcElACSkAJKAEloAS+SKBixUr4J1RoJIwbD/Fjx0GoYMHRqnXbLx6jO/+/CagAEgDv/8zZ7ggdIiRiR4+BaBEjIUrEyNi42fbXpgPgZeslKQEloASUgBJQAv8xgW07diFyxEiIHD4CEsWLL3/hQv8tbuDnL17+j1ujp/MvBFQA8S936hvb+fbdB2TJnA18+RPHTyAdQbC/AqNcObWCfCNCLaYElIASUAJKQAl8I4EypZ0QJFAgJIwXT8YciRMklCXdwNu2bf+NtWix/zcCKoAEsDs+ZeoMhA8bDrGiRUMSn06AVpCoESNjzbqNAexq9XKUgBJQAkpACSiB30Vg1hx3BA70l8ScRosUGUkTJhLlZ6TwESQvTOgw2Lx1++9qnp73DyagAsgffHO+t2nPnr9AqhSp5KWPGzMWDC1ErKjRJK94sRJ48+bt91ar5ZWAElACSkAJKAEl8AmBUaPHokGDRpg0eSrq1q0nsaeMA+nSpRvGjp2A2nVcsWTp8k+O0wwloAJIAHoG+vQbgL9DhkbcWHGQKH4CCQajG1bC+AkQK3pMhA75N9w9FgSgK9ZLUQJKQAkoASWgBH4XgQ9WJ16zboMoOxPEi4/7Dx9b7dFVJfApARVAPmXib3M2bd4C93nzceKkF4YMHY6/Q4RAwngJ4LHAE0ePn8Bcdw/s3bff316fNlwJKAEloASUgBL4MwksW7bCIoDEjY9r12/8mY3UVv0xBFQA+WNuxa9tyMZNm6UjiB83Hh4+evJrK9falIASUAJKQAkoASVgRWCR52JTALl4SWe/skKjq74QUAHEFygBIWvJkmVmR3Dp8pWAcEl6DUpACSgBJaAElMAfSmCR5xJz3KECyB96k/6gZqkA8gfdjF/ZlMVLllosIHHi4YLOw/0r0WpdSkAJKAEloASUgB0BFUDsgOjmFwmoAPJFPP53p60Acsn/Xoi2XAkoA
SWgBJSAEvjjCagA8sffoj+qgSqA/FG349c1xnOxxRTKGJALF1UA+XVktSYloASUgBJQAkrAnsCiRRoDYs9Etz9PQAWQz7Px13tUAPHXt08brwSUgBJQAkrAXxHw9AlCjx87Li5oELq/une/o7EqgPwO6v/BORcvto4BUQvIf4BcT6EElIASUAJK4P+WgPs8D4k9DR/2H5w7f+H/loNe+LcRUAHk2zj5u1KGKZQ/QKiaCH93+7TBSkAJKAEloAT8FYFNm7Ygb74CKF/BBVeuXvdXbdfG/vcEVAD575n/J2ec5zFfNBGR/42EM2fP/yfn1JMoASWgBJSAElAC/58EXr/2xsPHT3D/wSO8efPm/xOCXvU3E1AB5JtR+a+Cx0+cRJeuPTBg0BDcuXvPfzVeW6sElIASUAJKQAkoASUQYAmoABJgb61emBJQAkpACSgBJfCnENi+fQe2bt2Ga9dvYO/efVi2fAUOHT76S5v36tWrr9Z36/Yd3Lh586vl7AvcuHETz5+/MLPfvXuP1avXYtXqNfDwWICrV6+Z+2gN8S/p7du30tS79+7htNdZvH33zqbp3t5qzbEB8os2VAD5RSC1GiWgBJSAElACSkAJfI5An74DkCVLNnB6/NKlndCseUvEjhEbNWrWxo1btz932DfnHz95WoSbrx1QqJAD8ubJ/7ViNvufPnuOefM9cf/BA8m/e+8B8ubJh4wZs6B16/aoWLEy2rXviKlTp2P3nn148fKlzfF/4saTp89w8PAR012sd+8+4rq+Z99+s7mHjx7Htu07zW1d+XUEVAD5dSy1JiWgBJSAElACSkAJfJbAhAmTZJA7ZcpUKWMMeqtXq/HZY75lx9p1G9CkWatvKYrVa9bB03PJN5U1CvXtPxCrVq+VzbPnLyB+vIRIkyoNbt++YxTBgUOHESF8JHTr7mbm/ckrrVq3x6LFS80mXrx4CYOHDsPlK1cl7+atW6hVux72HzhkltGVX0dABZDvYLlj525UrVoDNWvWQZOmLfDw4SMx1b148RIHDx1GnTqu6NtvAJ48ffodtX5/0bnuHqhduy7u37doIr6/BmDgoCGoVr0G5i9Y+COHf/GYV6+9bWbA4JTAXbt2xxsfM6dx8KvXr41VXSoBJaAElIASCPAEpkybgaB/BcH8+ZZv76Wr1xE4UCBkzphJrp3fy6pVq2P/gYN4/OQZKlashBkzZplcevToCQeHouCvjhtp7bqNyJQhE+LEjIUaNWrj6LHjsuvZ8xeoXasunMo6mz9ITG1+v34DsGfPXikzf8EiVKlSDavXrBUBonLlqhg+fCQ4rrFOAwcPw5mzZ+U7njlTFgQNFARnzn061a7XmbNY5LkUL19+dAXjmMXRsTiWLlshVVJoad6iFbp26SZjhYEDBqNWrdrYu++A9SnFHap06bLo22+gTb79xsZNW1C8eClMmzbD3NWrVx/Uql0H+/YdEDex6jVq4cTJU7L/xctX6NrNDZykJ2f2nJg5a45M1tOrZ2/Mnj0Xb968xdPnz1GoYGGECRkKtWrVwfKVa9B/wEA0atwEo8eMk3rWrF2HsmWdsXzFKvO8uvLtBFQA+QZWb9+/R/36DZE0aQp06NgZ7u4e4u84asw4FCteCrv37sPtO3cRL0480WzctNIIfEP1312kc6cuCB8uAq5e+/Fp7ub6zNddplSZ7z7/1w6YNdsdvfv0N4v16tUXmTNlxf37D828du074c7du+a2rigBJaAElIASCOgEJk6aiuBBgmHYsOG4e/ceqlapJuOGbt17yqXTkhE8WAgUKVIUCxYtRZkyTmjevKXsa9asJfLnK4ChQ4chW7acoPDAtHP3XoQJGRo5s+XAlKnTcOPmLTx4+AhFi5VE585d0KtXbxRxKIYnT57i/IWLcj7Ww3Tw8GGEDf03/gr0FyZMmoo6tevI/q3bProdrVu/ERs2bpHy23fukv01qteU7a/9Gzx4GCpUqITx4yegVGkncND++vVr1KhRS+op71wBM2bOkfVqVT9agTZs3ITiJUpj1KjRqFmrDgw+9ucbPHgo8ucviEmTpsDFpQrmz7cwad2mndSZMnlKuC/wRPJkyZExQya89vYWIapp0+ayv0hhB+zcuQt3791HsiTJJO/UqdOgIpWCVrzYcTF67DicOHka3Xv0kv2VXCpLM06ePAXn8hVBAUjT9xNQAeQbmNWqWVseugULPW1KX7p0Ga3bdMDkqdMlv3at2kgYNwEeP32Gw0eO4vqNWzblufH06TOsXbsOj588kX2UtE97nTEtBteuXcduH80EC5w9ew6nTp8x62EQGP0rqXkwAqe4k76MS5cuN7UcxgGvXr3G2rXrceDAIXhbTYv37v0HRI8SHdWrVTeKfrJ8+/Ydlq9YaVMn22OYJ2/duo0jR4/h3fv35rGbNm9BjKgxUM6pHC5fuYJHjx9L+8+dvyjt5TFNmzRD5IiRsWz5Shw6fATHjp+QDvPMmbNSz4MHDyWo7fr1G2a9uqIElIASUAJKwL8TmDRlGmJEi45ixUrA1bUBsmTOijlz5+H9B8uVPXryDLly5kHiRElw4eJlyaS3wMyZs33GIZYBdoP6DRA8aHAZOLNQnJix4VyuvImHStNAgQLhyNHj8j3lOoPgmVKnSoPSVsrHQgUKIXiw4Lj/4CG2bdv+yXiHM2qOHDVGjl28dLns53fcOh09dkysNvfuPZBxwcVLl3DuwkUEDhQYTRo1kbqjR4mG2LFiy2EbNmyUenr0sLhrpUyWAo4ORc0qM6bPiITxE+Dxk6do1rSZlD1xwmLBMAoRWaqUqZEhXXqp39mpnJS7d/8BTnmdRfCgwVCmVGkpXs+1PkIFD4m7Pp4js3x49rGyrtStW1+EQ8NSQq+WJEmSgfeE6e27D0iaOBnKlC4Lb29LkL1x34w26fLbCagA8hVWh44eR/AgQZEre05fS968dRubNm+TfVUqV0GShIkwe64HnMqVR/y4CbBl63bzuAePHonrVoYMmdG8ZRtx36IA4lqvoQzIafYcOmI0YkSPiYb1G8rL3LBRU8SMEQuzZs+Veg4eOoJsWXOgSKHCuHnTErRGSb15i9bIn78QYkSPhf79LeZKCkgMeqtcpRoSxE+IZs1agIIH042btxEpQkTUrlnbbJ/1Cme34MuYPl1GMTFeuGj5NfVWrdshWpSo6OHWC0OGj5K2tWvXAR8+fMCHD0BFl8rSAWTJnAVLl63E6bPnkTdPXlSpXFWq37Fzl1xr1EiRhMXQ4aOQI0cuhAkdBs7OLlLmxImTyJApCxYvWWbdJF1XAkpACSgBJeCvCQwbNgLx48bHsmUrZdao53auThy0J0uaHNXtLAxUcFKI2OMfmvOTAAAgAElEQVTjptS3b3/Z3rff4rYUPUpUlCppGWwTULYsWcG89h06o1btuqCl4eKly3j/4QOSJEqCck7OJseM6TIgScLEsj1nzlypl4IS07Nnz+RbbowBNm3ZJvsbNmhkHs9RxeSpM5A7d14RGrJly4EFizxx4OBhRAr/L/LlyYuGjZugeNHi6Nixsxw30ScW
hBIYNK261HYr9jjoIqa2iLgLIGyKOfbdi0VX7+JTEmEW/AgxggKOjDR45NQnHCyzt85Gi9b/4BkF1zGvy0GaO3fP/991K2bAVp2sRLvvo6kTUC/cj8mVhJDCSYHAZYYszu46SRMRc28fKz6pn+SdyiEebfWbMceibtu19IuJxwejkWLVyieqg5Fn2kePGSSodlG33e6Au7du12Aag7duwsdevU1XGvQ4eO8vKLLwvg57RpCeq9MmXi6UOPcqfqsf+tt46oQQToicTGxUtbJ3CCARLUq59Fw4JqDGuDRAIIZZo4HDypObLnUlCLfTBxiNFFmG/pb+imvFvmytmz5+o+9DfonUYA6MY5Pedm25/9+cgMEF4Y/HojoHga4/HLL6o4s/3q9ZsC1adY0eKWQsR2EGkmqc1bt0tkxDCdzNmO2xE39IkTDgOEOIXcufMpT5X9KF5ly5RXxI/fuKDm2Lwuw4ePsCZtDBDjAYHGETl8tHI8obFs27ZdFUd3bjLWfuduQYomUT6CK+tBs0MpjSnMQQHg3OnTE3SgcZTk8IAY1NZs43PhgoXSrVvSwDqUbpAce2wFwXt16zZUFzfoAVx4w8sGyS9XlgnMkUEKd3vHwI56KdABGjkKqRGsatN4cYuiMCN4aRh44ZyCGOzevVfgIn//3x8UWQ5zUrWoP/jyJngdNL1kidJy8+ZNnVSrVa8lc+fOl2XLlmuQ4rFjJ+Sbb77VdlOkaHGB6oJgwGK4IBideEo+uZPILcWwBckBuUZB+tzJLWegdo9loU4aNmyqiswXX3xlGTlauO0faC2IqaFYwB3Fte4u6zZscjFAMJJAhECkiQHBQDKCgg2FEOSib3B/bX8o/3Z3ujmWT2h+BK3ZPSC9+gTL3v2u8TnmnIULF6v3jt94iMqUqaBGrtnPJ96AufMXu8RXwI2FzrVi9VoLgefYTz75VI09v2YtlNIS0j9MA/OZRNyF58a7QwwJ3HIMaygyQyMcaCQGCeUYowIl0W6ABAZ2tjwWlA11j7gdhIkdQxduNXFhxHygSLMtZkoihcl+T/Rtg+IuXLBIPUNr167XNgv/Ff7v7n0HpESJ0tZAzvkBAR0FxPp+gidy85btioqWL19JSCpgF5Sj3r16CzzjlStWyttvH1f++aWPLyvV0MQKYRiBrt77/HM58tYRNU7szjgFRZwUhiFDhroYIEuXLtXJnOuC+k+OSfTaAACAMhohyxCZWVDE4Mm7C33X28dP69XsmxwdK1AO+of0txD4iGEjlDKGYUDyDXjnn33+uRpgUKtA+EHrEGJ9DIJNFhqdXG1jCxSel196RSdxc03zCYUVr+qRI4lpe+ln/m0C5P2Ll9QoN1QpzoFGwLgyb8ESpaFs2OCq6JtyUWhq1qqrXk9ok4Z+gScQ2o5Jyw5COGLUOIFTDkgED//jj69YtKGRI0bJi89ZVeS0AAAgAElEQVS/pN42TwHS5nrmMyY6Rg0D2q1dMNZf+esrluGDUct7AAE2wtgNIg5VrHzZclKyeEkrne7k6BhV6GnXiJeXtzz/zLM6nlBW+rTppVVLB8hmyiuYr6B6Lwi6v+3k50PTw3AZNixp9jfeNcYJiiyyavU6XVNkoDOw2cfLR7K/kc0jdZg2YGKeOPfjy1elTeu28trfXlMQy9wTnwkJs/U60BoB/oilSE4A0owHwP0YFEi8dIxHRvBMD7DNu8R54UHjvbsLwKk9UJn5hLaKfHrnUx2zoD0iZE7jfZnYCDypeLXchdiA0mXKWbTPL7/+RulC6BlGUHgbNHQYi9CB6tVrYCn7zJ8kEzF1iVeGbIeIv39bpXqacvisWaO2BaqMHjVGKZhsx6DBw00sipHYuGkyyYM3Aq8hz2cEjy9eQuMxNNsD2gcK7AVPAlCBsm4ELwjsBTy75tmVStmoidSt18CKGQZcxSNrspt169Zd6d6UA42zcpXqsmy5gyo5bvwkl+dnTpnlBLoZ47v16G0xFJg/YDUYQVGvWLGKEAKAQMuD5oTgxcEbaQQgN3RAmI4J6IgYk9AYvb2bqeFijmNc5Bk9xYZBpQLANrG3PDsGLcIY1i8kzMrEd+zYMWFuwePx7bf/UP2HeCGEzJN58uTXOGp+x8XFSzO/xIRKhCrgBQSYKFumnIxzggf0A+iORmAaRXjI+Gj2/xmfj8wAwWr1lDlj6fJVcuOmI3CO4DasVFAHsmUYgb8LlxLBfQQahmA5wgc2wcfK2ytQ2LIaUejJ5oKAtJF2zc5/J7YDagqCcmcCgkDIOnburqi87hSRO5/eU4+L+c0nSh0NdqcTTWRbw0ZNXYwc+/Hu36Ep2C1OFO/mTi8Mx/bq008mRkW7nyYHDhxUr4YdFTJ80YAAUsEmNj7Sg/o1c1DL4O8z0JrOQCYnuKhG4PMa42nbtp2SPVtOOXjoTd2N8YBrF+UJwe09YbxD0Vq/cZPGZphy+ITLD4IOAsEEbQTXftRkxzNBuQABBokhMNyvub8GW5ljr1y9Ljdu3tagKqgH8JyR+QsWSo3qtfQ7wV54VYyQJhcqGfL5Z59LjRp1zC4dUMz9WxtFpGfPPtLUy1d2OTm69n3273hQMFiMQL9gwLMLyop7+k5c7JkyZbXaF8d/fOWaHD56TDNMMcDyRxA8yHa4k5ZnL5fveDaaNPW2YjzYNm3GTJe+wjaTPpF20qVLd6sYvAHVq9cW1hCxy849SRckw+BPlSqNGv3m2EOH31bvB3XO/UI3IUganrFxa5tjUTaGjRitnol79+4phx4Dgj5qpFPnbpb3cs3adeq9M/tIYRsVneiWx2NKoD1CmsWFbgktPvv8S4mPny5tbJ5BjmVSQECsiVFCMEJLlynvYgzSl/FckCUGQ8YIRiaUFoO+me3m8+MrV2X23AUWwsU7gvZ3xBYcj+eFbEZG8OS9efiIfPfdv7T9Y6AhrB/QyIlckXQDr99yGwWOvmsUery3KB5GSClqkE3isSZMTBw3SK2JIsUzHj7iyDoIBad6tRpKiTVlmE/6IpOvQeYYF0c5KV0EP3d3Iqx4U9wDVlEIuA6yeMlS9brd+/wLK+CX7Vev35DIEWM0+BOjxASHk7IYOp1B9sz98F4aNmjsgmziRYVegsLfunVbi0LHORhgvDfirqCnhoY53jv7GPvswcrQUaHpnjlzTikeHAPY0Dc4VBOI8Pvd02cFQ98un332heAxNQGfeF2JU5o69f7GKmXMmzdflWv3oFcM0VdfeU1jNsy1oLuRmhfBw125UlX1uFy6cl3q160vZUslpvFMmDFTy4UugjAPYEgYj2WmjJksgEkPENF0vBgUKD9GvvzqG0mbOq3UqZU4dtLPMfLPnD6j1zDIMPxxzjegUjO/ZpInZ25TlMsn8ZgoUfY1EUCd06ZJp3VsP5isdXhsUv3ttSRji/04vqNw9UjGA0J/gQpqFyhXPn4t9T2zXduIMxbIfhzfoRI3b+GgovEbBR8DEAHFJyOUPVsc6LtB/DnWnqgFVBsdB1CPeCQT0I5xBDhG3KYRvFKG80/SHNK7m0yZzNvUo5F2AR0EwwKZOWu20ooMy
AjghYJuaMGMkWT8RIgZs3tY2bZy1VqZNNkVxPn+Pz9IXHyCyxg4a848jaV096AyztWu7fD4Ux5G0rYdjiDt2Nip1hiFvkKbQqB316hZRz66fEUufXjJkSDmwEErHhWmi/H8cjwUNOhtCPEdjZv6WFnMWFsquL+Disp+4rPIuIZAmxs0dLiVpRQwFaDCSMLMOUplZ/xDANhMsqJVK1dbDBn2MR9NcIuVYSwiSQ06qfHeAALinTBjvLkWn+gFxMmZGGHib8h4h3z99TcK0gD6IRj+0IEBeQhsZ7kHk9RDgfgCRSyPGtfs6izn3Ln3pImXr5ZHOeh7cbEOmizGht0AGzQ4QmY4aXF60cfg3+82QHCfQucoXrSETu4O1HG5uiOxuEDPydCChWaQG7h9L7zwktIsQD9a+7cVr6ZeQsCjv387dd2SUWvMWEcg3aRJUTrBQWliMMTtSsOHdsFvUHK8KI0agsI21utgpYcPilAEkEkQBIHc96DuCGU1a95SXf1YmigH9mwL5t3gZQEVASXF/TcjYY5Oiuynga1cvcZjOjvcZiD3IHvkmybLiX8rf0mdKo3GCRBkTprXokWLe0y5Gjt1mt4fVI45c+ZbigXc8ixZsinvHzfu+EmTNcgOAwIX6PPPvyh79uxTtzaBbS8+/6IO/kz+uXPmkTy58qiCixue+AE6IVx/At7hbKIMgHISzA76xkCCcka8CtQkPEwYLwSLkdElXZp0UhyP1tffaFYy3gcBlAT/Qzng91hn0DreGgbL8eMnyKLFSzQ/O++RCZjjQKpoK639W+vvU++eVmoIeenHjB2vbWDEiJFaZ6ASBMOTmAAUA4QEzjzxJLRJu2BkEQBmuLz2fXzH7YlnhvNJbQslBaE9UH7bNgFKo0GRQtns3z/MCrA0ZWEIMojibQJpweXu7t2ibkc7g0NRsNzXkCAmCvc4yr8REgCgyEPlmz4jQbZu3aHvgwBy0EUCWEGHDe1k5qy5algyGYC4Q0lkogMcMDx9yobfjIG+cNFS7TuLlixVAxT01y7UDYGXvGcCI02edFBAPEaGJmA/h74IbQx3MEg+oEP7gA4aL8MkTdunTFzOBk1s2aq1xSUm0wf0hIkTJynFixWdTV1B3erbL0Td7gQGmnVPYqZMVSrCFudCWgR0g35D2wIJO3PWsR4JMSCv/u01geawZdtO7QPExbznlpKW2JORo0bL66nSqKfVPB+BtvQb2mts3FTlYgOQQCEg2w5UqeiYODXMWEuB43jXZCMhKPylF1+WcU4ONhSnChWrCO8KpSN84BALcRw+bLiUKlVGUfq4qfES3D/cWreDxBIF8hfSZ8MDxSRbolhJDQhfvnKtUlzXrVunNFOyGrkLhsPrqVJLfLwD/YM2QbAi/RNKE/2AhAC490PDBws0VShRxPtADyKGACGDTOWq1TWo21wDJYBEIUWLllC+MRSR7kF9NNnFpo2bpEOHTpZRas7hEyMADyk8aeKkAKVMBjSCfaF/EbPAuBEVHWst0okCRoITAm9R/KHv2Q0QEjtkyvSGC4WL6zG+glzuP3hIKXHMB/QHsu3xnNBCAESIJQIxJiibe9+3d7/2HcZBQ/+zPwcULQwD3juGrkl1Sp0R38h2jEvojSjPL7/wkqRLnVb7Fv0zZ45ckiVjZkmYPV+KFCqsx2PYwz1n/QbOB2m+fOWajuX8xqDDW8/3zBkyucwnN27c0rHI/V6Zlznez8dPDUkCpLknABK24/XkfvCE8xsqB6ATMRP8NsHx9mfnGnh8SZOKJ4m5Cwpccvx/PPJQrk0d2cuyf4exQGpyvLl2YZwFqcbrjiJpPHAcA/BHXBieQRIAGGqL/XzGoqKFiyoVjHZ965M7GnPDvAdNcuPGTfLq31IJKX2JDyLrJv0GABXFHFZF+nQZZMCAUJ3rWDgWoyUmxhGrM27ceEtRhcefNWsOpYSCmtOm9uzbr3WOwfDyS39V5RNltVKFyhoTQt/FuC9csIjky5PPAnJpX6QzTpg5S6JjpipNjueCrgTowNoTjIsE1oOmGyFzXd069TTAGf0KyhhzPwAm9KJfbH6kNm0DXBKymDKYN5jbAEuIRSDAGjAKox7QkEQIoPvQ17r36K19Ej2AGFJjkJmy+MSwR69K9drryqzAMEVHgeqKxwrvE2MwcW4k/cEDlS9vfo2Tw7PAeVBGAagWLFis487IkWO0TbGuVJOmPgpK06/bBXS04t6OnTipfc3H21c9RBfef1/nP8ANvPeAuRimzNc8Z/SUqeqN4Z4BNIgFJgEC4Cb9wZ0iCOWccf/ZZ5+TfQcO6nN6e/tpnBAMEoxS9Eq8mBgvUK2ff+4vEh0dq+31hedf0NhjqFVQt7jGsGEjVO9t376DtjuWenjnndOSL09+mTtnrsaskCSiUcNGypIhtCDVq6k0/T/tP3fO3Br7ZeZM+3v4s77/bgOERoXyCg2HQTykf6gqCGRiIVAQCgBoXJs2AVZAOBxegh4J/gYFx1KHB0sGiCNHjyvi0b17Tw3ewfUL9xUXOihMv77BGltChoipU6crJcqkiIS7OW/eQlVMyWRiDAoaMgNpQEB7W/YAUQvXx9dPEWmDKnt6EdNnzNTMTfBa7cIkTcMxaIR9Hxkw+vbpp/cLuvjumXMSETlMGyrGydnzFzTegMnfUAHs5/OdeBo8DCifJkCS7SifICrkP2fiRhioIoePlA6BHZUvCd+ZRdQGDRos23fuVl4q3iEGZQZeaD50ahRF3oX92eCw4vLmfLKTIEysUAXITISSi0C1wCjhPUI3YVLq0rWbKlhXrl3XiZzfdERDAwO9w0iE101Hx/XOhNqvTz9d+O/uZ59rEHjXbt0tBIBBiAWSmBQQUBjqjWQEZPgAsSVeAJcpSDHXtgtpDFHu3APqzDHUYVCvvlpXGENr1yUGVmPMYSCjGDGxouhwPeOWN2XwSQAykymuUBahpL6IbzICMsb9BfcNVt4+cQ3uAj0i2g3Rw0jD8KEtwDlFCBSn3nm/1I1BQdmHsYanjExJKD0M0EOHDtOkAfbroWDgLUE5xphr0bJVEsQchL5//wHStUtXDcqHv43ET58pi5PJksLgC0WOdgVdCeWfwbNnUC819M+ce0/ji+iPJuMd8VlQpYyAYoIOg87bPVdMdDwL+0D7jIBIEYNhX/CRyaR+/YZ6vL3/YIyhuNCv4em7p/OmTGJWWrTwl65dusmU2ES6E8opwaMkeMCLQJwPQj3yjvx8m+u5bCMhQMfOXXTcI04JBI5FwkiqQWpUhPJw/TOR2SmgeDTIDjdzJqidI3mHnuD8FxU1WZ+BdROYmE3qTp4TxcXHt5nlebCfx3cmNKgSBG2iGIPoEt/FOIW7nwBsO1JGNhWy+ZggWXt5gEqsj2OEoGuQth7de2j5gBe0B9bZgepi9zCac8wnCiVUXuodj4Vd8KaieHXs1NWlrXMMXk/uHxrfLCf/2ZxLAgWCng0KabaD0rdp3c7FG8M4xbtA4cS4QphD4LmDUJt2AtgCIs94544QM7YCtmF447EyFDgMdQzmvv2C1bvMfAb9KDx8
oPQPGaDZ1bgeiCbjLqmVUVQ6duqsqaTpo1Pjp0tY+EA1iI8eOyljxozVfoXxgQEOZZaYR7uHCa8r1FBPsmrNOh3b4PUzvkOro00MHjRE+yfKIMAMBhNjLhmImIt5BoA5dzGedxBbGA68D/u84n48MVH7k7k392MZh6Dl2YVxlvksiHFwcISOY/b90EFpL0YPsO/jO22ddsO8CF0YLy/oO14N2hvjROfOXTTV7527n2m9Mq5iYJv5jHdE3Ag0W5OpknGle48gpaNi1BmBkse8jW5jjGTmO1IJM2+TYpo+0L1bkKD3EG/IfEA8Jv3VtD/KA2hDGccAMML8QgYw5gXqBuPcPqbMmjVXF1WNiIiUfv36q67G2maMPSZFNmWhnwxmvvBA32Q/HlDmQAxIYoSQe/c+13ZCJjyTdYuxiHmZjJUYJZ6ETIB4qAIDO6oRRwYyKHDoehiEzA+hA0L1eSdOihbi7XoG9ZTjJ0/pmMccg0F49vz7Ar2sV89emnrX6HN8tm3XXgEi+xxJkhPmehgVAK4IcWZ4DZhjEEAv3hWGgz21NEYd5xLvx/2tXbshSQwTnhttfx07a9ZNEhpxn4xvJE5AYOUArrLANc/DWI4sW7ZS2wPzG/EzLCDZr1+werau37glAPK0B7NqPRlNYQCwcDLjBvEzAMuEIDAe0M9oeyyoS9vGMH1c5HcbII/Lg3Af7dt3VPrS43RPj+u9EOzIBPH/QUCGtu/aayG3j/MzM7ER+EqWOPc1Ix6X+8YQA81/lEK8iEmp/SjLfVLLwrDHm5Iiv68GAIcOHExcL+P3lZZ49j///b28fexkEgMk8YjH4xvK98FDiUkzHo+7evi7wGAcM26SiyL48KWknPFrNUB8IJ4CQw36teNT9qfUwO+pgafGAAHpwi3GirIp8us1gDuWFL7wL59WAcUE3YWPD3f9SREyqRB3YF8k7XG5d7weQyNHuAQ2/tZ7g+KDYkGGovETJ98XHf+t13gSz4Pm1tyvuXqVn8T7fxzumYDW27dvq+d3/wGHl+pR3RdezeCQMIEC8bgKKCko7QcfXkri/Xlc7/nX7gvvGSmv7bGRv3ZOyv4HrwE8V336DZD33rvw4CelHJlSA7+jBp4aA2TBoiXK6cNV7u5u/x3181SeCp+YhYayZs2u1Jun8iFFNEd71eq1lI//JD6j+4rMj8MzkAPfnnHm99wTgZME28FBJn1qijhqYN+BQ7qoJush2XP4p9TPg9fAjBmzpGKlatbCfA9+5q8fSbpz0hA/zsLieq/89VU5fsI1E9fjfM8Pcm/Qdg198UGOTznmwWsAup5J1fzgZ6UcmVIDv70GnhoDhKwy8BjJ7GJPO/fbq+bpPRPeMsHlxIEQQPa0yp07d5LwxZ/WZ30Sn4vYrIsffmhlQ3kSn+GPuGfiJgiARhkgCUGKPHwNkJSBVYftqUUfvpQn9wwSXsDVR2FPkZQaSKmBlBp4HGvgqTFAHsfKTbmnlBpIqYGUGkipgZQaSKmBlBpIqYGUGkipAdcaSDFAXOsj5VdKDaTUQEoNpNRASg2k1EBKDaTUQEoNpNTAH1gDT40BAq2IdRTMYj5/YJ1p0VAjWDgqRTzXAPnCSUWXIp5rgJTAJE5wT+NpP/p+++zHParvyaUpflTlPy7l/JYxwr7GwOPyHL/1PngWs5bHby0j5bzka8AxF32XbNrv5M/8bXsYZ+3pVn9bKU/vWVCyU6iMyb9fkoGwYOH9xoRffnGs/ZN8KY9mD/fy/0HQjx5GWKPlfu/nYcr6Xx37IPrxU2OAkPGoX0ionDz5vwm6GzJ0uJW3+X/1Qp+k6yxctOSxCXBH4VqwYJGuL/Jn1qFdiT1w6E0JGzhEF1q03xMZkFjAa+DAIb95wPnxx5+E1cxZtNKsZ8DglZxBQ7D73PmLxL5y85kzZ/V8+72xVgAZUkjyQDD65cuX7bs9fmdl9FOnTsn1Gzes/YaXjlLAPbK6eHJCHv0f78NjZ3Vakio8iBw/flK++eYbXReAFZ7tue+TO59VkBcvXaHghv0Y7vn06TO/mjEmuTqnLBQjMiqxGOP169e1XvlNnbDWBIYS6bJZ1JOFsU6fPu2yurv9fjx9p1bIbHPq1Dty69Yt6xBy44+bEOXyTqydHr5wn6xZ8SCrgHs4/f/dJtY/CAsfIgeda8T80RXAGj7LV6z6oy/zxJbPAseDh0Y+suQZv6ciGA+WL19prfT9e8r6PefaxyWyLfYLDk2y0CPj6urVa6R3n2BdAO9hr8d6GcePnxDW4fo1YW4hW6VZMJL5ijmMRV6NcM9ff/21lkeMHHOLWcPDHOPpk/GUskz2NMbFn50G1ZdffilHjhyVW7duezpVt7FYYnLCPTmXxkruEAUHjh8/LleuOFaG50DW7mHB2gcR1i85/NbbYlZwN+eYOZ6FWn+rECP34aVLOpex7gsJNs6dO6fzGoA+f2RKZXFnrsOcl9y8yTyG3mCE80aPnWgtrmu22z+fGgMEy/nSRx/pwnb2B/yjvl++fEVX+3zU5ZOzHeXjSRMW6rGnjSWjxtVrv71jPMrnZyG/QgWLSKMGjR5lsQ9VFiu8f2hLeUygMYtdsWq4XRhY8+TOK40bN7VvfuDvLEgXGjZImjT1FtbWYLGxrVu364JR9knHFEi/GTFyjEyJjVeFFyMJZaZa9ZrSvHkrXejKrKKMMsvqsFUrV5PKlaved5ExBikWB/Pzayn16zdSY5QV3sl8xcKkrHGybsNmXam2QoXKuqqsuSc+mbQGDRoi9eo2ENpScsKii6xWez9hESYWgKxStYYuHEg9bNq8TQdHs3iap/NZyKtTl+5y4MAhy3gDaZ63YJEussriqtWq1dTFqzydzzYMwM5de+jiXu7HsJhl5crVJKB9oDRv3lLq1q0vAe076sKD3Xv0km3bd0lY2GApUbykLpDXuUt3yZ+vgK7u7F6W+2+SSwwIGyjNmreSBg0b67otR98+rot3sWgjK96zkOKDGJHXb9yUVK+llt69+rhfJuW3hxqgfjEc6eP/CyHV+N27dx/5pd4+dkIN30de8B9cIO38xIlT1lVQPO3girXjT/jC2Ef6+6pVqv4JV3dckhmHhYqNkAwEQMksrmi2swp3uXIVpEzpssL3hxFWKW/eopXUrFFb2gd2luO/AgxPiZsuO3bu0TmIMXloxHApV66iLhJ94tQ7uugx12eR5EaNmkrZMuWkbt0GsnPXnmRvC32EOcix4jgLqS5XxX/lqrW66jvKNtfRua6Fvxy3tRkKZZFsFrlkocb7SbduQbrQqqdjWIcobOBgHYPr1W+oCwVyHGnCWbB01Jhx92XtMPYOjRwplz66bM1B6A0jRo7W52KOZ1V6DJrkhHm7Z+9+csWDodK3X4iUKF5KevfpJ02aNJV69RpIj6DeOhex2CHvrWWr1lKhfEWd83inzFkseGiE+ZRFHKnn6tVr6WKwtCUANBZHZC4j3b4neWoMEE8P96Rte/+DD2XajNlP2m3r/bZo2Vr27dv/2N47q9q2a9fhT7k/kKTlK9fK9//94YGur/caEJjEOPm
1k3fs3CV58xaUmTNnK/2NFV1ZzdXL208HQU/nHz7ytsTGJa4ovoEVq32bK9qCsj1r9jypW6+RZdjj/SA97KpVqwVvjSfBoxLYsYsaHSjgUERAw8IHDZWBgyMEIwfk6qOPLotOfh99LOXLV3JZCX37zl068RXIl19u3Ljp6TKKxBQuXEzKlS0vrOruSTC+cuTILZMnx+gq9SgARljZmQUfk5PpCbNlr23VWBZ1q1GjlnTp2kMNC9zoGClNvXx1m6dyTr17Rl566a+6Aq19P4bfqXdP6+rJoFCsFN66TYDcvHlbmIBBx/btPyDUZe5ceSRuarwabQsXLpa0qdMmWfHbXjbZ7TBkoibH6PnU/zd//7sEh4QKnltjiC5avFSGjRhtP9Xjd44fM2acBIf097g/ZePTVwOsQh07dcYT+WCdOneTxYuXPrb3vnbtOmnl3+ZPu7+Vq9fJ3XsPlv1yx45dmib9YenUTZv66Mro//nvf+X06bPSpVtPYaV2T4KSumz5amsXgBjKOethocjGxE5VD42hIK1du16qVq2hYy/efk9y9OgxKVCgsEycGKVzzD//9S/1JLdp21569e6nc9Dbx08KxipUIRT4mrXryQcfJN7jjl17JUf2nNKqRStPl9BtrHCeJXNWaeAB3OR+Azt10ZXZcdKzkn3x4qVcPEJdugYlu7gmc+TosRMEL7yR8++9L/nzF5To6Cm6YjtzEOsRFS1WMllAcMPGzfLMM88I9eYu02fMUk8tRunQIRHS2r+NZs67cfOmjBs/Sb03AJCly5QTFqmkHlmlPnXqdLJvv2NF9RkJs8THt7kAvGBwYTguXLDIuhTlAHB6kkdqgMTGxUtAu/aybv0Gl2sdOXpMaQ8LFi6WHTscy83ToPv0DZaYmFiXY/lBQ+3du5+wGvDde5/pfpBiY0UfOvSmLrIElcAIiN/uPft0wjXbWGwvIKCDxMZONZuSfP7w40/aUXoG9XKx6i68f1FTuPJiQkPDZeHCxAqlkF279+oid6bAjy9flQGh4WotHz+ZiL5AXzhw8E1NiRg3dZrSgDy5sLbv2CW5cuaRqlWqyfQZjsUUz793QU6fOSuf3r2rFibXAlUbOGiIWub2BnXozbcE1AEJCuole/bsNbemnyEhobJ2naMBmvUl3nn3jO7btn2H1ieeArvs23dAOrTvKFOmuLoK2f7x5cvqsh01aqw+c5HCRaVNm3bqKqQM7n3/AUcDxY3H8yEMaH36BKuSZb/W+g0bpUNgJzlz9rx9s8t33sXFSx8LnTkuLl4iI0e47OdHbGy8dOzYWVavXuuyb+OGTdKhQyfdRvsMCQ6RefMXWMfMmj1XRo0eq79v3rwll69cVbdt27YBst3ZZs3BR468LZ06dZU5c+eZTfoJxYc00ChrIO7cL9KnTz/JnjW7BAX1FrwACAMJCKNdaKezZ89VVKd7957W+RxD2wGRSG4Qx1goU6a8JCTMshep3z+89JHETp1mKZ7mAFyucfEzXNDBuXPnS716Dc0h+rln3wHBVW2kVavW6rY2v90/u3XrIe0DAt0363vbuHmbXPzwkrhzfXv16pME2cdr1KxZC6UoJSlMRNvK5Og4NVSWLV2e5JBLH1+WkqXKyP4Dh5LsYwOu7THjJuo7cz+AiSlh1lwXdGrsuPGSIX0m+crNQ4lRlj9fQR2Y3ctZvW6DDB4cITWq19Qx0OxnYrVzZN98653pZdsAACAASURBVIgE9w+ztuEFxUuE271qleqyxPZ8TRs3lQzp0stPyVDPAgICpUuXbuZS1id1vmHTVm3bZuPEqBjZtHmr+ZnsZ2xsnEyKmqz7oQcuXrxEv0NFCw8fLLt2O5BIPFeMl6BuCGONafPzFyySoUMjdTv/+vULEeYEu9B/evbsrWO/ffFQ+sq16zfk3XfPSNduPVzGavv55juIXGCHjjJ/wUKzST/pu3fv3RPGm5UrE5WewUMiPFI0WRGacZ0xFa8GQr8+8vZx/YRi0qVrd7l2PdFIxlBlfvjC1mfOnjuvY+zo0WOTNdwpe+zY8dKjR0+Xd3L12nU5feacXhtvn/t8xnjMeGsEjyFzBPOrHSHmvukLKAlQZPEw3rqdlHrCfF2wQCEpW6asTJzkeOd4WfBeQpVhjKEtoWDhgaRuoLgawftg3v/AgYNd6pljqIPZsx2Gv5mLTp91PB/nBnbsbFFxTJlnzp6TTp26yIgRI80m/UTRpG4BMqKiohXJLVa0mPj4+Mo255zDvLhzlwMhpsts3bZDzz1x4qR07x6UhHqE3tC+Q0edt10u5vYDVBpZtGixBAcPEMYru1AnHdoHCu3eLnv37hPmFWTrtu0KTsycNccan5csXSb9+4dp/2e+/+DiJR2rmNdWrlpjL0qV027dgxRgse+gz5HGe9q06Tr//MMJ0IwfP0kyZcikcwlzMQLKDv3J7o1fsmSZ0i5554BJ9jFv0ZJlev9Hjr5tv6TL93JlK0j7AAfgx5yFom3vz+Zg7jGodz+9B7PNx9tP+675zSe6igGijh07rp54Uk0nJ3imaWfugs4YP22mxryY+dkcM2LUOFm91lV33bBho7T2b2sOSfLJeDph4mSpVLFykvXGuD+AWdqzEUA46tsIXha2eVoOYdOWbWqImWP5rFmjlvj4+Nk36XfeZZrU6axx2BxA35g9d7506thZvUb3MyQZV/oPCDWnyvfOGOfLl68qyEZbQH76WSRjhkwS3C9Yf8dPmyEVKla2zps4KVrQKY1gmEQOHyVHPbSXR2KAMJEuXrpSxoydIKEDwiR37nyywslJ5aFefOEl2bRlu3Tt2kMHqx9/+llGjRkvQ4ZGSuSwEdK5UxdVGpl8Pvr4Y2nUxEst1xbNW6kixwOsXrNO8uTOp9QUJi2UIP/W7fQZv//Pf5TKUbtOfbVy2Xjo8FsyaEikDB40RAoUKGQp9aZS+CRXPO48BoOIiEipXKW6ro1BxTMA58qeU5avWqv3WLRIcaXMcN606Qny/HMviKGmMPiDJGJtDxkSIUWKFBcaLsLk/Owzz8q4CZNkxYrVei8ok+6CS6uZX3MpVbK0LF22Qs5feF8yps8ouMg2bt6qVAo8JIMGR0po2EAZNHCwlCxRWq1OFLpX/vo3qVihkqxYtVZCQgYItJaLFy/pZUByuTesZpCIO3fvSfy06fJ6qtTSuXM3Wbdhk/j5NhdvHz/LgKMBsQ9rt0HDJhI+cLCWdfjwEbWmJ0VFy9r1m6Rixco6+NWuVVvatw9UpRDFL/XraWSMcwBAIX3p+Rf1vletXqcIev8B4VaMw5q169S9h3LCKtu8DyZIuzC4R0SOkKxZsqoCMn/+QnUX4mI1wmTZtl17VSQaNmoiQyOGWUr8ihUrJTDQYYBs3rzVegZz7rDho2Ry9BS5efsTfSaQD9D/sLCBAspuJjDqZVJUjMTETBEfn2aWYkine/H5F6VLl+4yZ+58qVKlmk5ulD9t2gw1QKDZ0FZAXkAshtvQZ85p43xuaE5MtggG7NT4GYroRQyN1HZgp7qZ+9+2faeiQl988YXZ5PLJmgDusmr1Gu
naLchlM5NFwYJFhAnkjFPp4QATv4LBjlGw26lwupwsosYM737WLM+ePBQY/twFt+8uN3f6l19+Ld5efsr1dT8ePmrCzLm6uUvnrlKtWg3rXZtjMQJZYT1h5mxFqPbs3Wd2WZ+8yykeAAqUN96Jke+++5fUqF5LwsMHmk0un4EdAiVdmnQC/98IdAEWFaTuc2bPpf3P7HP/xEDv1qOXcnDt+27dvCUVK1QW+oiRenXqSbY3snk0QFD8Ur32erIIMHVv4oIoj/GK8eLXBACmZImSsnT5KqWclSxZWq5fvyEY67je27TroNQ61hcqkL+glkuZQT16yUsvvKjjPcoTAEvr1m1lYlS0jBo1RipVqiprnMgcnGFfvxZ6LkAWfRhDDfoMShP1/9bR49KuXXupU7d+kvgprocSxbg0bPhoGTx4qOTLW8ACUNat2yAv/OUFfa/Dho/UOYTjY6dOFxTl0WPGqZHQtm07wWgHKGreorUq4e3aBig1gX6AcZEnb36pX7+hTu4AJ1DduFfKa9GyjVSvUVs+dYJnKE+Dhw7TcYl6A+E1aK6pdwxNDMfGjb1kxPARUrZcRWuxw+rVakra1Glk9twFEjMlTgB7UBgRjMdnnnlOGNMQ+sXIUWNlwoRJWr/FipXUmDL2xcVP13EvLHywzilQgZh/3QUaJ3VcokQpmTV7jty89Ym+U/r9nv2H9B2dOHlKoiZP0fiAMWPGSuHCRdVoQKF8I0s2KVyoiNbN8OGjpEzZ8pbByNyGkUU8UafO3eXjK1dlzZp1Ol+0aOGvtEwUbRYoNbx8jEbGqREjRql3FoUbYaz660uvyIAB4bJ77wGd88aNmyC+3r7SzLeZYEjxDvLmyS/du/XQc2hzzMeM02vXb5SmTb1VwcaAQRgj2nfopG1w+MgxCmC6jw8olqDyuXLmltZt2ul8jVII1cRgAgBaUFiHDx+h6DAL//7H6X3dtGmL0izN9Z555lmJHDZcr8+/8ROiFLS4+9kX4tXUR9KmSaceUebiQoWKWnPO4beOqF6BkcG1zH0uW75K2znbli5fqfNky5at5aeff5GVq1ZLvjz5lNqK8QZwSh8M6tnHMoDQtZo1ayUzZ85RHcDXx88ymnmuufMWyvBhI1Tneuuto9Z927+wnet4eXlLXNy0JOOaOXbm7Hk6ppjffGJQ/+2VV6V5sxaWPsf2v3/7rR4G08Lb28/jPMIBx46flLLlKliAgZ5k+8d4bLzAZjPgxoRJ0Unuc8WKFdIiGQ8IBtHGTVu1XsuXreAR9OEY2nWDRo09ejqgKUHNwqiyC4yB4SPHCkC4EYCIXDlyuwB1Zl7+748/qlHQtUtXc7h+AkBgUGOEp06d1qKyuRzk/MGYARDoLu+ePiMNG3kJIADy3x9+kvTpMkgf57Hcw6AhEVKyeCmPACjnAIgBELjLIzFAQJv8miW6qTA+nn/ueUUuDx48pBWzfftObcRw3FH0GLCN5MyRW5F2UNzNW7bJuPETdde59y7oBMBEB+8NLiJIIsKxxYuVkNuffKJoDEhMF6cyReAStAg8BwjxCR06dpXvv3fNPEDwUc2adfQY/hUvWkKmTo3X+/Tx9pU3Mr9hIUvdg3qrIsNxnFe6VBk5depdPbdf8AAdMExBvEQmWgSFNVvWHLLUiWBOmz5DkXBzrP2TyQTOOfLZZ59Jvbr1lIt359N7iq7gSRk5ysF356XXql1XXYi46ho3bCxZ38hmueswGlCSEW/f5gIPHsFaZdA5+vbbauDYkVKMLt4ddI2qVWtaSjcBSKlefV1iomO0IfMOOnbqKqAJxgXarHlLefNNBy/w9p07UrhQUYmOjtFrMqClT5teOnV2dA4m5MJFijtQiF9+Udfnlq3bHfcXP10ypM9o3a9uFNF3AoeTQd+0D5SfLG9k1/cA1QgDEgUIOfzWUXnlldeUv89vnqtt2/amOFmwaIl6FQwKYgY3Plu0aCkVy1e0snhhQBGLQJubnjBLNm7aouXMnTtPDT8m3UNvHtbBYZET0b185ZqkT5/JUp4Z5I85EQRiQWrWqiMTJzrQRQbUqtVqWQPqgP4DhPaHoARFDhtpDYzly5VXZdrct3kgDCWoOgQsP6hgJKBouMvZc++pMYsh4dXUW/7+90TjhQByDKnkDBDQvdy588qJX+H82q9J/dH2CTC0C+0LhYxgQ3f58NLH8qZz8rt27YYqbO/bAhYBOerUrivNm7VU5WfgwEGSIV1GwYC2CygwiL27YKwysRs5c+68vPT8S9oHzDb7Z3hYuI4XPIuRBYuXWaBFVNTkZCcyjmeiCOrVN0mAHyBJhQqVFEAA2U5ImKn93B0JNdcE+MiXr4Awdj6IQH9AYf01mT9vgWTJ/IZ6ZDkWTjGoLYJBHT4oQscNfs+eM1e8vHx0H56TVK+mUtCGDaC7gDd79jiMQTzPA/o7UDeChc149M7pM+oRvnb9uqKDeYmLatREy+Sft08zpR5YG5xfNm7crJOl6R8oacxFxLywejljJEo17QN0GXS6foPGVjEYCNApMNRQsiMiHYohbbGVfztF9hl3oCGCekIxQcqWKS/vf/CBKjbRMbGqMKNw4m1hrjMeAVa2b922Q5K4JgyHqlWrW4pRrZq1Ffyg7B7dg5R2ZzzKkcNHWu/swoX3pXy5irJlyza9DyZ5vKdGJkyYKEWLFlfP2smTpyRf3vyWJ2vd+o2qbJtj7Z94AFs5AT5WHvdv1Vqvc/X6DS3r1DunZUhEovHSqLGXYOCBtHcM7CQZ02eyPI8dO3ZRFJ7yO3buKus3btJL4SHAs0Nii9w5c4u3c8xjZ5XK1WTSxCj1lDdp6uPiYWEO6B8yQBF+QDvGI6iQUE71Gp26yPLlK/Q7xna1qtWtNkY8V45sOSwUGW8VRtoHHzhoLrwrw0DYvGWrgnTGU6AF4gH75Rf16DNf9uvrQIEJ5YMag2cNEKJmrbqC4oagu0BZMaAs7wrjwAigHACWmUuNtwFgF29YgQIF5bbTU4URBkgJkr14yTLLuwJ485dnn9f5H9Q7b558MnaMQ1cAvMuYMYvVRvBwr3IyBJg3GzVuKmFhg/R2zp49LxUrVbPiOaMnx2j9GYWdmIkPP/xIj/X18dW2BUDsSdBXoP40qO/qUbcfi/fQeMPs27kP3nPmzG9ISLAr9VMNEJ9mGhhtP8d8h4EATdUwQsz2+31itKx0Y01wPN4t2r4nIb7ywEGHzsO4iy4JGO4uGN3UA0Y5RqBdABh79w2WAwddvfTovOiwjAtGABnz5Mknu5xjp9nOJ+MZ8TZdbZ5vdKEVqxOBq/6h4RI+cIj9NJfvjBWMx+5y4YOLUqdOfU1IACjAnJklSzbVscyxV65dlxf/8qI+pyeAFLAC4N5dHokB4gjU6W2V/Z8ffpTcufJKXKyDutOwYRMLzeEgkCYq14ivX0uZNy8RbSQTDBMDyA8oGMFCCAhhghNZxS1Uo0ZtOXvOYZW9e/qsBIc4FImdO3YqGm/K5xOr11iLZjsoFJ0HKgaIfprX0+hEz37cd61aJfI04VTbXx7UriNHHMpM85b+MsNJm
+JclH0GINKsIihRxn0I5SzASQXSnbZ/8PFw2ZkUbcOGjRCUX7vAYWfQJvCpYP5C8tYRhxt01MjR4uvbwjoUDua4ceP1N8FVObLnEh8vHwuhYQceI7jnRqBGTZgYpdw+JhQ7mst9eDXx0kMHDRqqx5nzCFDjHa+3Ue+6du0ugwc7GjuDZdmyFS3OIGgviDXvlYGtTt0GsnzFSi2ODCG1a9U1Rbt8Mlg2a9naMgrZ6fDARKuHDFexXUC3Yqc4KH7QLUAYjUAha9zEW0CzKBc07aefHXzSuNip1uTP8VC2GjdqKtt37lE+Z2BgZ+WRtmrZWr0BnI/4+bVQVJjvDDQM9Bs3bNQJm2A+M+izH4rh+IlReh6oerANhSa+AsUZOXPuPVVm/P3bSrfuPVUJw7Njd5dz3MDwQVIgf4GHChacMT1B2tmMMndUdtHiJYq+EQRNFiaE/sJzGo+QbrT9YyDFA+oe0Gc7xOUryO+qNes9Zsr45M4d9Xi+956DzmM/EeQWGhBuXWiKRQoVkWE2FBFvKgF6q22UBe+mPhYtwJQ1NCJSB1Tz23zihUuYkThgoqjgBaWfGTGTMr8xQNKnzWAZgGQW4X2hdJCFivEhXdoMFmhhyjCf9IlevfuqB9Zs4xNlpGaNmlK9Wg0hCB3PFAY9QmwRysa2bdvVe8SEowZgrrzCePggQlnjxjva4f2OnxwV7TIetg/sJHgRkLXrNsqAsMHWeDdh4iSljbHv5ImTGqhvxnDGWRRtIxgckRHDzE/tiyguxGuhlBqjkrE+ykkHMpOzJ2odClqrVv5WeXwpU7qc9HfGrzAuUkdG5s5bIDVsIFT3Hr1lvBMA4xgoZSgQjRo1Ua8m4wTSp19/gXKMAIIREAsdC3nvwgcSHBKm9XH+/HlpaZtH2M9chOJhF9P3QGLnzF0gmTNmltBQh7cNJYh50EhI/1BtW+Y3SurKlY4sWHgK7NQT5p069RrK+fMOg5S+S0YeBGMQb6s79ZZ9tIum3r7WeDZjRkKSuAXQfpQNKIY5c+TShBWcO2/uPKldp55eg38dO3WR/k4jE2prqVJlpUqlKi5xWyjkpj45h/fYL7i/YOhggNiVSfSHypWqaPnTpycIHh27YJDYwQPmUahWRmrXrmuhyJRPPMGF99/X3XjgDHCH8Qpw5EmoMwy0/TbFEQMKoIMMe81a+LtQsgA0wsMc73PrVlcDBCps46Y+alQwzkDPNbrKsuUrLG849wG1qn69Boq8wwjAuOveo6eyBmjbxAMgzZq1lGPO98xv3jtgAFKlSnUXZZB+HDIgTPdBt25loxwBtjK3I9du3NQxCr0Ij0nTJl5CWzSeHT3I+Y8+TEYr6EYELeNp9SQo94AqnuSHn36WKVNi5aUXXpZ2AYFWewHUJubgiy++cjnN9CF0rMyZsrhQXl0OdPtx6dLHyhxxpwVzGH3PrgeaU5nzqQM8QlCTMDxTv542Cd2Q7ZOjY+Xevc9Vh2jcxMtl7iaOcuTocbJztytl/uOPP1Y96KaNrgUwWLBgYWv8x5Sxz0Fly5ZXyp+5R2Jv0FkZ76Ab9u7VV0HK7/+TGAdpjuUTlkyvXol6vNkH2IeXu2HDxqpD0X+gZRuBaYOH94svv1adEyPJbjhxHGySadOTvudHYoD06d1P+f/mhr786hspVrykgBB/8+236jr/4GKiK4njoFDhIgclRxF82+mCun7jljY2Jm6UHJRTgioRjAC8JwhZqJiULjonBDLK9A12IGm7du12QRj0BA//aHBTpkxVZRqkpGkTbxnpVDCIP6HTGiF4CcXPCPSbd95xeBVA46A0GcETUKdOA0XH8E7gqYBegETHTJG2yQRDE6jT0tb5mVjsnGk8ExERw2TYiFE6IKOUmkAgBng7glS/XkMZaePL0iCgcjRo0FhpEzw7isCy5Q7Fn3sj8Gv5ytWyf/8ByZ49p0sHpiF36uxQ8Hv26uMSP4GhgsvcxPdQFm7LEU4FhUm8dOlysnGjw3OAslW+XAXLxQ7fFCocin/deg3URa+V5faP+JCGjb1c3Ih4ejBaVJGokejN4lSQzLg4R/zPqlVrXDwgDFZY5aTDpYMRjG2EwRJ+upEVK1erq57BNH76TJ10yazC+/jhh5+UeoJXCYODdmmEOtmyZauQSpbgM7uxR6DkFKcCExzcXycSc97CRYtV2eT37r37ZUbCHA3Iu3f3nkCBwqAjGMwuKGq455NT/Imlsg9WnDtr1hxp376jVQyTHjxdd+OGicZu3DG5eUo4QDvAWwbiOmTIUKtc+xcGbtBKBPR47PhJyWbwwAvIwOeephfeLBQ1xhCoWygq3l4+2r7NtbgPQA5SFRoZPWqMRcMz20Bz6OvuQryNO40sKKin5MuT3/1Q/U2cBhQVI3goaH8YmvQX4giIAxoy2HO9EL/Wo2efJBnw6LctWvqrkUUmEUMV4TogprShJk28FLjYu/+gENBeqEBhGT16jLkVl0+QfP6MQDN7kEB0lBeSExhpG9DBou/gvRkzbpK2c/ZPjYtXeiLf8ZQRm8Q7Q6AQYYzTdxCADZRN5B//+KcGyhPvQn+CInTsmCNmqlKlKpZiDQrXpIm3FWOmJzv/Qe9h7DMCMojCNXKk4xr0UWII7YICx1iL0spcZOJZUAZ79OilXmM8AvXqN1JqFuf2COojY8dN0GIwzpmUMTQR6FHE80DJunDhgirQuuM+/+ibPDcIM2ML8StGGUChqlO7npVuGo97DxtSibG8adNmLR06mH2eYn6E4vGJs89BGzvs9FST7AJFjjUg3IUxr4mXrwX+MS/aPSv//Oe/BE8PiQ1u3b6j9GoTpzR5crTOB4xVCMpjnz59rUvg1QUVh6ePAY00bNTYmtv5TTuB5sSYj2fXxCuyj+sCLiDEhEBZsgvtdLYtuQTv0HgqMBzQG0ysHOWWLVvBGifQOQAMmTfoz9CdPaX5vnHjlvj4tXBJxc+8jgIPLadc+Uo6Vpv7IrgX2jSyffuOJEotNMAdu/YInvODb75lTtPyiCMxgmefbHlQBGfPXahZ7TDuiesBCAS85Bnr1Kknb72V6O1FlzGGFR5V4+WhXIyIgYMc4xLvzhgc7NuNLuWkIJ08dVomx8SpPsZYRLIMUtQSLG4X5o8GDZoIHjfk6tXrmgiEOdhdAFLd47SI9/rqq0SaLl59wJ8pTgOK/ouninnQLvR1gKfPv/hCadP2OAT7cdy7MfCIvVi6YrV6Ku3HmO9Lly4T6GvucvTYCc3uSNuCesf4XqhAIWnZIhH8oM8BrhughHgnPAlvv51ItwJQJs0xAd52gW5F37THaGHw472zeznMOZ/e/UxeeeVVq12zPXrKVCFTI3MlxlK3rt01oyLJRzwJiVqIV3UXjPR+IWHOOehuEi8PYx1xMEboB8Ti2oWx0lPa4UdigDB5Zs+R23IjwdmkA6L00CDg7oJaGUEhZdIiJoHYAlARhIbr49vCUsxAgvPnL2ROE/82ARp8zIabt25pQI7xLDBBhA1yWNl37tyRho2augTCgAj++9+J/GzK2L17r5QoUcYqv2iRYoLLESGW
hWAoI+3aB8rY8Y4Jh20gNka5mTRpsmTPltNCP0HMSHGJMDgwedy86Qg8wg3VIdAVqTfXYFAgUPrW7U90TQCQGxQkI7jXM2fOan4qRxfXIRIaGqZKstnZoH4jiXUG+GOwGGSENGvcL4KbFuXXCA0e1ycDCmlrcY8ioN61atezJmYUKnvQIe+N+lixcpWFWBK8bpQgBgRQL+h1CO2lVKkymmOaiffLr/+uaWJRBKFQJCcE4xGfACqBwMWuW7+RDoQEiMJ9N+5cELOmXn7WWhZM0ASO2wWEK03qtLJx02aXYGOQANz2hs/bunU7Ge5Ukvr0DVHqlikHlJ4AVKhbpKpbvMTRuWlbFStVFdoiA2iBgkWs4EeQi6BefVSxoRySCeTOk9+i6zAJvJE5q2ZBop2Txcoe0Mo7p424CzEwoA9QR+yyYNFSK/2sfTsGEYaQXaDCsc1QS9hHrMrU+EQDm8nMXYkjNa1BgKE54fEiK5dd8I4MiRhuBZguWbbSharFMxkaBedB98KodkdTyCj2Tw8LOTGZkfnJSK9efTXFrfk9IGyQ0hbMbz4nx8S6TMZmH5MyCpBdMIReT5UmCZqHslS7dn2LPsWAvX5D0nZM1i9iAwyKbi/75Kl3ndlZXOkMTJCg1PfLRW8vh++AEoy/7utQ0CYJBjS59jkWgMM+BriXZX6j0MG7NtKhQ2dLYcToDezUzeovXTp3kVR/e03eOnpM3fTVqtaw2mt8/HTt+yaDGgaIGY9QckHLEeIQ0qVNrzGB/EZJHDfWQSlhTAX0OWkzLs19wVPOlTuf9Z4BDcjKYjwp0PLediaC4BzaFoofcXnEDBnaDPvwOA11UrBAgrNnz6W0H/b17NVXxk+YpJelb0LRNIYyZQwIH6zeVehLeEDIXmYEpdoeh8P29957X4oWLaHZ4viNx6uvk94zffoM/W3O7z8gTJVG8xsvEl4wBOAufbqMlqG0d99+8bPRfUjv/bYzGHT1mrVKKzPl2D+Jj/Dxa67sAAwxFGuMaSMk+Xg9dVpLya5evaaF/qK84+E2goGEhxYBDQYdR3j3BLAjjLctmju8vvwG8GD8YAyg/RhKH/vwUkChRfCEGkNQNwBUBnTQuDvTz4YOjbDqkrEXsMAkEMFbhYcM1gXyzd//oe0Bb8X9MmmRRUrbpNMIhWJXq3Z91XkwBrJnzSHE7CFQoFq2amspmYA33KNdaDPEeixbtkINV7OPdgMtkEQXCGCLoSmSNAU2ghG8pTwPQvuZ4PTkkUWpdJny8p4zMQRgIOCTEQxWDF/k2PET6sE2SvOyZcslTao0iroDegG8QpkzcurUacGDYBcMtmJFS8hI55zJvs5deljtw37s/IVLNAbLDo7BVMAItAvnQzlDWNcD2uR/fkhE8gFeZs+ZL2SlQgBWsmbNkQQoW75itWzf4UhIAF3q3TOu9F7KseuJa9escwHXKZt0tsbY1os5/5GJET0J/QYBOK9Rq64G85vjvLybCQlHjPzwoyMGxMydZjvMgIhhoyzjxWyPjZuqNCcT88V2AASAFWKXDMULCvShNxOvY84nHAFPiaf084Qf2IFXcw5gUNjAoR49XRyDcUMGRyPET9LP7UJfN5R8+/ZHYoBQ4IKFSzTPvn/rNtKlWw8rQBs+Hvy3vn1DLKuT7BT9QweqMQGPGJTBNHjSYsKtxMojwLhWrbqKXJPTnuBGBo+7n32u2YIol8nrwsUPNagL2tcIJ5+ZiYjAGQY/0CImeGP1mgqg49AZQetZbAxDiTgTOPcok0T606EJJiWQLVvW7HL02HE1np5//gU1FphIUdZAXRjIsCCnxicIFiyI8IDQgUKQGS8X5BHFmyC9ufMXWhO2uR8UA+guZKzasXufYBBhCJiGBN+3StXq4u/fRpVCkGi8IGQ8IUA0V648GrT03vsf6L1zPko7BhFuPhQ0gr+vXbum9AAQZFLScc/cPwosGasQUOqq1WqoRwILf4NToUKJIUAMFAlXNJ2xpwAAIABJREFUsZHu3YIc1JuT72jO6mxvZJfSpcvI+QsfaKYUbQP9QuS7f30vQ4cO0zZh1m/AIqfhz5m3QMaMHaf8ZjuX3lyDgCyeFyWWOoL2sN+ZCo5jUOSZfIlHiRg2QgjaRxgIoFC8+sqrVvwG23Edo9xDO7PLjISZOtCSphX366jR46wgPFyZIOu+vs0kpH+Y7Nq9T9sVfHIf32baZkFDQK1NlismUdoyqezozPBGc+bMLUWLlLAC00aOGiOFChfVjguqnzN7DsszAELEszJpjR0/US5fvaYZUuz3bL5DvyhTtoKi7kzuvPOFi5clWcSI44nHiomNtzKP/fLzL0ofxK2MQUFWMzx/8JMRkh1AK8qbO58+Pwob1DFiewoXLu5i+JDWEBofAce8W1Ae6oCFjBASUhD7RTwP75KYIiYQkxkKpZb1MQjspv1BlcMgpG9C8TIGoRamwaP71cilnZHwgT4J+EGsAnnMoW+S4YR3YQTaBVmw8Ja4C8hsVHRcEpSNJAKk4aUeeC6M085dummGJsqg35MRizgme/9g34ZNWzQGATSPgGIjGIIoWaz/Qp0Sa4QostwtSBMv1G/QyAVtNecm90k8BZM4cU8oskwKJOVg8rbkF1EP1JtOxJVc8CYTnHUM3uYrV3VsS5s6nWZ6AczJl6+gKlmkCgYNJztLpUqV1YMKcEEMw+Ejx/S9PvfcXzQfPJ5xPFq8IxIDgPbmyZVHxznoHfRl6gD6Ju8FLynBvYz9xOMR/4SHDiOGxCYg6/QFdyHLUZUqNaSVf2s1pk3OeqhNXJt3ZmhHIMosCAq6ylwEILbBGaNATEOhQkWkbbsATYAB8gy9h/GWGDfonwRoQ++kXLzTZOnDq5gtWw6LGoQR5O3TXMsh+Bmvpt3A5/7pi3hM8QwwlxJ8DK2QRC1wu4nHIrHKlWs3pEyZcspxx5NMPyGZSJPGXlYCka5dukuxYiUkqGdviYqJU48tSiHj+1+ee16zPtI3iKlJlya9jgF4a+zC/dAm8ajsO3BYqlSuqujr5q2O7DZ4oUDKeT+sSUBCGDwGYeGDVPnNmCGzxmhdv3lbChUsLJkzZlE6FbFeGMG0RwAClFnmZShK1A3n+/o1V288HlCEd4XByRoDjAXELSFQnYlLKlGitByyxXZFRg7T1KjbduzWIHTmdJKKkBKbd8y7ol8ADBETyW8yryEAMOgLxLUyL6CcE/fiLjdu3XZQywaEKwVN6bY28Aya0BtZsgqUG+gpUGkRlFTa0EsvvuyyeCRjG2MldDa7YCRSz4zTKHmh4YMs7yExR3gn8ICiBBKbCLMBwLBlS39NYENyFbx6MESMwLQAyCMm8cIHH2oCASh0UCkRnj1P3gI6xsJ8IOEFge3Ips1bVMnmXhmLmZMJpHYXEH36jpe3rwweEqnXcT+G39ARYX0Y451txN6wuCYUs3btAlSxJrkO8sGHl8TXp7lkzpBZoELTXrgPUrGzNpVdABVKliwrJC0gLpf2NWPmHNVxoMdDV2SMjIwcrjpQu4AOamiZvnDi1Luq50CHnDDJAWyhuOO
5o83AnrALmfGKFCmm4KFh6rzzzmltuySY4f3B1LDrocTT0B/wCroL79MdIOQYZS506KRzKIo9Rg2JLdA3kSVLV2jqd5IIGYPMlM18+9eXX9G09yYpC14YdBxiqumrUKWMpx2KJewU5jR0AcYyd8EDjM6CXkpSBrKTfvpp4tpdzOk8IyAQ4x7lEIeHPDIDhMJAOElVaYKR2QYaSgYXJkQTZAUlhUA0KnLpsuWKWk+Jm2ZZ0gQoYi0x8aEIszI07mJSy5K2jsGTFRcpFxSb4+Dpr1u7Tlg/wAiVB5oNkmH4gWaf+YQnFx8/TV28t2/f0TJB97kHKEUoFFevX1cqAYFtKBUExZJNgmeyrysAdYbJ0fDlQS1xA65Zt06RBegn69at1/UOCCi0W/3mfmgUIHQ0SAYfOjzcPyPcDylo6eBw7lC6CfoELQJZAY3nDxoBHiSzGCAoFh4UOMYIVCEG8+Mn31E0iDLcBYNpwsRJLmmVoRmQUYr1IoznivNw9Zs4DoJl8WRQPxcvfaRZLXgO8mBTJyD0vC8T3Ie1TnYSzp8zZ56m5iRY071+OG9IxEh568gx5fC6B25xH2T2IBOKSRnHNgwQ7gcPjTvS8PU33yoFy/7sGEYkNSC2gAnKXVjwcnJ0jLqOrzvTb4K2oXhwj7QDk6DAnAuqQiIC3L/UPV4X0iKbTGocx/OTEYg6M3El5vyNGzepC50JnAnmfoJCCcUAQ8q8k+SOx0jC/W9XzDkWBZysGMajwzZScc6bt0Db2cZNm2TBgoWyYOEipSqRgthdiK3C8OYccoWblWjxmEFNW7NmrUC/I/5r7rz5VtukHIwdst2B7DIQM2jTHmhTtFUQIDs1gvdOnTI+oECaYECUDLKQeRoDSE+I0ZOckO1l6zZH2nD7MaBkZGFjEKW/g3gawYtDVh9ogfb87Twzyjr0C9ohAYVGuHfKw0sHZ9ik82Scg4sPpZR+4e7ZMucn9wlgwVhB/c+cNTsJvQtlDJTNCOMdbdQ9XSbACO8K7joegytXrun4Aj0BTwWCBzJqcrTg/Qat/bczhSPjFOM0fQmEFiODd0dmFoK5qQ/24+lD6NPwvmk7eBfpS/yR5IE2c+fOXeXBM36QTYg0uZ4ELxRBlXYaBugufY6xwEyw0ISYM1g7x8xFMVOmWmMunHvmEGIoUMppq9wP1DLKpq6YD3gGPE5QzWij6zds0Oc0hgZef6hUtF13+qS5f54ZGhgp1e/e/UwVsfnzFzhifHbvUa/G7U8+1bGfmA/GYsYdVqvmmQytjfJog2SghKqDMPcy3rIGBeMDbQtFjzYHqGSULnMvfFJfe/buVe8u74wYP3s/ZyxjnKHNfvn1N0qZhrZIHySTEdf+9NN72u6ZL41HgkB1+s2Zs444JRZlQ0k8cOiwzi20Wff7gb5Cu0APMAKAQLn0+3O2OZJ2RewE4wWA2ubNW7R+UIBoRwQMUweMDbTLDZs2aV1QLgHjgKacz7ueN2+hZqQEALELwF7kiNF6z7Qb1htyF3QFFD47XRW0mjGc+6av2OUf3/3LAs3Mdgx1YoHw0OBpdW87JNphLqJ/M2YieNegwdD3oD0ZRdOUySfjE22SOZ65HL0EY9wI75uxGQWZNWHsQrl46YlptHus7cfwHa/O+AkTrQxt7vvNb5IhrFi5xsXzwD7ugTlog5NeyDZAC+iBjKXoIfRFxnbmD0PHN+XyiYFO/BLt0s7aoA2SGIk4QtoU4y9jgKHKcy7thfGCcQeQCO8Z4BD3yzunf9iFsYJj6WOM26RRR9CTAMo5xy7opIGdump/t28333mPsXHTPNKq0THpJ/Qj6skuZCADtOTd273d3A99c8fOnTqfGDAKEJ17Y8xgTGNcMHHIjPcwThgnqGd3vcRcl8yxtEHGOPc2CrhKBj6EZzb1Q3jCIzVAzM3c75MbII+0+8qaKNlmsLzf+Sn7Hk0NQA1o1aqty6T1aEp+uFJAs6KiEmkz5mxWanU3QKBVQbNzn5zMOQ/7CQoCGukuKMsjR3rm0Lsfa36TNq9du0AXZdTse5w/6Y9z5i1Ur517YOzjfN+/594wtkD7Jk6KSWJ42cuF6hMxbKR6udzbov24J+07ExGGbtfuQRbHmDoBOAAxc6cHPWnP9zD3u3zFKmutKXMe6zuQ6CNF/nc1AGX5gpM+9L+7quuVMLBB3d2FNNCMk3bBeBk0ZHiStmM/5mG+0//27E0M7DXnrl23waKPmW0P8tmufUcBgHhShPTei5Ys90gNelKe4WHuk7klfOBQNSrtHhH3MlDsx46PUhaMext0P/Zx+/2f7/8jMxJmCymoYYggALGANoRnYIz/zw0QJr/Q8MGa5WTJkqVqVbFSowlMfdwq8Wm9HxCe1KnTKMJvKB9/xrOCXhUsWFRRRviWrE2BUeBujGJVE+hEMDfIrd3z9LD3TZwLC+4pwu7kg5oycDE2btxEg95A/B9EQDnhp8Lhha/9pAlue+rceO6etPt/2PslWxTPaxZBu9/5oLys5QEn92kR4komx0xVb5x5JiYIKIXGo2G2P+2fI0ePlWYtWinKt3TJMlm7doNcvZaUcvO018Of+Xwg9BkyZFQqjKeA+P/VvZFRr3iJ0kLsJR4/qHVk+zFxFeY+AMCIMSCuEW87aPpvFYwuvFzEtLDwnF3wUuFVz54th0vchf0Y9+94haG/pkuXQWPVfs1b7n7+n/Wb+Rx6q6fYxj/rnv7I6xK/SrjBgwiL0gKI/J529iDXedTH4E2Mio51yfiHp7xvvwFWVtj/uQFiHhKXFHx+OPZTpyYGuJr9KZ9/bA2QDpMYEBCfe7a1C/7Yq3ouHY42wW1wx+Fp2l3B5gxcf8RWsOAbAXMgUL9V1q/fKI0be2u6S/cyyEBCykbiH0ywpPsx7r+JIYA737p1G00xDd3mSZQnDWH5rXX8W57zFzcE9Lde+3E4zxPihocHQ/T/owBowJknNgTu+ZPaf5/Ud4fHmfon/olYoD9TiB0loQpxbSQtIWjXXYhbILYiMLCjZgGz01zcj/2131AxiUNksVl3OX32vJBhlHnankHR/Tj7b4wjsg0xf7G2x8Mkr7CX82d9/y1j8591r7/nuiYJx8OU8aTVjaf5hGcwoRg8+59mgDxMxaccm1IDKTWQUgMpNZBSAyk1kFIDKTWQUgMpNfB01ECKAfJ0vMeUp0ipgZQaSKmBlBpIqYGUGkipgZQaSKmBJ6IGHpkBQlD57+HlPxG19YhukuwTZmGoR1RkssWQIpPsGP9fAoyTrYj77CBLBQG4j5N870zF+mffEyn0yMBisgZ5uh+T0tTTvj9iG/dyv+wrf8Q1/4wycWHfc6YifdDrk/HtaQmY//mnn+W77347v/5B6yy54/6PvbOAr+pK/ni7u3WKtrS0UNw1EAKE4BZDAgESHIIEgoZAQnAN7l4guLs7xa24SylQilS38t9tu92d/+c7753LfS8vlLa0ZbeZzwdu3r3n2rnnzBn5zQy80xNR8MxeK8BTm5R9jh744ssvNUPa79Ef8CqK3pFVK4U89wBZr562OAsgbU8LkUHtUb
GQ8AR79sWn5bn/W5/jVysg4LnIZEQef5Pi9b+1M36v5ybF65xkqlE+6WcgtWOnLtHysS0v85O+x3/b9RDs7Jm0+g8g7eLDivB/5PuQQq9xk+Yuedv/yOe5d/++dO4aYxVysz/LzVsfab0C0gE+LpFcgJTCBBzu2rVbU6mSwtReAO5R16Kg5rQZs4S00hD1CEihaE9nzP5Tp05rGu29e/dpukWyjjyKSEk4b/4CTS1J0B9EljRDpFckZSMZcTwRqWdJkf0oQnB9lJGGYnczZsyUTz75RC+D0WDo8FGaivtR1zXHNm7cIouXrnDJ2EM6ZVJukrLSFOMz7T1twbN7UmDoH9I0km70zJmzmk6X9Kvr163X9L6khCUVON+V9LikbKVgV3L05ZdfCrFYnHfo0BFN80gqUFJZGiIzEKlZ97qlKzXHf6vt5ctXtD4A6XTdacuWbVqT5+emRHa/zp/lNwWCySRnirr+lu8Nr2K+kNY9hR72gH0+L1y81KUG0cNWv/9fpHitVrWG1gT6/e/uuCOGPnsdE2pgUKvDnVhvunWL0ZojvzQWg9g7aiGl0MMe+NUKCDnrCR72K1fxDw8ge/haT/dfCBbff//97/KQMB+sdb900iT3kOTjtgtoybV7Gvdfvf6BS7pciguZfP1/9PNOmDBZChYqqsUu/4hnuf7BB1bRKe7PuCFLmqfA5QOHjsirr6S2Ku4+zvNSEKtmrTqSM0dODfbs1LmbFvR6I+ObMnPmu4+8BHV9Itq0VyGXhtR6oOAjxdAI4pw4aYoWEuMYedgp+lm0iJdWrfZUsI52pAsg/ztVr0NDGwrFmyZNniadu0ZroTPmz/yFizVrH8GpVarVSPJtNmzaLNmz55TBQxKSfX5qIFSvEZQkd7w5gUQMzZq3FCpK8xwfOuv1YNQJC28qFMV7FC1bvkpie/V1GdcUVaOAW/0GYVrUkVSn1DVIjvr07qt96X78+Psnlb83b95K2rSJ1GKpFFOL6thFC8WuXL1WlRwK5HkVKy7tO3SU+g3CtRAZSosnwgpLkS2KzlKdvGt0jHB975KlJDo6xqoZQ0rg4SPHytp1SesseLruk9iHkvXCcy9qvQj365EhiQK19EkK/XQPwDdMTZ6fbv1rW/xH+bin4Ndfc2WUzRUr1/yaS/wh58K7WOvsAjZ9Y4q9/iEPZbspa8ErL6WSyZOn2vb+vn9SwNLUT+HOyGWeZDPqkpUu4yesV7+U8JxUrxHosYr6L73mf/t5v1oBoQOohtmxc7RLYTpPHWPXxD0dT9n339EDWCZ7xMbL3XuOSrX/HU/teEqKQGEFelqJaqZx8f1kpbPy/O/9nFTBpYjV4xIVyQcMGPi4zbUdQmUpnzKyZ89e6zyKjaZLky7ZYntY06mmikUfwqPXuGlLLYhnLkIlb9KqGlq0ZJmeY3572rZr10GyZcvpkgac4mTP/e0FmTdvnnoDuI4hCl8VLFhEbnz40NsxJ3GeVp6eOi1pMU9zHtnVqJ47dOgws8vaHjl2XGr4B1tVZqOiOmmVXtPgw5s3pW1klBZfNfvs2337D0r/gUMEuIsh6nwU8/J28SyhwKVLm0FiY3uZZi7bpk1bCIqgvYAiDSiquXipoxIxv2vUCNBiqfxNBjhTKBVPlr9/oHVNMvFQEf7ju/etfe5/NKjfUIYPH2HtRthL82paGTp0uLUPQwf8Zp1bmlKrwW/wB0qUJ4UYgxsV2CkemUJ/jh6g2nZ09x7/dS97+/YdrTXxND84RRZJZfxHEN7sPv0HaeHUx7n/2nUbtHDrLzVWnjl3Qd58I5Ma4B7nfn+GNk9EASF3fK/e/bTACPCiNm0j5UdnKlIWkg4dOqqlkRogMT3ipGPHLlqEBEF26bKVWtGbWiDVqwfIpStXtd/nzl8oQUG1XLRTctV37RYj9erWly5dulk4cAYGBXugqI6dk11gzQdlca5apbq0bdfeqvqLcrRz1x7Zd+CQCjndontotVjOGTV6rJT1q6B1I/htYBTf/fAvaRfZQa2GptIjxw8ePKzPA+QiuntPaR/ZwUrtiEUZIQ+4haFLl69K48bNJCys8SPxmes3bpbadepJk6bNhVz+EJjFpctXCtXlgcKQ1vie85jjnXbL4iXLXapnk/awcuWq0iu+j3kEjREZP2GSClbAZKpWrSG797xnHTd/YDHu3ae/ZEibTipVrKyWZo7Nm79QqK8RE9NTq3Oyj+rn3CcmJla++/4HvQTWFyozkyMad2edOnVdqthu3LRZC9dcuXLNgoKQGhFrLuOsZs060tutWNTJU2cUtlQnpJ7MTpxrHlW/H+5UYjx69+4rzZq1kBLFvaVokaI6DvmO1BdhrBGXA1EhF2gIAi+CBufZGQ73qlU7RNq3j5IfnBXJ7RYmc3Oqd1Pplm8bFFxLxowZZw7pFiEPC3DlytVkx87d1jGqLQN5OnjosFYdbdWytUREtLFgKOSmDwisaRXzWbt+o1CsaMiQBAkPbyzfu6VS7dt3gN6Das8QFslVa9YJ8Clyi1er7q8VjDkG3KVw4aJSMH8BGTBoqI6pb779VosJff7559Yzkia5QVgjTQ2ZMHyk1nAxB6nijsWdFJYPkknvTG2NihWryIkTjkrI5twc2XPqXDO/7Vuqlg8YOMTaxTfNm7eAi5Wasbl6zXpr3CxYuET69Bso7lWMzUVQgHLmzJOk5gzH9x84JHv37jdNXbYtWrbRSt72nRMnT5URo8bYd1l/M5+mTn9XhiUMl7ffyiwU1LRT69ZtdSyYfVjkaofUd4GVUbcmsn2UaeKy7d4jzqUS8/Hj70vaNOlkwYJFLu34MWnSFFVC3J8Bb9LGLdvVYwRvtZPdcMRYDwmpJ/sPPORfpu2CBQslpE5d81PwkOXJnU+rCVs73f4ID2ssM6a7Km7+/kGaftTedP6CRRIX39e+K9m/qVAcEBAsjRo3dakyTNFBKppTrZy1ZcF818rE1HuoVbuubNqyTeJ795E5cx7yEnMz4gvCGzXRderoseO6ZgE7g3bu3iO1a9eVnTsdqVtZC6njYGB7ixYt0TWGdS8svIlMmTpNz9uwaYvOQ1LT22na9Jmq7HXuEm2tC1Q7nzU7UT7/8kvlvbyHub/9XPM30BHg0VWrVndJKXv4yFGtHs38HjBgsMIhOQdeS/8PHfZQATTXIhUs6wLp0E3cFwaTd2clypWrV2XCpClSr14DXQfMOe+9t1eYG3aK69VHyleoJEuS8Y7Rduu2HdIwrJHUrFlbFi50jGPg3lQGp1YT60ONGoFaEdtcm3HPWm2PD3tv7371XgYF1bSMF7QH/smaQuxqu8go6d49xlzGZbtw0VLJnTuvFPcqLjPena3eA7yV8HUqUA8Y5OBJIAKaNW8l1avXkL37HHyDtWXJ0hVy5+492bZth46V/QcPW9fH0NGoUVPhGR2Q0v8IMVzbd+6WL778SgYPGaZ94J5ynnUbIwB1sQxRI2LBoiXy0Z27ei+8qKH1GkjuHLmc/eRYY8aNm2jBHOEzy1eu1lS9cXG9JTIySr765ltzSd126RotlSpX1cr27
PDkHcCbiafz9NlzOtbato0Ue1wHz9Qqoq3KbXwfOyELAjsFosc6Vzeknqx2VvamJkxo/TBhHkCLFi9RuYH5Wb58JQEOaSf6GhnSDm+Hh+PRvPfgU+WxnTp11lOAy8KbsmfNLm3bddCK8BzAmATc1hByADIHY3zRkuUydNgIC779j+++kx49ewlyx+Wr18wpyW4XL10prGMlvX1SPKjOXnoiCgjwAixfKAYoEsHBtSTeuVhMnz5TXk2V2gpEAwJQonhJ4ZwtW3fIW5kyi3eJkrJ0+QqBYZcq5asC8+kzZ6VuSKgye1xXCIHto7qoQM8A4Rq7d+8RLFI5c+SWdKnTypBhI7SQXdmy5RSO4WkkLF22XIKDa0vinLkCw+d+CMNguH1K+UqDBo1kyrSZ0jOut9bHYMGuU6eevPvuLKGkfIXylQSm9MP3P4h/jUBVBiha5ONTWiioBJUp5avWztFjJyiTQug1ebxhelhCwTtDO3bsVPgHEIuxY8cpg795M2khLPqxfPnKOjnaR0ZJn779NZ8yTAhhA2bMtZs2ba7CH9dGSCtQsIhCXIwQAQOi4Ni8ufMltH5Dhc/RdvnKVfpcvmX8ZN2GTTJgwCBVuugbd+I9C+QrqEI4VlUmrVp3h42QseMmaHXP2bMT9V0QSgL8A1UJ5ToTJk7Wtq3btJMjR44pI+3UqaveggJxKFngwMMaNVU8PgdqVA+QvzzzF2W4S5YsVQhIXK/eeg649m7de+p3p7Blrlx5LYtqbFy8PPPMs0KV1YSEkVpRtldcvJ6/avUaVcqiY3rqOTAkijaVLl1WUr2cSlasXitLlizTwodGEUMo7di5m8YCjBw1RgIDa6ryieBup0tXrsnrGV6X19O/JivXrJc5cxIlfdoMlqv5wsXL0qffIBXMBg8eqhb4zZu36iU++eRT6RrdU2uhIBzUq1dfxzeLLsQiy/VY1Khb8Nxfn9MYn23bduqCg9UZYr5s2bZTvQxLlyyT0Abh8uHN27Lnvb3y4vMvSgkvb1mzbqMWTixS2EvnEgtJhw5RUr5cBV1wWPBZFLCKf+UstkUfDBw8TBYvXiJTps6QLFmyWpZi3mvt+k0qgLFIto/q7LHY3507d8WvbHmNuWCMgrlHUeRdDfRIX8L5H5DF4SPHWEYGc4y5ybgIaxiu/WH2m23i3AVqGEkuoL9nzzhpFN7YNH+s7aHDR3Uhci9M2K//IBk9ZnySa/z47x81nmf/gYPCgpU1a3ZhTtgJAwVwMUPfff8vKVzYSxdmsw+LOzzpgTM+xOxncWcsnT591uySkSNGySsvvWLFpDD3zfw/evyE5M9fKAkMYNSYCXL/wadaGJZn+dwZB2Nd1PkHQZhA3vZ4ME5Mmz5DlWBzDjyP4pwIbMkRsKu42F4C9O/KlasqeMJL4al2AibZf+BQj0YRe7vNm7dI9uy5dFyPHDlaqlbzVwgK68nfnv2r5M9bQHbt2Sf9+vaX8uUqWoYcvPjdonsKPGTM2PGSPVsO2brVVcDhPijwwYHBCpXbvHWHdOvWXQKDaul1mD+lSpWxeN22bdu1WN2uPe+p0pkh3WuS5e0sur5ghGGdbNO2vWzesl2mT5+hbekDqH//gVKoUFGZmzhPeTqCIMSaBa9t0CBMYx34Fnj+PBEVh5mD8+Yt0P4oVbqsbNy4SZsy/yhwt2vPfvW2wRdINoHRBd49fuJkNdJ17OgQ2Fj/MBosXbJUlQKMdxA1vHieyMiOwtwICAhSIyDH8I5lz5ZT+Yk2FpG+/QbK6NFjlWew9k6YMNEcsrZ73tsnNWvXU8Vj8KDBUqFCZeXNGLS4V5kyZWX12g26RpXxLWcFDVMYsLiXt5gaCxhK+g0YIsuXr5TobjGSL29+nRPw+SKFisiLzz0vEyZP04KDPj6lZKtz/bYeRESVO/hL7Vp1FJK5bMUqKV68pKD0U2Rt8tQZqkAwBpYvXyGjR42R8MbN5KuvvlZlM/PbWdQ7SowhvJ4+RIklkcaqNevl6NFjWosLqCRKXf8BgyXNq2nUkMg6i3GiYVhjy4DYMzZexxdKGYYevheEUTVHjtxqOBo3YbLEx/eR8eMnSLWq1WTKlKnKVydNnqr9By+CItq0k1dfTa0eb5Qp5JkePeP0GMZi4JYYPVhvUBQqVqgsa9a6xkX9oGtEW70ucgWyWfnyFSXMyVc///wL9V4Sh7Z40RJdX1u0aKX34D+uP2GCow5czx6xkipVGsGjC3362WcyaNAQIf6M4op8e4T3A4eOypAhQyUkXonrAAAgAElEQVRN6nQa60fb9Ru3qEFt08ZNem/qq0BlSpeVZ595VsaMmyirV6+V3DlzS8eoTlpHbPSYceLt7aPjkbV2246deo9RI0fruTdv3pJWEe1UVty4aYsqYjE9YvUYisnGzdvVw4wCReX5RxXTJi6JtREq7lVC2rRuq3//2f97IgoI1oCixYrrwkeHrlm9Vhc5BvHfv/pKKlWupoOIY0w8KiECpUDbr1athtSrG2p9h3cyvyMjRozS3wg73iVLC4LpP/7xTxnlXODxInDeuPETdUEYNWqMCnzbt+/U83r16iMhIQ+vaS7Os2A9hsEa8vOrIK1aReh1GJAwCLt1EEVngzMYkUWxcKEi+jxffPGF2CdSKZ9Syiy4bt++/STzW5ktLXfg4KES09MBe+BdsEbBrCCsHCg2hijOaGAmZh/bufPmW5bqxLnzpH7DRvL1198KQaNFChezLDiHDx/RSYfAgSVo0OBhFgyFYE+v4j6WRY4F4pWXU6ni89lnn0ulipWU2RuMaAnvUiok2p/D/I3ljYBpCMXnrbcyq7Jljg8ePEQQBqCI1m2krK+f/k3bNKnT6kLPjg0bN6qVg7+vXb/u8ODcu6/BWqYS+ciRY6REiZLy2WeOQGIUnlw5c6vVguJVhmlyjdGjx0jRosV1Ebp46ZKUK1dRCEQ2xILnHxBsfqo1sXKV6lr9mD4bN3a8FCvqJXjboMj2HQWPEdSv3wC1nOoPERVu+vUf6AJ94RhjvHVEG6lYoZKOW/aBr0eR+r9//EOaNGkhFII01KF9B7U8s3CiOHTpFiN4UCCsaM1aRFhZ0/AwYLWDsNC+lelty9INtAmL/oULF1XoA/N/+fJlTQ5Bu5YtWgltqlapqgYDg5XmWbE0QoxLhEJD4HN9fcvpc7OvYXgTwZILoZRky5ZDpk+brr/xBg4fMVKfFQGLBWPr1u16zP4f51WsUFEND3j9AhHoylcSu3XQ3v7s2XPaB3asrjnOfd7O9LZkfO0NiWzXXj2t5thPKSDEMSD8Py5h/U8YMVoFEfdzUEDGeAjGRxCZnbjAaj55ynSNj7DH1Fy/fkPxxZ27dJN9+/bLxImTVfi0B2RrscvBCSrsWBcTETwDCJlYog0heGPdM0qrXQG5dfuOziUUckOHjhyTrdsfFlyD506e4mq1Nm1RQBgrnhSQpUuXS9UqePR2yZYtWyVn9pxSsWJlQUhJjlAC8+TOox7gWjXrSJWqNSQ5KBtCL33zKJo1e44MdMLw8CK+kyWbeqvg/b6lfdUYYjyW8FCE
XYi4JOPx+OyLL1RoXLUqKe4fb5pvmbJSp3aInof30at4Sas/MLDY+REeODz08OJOHTtLtqzZ1eDFyU2aNJNyfhX0OvxXG+vvase879y5q3XN+PjeFn/AAIEXbdCgoXoegk2duvXl44/vWdcxfwAjw/JsCMX0tfSvC++3adNmKVrUyxL2aMNzI2hCeFLhGfBTqGvXaF3X+ZtCrX5ly+l+rpM+bXpJTJynv1HasFrj5eEfnlliRCGESAR3Q3jsjaHS7GPLGoFnFyKxAYY3xi3KNzy1QvmKYowKKFV4haFJEyfr+sHfjPka/kGy2AafRIkoUriotu3du4+OT8NT4uL7qPCvB93+69u3vzWmKIjr5VVCwho2sgqqMYZmznQELuMh53scPHREUR5Yx4MCa1pXROk6e/asxtXRFwYJgYAL/0fp4LvMmOGIh8PA+s472TWxA7BT+CTB9hDzO2PGTKpUkjijTkio4H2g3yHaY2Q0hILpHxAkVOGGiMGAT0x1rm/w0hrV/fUY5xYsUFiQCyCMBMxtPPR2op+Z94UKFbHkFsZklsxZ1cjMe+CZNwRfyJ4jl2UcAR2DDGSoa9fullIK9PL2Rx/robt37+mYi7Txa4yu/fr1V089iVuQN+7dfyD1QurJyy++ol4VjD28xzVn8hDGVv78BfWaGJ2Da4ZYmQZPnTmr69m8uY6xPHHyNFXozLMxX1njoOUrVkrr1u2EQPoz586rIRDjb3I0d/5iwcMKbd6yVSpVqmoZ9ZI758+w/4koICdPnhb/AFycDisc2i446S+/+lotW6VL+1q4Yj5Yq9btrN/8bVxsaJxopCYjEVlWyles4gLD4uNR6TNHjpxWJes1a9aIt3cp+f4Hx0LXpUu0NG6cFMcO1MWntJ+cv3DJ+rZor2bSrd+wSXrF97OO8QfeEsMMEhPnJlFs3tu7T4NUWRQ6OwOUaIeSYQgYFhPEULt27WWhEx4xZtykZAUv095sYfZYgrGyhdQNtayatUNCLeUEnHS1av5WoNmKlastBWTqlKkuwjfXZVK1jmittwCiRPVUiMmJJ4nsOe707bff6gTiO0P0Z/ESPhYu3LQHikZWHj9fP4VrsR84Sr68BeSME8u/cNFiCQqubcGcWjRvJa8Q2GyDADAOcMcbwtJerWp1WbNug3pZsIQbwppevkJlS/giGJasPYZQDh2T3+FqRkgJCKxlMc8VK1dJeHgT01xatmqt3hN2rFmzVipUrGIdC60fnmxw4qhRoyXCtvijGBYuVFTGjJ9see/MhbDUFSrsZcVQ4WWBURpCAUORhJhjKPzQ6dNnpGTJ0lZ2I5QXICRYClGWsRZ16tJNLZy4trFAQcDHgFEYmj0nUQOk+Y0FuH5oA3NILc5VqlQXLNAIwbzzkSNH9Tiwv2rVA2T6dIcCglAWHFxTA/WaNWsprVpGqFJpXcz5x0d37kiFCpVUYUWpJOvT9h07BT5BILk74SmrF9pQgDkYwriBJdPQoAGDNDgYuIqhxLnz1brIs3sivDvEWzwuHT32vuDx80RU0B49NqkHhPbtO3SSqVNn6FzCw4RX4IMbrl7Ow0ePafB57z79pGSJklr92H4fFGL1ADgFZnNsypRpCiEBfmYIJTrVK6/KB85Adgw2CAoQi2ChgkVkvs0LA8aduY8gB5SCQHLGiydi7gNJMF5BextgJiW9S0mTpi1UoWjapJnlScGyP278BBk/fqIK+kYJgE8PHjREg+exeJINDQETGBhWSTsB7cWa+1OE92bbjl3SIaqzZHk7swU9atgwXI0InE86UpQdlAzGEl6/D51xPShzvCMeUHfCgAD/MEYJPPllyvjJPiekFgtpfG8HVAzPHYH6WKehsWPGuRgwUIAjIx2eBAQ9PKogCAwhHG/fuUf7g7nxz+9/UGMHHgAD84FPlKtQ2VpPzblsq9cIcJlPJDRgXb565apCd1EUMHAYYgzDLw3hvbAnEdiydZtas/PmzS/BQQ6h+sKly3rN06dP62lLli5Tfm4yyWERb9HKsb4gaKIwPw4hcPJu9FE5v/IKLeK8pqxRMQ/XKAxM8+c7FPxDBw8pWoF2jFPWyZMnH8I8589fKG9mfFMNOPA5DB+G2rTrkCzEsWu37tKpk0Mxo32DsMYKozbnsj167JiMGTtBvy8ePIxAEPx35CgH7AihHsitMYgBN8ZI06RJUxWcaY9HuHadUIXC6QVE9JwB/QcIEMJ6oWEuRhZgViiWUN/+gxXyZc47ceKk1PAPFHsGQNYzI19hxQ8MrmM9K8o9qBMIQ0O5cpUU9sxvPH3G+m+ub7bnz1+Q4KDa8g+nEeSrr79VTxjZUfleMbb4GcYb3iIQCBAKSIItBozsefAP5g7Jc/DiGUJWmTDe4S1h36SJk9RzPO3dRAmpW095TsdOXaV5sxYS5YT9HzxwUBVRwx/v3run8C3G1+Ur16Ry1RoCFA7iuTFIMzagkHoNFD2hP9R7N0CA5kPMi9KlfKVjpy7qgWxQv4HMdCqNpr3Znr9wQcfl0GEJsnjJUoVjv/D8Sy5z3bT9s22fiAKCZs1ANkwYeAaeg2++/T/VwoG1GA30wSefSZduPcTAjJo2b6WeDDqewYZAtchp0YABVasWoIsT+eBjY+OVoQMNIRtJgnPiISRjrUdohlBAcJu6E9aJN998W4Udc4wAKPChEHEkg4a4Yl/B1yLUsVBgrTVWMSbH4MEPvRdYtxj4EAIx7koz6KNjYiXC5nJr0TJC01TSFlwzMSF2gmEYgYH9LNZYKFkwEKaxYAQE1dJTyDASXCtECK6GNm3aojhIg3tftHiZDBjosJbNnj1H0qd/zQWfiasUdy2EpcIIpriDS3j7CJAnd8LCj+VunfOeZ86elyJFvOSgLTCTZ4RRM9HxiOEKhVhUc+fOp65nfjMhiWtBmDXEO+TOlVctDPQhwjQwOEMIpCzCYES7d+/hYnHEpV2xUlX5+uuvVakBk332nAOjzflxcb3UlWpEV+IAagTUFNIVQ6RitVtssHiMGOFwyXIcCAGQMix3eAqAXXii+Ph4ad68pXUIxurnV14QYn19/SwrDw0YdzUCgjWdKb+79+gl22wWaYQchBOsgnZIHFZoLy9vS3nCcwUO+fSZc2qxG+W0XloP4fwjLCzcsnSyi4UisoNDEEfAsyvvpCJFmWY8MtZ4jkOHHDhmvhn4YKxBEPhbvrc72ccyxz759FP1tB0/fsKlaWjdUHknc1aXffwAlgUsDQuVISA1ifNchRngeCwgKEoQ1k9Pmal27d4reH+w1BOAfsNNGTD3QCg0z86cZJwnR4OHJsikKQ5FzN6G2A+sdswrAlmxZvvXCJBeTiHV3pa/8fQSYwJu2U4IzAOHJFiLpTm2Zdt25SF2yBSB3CggkyYnVebwEGTOktVa2PGKArVA4EfQQrjr32+AQoyIC3EnPDfEgGBccCe8Z1gFPRExB8SuIZAQeIqyDBH7hhHITmvWrlXBzK6EczyqUxeNH7O3df/76tVravXHUIV3Aj5hoFR1atcVA91kDALPwiqv2WmqB1jKJZZoYkgQEN0
JZQ7L6wgnTOPqtQ/EW40vDtgIMWPGqg/vwvNtYryAjXCuoUaNmkgrp3COsoUFmzEJ8VzMZepFEVNSsmQpIaySNRII0GYn/h2oCR5ce1IEc/1SPqUlopXD+8A+1j/vkmWEGCy8ihhaWEsNkcSA2BS8L8TmwXOJNWT9GThoiM4thDXmVSWnIYY5lzdPfk2nzHWAOyG4GaV/7tz51pgAAuS+vjK/GPN2AnLDmgBf5H1Z9wy8hZgJIwTC/xEClzjlBWICgCtBPHORIsXUOm+uzf2BtaFwwufsCRMiO3QUrO+eCCGZdQaCbzQMb6xxg6YtBjjibFDgQSRUrRZgGU1Rguk7CKWMGBrez3ifUZDgByjuwJV4z8Cg2hoDaK4PaoMED0AaeX6zdnEcY+OkSQ6vYN9+g2T9hodjlhglYkXs/UuCkdVrHEouxgjWHdpB8AsD4eU3XniNx4rqpDCtw4cdhidtbPsPuY81wmTFpHaRj08ZNTbOmOma1ASeiuKP4Q8CWgjyxE4YbIjDPHLsfWtN5DhjEvieoR49eioUEagxSAVPtGjhYk2b7QxJlosXL2kCFCC0GDtQQEyMFt4dnnvpUofhgTE4LOFhggxgxX37OxKuEPs4dpwrfNAgE9yfg7W8caMmAlqDVL4o4uXK+rnIGe7n/Fl+PxEFhMC+smUraG54Oo58+QSU3n/wiS7gWI0NPIqJlDp1Wg3uJUi7Vas20q//AO1vsjbkzpnHcucuWbpcfEqWVpciisDzz71kCWDgdwcNdkxsFjDSYH7yqcM9iGUzKCDI4zfEwg404O9//0qPE/A73xnkNnvOXMVZ2y2rWF2xpPaO7+Ni9WOhA2v88ccOFyHWZgYZNHLkKIVqGSwqQU6NmziwmhzHFWwCL6dPmyF58xSwXJtAZBCGjeBDeyArvn4VLKgNaSoLFiys98Jah8V/gfMdiCUpXaqM0LcQ2PoePeNVGcKFDf7QuAoJhCYtHO8C4VXge0DcM3euPDJndtK0nSwu4HKZhDdu3tagzTy582o8i54soovVNCc0p2XLCMmTK48eIsgxXdr0sm+/I1APfClMGSIQHCgLhMu9SfMIDSA+sP+guooNNA0GZaACGzZukldTpREEHAgrkWFGLJ5YK2H4hoYMTZASJXysBAPgtOk/vCoQWOcqlaua5qp0DnBCOvbu3St4I8ZPmKixLp9+5lB4rca2PxIShitMDKsuBD52wkSHQIirO1fufEJqPwjrkrkHQgCxTnjj7MQCmDdfQcv7wTFqXaROlUamTZuhTREmQ+o6vBcsbKGhYXLOabFHUNq7zyEkNazfUK3cRkklHsdcY+q06aqAvLfvoC7+QAQQsL76yjFfiPMCBgHdvvOxQh+9ihbXgHnGWk8n1JDjO3bt0Qx52tj2H4ssMLet2x7CswhghGcEOxVrW3P9k+9mD4xFEcubt6BLznZw7CzIWKmhWXPmCjA0DCGGgHUOGDTM8hAilGZ8/U0h05Qh+NaKVWuthQmBjXoGZoEhoQJKnhGyOA/jyHCnUGquA/8iyNOdgNHBr3gWO2H9xAI92wlnsR9DgWXcG3iFOYYw1bptpGzY4KqwEJ+TOXNWjfkxbRHK33ori6YdZh/wMDyqx2zzw7TF4gvMhhTMdsIyyvzYvTtpggqgoQQh/xwKqVNPBg16mLkMHlW5chWFHNmhsAgJg4cOFwPLTO4effr0VYGD4yi4QPOw3EN4VkwQKoVHiR9AYIaIx0DAga598KFkyphJ29sVO46hOPmULGXN18uXr0qunHms9Q34FwIrhLGN1MRcm6QsCcMSdC7pQe4ZVFPIAgYB6YWnGgWEINsEpxeYd8qVI5cmMrn90UeSI3suC+6KQcyruLdcdPIvc222xAkAjTWxiQjCJHGBUMqI1wDiA/373//RNQihD9w9sOZ/fudIHELsGJb62bMcawEe1Jw5cul5rMsZM2S0rPrEm8DPv/7GMedmzZptwaJQov0Da1oBzfQtY95AfvWCItKsaQvl0fw+ePCQ5MtX0PJo+1f3t4x5GBry5ysgc5yQarwtKI7MCSguLl5K+pSxYETqzXYG+sfGxkmZ0r7mlopQQBH3RGSNi4rqKCdPndV1AkWahC2GmrdsbaVnBQb0+mtvWB4p+I/JoAVfQZHEiMg4GjZ8tLVOM+8xtGBExUuFpxAC3oRCwnpO7RwgfB2cySiQE5o2j7CMu3jO7QHYrJd4r+2F9WrXDrEgayBXSC1LshSIbHQGDYKi/f6JUzJ12gwBgoYRLDnCGMR3AD2g1xkxUkLq1pd//+ffKsukTp3OQlKwRmGYxMMBNW/RKkkWLBINlClTTuUK4ymlLV5ZgyxBPsEbb5Qn+o8MhYZmJ85XAwTGsvTpXtOYSo6Rrr2DE2aI9445h+cWJZJsfaAUTFwS75Mvf0FLPgqtV1/Spk2n0DUUqFp16ioEi+vCnxLnucb2sZ95Nnq0q4LFftYOjId45f7M9KsVEJhKeOOmkumNTGqZePDpZyrgvvLKq5rliM6FqVInBM8ERcRgfAj7YH3JA4+34ObtO0KmlxdfeEk6de4qH318T70dGdK/Jh06dNLBBN6e9J1k0sK1SV7+iNbtpHp1fyF2hFz49+5/onnlgTkY17f9AyN0sSCQQSeiTaRmauE4mSjQfvPlK6BZf8w5LMS169SV6TNmqWsdC8KFS5e0PgLWO4LepkydqVYe8KUdOnZWrOSbGTPJpi1b5fqNm1LWt5y88UYmzfpx4OARDerFFYvGjaKB8FK0WAmFGSE0eaq0iYCIQoDHok/fAQIjwT1N7YB8efNpxpdPPvtcsbqpUqWWOYnzNVgLCzXvNGGSA7rAxMUtW9KntNY8OOu06hJXQWwGWavufHxPrR+kRUWAB1fpTgTBg0fdumO3pgDlO/FMXB9iIYZZ8p1w7YObhnETOMdYAVP88b37KnASLzM0YYRmTYKJYHlr0DDcEtCw9oGlHTl6nAQEBuv72xctYn4KFCik1i8SCNCvFBrsHhMradOkl1at2lqC2/37n6ilnsXm62//T4DsAJ8jkBOhGzcvChLCLMFwb77xliYquHj5qqxfv0HHCEGqLNDEAJHVw64smn4iWJSgaizjKHkoH1hkDeGuJ7MaMAms9AiDXKffgEGqbOFNQCg3RPIE+sROPCOB6CgvLLwEMjKODQEPrFuvoVSoWFkVIDKmQfQ96WeBBoLRx6vD4gahuOTIkUuhFggIcb3i5eWXUwnpdiFgBMBBgDegLAIFJPMRWVgg3pfnwFMyY+ZsyyqmBxX/uk2Ca9bRIFCyiZGBBpw6lj2yCxFU7YnwlLHAGuLZ5hJcO3matG4TqVBJFj2svNCCRYs1qBdFh2xIBPsyXwsX8XKBt2GlxsNVt14DPQ7kkgxgwNxYhMkWhgeU2DAy7BGjhZX46LETVnYteBm4YjKIkUgDwoL94ouvSM3g2nLOCcfQA9xv5GjlV/CgnTt3y3mSeAQE6zhxt/qbc1D2jSXV7DPb+N79ZKpbJimOYRHGiodgjWLQunWkJmvgGIYLPC2+ZctJ9x6xloWZYyjz8L
W/PvsX7RPGJoQSSz+SgILvj0XZEMqXV7ESgiGCLHkmS5I57r5FuQBqQUwd8XyMAww11AEhQBrh3U5YPo03C4GndZv2HoUi5gSWb+bFxMnTFfpJsTPSGBfIX0DHO15LMqRlfSebehNQZvHQE6TMvBuaMFIDcrt1jXZRXnmehOGjNHCXLEMPPv1cZsycJfBbBCC8Q8Ba6HMgoyTzwMswdux4ef/kaY3Vw8BGzM3Z8xelcMHCqlicv3BZDQDp0mVQa//nX/5dExWQfalXfF+FX7K2MI7599abrLXdFeKMQsFax5g0vNfeb6yBBQsVkarVamj/OaAt/9SYlwwZXpchCSOsGB2CyOvWq6/ogoGDBqtBDWEQL8mwoQnaP0BOSHXNmoY3gvckBgsPGokxiOMhuJ2g3xOnzioPZG0gcQuEAMpYL1e+omaoQ6F1J83OV81fkx3E9eqraxyxDIy9/Hnz6xp1+85dWbVmrWRIl0E9ciT+wPBI2mfWErOGxsb11nsFBtVUXsG98KoQU/h6howaA0QCAmBTmd7KbGW6tD8ThTWzZc2hAiYeTRIZMP7JmgnpfCnurcJxvwGDVf7B88CYRlnF440hE+jTK6leVWjguQuXdBwwnvDEkcEKQqGFn4FaAMJbpWp1lyxnKC+MC+DRBKSbTIP0d968+TRe7/CRY3ot1kiegTpC8DlqI6VNm15hxfsPHtFspcT5oGAhOwC9JU33CmfsE5BFguAZA8hkGPaMJ0pv4PwPoyLJEHgXvFW0M/GaNCGOEBmE9RAPmIGfbt+xS9PSItPxPIZQijESkqDHTm3btFVvLbFKjCGC+A0xhwn+B/GAvIYBD/6NYSYoMFjRMngfSedtj2NB2QWdQZ0o5spbb76lvM/Ez8HLUAiRExh/zD9ThBQDE6EH8C88XXaDCc/FOPPyKqnrJ/E2dsLAVSB/QVXkiSu0eyLt7f7X//7VCghuULRQNGTw/QT7Acnit4Fd0Yngxnfu2uXiDgSvj8CDJR73IxYAMNNYubBuoFXi4rW7/kiTeuDAQbVGgivGuoUlGOs5VZFZLLkv2rvJKOLpIxLchoBmCOEM+M7775+0FjZg0yy66zdsFNIJkrqPyr0XL19WGBPMc/v2HQIGEusobm3c5Wzpizt37qhF49KlKwqdwNOANYIJyz+zsPMMKGm8Cxah5IiFF4s/Fl3ORbsmaPXqtWuK49R3v35daGfuhcscLCjPaIhvhkUVz5UhMKu0pc9J+8dzApfCTWms36YtW9y6HGdBAybA96ZvjPWZNlgf6DOCUFFiYCiMA7DI3A+XNM8KRtJeyXrXrj3aHyZQd+7cBZoikAxRMBS7h8o8EzEW9I2B32ApxgJEnATvZVzetAcHynvBoBmruGVpy3Px3fg2xCnwj785ZtIeMz63b9uh3jCwvDAtO1zKPA/BtQQDE9cElMMTg4GRURUceCGEAoJFkTnBWLRnPMJahdBkJyx+7aM6KZ597dp1VkYQexveD6/jlSsObwvHWODWrt+g9+E8k1banEd/mWrczD/6kO9kCBwv8TAXLlwQUlG7E/EcGzZsEvdMUbSjjxESYfAE1JLJjjECrO6naM7cBer2Zlz8598PgQg7d+3WcWG8PVyHb8r4gy8wR7gP4w9e5akqM9+Y+USclfEIch08kggYXAsesG37dsvaa56Xe+Fl49rH339fd/PdOY/xjpHGTnxfvgswplu3bmnCA6Bu9j62twcyMWz4qCTCsGkDfAehGT7lTrzLps2bdW6YMUwbPBvMOTxcZMUx8AmOwUcZA2SGw9JnvCDMVwLjWezpC/rMEO9PW8bLsePHLYiVOe6+BSLJ96AfOIcxwHcEEmk8TeYc7jNl2rsWbyQVKAs68WWeCB6GZ5P5Ay/j2+3es0d5G2sOXsZbt29r9jWe2fQL34k5RZzi/yUTNwRvZ46SMAOlm3WD56e/DL9i/QJGynVNWmw8HIwHng1LOLyMZ2HtIzEJ34nrsv4YoRwewFoHrwXawxjGIwC/Z42jHdeDlzLOzHdy7xPmL3wG2DBEO56X8cY98Rjw7Cj/eNAJFOZ7sEaMHT/JWrPpK74RvJV3sPNz5gBxSgjI9AfPRGD8+fMX5f0TJ/RdzXPxzlybNskRfcJcJDsaAinjlGeCPzvWqL9baxQ8+d69B3pfxgrvZBQQro/MgPEIgRTC2AKvOHfuguAJA46HN4H5wHu4E2sH3jT6j+ci3Twp4ZnjhhDM4aXMHeA+Ki/s3qPXpQ+AcDJW6HP+GSGY+Q/c0owdvBHUgQIehZfR0/MQhE5cpJ1PMS7hK9TssfMbDDWMO4i+ZHxdunxZ+S9yE2s44xDZgT5gvWN8QXgF+AbEe8HTeW4MM/Y0x7SDJ5BuGp7O2LHXI9ILEXt29rzOLbuQDmyQPuc57F4azqG4KePETkCDyRbJe9K/7sSc4nkxvJj+BIWBckw/MLdNv5tz+W0UCtAs9Ad9aI+bYf3gfvbUwuZ8nn3VqtVy+sxDXmiOkTKbecZYMWn+zTHGH+MW2Yx1Aw/kn5F+tQLyvx+HB3MAACAASURBVNxpWEbtbj3eFcZGhgv3AMn/5X74o9+NnPB4Mf5IQunDa2EWcfMsQHFYEN2JbFAGJ+5+7Jf8Pn7ilKUYm/MR9LAi/1wCyrPRmabw5577R7aHSUd3j1Vrv4kh+COf57e+N95aFFw8RAg+jyIUu4jWkZr9zJNH7lHnPq3HEBhJujBsxGgLesuzEmdEYUTS16bQk+kBhE9iUNwJAxwxfyn0+/QAHrSY2N5JjEK/z90f3gWUCd4qOxG/gRJiRx9w/OrV6z+reK39mp7+RoHEC+ZOQLk9Zd9zb2f/jfGGrG0p9HT2QIoC8ojvQqYDv3KVFEsK1IYAw67RPVxSFz7i9JRDT6AHcGMSGAcMyFNQ6BO4xWNdgoBrMtqwSBOLQEpeoHD79rvi+LkYXizcq8BLsCL/GgJ6B1QKbLadwJDCkKmxA7zBbvGzt3P/G0sWbnjgch86IVnubZ7235u3brdieJ72Z/01z8c3pgCWSbv5U9cCjrFx81bL+vdT7Z/24+C/jxx1DUTlmYnHISvc/4qi9TR8Bzx9wHuJGRg6ZJjCOvG4YgU31uSn4Tn/l58BKz3JKkilDvTOjpD4vd+bBCbERpLsZ8TIUUJ2vvg+/V0MATyTPnO37vLCCy8lSabzc58ZSBYwfZQfd88PiRxefukVadggPFlPsPv9vvzqK4Xbs0buP5h0nXZvn/L79++BFAXkJ/ocNyDZYQjEIpsN8JEU+v16AGEeKBPZq+wpKn+/J3C9ExZpMlkQGwQ21hPhFUFBGTBgoJVZx1O7x9lHrAlVag18wJwD3pvq6qQVpgq9Cbw0x5Pbgo2lL/HOEAydQik9kNIDKT1geoBieTHdewqJTlasSJpAwbRL2T75HgCmTIFbii+SGcodLvTk7/joKwJzBL6E4ZX1jmr37sQzU0iQ7H5kPPs1RC0U0rcDW3In1i3uQSyYyXbq3sb9NzBB6m1xHoHmKfT09UCKAvL0fZOUJ0rpgZQeSOmBlB5I6YGUHkjpg
ZQeSOmB/9keSFFA/mc/bcqLpfRASg+k9EBKD6T0QEoPpPRASg+k9MDT1wNPXAG5e+++lZLz6Xvd3+aJqBPwuETGIXsmiMc972lvR7CoO0zo93hmYh9MyjzuR2YVsp78VkT6UrJj/ZmI7DieMlr9nD4guNI9ePHnnJ/S9n+3B0j7/WvH1/9u7/z0mxG/QeFXslD9GoKPcq0UerI9QIrbx4UNJXdnEnAQv5dcHRzixTwVgU3uer/1flJd80y/hghEf1Tx119z7af5XDJ5khH2z0BPVAEh40DCiDFWGjaKe/WMjZMxY8dZaRlJ4Ql+nQqZpAelQOGIESM19dlPdfj06TNk6NAErST52eeOooNmy7mkUJs8dZpmavB0rZOnz2qaR0/H2Pef//xbjhw9/khBiZiQd2fN1nbmOgQIT546w2O6WtOGLYOqZ2xvca8wTJo7cvxTAIo0iI+iDz74UPOte2pDmjnyt4PJpCrxvHnzZcSIUUJxOdIN7t13QJ+ddrShVoc9laana7rvo489pTcmWLZX737uzX/T32Bky/r6KU7V3Ig4kSHDHlYvNfs9bUlH/HOUJt6dApBTpkxLcjlSKJP5inSPU6dOVwwvsSuXnBXW3U+gDTUjjNCAcE7tEvYvWLBQFi1aonEa06fPdKkf4n6dX/qbAF4K6v1UIC8FFcnTTrDhLyEyOXXq2Flz4X/prCfxS67j6ZwhQ4YJudjtRJHHmJiewjyxEwIAcTn21JnmOHFGJ20pZc3+5LZr167XuTNx0hSNw5k4cbKm3zTtSdkZ37uPUJTNThQ/ZT5S7NITUW+AVJKPSyh1AwYMEmrOmKKn5twPbtzQ6s0zPdxrxcrVWnPJtDVbgr6pBUG2m9+DyCwX16uP1Ampn2z9F56DNMHEJgwaNNTKBMe8XbN2vQwbNlzGjZ8go0aN1hSdJt01qb0JnLUbJ8w7IWST/vOnxr5p/zRuSfFpeBdjmvoGvybhxchRo7UWz2VnUdqn8Z3/G59p4cIlUq5CZdnpoXDnz3kfYvyo92NiUEkMYIqSsoZTS2T4iNE/55K/qC3ZPx+VgVAL6XboJPUbNtIU2L/oJs6TKOyXXKptT9c188HTsf+WfaSmp7DppEmOosV/9HOj+Jpx9ls8yxNTQMjP3T6qs9bA4EGpC0H1Z3I3U0Bw9Zr1WqCKHPIoIRRPS5Mqtbz0/AtatIzMJskRQh0F8igGRkVoKke/O3uuFj6kGBlETY/Ur6bRiq2vZ3jNpSAbVa4pSJQ6VWotDJbcfciFX6JEKVnuQQlgsZr57hytckpVWIrs7bEVfaMg2qQpFG1KWheB+1Evg0q+7gLBsOEjpUu3GC0O17lzVy1ON85ZBdXTcyJclSr1sIKrafPP777TokPNmrXQgokUWqLSLgUDqQBLBottO3aJd4mSkuXtLDJg0BBp0ripvPH6m1qMyVznUVtydVNQcZaH6uic13/AYF30H3WNJ3kMoTJP7nzStcvDAnWPe32Eb0/pcx91Pl6ejBnfEBRhd0L5pkorFYLJZIKw26Jlay10NtnGTMjBT+E/Cn5RqZfq4cwTLEaORaavZH47i1SvVkP69OmvhQ8pQHjjw4c5593v/Ut+8y1HjBr7k6cy3yiYRj2FX0LUbKG4I8XlvviF13C/LznXKVpH1h4UDkMTJk7WLGUtWrbS9MSmzsD9+/elW/ceQvHPiDbt5Natj8wpup07f7Hs2OkoKuZywMMP8sMTKEohx/j4vjJkSILyJuqeQIbXtGvXXpq3iJCZsxItQZEF1VEoq55WibdfnuKWvNPjCv/UWqB9x46dtRgciRFMql7em4JZTZs212Jxdn5CcdTAoFpaAXje/EX2R5CLl67I5KkzrboYLgd/gx8IMrwDmeUePPCseDFPe8b11lSaFDg7/v5JfRKUCNJiR0V1kv79B0pYw3AtBokSj7GlQVgjLSrYvkNnl3o6nDzj3UQtYvbfqoB8+49/yJWrSVN//5pPFBvXS6tAo9Sm0JPrAQpVUnB2zxP2yh84cMgqmIzRoIxvuSea9j25HoC/2auTu7cjEx8FXxuGNfZYN8O9/ZP6feL0Gasw7JO65i+9Dsldbt/5+BedTp0dCiPPmp34i85/0idRq2ryFM/Jdp7EvZ6YAkIFaiaCoaCgWtKhQ0fzUwvRtG7bQYuymJ1lfEpL6lde1WrhZp/7dvGSZapUNGgQJv/8/gfrMNboBg0baZpKmDHVJBPnztPMHc//5W9StEgxawKQI3/48FHy8gsvanVX6yJufyTOna/3Ir2pO31487ZMmDRFi85s3rxNXkv/mgoe9nYdO0eLqUJq38/fFEacNNVVcEUwy54tpxbkM+1ZALLnyCVjx00wu6wtFv+oTl0lQ/rXXTwwNAD6RtYHQzE9462sUVjXqUoNTZo0WYUP0y46urs8/9wLSa5njtu3V6/fkKxZs2sKVwQDTxTTo5dW/fV0zOyjiBxpbQ3B0JLTsk1hQdPWbA2cZ/HipdK2bXuz2+MWxfJfPz4sXIcVB2vS9p27LeHQnOip2BDHTHEwPHBY3j3RhUuXhGqrdiKNYZbMWa1UqmTOahXxMC85wmLD8KYuCnPT5hGWYE1V5HeyZJX69RvaL6t/o9Ta6VGWKfP8tEdJnjZ9ho5f9tstR/aCdLT99LPPhCrExnOAkoSya78Xrnb797Q/E3+TUpFFiSKV7pZ6Y7E2hZh4FgwUyY0HXPLFi5cUvA52OnT4iFSoWMWaSygJoaFhWuBp2dJlUrNmHW1OP+L5M7Rh0xaPhSTNcfctWV/ciYrPeNMo3linbn3rW9InVarU0EKdFAELqdtAT6VInZ9fBWtM7d1/QFM6uxf4cr+P/TfKLcW/DNEnmzY5lKDI9lFa3Zhj9CvWUQqXQVTtpaAdxfZKepeyMu1QQPbd2fO0IKy55uNuk5sznG8fJ/brme9LnYnuPeK0grj9OH9TgI+FmKrRdmKM0N/uNGv2XN01LGG4VnnmR1hYYxlvGytUU587b6H7qb/oN4UI3cuHMb7dC3vaL04/e6LkFHz6z15gEC8aXqN9+w96zHxn5wnGGMbzMG/NPKcQp/s3o3I9VawpimYv9PovZ6FRzmX+mmt6egf7Psa+nedyDIWPPjPk3ndmP1v35zPHON9e7NbsN1v3+lyPmlOML09FSeFvj/qG5l5suUZyUCPznFT/JouTnegL+3elf/lGdmKOcH13Yn/XrtESHt7Y+lZUaJ89Z5429XSOuYb7e5l11Bw3W/1Wtvov8OnBg4dpxkX3lO/u6YKRgaJj4iyDCF5wvoP93qw77gVHzb1/amv61bSjkCLGHhAm9pTRPHNy882ca7aKhvAwGPgmyc1NzqWf7Gsac4eq6Xjm3ecKbe1ETTHQQO4U0yNWU/2777f/pjbPP7/73r7L+pu+dV/HzUFkLftabb6/feyZscgxsuFhyLK/I9fy1K+/pF7QE1FANm7eIqPHTnDRjFEYXk31qhw9/rB+AR124KCjuA2fomxpX8n4WkbZf8BzjmY+TqEChSVd6jTyo+u30/4Ej79sxSqNqbC/PF6A
tcepUHKKjo9GoYWPUqlkLc+aGolfvvqhSpRpCFyz0xMGhSWvW/oB1636wY4MGDbH8jgkZ6wnj3tgWuQP7Dxz0HFr/UwRq1aprloWDh4/Y8ZGjx6BUqdLo+X4vzA9bhDq166Jz51dTKUPRMbF48aV2qFixChYu+toTn/cGh7A1btwMtWs/grXfJ+dx9dp1qFSxCpo2aYrwRUvw8svt0bp1G5w8FWeXT546A7feXAB3Fi2Kho0aY8OvGzF16nQ0adoMofMXWJhBg4ORO2dOlCtTFv36D8TY8R+jZvWalldampKSLqBhg0fxzDMtEashWt63RfsiIAIiIAIiIAIiIAKZTCBZAdmzD5UrVsF9pe/FgrCF6NWrN7JnyYaOHTsj7nSyheCZp1ugYIGC6Nt/ICZNmYY333wbO3fuxsSJE1GyRCmsX/8jgoIGolSp+6yhTwvIq6+8hiwBAZg560scOHDQlIs2bf6Dtm1fwt69+1CzZm0MGDDQEHTs0NEUiB8jImx/0KDBtt+gQSNEbPgVBW68CXVq1fXgOh4djS6vvoFz55KHiDVt0gzZrsuCTSmWAk/ANDZmzpqNEiVK4aeInzFp0hTUq9fQykm/iruLFbd0Q8PCMWjgINtem6Lk7N6zD2XKVMCwYSNs2FflKtXx7dLlF6Xw84ZfkDswL+bMnQeuyn7//WWxadNm0CJStkw5i7NvvyB8MXmqbffu3dfioNXj/lL3okjBQghbuAgJCecxKSVMi2eetTDTps9A1uuy2PAy5nf3nr24t9S9Fs++FAUrLGwhJn76uVlDLsqcDoiACIiACIiACIiACIhAJhIwBWRH1E5Uq1od1apUxRtvdcVTTz2DkLHjU2Wrf/+BCMyVGxMmfOw5TstJ1oAseLL5k3aMw6hyZs9p1gweGDd2vDWMf/tts53/5ttltt/2hbYIC1+MvIF5UL5sBTv3wegPzO8kYsMvtr9gfpiF/eSTT23/3ntKo0qlKriQMhSLw6OGjxiNhIQEO9agfkPkzpELv21KTosXbfxtEwYOGoK5c0Px1VdzMG7cBMSejMMbr7+JbNdlRej8MFOaAgICMG3aDEunTq3auL3IbbY9d85cy8OixUtsf/bsObbfp3cfcGgar6tbp56dc/8LCRmPG/LfgCFDhhovhmvz3PMWhIoc87lv/wHs3XfA4ujUsZPn8mqVq+L2orfDscqEL16CfLnzokP7jhZm2bIVyJktR6r7s2rNWuTLkw9jU+4ZLUoSERABERABERABERABEfBFAqaAbN8RhdKl7kX37sn+FGlltPN/u6BcuQo4cPCw5/T2HX8ge9Zs6NAhuXG8ZWukDbmq+ODDFsZRQL77bpXtL1wYblaKti+8iKHBwzFgQBAWpzTuB/QbYMoLrQeUSZOnWON86rTptn9PiZKmgJxNOG/7n30+yePcTqWkdavkIWNbft9m5/lvwy+/4s23u6JwwcLInTMXevfpj5jYU3jrjbdw8/U3oHuPnhgaPAzDR4zE1q3J11V86GGULlnK4vh04qeWh9lz5tn+9OkzkTN7DrRr1x6DBwdj8KAhCEvxkfEkCti5G/LmR4tnWthwrqCgQaBfysm40za0LG+u3Dh27Lj5dVA5eeuttz2XUwEpdGshkCXlq7mhyJMrN8ifMn9+mFmnhg0fafvOv3//6ynkCcyDyB1R2Ltvv3NYvyIgAiIgAiIgAiIgAiLgUwRMATkVdxrlylZA9/fev2TmXn/9DTz0UEWccDmH79m3H1kCrkPrVq3tOvo6sEH9zjvdbH9Y8DDb/3Xjb7YfHr7I9idPmXpROkEDBiIwRyAid+ywc/PmhVrYWV/Otn0qIHVr/zkEq3/QELNwOBFxtq1sWbOBlhBvKV+mLPLnzec53PLZlsiRNbtn371Ro1oN3HN3STs0Y/oMy8OKFAXqyy+/sv2ly1NPUeztHz923AQLx2FUbjl89Dgef7w5ArPnNAdyWkHIixYVR+jbUeyOYtj+x04bQrXk22XImysP3nwjWUmhInPTjTchJGScc4n9zp033+Jq/uS/Ux3XjgiIgAiIgAiIgAiIgAj4EoGA06fPIDx8MfLlyYvmjzcHrRp08HbLuYQEPPHEkzZkauWqNZ7ZsNjwHjI0GGXKlMfq1WvQ7uV2oHP6sZQhQB988KE1ikePHoNDh48iJjYWLZ9tbT31HGLFtH759TdraDs+IMwL4x3QP8iu7dunH/YfPIT8ufOgaKHCiD5xAht+2QgOTaKfiVs45S4b9LQGbN68BX/8EWW+F1WrVLMZvpwZqD6Z+JmFa/H0s2b5mDptBlavWYvDR46g4C0FkCNbduzasxfBQ4Mt3MhRyWuJbN223awpJe4ugXXr1mHNuu8x/qOJF/lacG2VPHnyWn5Xrlxl5eRMVjt370Hpe0pbnNwPX/S1bTd/7AmbtYplqV61hlmRevUZYMO0QhckD0Vr1vQxJCYlmc/HrQUKglMl79v/p1M9r61fvxG6v/ueG4m2RUAEREAEREAEREAERMCnCAT8sP4nPPZYc7CBy9mT2nXodNHsSVG79qBFi1aoVLEy3uvRC7SYuCVk3ARUqlTFZsHa4nICP3X6NDp26mzH6e9AiT+XYDM/0Rm7c5fXbTG96BMxaN/hv3j44YqgQ/aevfvQsmVr1KtbD61btUHI+I/xwn9exIPlHzTfi48/+QwLw1NbF5z8RG7fgfr1GuLJfz2FNs+3tbJNmz7L40zvhBsxYhQqlH/QGu1Dg0eAU+KO+XCc+cI0bdwUffoPxOAhw0xxadnqOXMg57WrVq9F9eo1wCl/23fsjF1eU/s68X+95Fub2rhu3fro0bOPMY3YsAE1qtdC8yeeRMi4j9Cly2toVL8hatSoheXLv7NLqZhUqVwNPXv2Bqc+pvJTrnwFtGrVxpNWv779UblyNfz8y0YnOfsdMWoM/ojameqYdkRABERABERABERABETAlwgEcHXwEydiLE8J58/j6LFjNu2uO5N09OZUvBRbkyONVc25ON65FP8M97Xc9lZYeIyO0pxul0JLBtfWoHCdC+aJvxQuyOesIZJ0AbZuB2e+4poXl5MjR46aRYO/lxKWiLNzORYf97ofMTExnjJzTQ0ycAt9ONxrlbjPube5JocTL1eDZxkoZ86csVnBuM01VTj9sCPuIV3OQpCcXpfrozjC406+nWNr1q3HkaOXLq8TTr8iIAIiIAIiIAIiIAIikFkEzAcksxJXuv+cQETEz5gx80tzaOewsaSkyytm/zxFxSACIiACIiACIiACIiAC/zsBKSD/OzufuPKLLyabH8mQoSMu8kXxiQwqEyIgAiIgAiIgAiIgAiLgIiAFxAXjWtyMPXkS80IXIEq+H9fi7VOeRUAEREAEREAEROD/HYH/A/TSTRP4kbr6AAAAAElFTkSuQmCC) ChemProt RE works well with `ner_chemprot_clinical` find relationships between the following entities`CHEMICAL`: Chemical entity 
mention type; `GENE-Y`: gene/protein mention type that can be normalized or associated to a biological database identifier; `GENE-N`: gene/protein mention type that cannot be normalized to a database identifier. ###Code ner_tagger = NerDLModel()\
 .pretrained("ner_chemprot_clinical", "en", "clinical/models")\
 .setInputCols("sentences", "tokens", "embeddings")\
 .setOutputCol("ner_tags") ner_converter = NerConverterInternal() \
 .setInputCols(["sentences", "tokens", "ner_tags"]) \
 .setOutputCol("ner_chunk") chemprot_re_model = RelationExtractionModel()\
 .pretrained("re_chemprot_clinical", "en", 'clinical/models')\
 .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
 .setOutputCol("relations")\
 .setMaxSyntacticDistance(4) chemprot_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_converter, dependency_parser, chemprot_re_model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") chemprot_model = chemprot_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(chemprot_model) text=''' In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels (Kir6.2/SUR1, Kir6.2/SUR2A, and Kir6.2/SUR2B) reconstituted in COS-1 cells, and compared them to another meglitinide-related compound, nateglinide. Patch-clamp analysis using inside-out recording configuration showed that mitiglinide inhibits the Kir6.2/SUR1 channel currents in a dose-dependent manner (IC50 value, 100 nM) but does not significantly inhibit either Kir6.2/SUR2A or Kir6.2/SUR2B channel currents even at high doses (more than 10 microM). Nateglinide inhibits Kir6.2/SUR1 and Kir6.2/SUR2B channels at 100 nM, and inhibits Kir6.2/SUR2A channels at high concentrations (1 microM). Binding experiments on mitiglinide, nateglinide, and repaglinide to SUR1 expressed in COS-1 cells revealed that they inhibit the binding of [3H]glibenclamide to SUR1 (IC50 values: mitiglinide, 280 nM; nateglinide, 8 microM; repaglinide, 1.6 microM), suggesting that they all share a glibenclamide binding site. The insulin responses to glucose, mitiglinide, tolbutamide, and glibenclamide in MIN6 cells after chronic mitiglinide, nateglinide, or repaglinide treatment were comparable to those after chronic tolbutamide and glibenclamide treatment. These results indicate that, similar to the sulfonylureas, mitiglinide is highly specific to the Kir6.2/SUR1 complex, i.e., the pancreatic beta-cell K(ATP) channel, and suggest that mitiglinide may be a clinically useful anti-diabetic drug. ''' annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.entity1!=rel_df.entity2] ###Output _____no_output_____ ###Markdown Train a Relation Extraction Model ###Code !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/i2b2_clinical_rel_dataset.csv # if you need to customize the DL architecture (more layers, more features etc.)
from sparknlp_jsl.training import tf_graph %tensorflow_version 1.x tf_graph.build("relation_extraction", build_params={"input_dim": 6000, "output_dim": 3, 'batch_norm':1, "hidden_layers": [300, 200], "hidden_act": "relu", 'hidden_act_l2':1}, model_location=".", model_filename="re_with_BN") tf_graph.print_model_params("relation_extraction") data = spark.read.option("header","true").format("csv").load("i2b2_clinical_rel_dataset.csv") data = data.select( 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel','dataset') data.show(10) # you only need these columns>> 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel' # ('dataset' column is optional) data.groupby('dataset').count().show() #Annotation structure annotationType = T.StructType([ T.StructField('annotatorType', T.StringType(), False), T.StructField('begin', T.IntegerType(), False), T.StructField('end', T.IntegerType(), False), T.StructField('result', T.StringType(), False), T.StructField('metadata', T.MapType(T.StringType(), T.StringType()), False), T.StructField('embeddings', T.ArrayType(T.FloatType()), False) ]) #UDF function to convert train data to names entitities @F.udf(T.ArrayType(annotationType)) def createTrainAnnotations(begin1, end1, begin2, end2, chunk1, chunk2, label1, label2): entity1 = sparknlp.annotation.Annotation("chunk", begin1, end1, chunk1, {'entity': label1.upper(), 'sentence': '0'}, []) entity2 = sparknlp.annotation.Annotation("chunk", begin2, end2, chunk2, {'entity': label2.upper(), 'sentence': '0'}, []) entity1.annotatorType = "chunk" entity2.annotatorType = "chunk" return [entity1, entity2] #list of valid relations rels = ["TrIP", "TrAP", "TeCP", "TrNAP", "TrCP", "PIP", "TrWP", "TeRP"] #a query to select list of valid relations valid_rel_query = "(" + " OR ".join(["rel = '{}'".format(rel) for rel in rels]) + ")" data = data\ .withColumn("begin1i", F.expr("cast(firstCharEnt1 AS Int)"))\ .withColumn("end1i", F.expr("cast(lastCharEnt1 AS Int)"))\ .withColumn("begin2i", F.expr("cast(firstCharEnt2 AS Int)"))\ .withColumn("end2i", F.expr("cast(lastCharEnt2 AS Int)"))\ .where("begin1i IS NOT NULL")\ .where("end1i IS NOT NULL")\ .where("begin2i IS NOT NULL")\ .where("end2i IS NOT NULL")\ .where(valid_rel_query)\ .withColumn( "train_ner_chunks", createTrainAnnotations( "begin1i", "end1i", "begin2i", "end2i", "chunk1", "chunk2", "label1", "label2" ).alias("train_ner_chunks", metadata={'annotatorType': "chunk"})) train_data = data.where("dataset='train'") test_data = data.where("dataset='test'") !wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/generic_classifier_graph/RE_in1200D_out20.pb documenter = sparknlp.DocumentAssembler()\ .setInputCol("sentence")\ .setOutputCol("sentences") tokenizer = sparknlp.annotators.Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens")\ words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") dependency_parser = sparknlp.annotators.DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["sentences", "pos_tags", "tokens"])\ .setOutputCol("dependencies") # set training params and upload model 
graph (see ../Healthcare/8.Generic_Classifier.ipynb) reApproach = sparknlp_jsl.annotator.RelationExtractionApproach()\ .setInputCols(["embeddings", "pos_tags", "train_ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setLabelColumn("rel")\ .setEpochsNumber(50)\ .setBatchSize(200)\ .setDropout(0.5)\ .setLearningRate(0.001)\ .setModelFile("/content/RE_in1200D_out20.pb")\ .setFixImbalance(True)\ .setFromEntity("begin1i", "end1i", "label1")\ .setToEntity("begin2i", "end2i", "label2")\ .setOutputLogsPath('/content') finisher = sparknlp.Finisher()\ .setInputCols(["relations"])\ .setOutputCols(["relations_out"])\ .setCleanAnnotations(False)\ .setValueSplitSymbol(",")\ .setAnnotationSplitSymbol(",")\ .setOutputAsArray(False) train_pipeline = Pipeline(stages=[ documenter, tokenizer, words_embedder, pos_tagger, dependency_parser, reApproach, finisher ]) %time rel_model = train_pipeline.fit(train_data) rel_model.stages[-2] rel_model.stages[-2].write().overwrite().save('custom_RE_model') result = rel_model.transform(test_data) recall = result\ .groupBy("rel")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("recall"))\ .select( F.col("rel").alias("relation"), F.format_number("recall", 2).alias("recall"))\ .show() performance = result\ .where("relations_out <> ''")\ .groupBy("relations_out")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("precision"))\ .select( F.col("relations_out").alias("relation"), F.format_number("precision", 2).alias("precision"))\ .show() result_df = result.select(F.explode(F.arrays_zip('relations.result', 'relations.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("relation"), F.expr("cols['1']['entity1']").alias("entity1"), F.expr("cols['1']['entity1_begin']").alias("entity1_begin"), F.expr("cols['1']['entity1_end']").alias("entity1_end"), F.expr("cols['1']['chunk1']").alias("chunk1"), F.expr("cols['1']['entity2']").alias("entity2"), F.expr("cols['1']['entity2_begin']").alias("entity2_begin"), F.expr("cols['1']['entity2_end']").alias("entity2_end"), F.expr("cols['1']['chunk2']").alias("chunk2"), F.expr("cols['1']['confidence']").alias("confidence") ) result_df.show(50, truncate=100) ###Output +--------+---------+-------------+-----------+--------------------------------------------------+-------+-------------+-----------+-----------------------------------------------------------+----------+ |relation| entity1|entity1_begin|entity1_end| chunk1|entity2|entity2_begin|entity2_end| chunk2|confidence| +--------+---------+-------------+-----------+--------------------------------------------------+-------+-------------+-----------+-----------------------------------------------------------+----------+ | TrAP|TREATMENT| 3| 9| lotions|PROBLEM| 34| 42| incisions|0.51796293| | PIP| PROBLEM| 196| 239| an inferior and right ventricular infarction|PROBLEM| 145| 176| 1-mm st depressions in i and avl|0.75891846| | TeRP| TEST| 1| 20| abdominal ultrasound|PROBLEM| 54| 71| gallbladder sludge| 0.705603| | TrAP|TREATMENT| 99| 133| ir placement of a drainage catheter|PROBLEM| 139| 173| his abdominopelvic fluid collection| 0.9274658| | TeRP| TEST| 1| 10| urinalysis|PROBLEM| 72| 95| positive red blood cells| 0.5889108| | TeRP| TEST| 1| 8| a ct abd|PROBLEM| 16| 36| 8mm obstructing stone| 0.6372486| | TeRP| TEST| 1| 9| pathology|PROBLEM| 19| 77|poorly differentiated squamous cell carcinoma of the cervix| 0.6519883| | TeRP| TEST| 1| 14| urine cultures|PROBLEM| 36| 49| klebsiella uti| 0.5045418| | TrAP|TREATMENT| 96| 99| cpap|PROBLEM| 57| 76| abdominal 
distention|0.65509385| | TrNAP|TREATMENT| 119| 126| diuresis|PROBLEM| 1| 20| creatinine elevation| 0.6303186| | TrAP|TREATMENT| 10| 15| repair|PROBLEM| 20| 32| neck fracture| 0.7149087| | TrCP|TREATMENT| 36| 85|percutaneous endoscopic gastrostomy tube placement|PROBLEM| 3| 22| failed swallow study| 0.9386631| | TrAP|TREATMENT| 40| 85| postradical cystoprostatectomy with ileal loop|PROBLEM| 91| 122| locally invasive prostate cancer|0.66049594| | TrAP|TREATMENT| 90| 99| management|PROBLEM| 104| 129| her pulmonary hypertension|0.64217955| | TeRP| TEST| 38| 58| previous examinations|PROBLEM| 1| 15| this ecchymosis| 0.5188841| | TeRP| TEST| 1| 14| the ultrasound|PROBLEM| 22| 35| biliary sludge| 0.7225746| | TrAP|TREATMENT| 117| 137| intravenous diltiazem|PROBLEM| 40| 58| atrial fibrillation|0.58163214| | TeRP| PROBLEM| 1| 17| the complications|PROBLEM| 64| 80| pulmonary embolus| 0.6874196| | TrAP|TREATMENT| 14| 20| removal|PROBLEM| 25| 33| cbd stone|0.55907726| | TrCP|TREATMENT| 22| 37| the hickman site|PROBLEM| 8| 15| bleeding|0.51851994| | TrCP| TEST| 79| 101| frozen section analysis|PROBLEM| 70| 74| tumor| 0.6411875| | TrAP| PROBLEM| 341| 362| acanthamoeba sinusitis|PROBLEM| 309| 336| any bacterial superinfection|0.67171013| | TrAP|TREATMENT| 240| 251| levofloxacin|PROBLEM| 341| 362| acanthamoeba sinusitis| 0.7567457| | TrNAP|TREATMENT| 45| 54| vancomycin|PROBLEM| 149| 190| enteric and non-enteric gram negative rods|0.52105546| | TrNAP| TEST| 17| 31| mediastinoscopy|PROBLEM| 185| 195| anthracosis|0.52138793| | PIP| PROBLEM| 55| 96| an additional dysfunction on the left side|PROBLEM| 109| 140| wide-spread cortical dysfunction| 0.7988863| | TeRP| TEST| 1| 3| mri|PROBLEM| 44| 79| sz effects in the left temporal lobe| 0.6245786| | TrIP|TREATMENT| 38| 45| morphine|PROBLEM| 51| 54| pain| 0.661562| | PIP| PROBLEM| 138| 171| anteroseptal myocardial infarction|PROBLEM| 93| 112| q waves in leads iii| 0.7123669| | PIP| PROBLEM| 62| 78| myasthenia gravis|PROBLEM| 106| 112| thymoma| 0.5486303| | TrAP|TREATMENT| 167| 176| vancomycin|PROBLEM| 232| 241| gi sources|0.71740705| | TeCP| TEST| 1| 17| an echocardiogram|PROBLEM| 35| 58| a dilated left ventricle|0.69256866| | TrAP|TREATMENT| 74| 84| clindamycin|PROBLEM| 34| 48| his throat pain| 0.8609432| | TeRP| PROBLEM| 1| 36| his gastrointestinal bleeding issues|PROBLEM| 151| 202| an non-steroidal anti-inflammatory drugs gastropathy|0.74512297| | TeCP| TEST| 1| 6| an ekg|PROBLEM| 14| 34| st segment depression|0.79695517| | TrAP|TREATMENT| 56| 65| management|PROBLEM| 70| 91| the increased bleeding|0.77564555| | PIP| PROBLEM| 113| 142| acanthamoeba infection of skin|PROBLEM| 185| 204| neurologic infection| 0.5163852| | TeRP| TEST| 1| 34| subsequent cardiac catheterization|PROBLEM| 108| 148| severe two vessel coronary artery disease|0.70991594| | TrAP|TREATMENT| 148| 171| post-operative narcotics|PROBLEM| 111| 133| depressed mental status|0.56667066| | TeRP| TEST| 1| 11| chest x-ray|PROBLEM| 29| 38| infiltrate|0.66307384| | TeRP| TEST| 1| 15| his chest x-ray|PROBLEM| 23| 61| diffuse patchy infiltrates on left lung| 0.5446951| | PIP| PROBLEM| 93| 120| a left lower lobe infiltrate|PROBLEM| 124| 132| pneumonia|0.57581437| | TeRP| TEST| 1| 30| a computerized tomography scan|PROBLEM| 138| 183| proximal middle cerebral artery stem occlusion| 0.8552488| | TrNAP| PROBLEM| 182| 190| gastritis|PROBLEM| 23| 41| barrett's esophagus| 0.7165929| | TeCP| TEST| 51| 69| coagulopathy workup|PROBLEM| 29| 47| the subgaleal bleed| 0.648941| | TeRP| TEST| 22| 53| a perfusion , 
diffusion mr study|PROBLEM| 100| 114| viable penumbra| 0.5115292| | TeRP| TEST| 1| 9| a ct scan|PROBLEM| 77| 94| sphenoid sinusitis| 0.8000102| | PIP| PROBLEM| 89| 111| duke &apos;s grade b-ii|PROBLEM| 63| 77| colon carcinoma|0.63523185| | TeRP| TEST| 1| 17| an echocardiogram|PROBLEM| 134| 144| hypokinesis|0.75922495| | TeCP| TEST| 344| 354| chest x-ray|PROBLEM| 382| 406| small bilateral effusions| 0.5533738| +--------+---------+-------------+-----------+--------------------------------------------------+-------+-------------+-----------+-----------------------------------------------------------+----------+ only showing top 50 rows ###Markdown Load trained model from disk ###Code import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_clinical",'en','clinical/models')\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") loaded_re_Model = RelationExtractionModel()\ .load("custom_RE_model")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"]) \ .setOutputCol("relations")\ .setRelationPairs(["problem-test", "problem-treatment"])\ .setPredictionThreshold(0.9)\ .setMaxSyntacticDistance(4) trained_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, loaded_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") loaded_re_model = trained_pipeline.fit(empty_data) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . 
The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ loaded_re_model_light = LightPipeline(loaded_re_model) annotations = loaded_re_model_light.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output ner_clinical download started this may take some time. Approximate size to download 13.8 MB [OK!] ###Markdown End-to-end trained Models using BioBERT Latest addition to Spark NLP for Healthcare - (Requires Spark NLP 2.7.3+ and Spark NLP JSL 2.7.3+) These models are trained as end-to-end BERT models using BioBERT and ported into the Spark NLP ecosystem. They offer SOTA performance on most benchmark tasks and outperform our existing Relation Extraction Models. 2.1 Clinical ReDL ###Code clinical_ner_tagger = sparknlp.annotators.NerDLModel()\
 .pretrained("ner_clinical", "en", "clinical/models")\
 .setInputCols("sentences", "tokens", "embeddings")\
 .setOutputCol("ner_tags") clinical_re_ner_chunk_filter = RENerChunksFilter() \
 .setInputCols(["ner_chunks", "dependencies"])\
 .setOutputCol("re_ner_chunks")\
 .setMaxSyntacticDistance(4)\
 .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated) clinical_re_Model = RelationExtractionDLModel() \
 .pretrained('redl_clinical_biobert', "en", "clinical/models")\
 .setPredictionThreshold(0.9)\
 .setInputCols(["re_ner_chunks", "sentences"]) \
 .setOutputCol("relations") loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_ner_chunk_filter, clinical_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection .
She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!="O")] rel_df ###Output _____no_output_____ ###Markdown 3.1 Clinical Temporal Events ReDL ###Code events_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_events_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") events_re_ner_chunk_filter = RENerChunksFilter() \ .setInputCols(["ner_chunks", "dependencies"])\ .setOutputCol("re_ner_chunks") events_re_Model = RelationExtractionDLModel() \ .pretrained('redl_temporal_events_biobert', "en", "clinical/models")\ .setPredictionThreshold(0.5)\ .setInputCols(["re_ner_chunks", "sentences"]) \ .setOutputCol("relations") loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, events_ner_tagger, ner_chunker, dependency_parser, events_re_ner_chunk_filter, events_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="She is diagnosed as cancer in 1991. Then she was admitted to Mayo Clinic in May 2000 and discharged in October 2001" annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!="O")] rel_df text ="On 9–28-92, the patient will return for chemotherapy and she will follow up with her primary doctor, for PT and Coumadin dosing on Monday." 
annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df = rel_df[(rel_df.relation!="O")] rel_df[(rel_df.relation!="O")&(rel_df.entity1!=rel_df.entity2)] text ="""She is admitted to The John Hopkins Hospital 2 days ago with a history of gestational diabetes mellitus diagnosed. She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine, 12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge. """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df[(rel_df.relation!="O")] ###Output _____no_output_____ ###Markdown 4.1 Human Phenotype - Gene ReDL ###Code pgr_ner_tagger = sparknlp.annotators.NerDLModel()\
 .pretrained("ner_human_phenotype_gene_clinical", "en", "clinical/models")\
 .setInputCols("sentences", "tokens", "embeddings")\
 .setOutputCol("ner_tags") pgr_re_ner_chunk_filter = RENerChunksFilter() \
 .setInputCols(["ner_chunks", "dependencies"])\
 .setOutputCol("re_ner_chunks")\
 .setMaxSyntacticDistance(4) pgr_re_Model = RelationExtractionDLModel() \
 .pretrained('redl_human_phenotype_gene_biobert', "en", "clinical/models")\
 .setPredictionThreshold(0.5)\
 .setInputCols(["re_ner_chunks", "sentences"]) \
 .setOutputCol("relations") loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, pgr_ner_tagger, ner_chunker, dependency_parser, pgr_re_ner_chunk_filter, pgr_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text = "She has a retinal degeneration, hearing loss and renal failure, short stature, \ Mutations in the SH3PXD2B gene coding for the Tks4 protein are responsible for the autosomal recessive." annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!="0")] rel_df ###Output _____no_output_____ ###Markdown 5.1 Drug-Drug Interaction ReDL ###Code ddi_ner_tagger = NerDLModel()\
 .pretrained("ner_posology", "en", "clinical/models")\
 .setInputCols("sentences", "tokens", "embeddings")\
 .setOutputCol("ner_tags") ddi_re_ner_chunk_filter = RENerChunksFilter() \
 .setInputCols(["ner_chunks", "dependencies"])\
 .setOutputCol("re_ner_chunks")\
 .setMaxSyntacticDistance(4) ddi_re_Model = RelationExtractionDLModel() \
 .pretrained('redl_drug_drug_interaction_biobert', "en", "clinical/models")\
 .setPredictionThreshold(0.9)\
 .setInputCols(["re_ner_chunks", "sentences"]) \
 .setOutputCol("relations") loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ddi_ner_tagger, ner_chunker, dependency_parser, ddi_re_ner_chunk_filter, ddi_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text='When carbamazepine is withdrawn from the combination therapy, aripiprazole dose should then be reduced.
\ If additional adrenergic drugs are to be administered by any route, \ they should be used with caution because the pharmacologically predictable sympathetic effects of Metformin may be potentiated' annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown 6.1 Chemical–Protein Interactions (ChemProt) ReDL ###Code chemprot_ner_tagger = NerDLModel()\ .pretrained("ner_chemprot_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") chemprot_re_ner_chunk_filter = RENerChunksFilter() \ .setInputCols(["ner_chunks", "dependencies"])\ .setOutputCol("re_ner_chunks")\ .setMaxSyntacticDistance(4) chemprot_re_Model = RelationExtractionDLModel() \ .pretrained('redl_chemprot_biobert', "en", "clinical/models")\ .setPredictionThreshold(0.9)\ .setInputCols(["re_ner_chunks", "sentences"]) \ .setOutputCol("relations") loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, chemprot_ner_tagger, ner_chunker, dependency_parser, chemprot_re_ner_chunk_filter, chemprot_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text=''' In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels (Kir6.2/SUR1, Kir6.2/SUR2A, and Kir6.2/SUR2B) reconstituted in COS-1 cells, and compared them to another meglitinide-related compound, nateglinide. Patch-clamp analysis using inside-out recording configuration showed that mitiglinide inhibits the Kir6.2/SUR1 channel currents in a dose-dependent manner (IC50 value, 100 nM) but does not significantly inhibit either Kir6.2/SUR2A or Kir6.2/SUR2B channel currents even at high doses (more than 10 microM). Nateglinide inhibits Kir6.2/SUR1 and Kir6.2/SUR2B channels at 100 nM, and inhibits Kir6.2/SUR2A channels at high concentrations (1 microM). Binding experiments on mitiglinide, nateglinide, and repaglinide to SUR1 expressed in COS-1 cells revealed that they inhibit the binding of [3H]glibenclamide to SUR1 (IC50 values: mitiglinide, 280 nM; nateglinide, 8 microM; repaglinide, 1.6 microM), suggesting that they all share a glibenclamide binding site. The insulin responses to glucose, mitiglinide, tolbutamide, and glibenclamide in MIN6 cells after chronic mitiglinide, nateglinide, or repaglinide treatment were comparable to those after chronic tolbutamide and glibenclamide treatment. These results indicate that, similar to the sulfonylureas, mitiglinide is highly specific to the Kir6.2/SUR1 complex, i.e., the pancreatic beta-cell K(ATP) channel, and suggest that mitiglinide may be a clinically useful anti-diabetic drug. ''' annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.entity1!=rel_df.entity2] ###Output _____no_output_____ ###Markdown ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/10.Clinical_Relation_Extraction.ipynb) Clinical Relation Extraction Model Colab Setup ###Code import json with open('workshop_license_keys_365.json') as f: license_keys = json.load(f) license_keys.keys() import os # Install java ! apt-get update -qq ! 
apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] ! java -version secret = license_keys['SECRET'] os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE'] os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID'] os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY'] version = license_keys['PUBLIC_VERSION'] jsl_version = license_keys['JSL_VERSION'] ! pip install --ignore-installed -q pyspark==2.4.4 ! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret ! pip install --ignore-installed -q spark-nlp==$version import sparknlp print (sparknlp.version()) import json import os from pyspark.ml import Pipeline from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * import sparknlp_jsl spark = sparknlp_jsl.start(secret) ###Output _____no_output_____ ###Markdown Posology Relation Extraction

This is a demonstration of using SparkNLP for extracting posology relations. The following relations are supported: DRUG-DOSAGE, DRUG-FREQUENCY, DRUG-ADE (Adverse Drug Events), DRUG-FORM, DRUG-ROUTE, DRUG-DURATION, DRUG-REASON, DRUG-STRENGTH.

The model has been validated against the posology dataset described in (Magge, Scotch, & Gonzalez-Hernandez, 2018).

| Relation | Recall | Precision | F1 | F1 (Magge, Scotch, & Gonzalez-Hernandez, 2018) |
| --- | --- | --- | --- | --- |
| DRUG-ADE | 0.66 | 1.00 | **0.80** | 0.76 |
| DRUG-DOSAGE | 0.89 | 1.00 | **0.94** | 0.91 |
| DRUG-DURATION | 0.75 | 1.00 | **0.85** | 0.92 |
| DRUG-FORM | 0.88 | 1.00 | **0.94** | 0.95* |
| DRUG-FREQUENCY | 0.79 | 1.00 | **0.88** | 0.90 |
| DRUG-REASON | 0.60 | 1.00 | **0.75** | 0.70 |
| DRUG-ROUTE | 0.79 | 1.00 | **0.88** | 0.95* |
| DRUG-STRENGTH | 0.95 | 1.00 | **0.98** | 0.97 |

*Magge, Scotch, Gonzalez-Hernandez (2018) collapsed DRUG-FORM and DRUG-ROUTE into a single relation. ###Code import os import re import pyspark import sparknlp import sparknlp_jsl import functools import json import numpy as np from scipy import spatial import pyspark.sql.functions as F import pyspark.sql.types as T from pyspark.sql import SparkSession from pyspark.ml import Pipeline from sparknlp_jsl.annotator import * from sparknlp.annotator import * from sparknlp.base import * ###Output _____no_output_____ ###Markdown **Build pipeline using SparkNLP pretrained models and the relation extraction model optimized for posology**. The precision of the RE model is controlled by "setMaxSyntacticDistance(4)", which sets the maximum syntactic distance between named entities to 4. A larger value will improve recall at the expense of lower precision. A value of 4 leads to perfect precision on this benchmark (i.e. the model doesn't produce any false positives) and reasonably good recall.
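###Markdown **Optional: exploring the syntactic distance tradeoff.** The cell below is a minimal illustrative sketch, not part of the original benchmark: it reuses the pipeline stages defined in the next cells (documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_chunker, dependency_parser), so run it only after those are created. It simply counts how many relations `posology_re` extracts from one of this notebook's own sample sentences at a few arbitrary distance settings. ###Code # Minimal sketch (assumption: the posology pipeline stages defined in the next cell already exist).
# A larger syntactic distance admits more distant entity pairs, trading precision for recall.
sample = "The patient was prescribed 1 unit of Advil for 5 days after meals."
for distance in [2, 4, 6]:
    re_stage = RelationExtractionModel()\
        .pretrained("posology_re", "en", "clinical/models")\
        .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
        .setOutputCol("relations")\
        .setMaxSyntacticDistance(distance)
    p = Pipeline(stages=[documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_chunker, dependency_parser, re_stage])
    lp = LightPipeline(p.fit(spark.createDataFrame([[""]]).toDF("text")))
    print(distance, len(lp.fullAnnotate(sample)[0]["relations"])) ###Output _____no_output_____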
###Code documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") tokenizer = sparknlp.annotators.Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens") words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") ner_tagger = NerDLModel()\ .pretrained("ner_posology", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_chunker = NerConverter()\ .setInputCols(["sentences", "tokens", "ner_tags"])\ .setOutputCol("ner_chunks") dependency_parser = DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["sentences", "pos_tags", "tokens"])\ .setOutputCol("dependencies") reModel = RelationExtractionModel()\ .pretrained("posology_re", "en", "clinical/models")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4) pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_chunker, dependency_parser, reModel ]) ###Output embeddings_clinical download started this may take some time. Approximate size to download 1.6 GB [OK!] pos_clinical download started this may take some time. Approximate size to download 1.7 MB [OK!] ner_posology download started this may take some time. Approximate size to download 13.7 MB [OK!] dependency_conllu download started this may take some time. Approximate size to download 16.6 MB [OK!] ###Markdown **Create empty dataframe** ###Code empty_data = spark.createDataFrame([[""]]).toDF("text") ###Output _____no_output_____ ###Markdown **Create a light pipeline for annotating free text** ###Code model = pipeline.fit(empty_data) lmodel = sparknlp.base.LightPipeline(model) ###Output _____no_output_____ ###Markdown **Sample free text** ###Code text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day. 
""" results = lmodel.fullAnnotate(text) ###Output _____no_output_____ ###Markdown **Show extracted relations** ###Code for rel in results[0]["relations"]: print("{}({}={} - {}={})".format( rel.result, rel.metadata['entity1'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['chunk2'] )) import pandas as pd def get_relations_df (results): rel_pairs=[] for rel in results[0]['relations']: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df rel_df = get_relations_df (results) rel_df text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . 
""" annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown Clinical RE The set of relations defined in the 2010 i2b2 relation challengeTrIP: A certain treatment has improved or cured a medical problem (eg, ‘infection resolved with antibiotic course’)TrWP: A patient's medical problem has deteriorated or worsened because of or in spite of a treatment being administered (eg, ‘the tumor was growing despite the drain’)TrCP: A treatment caused a medical problem (eg, ‘penicillin causes a rash’)TrAP: A treatment administered for a medical problem (eg, ‘Dexamphetamine for narcolepsy’)TrNAP: The administration of a treatment was avoided because of a medical problem (eg, ‘Ralafen which is contra-indicated because of ulcers’)TeRP: A test has revealed some medical problem (eg, ‘an echocardiogram revealed a pericardial effusion’)TeCP: A test was performed to investigate a medical problem (eg, ‘chest x-ray done to rule out pneumonia’)PIP: Two problems are related to each other (eg, ‘Azotemia presumed secondary to sepsis’) ###Code clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_clinical", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . 
However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output _____no_output_____ ###Markdown Train a Relation Extraction Model ###Code data = spark.read.option("header","true").format("csv").load("i2b2_clinical_relfeatures.csv") data.show(10) #Annotation structure annotationType = T.StructType([ T.StructField('annotatorType', T.StringType(), False), T.StructField('begin', T.IntegerType(), False), T.StructField('end', T.IntegerType(), False), T.StructField('result', T.StringType(), False), T.StructField('metadata', T.MapType(T.StringType(), T.StringType()), False), T.StructField('embeddings', T.ArrayType(T.FloatType()), False) ]) #UDF function to convert train data to names entitities @F.udf(T.ArrayType(annotationType)) def createTrainAnnotations(begin1, end1, begin2, end2, chunk1, chunk2, label1, label2): entity1 = sparknlp.annotation.Annotation("chunk", begin1, end1, chunk1, {'entity': label1.upper(), 'sentence': '0'}, []) entity2 = sparknlp.annotation.Annotation("chunk", begin2, end2, chunk2, {'entity': label2.upper(), 'sentence': '0'}, []) entity1.annotatorType = "chunk" entity2.annotatorType = "chunk" return [entity1, entity2] #list of valid relations rels = ["TrIP", "TrAP", "TeCP", "TrNAP", "TrCP", "PIP", "TrWP", "TeRP"] #a query to select list of valid relations valid_rel_query = "(" + " OR ".join(["rel = '{}'".format(rel) for rel in rels]) + ")" data = data\ .withColumn("begin1i", F.expr("cast(firstCharEnt1 AS Int)"))\ .withColumn("end1i", F.expr("cast(lastCharEnt1 AS Int)"))\ .withColumn("begin2i", F.expr("cast(firstCharEnt2 AS Int)"))\ .withColumn("end2i", F.expr("cast(lastCharEnt2 AS Int)"))\ .where("begin1i IS NOT NULL")\ .where("end1i IS NOT NULL")\ .where("begin2i IS NOT NULL")\ .where("end2i IS NOT NULL")\ .where(valid_rel_query)\ .withColumn( "train_ner_chunks", createTrainAnnotations( "begin1i", "end1i", "begin2i", "end2i", "chunk1", "chunk2", "label1", "label2" ).alias("train_ner_chunks", metadata={'annotatorType': "chunk"})) train_data = data.where("dataset='train'") test_data = data.where("dataset='test'") documenter = sparknlp.DocumentAssembler()\ .setInputCol("sentence")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") tokenizer = sparknlp.annotators.Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens")\ words_embedder = WordEmbeddingsModel()\ 
.pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") dependency_parser = sparknlp.annotators.DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["document", "pos_tags", "tokens"])\ .setOutputCol("dependencies") # set training params and upload model graph (see ../Healthcare/8.Generic_Classifier.ipynb) reApproach = sparknlp_jsl.annotator.RelationExtractionApproach()\ .setInputCols(["embeddings", "pos_tags", "train_ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setLabelColumn("rel")\ .setEpochsNumber(50)\ .setBatchSize(200)\ .setLearningRate(0.001)\ .setModelFile("/content/RE.in1200D.out20.pb")\ .setFixImbalance(True)\ .setValidationSplit(0.2)\ .setFromEntity("begin1i", "end1i", "label1")\ .setToEntity("begin2i", "end2i", "label2") finisher = sparknlp.Finisher()\ .setInputCols(["relations"])\ .setOutputCols(["relations_out"])\ .setCleanAnnotations(False)\ .setValueSplitSymbol(",")\ .setAnnotationSplitSymbol(",")\ .setOutputAsArray(False) train_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, dependency_parser, reApproach, finisher ]) rel_model = train_pipeline.fit(train_data) rel_model.stages[-2] rel_model.stages[-2].write().overwrite().save('custom_RE_model') result = rel_model.transform(test_data) recall = result\ .groupBy("rel")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("recall"))\ .select( F.col("rel").alias("relation"), F.format_number("recall", 2).alias("recall"))\ .show() performance = result\ .where("relations_out <> ''")\ .groupBy("relations_out")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("precision"))\ .select( F.col("relations_out").alias("relation"), F.format_number("precision", 2).alias("precision"))\ .show() result_df = result.select(F.explode(F.arrays_zip('relations.result', 'relations.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("relation"), F.expr("cols['1']['entity1']").alias("entity1"), F.expr("cols['1']['entity1_begin']").alias("entity1_begin"), F.expr("cols['1']['entity1_end']").alias("entity1_end"), F.expr("cols['1']['chunk1']").alias("chunk1"), F.expr("cols['1']['entity2']").alias("entity2"), F.expr("cols['1']['entity2_begin']").alias("entity2_begin"), F.expr("cols['1']['entity2_end']").alias("entity2_end"), F.expr("cols['1']['chunk2']").alias("chunk2"), F.expr("cols['1']['confidence']").alias("confidence") ) result_df.show(50, truncate=100) ###Output +--------+---------+-------------+-----------+----------------------------------+-------+-------------+-----------+-------------------------------------------------------------------+----------+ |relation| entity1|entity1_begin|entity1_end| chunk1|entity2|entity2_begin|entity2_end| chunk2|confidence| +--------+---------+-------------+-----------+----------------------------------+-------+-------------+-----------+-------------------------------------------------------------------+----------+ | TeRP| TEST| 1| 14| an angiography|PROBLEM| 22| 44| bleeding in two vessels| 1.0| | TrNAP|TREATMENT| 1| 12| his coumadin|PROBLEM| 44| 58| his acute bleed| 0.5183256| | TeCP| TEST| 24| 43| a flex sigmoidoscopy|PROBLEM| 78| 106| old blood in the rectal vault| 0.8299348| | TeRP| TEST| 24| 43| a flex sigmoidoscopy|PROBLEM| 115| 139| active source of bleeding|0.99586946| | TeCP| 
TEST| 50| 62| a colonoscopy|PROBLEM| 84| 91| bleeding|0.99960357| | PIP| PROBLEM| 71| 83| mucosal signs|PROBLEM| 88| 114| moderate ulcerative colitis|0.99695516| | TeRP| TEST| 19| 38| a repeat colonoscopy|PROBLEM| 71| 83| mucosal signs|0.99951684| | TeRP| TEST| 19| 38| a repeat colonoscopy|PROBLEM| 88| 114| moderate ulcerative colitis|0.99999833| | TeRP| TEST| 19| 38| a repeat colonoscopy|PROBLEM| 133| 199|8 mm ulcer at junction of distal descending colon and sigmoid colon| 0.9996481| | TeRP| TEST| 19| 38| a repeat colonoscopy|PROBLEM| 121| 126| polyps|0.98263663| | TrAP|TREATMENT| 24| 44| the patient 's asacol|PROBLEM| 2| 19| ulcerative colitis| 0.9990374| | TrWP|TREATMENT| 41| 56| his home regimen|PROBLEM| 2| 8| cad/chf| 0.628876| | TrNAP|TREATMENT| 41| 56| his home regimen|PROBLEM| 110| 127| his acute gi bleed| 0.8706728| | TrAP|TREATMENT| 40| 63| his home insulin regimen|PROBLEM| 2| 5| dm-2|0.63458395| | PIP| PROBLEM| 77| 90| recurrent dvts|PROBLEM| 107| 110| a pe| 1.0| | TrAP|TREATMENT| 155| 179| long-term anticoagulation|PROBLEM| 107| 110| a pe|0.99857485| | TrAP|TREATMENT| 155| 179| long-term anticoagulation|PROBLEM| 77| 90| recurrent dvts| 0.8396774| | TrCP|TREATMENT| 75| 94| v. winchester filter|PROBLEM| 36| 43| bleeding|0.54132706| | TrAP|TREATMENT| 4| 15| pioglitazone|PROBLEM| 86| 87| dm| 0.9999994| | TrAP|TREATMENT| 4| 13| mesalamine|PROBLEM| 147| 164| ulcerative colitis| 1.0| | PIP| PROBLEM| 147| 164| ulcerative colitis|PROBLEM| 177| 189| severe flares| 1.0| | TeRP| TEST| 1| 21| mesenteric angiograpm|PROBLEM| 46| 60| bleeding vessel| 0.9948527| | TrAP|TREATMENT| 25| 41| coil embolization|PROBLEM| 46| 60| bleeding vessel| 0.9995956| | TrAP|TREATMENT| 48| 51| cabg|PROBLEM| 40| 42| cad| 0.9996532| | TrAP|TREATMENT| 65| 90| long term anti-coagulation|PROBLEM| 55| 60| dvt/pe|0.99995995| | PIP| PROBLEM| 94| 111| ulcerative colitis|PROBLEM| 137| 141| brbpr| 1.0| | TrAP|TREATMENT| 116| 121| asacol|PROBLEM| 94| 111| ulcerative colitis| 0.6507673| | PIP| PROBLEM| 18| 37| lower abdominal pain|PROBLEM| 73| 81| a symptom| 0.9999877| | TrAP|TREATMENT| 29| 41| ciprofloxacin|PROBLEM| 47| 51| a uti|0.99999857| | TeRP| TEST| 9| 22| an initial dre|PROBLEM| 36| 39| clot| 0.9999976| | TrAP|TREATMENT| 12| 18| 3v-cabg|PROBLEM| 4| 6| cad| 0.9998925| | TrAP|TREATMENT| 31| 39| pacemaker|PROBLEM| 4| 25| sinus node dysfunction| 0.9999317| | TrAP|TREATMENT| 66| 73| warfarin|PROBLEM| 43| 44| pe| 0.9993474| | TrAP|TREATMENT| 66| 73| warfarin|PROBLEM| 8| 10| dvt| 0.9997149| | PIP| PROBLEM| 8| 10| dvt|PROBLEM| 43| 44| pe|0.98846567| | TeRP| PROBLEM| 13| 37| increased tracer activity|PROBLEM| 146| 160| active bleeding| 0.9550868| | PIP| PROBLEM| 24| 57|left adrenal fat-containing lesion|PROBLEM| 75| 87| a myelolipoma| 1.0| | PIP| PROBLEM| 84| 108| a long-standing complaint|PROBLEM| 55| 60| nausea| 0.9999863| | PIP| PROBLEM| 84| 108| a long-standing complaint|PROBLEM| 66| 73| vomiting| 0.9997868| | TrAP|TREATMENT| 58| 66| resection|PROBLEM| 71| 108| an abscess in the left lower extremity| 0.9999976| | TrCP|TREATMENT| 98| 100| ddi|PROBLEM| 86| 93| vomiting| 0.9989371| | TrCP|TREATMENT| 98| 100| ddi|PROBLEM| 69| 80| pancytopenia| 0.9695827| | TrAP|TREATMENT| 17| 43| any anti retroviral therapy|PROBLEM| 86| 93| vomiting| 0.9931377| | TrCP|TREATMENT| 17| 43| any anti retroviral therapy|PROBLEM| 69| 80| pancytopenia|0.99991405| | PIP| PROBLEM| 14| 21| the pain|PROBLEM| 26| 39| a burning pain| 0.996968| | TeRP| TEST| 19| 27| a ct scan|PROBLEM| 42| 82| fatty infiltration of her liver diffusely| 1.0| | TeRP| TEST| 19| 
27| a ct scan|PROBLEM| 89| 130| a 1 cm cyst in the right lobe of the liver|0.99998033| | TeRP| TEST| 1| 24| her alkaline phosphatase|PROBLEM| 29| 45| slightly elevated| 0.9999999| | TeRP| TEST| 1| 11| her amylase|PROBLEM| 16| 30| mildly elevated| 1.0| | TeCP| TEST| 164| 176| an evaluation|PROBLEM| 181| 214| new abscess in her left lower calf|0.87957954| +--------+---------+-------------+-----------+----------------------------------+-------+-------------+-----------+-------------------------------------------------------------------+----------+ only showing top 50 rows ###Markdown Load trained model from disk ###Code loaded_re_Model = RelationExtractionModel() \ .load("custom_RE_model")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"]) \ .setOutputCol("relations")\ .setRelationPairs(["problem-test", "problem-treatment"])\ .setPredictionThreshold(0.9)\ .setMaxSyntacticDistance(4) trained_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, loaded_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("sentence") loaded_re_model = trained_pipeline.fit(empty_data) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . 
The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ loaded_re_model_light = LightPipeline(loaded_re_model) annotations = loaded_re_model_light.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output _____no_output_____ ###Markdown ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/10.Clinical_Relation_Extraction.ipynb) Clinical Relation Extraction Model Colab Setup ###Code import json with open('workshop_license_keys_365.json') as f: license_keys = json.load(f) license_keys.keys() import os # Install java ! apt-get update -qq ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] ! java -version secret = license_keys['SECRET'] os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE'] os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID'] os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY'] version = license_keys['PUBLIC_VERSION'] jsl_version = license_keys['JSL_VERSION'] ! pip install --ignore-installed -q pyspark==2.4.4 ! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret ! pip install --ignore-installed -q spark-nlp==$version import sparknlp print (sparknlp.version()) import json import os from pyspark.ml import Pipeline from pyspark.sql import SparkSession from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * import sparknlp_jsl spark = sparknlp_jsl.start(secret) ###Output _____no_output_____ ###Markdown 1. Posology Relation Extraction

This is a demonstration of using SparkNLP for extracting posology relations. The following relations are supported:

DRUG-DOSAGE
DRUG-FREQUENCY
DRUG-ADE (Adverse Drug Events)
DRUG-FORM
DRUG-ROUTE
DRUG-DURATION
DRUG-REASON
DRUG-STRENGTH

The model has been validated against the posology dataset described in (Magge, Scotch, & Gonzalez-Hernandez, 2018).

| Relation | Recall | Precision | F1 | F1 (Magge, Scotch, & Gonzalez-Hernandez, 2018) |
| --- | --- | --- | --- | --- |
| DRUG-ADE | 0.66 | 1.00 | **0.80** | 0.76 |
| DRUG-DOSAGE | 0.89 | 1.00 | **0.94** | 0.91 |
| DRUG-DURATION | 0.75 | 1.00 | **0.85** | 0.92 |
| DRUG-FORM | 0.88 | 1.00 | **0.94** | 0.95* |
| DRUG-FREQUENCY | 0.79 | 1.00 | **0.88** | 0.90 |
| DRUG-REASON | 0.60 | 1.00 | **0.75** | 0.70 |
| DRUG-ROUTE | 0.79 | 1.00 | **0.88** | 0.95* |
| DRUG-STRENGTH | 0.95 | 1.00 | **0.98** | 0.97 |

*Magge, Scotch, Gonzalez-Hernandez (2018) collapsed DRUG-FORM and DRUG-ROUTE into a single relation.
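###Markdown As a quick sanity check, the F1 column above is simply the harmonic mean of the precision and recall columns: ###Code
# F1 is the harmonic mean of precision and recall.
def f1(precision, recall):
    return 2 * precision * recall / (precision + recall)

# Reproduce two rows from the table above:
print(round(f1(1.00, 0.89), 2))  # DRUG-DOSAGE -> 0.94
print(round(f1(1.00, 0.66), 2))  # DRUG-ADE -> 0.8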
###Code import os import re import pyspark import sparknlp import sparknlp_jsl import functools import json import numpy as np from scipy import spatial import pyspark.sql.functions as F import pyspark.sql.types as T from pyspark.sql import SparkSession from pyspark.ml import Pipeline from sparknlp_jsl.annotator import * from sparknlp.annotator import * from sparknlp.base import * ###Output _____no_output_____ ###Markdown **Build pipeline using Spark NLP pretrained models and the relation extraction model optimized for posology**. The precision of the RE model is controlled by "setMaxSyntacticDistance(4)", which sets the maximum syntactic distance between named entities to 4. A larger value will improve recall at the expense of lower precision. A value of 4 leads to literally perfect precision (i.e. the model doesn't produce any false positives) and reasonably good recall. ###Code documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") tokenizer = sparknlp.annotators.Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens") words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") ner_tagger = NerDLModel()\ .pretrained("ner_posology", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags")
# note: ner_event is defined (and downloaded) here but is not added to the pipeline stages below
ner_event = NerDLModel()\ .pretrained("ner_events_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_chunker = NerConverter()\ .setInputCols(["sentences", "tokens", "ner_tags"])\ .setOutputCol("ner_chunks") dependency_parser = DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["sentences", "pos_tags", "tokens"])\ .setOutputCol("dependencies") reModel = RelationExtractionModel()\ .pretrained("posology_re")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4) pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_chunker, dependency_parser, reModel ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) ###Output embeddings_clinical download started this may take some time. Approximate size to download 1.6 GB [OK!] pos_clinical download started this may take some time. Approximate size to download 1.7 MB [OK!] ner_posology download started this may take some time. Approximate size to download 13.7 MB [OK!] ner_events_clinical download started this may take some time. Approximate size to download 13.7 MB [OK!] dependency_conllu download started this may take some time. Approximate size to download 16.6 MB [OK!] ###Markdown **Create empty dataframe** ###Code empty_data = spark.createDataFrame([[""]]).toDF("text") ###Output _____no_output_____ ###Markdown **Create a light pipeline for annotating free text** ###Code model = pipeline.fit(empty_data) lmodel = sparknlp.base.LightPipeline(model) ###Output _____no_output_____ ###Markdown **Sample free text** ###Code text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily.
He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day. """ results = lmodel.fullAnnotate(text) results[0]['ner_chunks'] ###Output _____no_output_____ ###Markdown **Sample free text** ###Code text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day. """ results = lmodel.fullAnnotate(text) ###Output _____no_output_____ ###Markdown **Show extracted relations** ###Code for rel in results[0]["relations"]: print("{}({}={} - {}={})".format( rel.result, rel.metadata['entity1'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['chunk2'] )) import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df rel_df = get_relations_df (results) rel_df text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . 
The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown 2. Clinical RE **The set of relations defined in the 2010 i2b2 relation challenge**

TrIP: A certain treatment has improved or cured a medical problem (eg, ‘infection resolved with antibiotic course’)
TrWP: A patient's medical problem has deteriorated or worsened because of or in spite of a treatment being administered (eg, ‘the tumor was growing despite the drain’)
TrCP: A treatment caused a medical problem (eg, ‘penicillin causes a rash’)
TrAP: A treatment administered for a medical problem (eg, ‘Dexamphetamine for narcolepsy’)
TrNAP: The administration of a treatment was avoided because of a medical problem (eg, ‘Ralafen which is contra-indicated because of ulcers’)
TeRP: A test has revealed some medical problem (eg, ‘an echocardiogram revealed a pericardial effusion’)
TeCP: A test was performed to investigate a medical problem (eg, ‘chest x-ray done to rule out pneumonia’)
PIP: Two problems are related to each other (eg, ‘Azotemia presumed secondary to sepsis’)

###Code clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_clinical", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated)
loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model)
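###Markdown Before annotating text, it can be helpful to confirm which entity labels the pretrained clinical NER model emits, since the pairs passed to `setRelationPairs` must correspond to those entity types: ###Code
# Inspect the label set of the pretrained NER model
# (e.g. PROBLEM / TEST / TREATMENT tags for ner_clinical).
print(clinical_ner_tagger.getClasses())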
###Code text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output _____no_output_____ ###Markdown 3. Clinical Temporal Events RE

Temporal relations, or temporal links (denoted by the TLINK tag), indicate whether and how two EVENTs, two TIMEs, or an EVENT and a TIME relate to each other in the clinical timeline. There are 3 types of relations; below are some examples, with square brackets indicating the EVENT and TIME connected by a temporal link:

**`BEFORE`**
The patient was given stress dose steroids prior to his surgery. ([stress dose steroids] `BEFORE` [his surgery])
The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `BEFORE` [admission])
His nasogastric tube was discontinued on 05-26-98. ([His nasogastric] `BEFORE` [05-26-98])

**`AFTER`**
Before admission, he had another serious concussion. ([admission] `AFTER` [another serious concussion])
On postoperative day No 1, he was started on Percocet. ([Percocet] `AFTER` [postoperative day No 1])

**`OVERLAP`**
She denies any fevers or chills. ([fevers] `OVERLAP` [chills])
The patient's serum creatinine on discharge date, 2012-05-06, was 1.9. ([discharge date] `OVERLAP` [2012-05-06])
His preoperative workup was completed and included a normal white count ([a normal white count] `OVERLAP` [His preoperative workup])
The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `OVERLAP` [admission])

| Relation | Recall | Precision | F1 |
| --- | --- | --- | --- |
| OVERLAP | 0.81 | 0.73 | **0.77** |
| BEFORE | 0.85 | 0.88 | **0.86** |
| AFTER | 0.38 | 0.46 | **0.43** |

This RE model works with the `ner_events_clinical` NER model and expects the following entities as inputs: [`OCCURRENCE`, `DATE`, `DURATION`, `EVIDENTIAL`, `TEST`, `PROBLEM`, `TREATMENT`, `CLINICAL_DEPT`, `FREQUENCY`, `TIME`] ###Code events_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_events_clinical", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_temporal_events_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setPredictionThreshold(0.9) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, events_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) events_ner_tagger.getClasses() loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="She is diagnosed as cancer in 1991. Then she was admitted to Mayo Clinic in May 2000 and discharged in October 2001" annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!="O")] rel_df text ="On 9–28-92, the patient will return for chemotherapy and she will follow up with her primary doctor, for PT and Coumadin dosing on Monday." annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df = rel_df[(rel_df.relation!="O")] rel_df[(rel_df.relation!="O")&(rel_df.entity1!=rel_df.entity2)] text ="""She is admitted to The John Hopkins Hospital 2 days ago with a history of gestational diabetes mellitus diagnosed. She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine, 12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge. """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df[(rel_df.relation!="O")] ###Output _____no_output_____
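###Markdown Since `get_relations_df` returns a plain pandas DataFrame, the usual pandas operations apply; for example, a quick look at how the predicted temporal links are distributed: ###Code
# 'relation' holds the predicted temporal link (BEFORE, AFTER, OVERLAP,
# or O when no relation is predicted).
print(rel_df.relation.value_counts())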
###Markdown 4. Human Phenotype - Gene RE

https://github.com/lasigeBioTM/PGR

Human phenotype-gene relations are fundamental to fully understand the origin of some phenotypic abnormalities and their associated diseases. Biomedical literature is the most comprehensive source of these relations; however, we need Relation Extraction tools to automatically recognize them. We present the Phenotype-Gene Relations (PGR) model, trained on a silver standard corpus of human phenotype and gene annotations and their relations. It extracts two labels: `True` or `False`. ###Code pgr_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_human_phenotype_gene_clinical", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") pgr_re_Model = RelationExtractionModel()\ .pretrained("re_human_phenotype_gene_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setRelationPairs(["hp-gene",'gene-hp'])\ .setMaxSyntacticDistance(4) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, pgr_ner_tagger, ner_chunker, dependency_parser, pgr_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text = "She has a retinal degeneration, hearing loss and renal failure, short stature, \ Mutations in the SH3PXD2B gene coding for the Tks4 protein are responsible for the autosomal recessive." annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!=0)] rel_df ###Output _____no_output_____ ###Markdown 5. Drug-Drug Interaction RE

In clinical application, two or more drugs are often used in combination to achieve conducive results, such as synergistic effect, increasing therapeutic effect and reducing or delaying the occurrence of drug resistance. However, there is a potential for harmful drug-drug interactions (DDIs) to occur when two or more drugs are taken at the same time or at certain intervals, which can reduce or invalidate the efficacy of drugs, and increase toxicity or even cause death. Therefore, in order to prevent harmful drug-drug interactions (DDIs), medical staff often spend much time reviewing the relevant drug alert literature and drug knowledge bases.
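###Markdown The pipeline pattern is the same as in the previous sections: swap a DDI-specific relation extraction model in downstream of the clinical NER stage. A minimal sketch, assuming such a pretrained model exists under the `clinical/models` naming convention used above (the model name below is an assumption, not a verified identifier): ###Code
# Sketch only: reuses the stages defined earlier in this notebook.
# "re_drug_drug_interaction_clinical" is an assumed pretrained model name;
# check the John Snow Labs Models Hub for the exact identifier.
ddi_re_model = RelationExtractionModel()\
    .pretrained("re_drug_drug_interaction_clinical", "en", "clinical/models")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

ddi_pipeline = Pipeline(stages=[
    documenter, sentencer, tokenizer, words_embedder,
    pos_tagger, clinical_ner_tagger, ner_chunker,
    dependency_parser, ddi_re_model
])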
**ref**: *Drug-drug interaction extraction via hybrid neural networks on biomedical literature*
https://www-sciencedirect-com.ezproxy.leidenuniv.nl:2443/science/article/pii/S1532046420300605

*[embedded figure omitted]*
+Ft11VXryjWGkCPwdsUVV6RtttkmzTfffLXrwG233VYL/q222mpp2mmnzfcR14Io9/bbb9d1Ia5de++9d1psscXysptsskldmbvuuistssgi+fqoK2ZMbLw+xe2Azz777PTNb34zTT/99Hn5uOVwq+WVV15JW265Zfr4xz+el4/64+/Tn/50Ov/88wds2ot+D6jUGwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBMSYgCNjkgFcJAl5zzTVp9dVXTzPPPHMefInbBq+11lrp6KOPzmuPmby+/vWv50Gg6aabLkUwKGb6iwBOFpYp+7j77rvndRefRKAwCw9OPPHEaZlllqkFeeaee+58HxEyitnCLARGqkDVIGD0c4011sjPgTjPHnjggbrud3PuFGcCLHvuZuWKQcAI0M0wwwx17Ytys84664D3su3jsfE60Kw9jUHAs846K80444xp6qmnzuuOIGC8V/yLa0RxX9nzVkHAP//5z2neeefNt4ngX8zEGNfCCCFn26+//vp5CLNX/a47kF4QIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBMaggCBgk4NeJQiYbf5///d/edAlwjQxm1azJWb2ikDMrrvuWlsdt9yMsEz8RRgnC8t85CMfSdttt12KGQfPOeectNVWW6VJJ500Xx/lfve739Xt4plnnsln7orZwy655JJ8/RtvvFELBWb1N4aD8oKeEBgBAt0EAXfYYYe682fcuHF5T7s9dx555JHa7XqXW265vO44VyP42+xv6aWXzssVg4Bxfja7DnzoQx9KG264YTrqqKNSBPgi+FecGTCex4yi2XLvvfemyy67rBY67nSuH3744Xlbmt0aOGYbjLouuuiivFzU2SwIGH4f/vCH83Lf/e53sybVHuO25hF+ztq07bbb1t7vVb/rduYFAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGAMCggCNjno3QQBX3zxxTTllFPmQZff/OY3A2p+8skna2G+SSaZJD322GMD1hdn6LrxxhsHrI/bdBbDgksttVRdmY033jjf/yGHHFK3Ll6ceuqp+foIFT711FMDyniDwEgQ6CYIGGG6LIgWj0ceeWTe1cGeO+uss05e93777ZfX2/jkW9/6Vl6uGAQsliteB6699triqtrzI444Iq8j+hFhxMZll112ycu0Cv12CgJmdUYgsOjWLAgY+8jKfOYzn6kLJ2b1nHbaaXmZmK30/vvvz1bVHnvR77oKvSBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjCEBQcAmB7ubIGBU841vfCMPunz5y18eUPOee+5ZWx+3BG62FIMwN910U7MiqRhYiuDNX/7yl1q5CANFuCbei8e45Wbj8txzz+VlolzclthCYCQKdBMEPPfcc/PzM8b/HnvsUet6L86doQoCNrsORIg42p/9xe3JG5fhDAI+9NBDddeVCCo2WyJQGLOlZu2OmROLS6frX5l+F+vznAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwlgQEAZsc7W6DgFdccUUecokw3sMPP5zXHrfvnHPOOWvrL7zwwvz94pNOQZgoG7cbzYI08Ri3C40lbhOcvT/55JOnzTffvOlfViYejznmmOLuPScwYgS6CQKecsop+TkS4z/qiKUX585wBgGjzbPOOmvelz/96U8DjttwBgHj1uXF68rjjz8+oD3ZGxtssEFe9lOf+lT2du2xzPWvU7/rKvSCAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIDCGBAQBmxzsboOAH3zwQZp33nnzoMuuu+6a137BBRfU3p9nnnnS+PHj8/eLT8oEYd57773a7YWz4M3PfvazWhX7779/vt9sXafHgw8+uLh7zwmMGIFugoAxA2DxnMgCub04d4Y7CPiRj3wk78uEDgLutddeeVvitucRem617LzzznnZuJV6XDOzpcz1r1O/s7o8EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGmoAgYJMj3m0QMKraZ5998qDL7LPPniK4F0vcDjhCSBE6arWUCcLEtjPNNFO+jwjhxLLpppvm7y255JLp6aef7vj3+uuvt2qK9wn0tUA3QcANN9wwP0fiXIzzPJZenDtjOQi4ySab5K5x6992S/H6GLOmvvbaa3nxMtc/QcCcyxMCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQJ2AIGAdx79eDCYI+Pe//71uxr647WjcKjNmyppiiinSs88+22SP/3qrTBAmSk433XR58OaEE06obfyDH/wgf2+RRRZpuQ8rCIwGgapBwJh57r//+7/zc2TaaafNQ7q9OHfGchBwxx13zF0nnXTStjMC7rbbbnnZaaaZxoyAo+Fk1AcCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgLwQEAZschsEEAaO6bPa/mHUsnu++++618Ms3vvGNJnv7z1tlgoDPPfdcHqSJ+v/85z/XKjj88MPz9yNw+Pzzz/+nYs8IjDKBqkHAk08+OT8/4ryJmemypRfnzlgOAh511FF1tk888URGO+Bxo402yssuvvjidevLXP/MCFhH5gUBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAIBcQBMwp/vNksEHA3/72t3nYJUJHMUtWPF599dX/2UmTZ2WCMEcccURed9T78ssv12r605/+lL8f+/r5z3/eZA/eIjA6BKoEAV999dUUt+mO8yL+5pxzzvTmm2/mEL04d0Z6EHCJJZbIPRqfvP3227ld+D366KN1Re6888669REMbLbEbdJnm222vOx3vvOdumJlrn+CgHVkXhAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEcgFBwJziP08GGwSMwEsxsBLhmYUWWug/O2jxrFMQ5q233krzzDNPHqSJW5oWl4UXXjhfN+uss6a77rqruDp/HrdJPe2009IVV1yRv+cJgZEkUDYIGOfycsstl58XcS7G2G9cBnvujPQgYAT0xo8f38hSe90pCBiFll122dz4s5/9bN0tf7NKzz///LxMzFr64IMPZqtqj52uf1GoeF2NAKeFAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIEDgXwKCgE1GQgToIjCU/V144YVNSrV/a6eddsq3j3piJr9OS7sgzIsvvpi++MUv5nXOMcccKWY6Ky6///3v08QTT5yXmXHGGdOpp56annrqqVqxv//97+nMM89Mn//852tlDjzwwOLmnhMYMQL77bdfPs6nmmqquna/8sor6aabbqrdknvKKafMy80000zprLPOqiubvRjsubPGGmvk+9lrr72yagc8xu3Bs+vKzjvvPGB9hO6Kbb7qqqsGlIk3Zp555ryeZoHe7bbbLl+/4YYbNq3jlFNOyct
Em4499ti8XMw0GoHhWF566aW6cn/729/yctmTuEV5HIesb9tvv322qvZ4//3317V5l112qVvfq37XVeoFAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGAMCQgCFg72vvvum1ZcccX0sY99LA+0RLBllllmSV/5yldS4wx8hU0HPI3ZrrJQzDTTTJPfwndAwcIbxSBg3KrzRz/6UTrggAPS+uuvX3dLzfnnn78WdCpsmj898sgj8/1m+4/HGWaYoe796NPFF1+cb+cJgZEgsP/++6cVVlihLnQW43vRRRdNMatfBGSL4z57Hud1Foht1c9uzp24BffGG29c156YtW6LLbZIe+65Z76rCAJ/+9vfTnEtyNoUYb4od9JJJ9VuVRzhvaWWWipfH+ViBtBtttkm/fOf/6zVdfDBB6c111yzrsynP/3p9P3vfz898cQT6fTTT0+bbbZZmnzyyfMysc9NN900HXLIIXl74sntt9+el8naFLP5LbbYYrUw4kUXXZS23Xbb2utsfTx+4QtfqO3vL3/5S1195513Xn4b9Ci35JJLpj322CNtsskmKULJWR3RvuzWzPHYi37XNcQLAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMAYFBAELBz0VVZZJQ+rZKGV4mOZ2/sWqkuLL754rb4I+5RZikHA4n6z53G73wgHZiGaVnVed911tdkDi7MDRh3xOkKEEV56/fXXW23ufQJ9K7Duuuu2PUdjnH/oQx9Kc889d23myx133DFdeeWV+ex2nTpW9dyJsFt2fjY+zjvvvPnuirN5NpaLGfsaZ91rLBMhv1iy2Twb18frmMl08803b9meCPA1Lt/73vcGlI8ZCddee+10yy23DFhX3O8ll1zSWF267777UnF2xKx8XHs+9alPpXPPPbdum171u65SLwgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECY1BAEHCIDvpbb71Vm0kwgjAx81aZpRgE/PWvf53GjRuXTjvttBS3/nz22WfLVFFXJm4dfMMNN6QzzjgjXX/99QNuJVxX2AsCBHKBsXTu3HvvvbWA3tlnn127TnQKGudIbZ689tprtSBh3Ir86quvTnG7ZgsBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMDQCQgCDpFtBPkiBNhsFq5WuywGAW+66aZWxbxPgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgRyAUHAnKJ3T9577720wAIL1IKA55xzTumKBQFLUylIgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAv8WEATswVA4+eST01xzzZUOPPDA9PLLL6f99tuvFgJcZJFF0vjx40vvQRCwNJWCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIPBvAUHAHgyF//qv/6oF/+JWwNNMM03t+SSTTJKuvvrqUrXHDILjxo1LsU3UEX+/+MUv0iuvvFJqe4UIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYOwKCAL24NgvuOCCeYAvC/IdcMABpWp+880306STTjpg+6yeDTfcsFQ9ChEgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDA2BQQBOzBcb/uuuvSiiuumGafffa0/PLLp/PPP790re+8807aaqutWv4df/zxpetSkAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGnoAg4Ng75npMgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNIQBBwFB1MXSFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsScgCDj2jrkeEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMAoEhAEHEUHU1cIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYOwJCAKOvWOuxwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwigQEAUfRwdQVAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBh7AoKAY++Y6zEBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjCIBQcBRdDB1hQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGnoAg4Ng75npMgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNIQBBwFB1MXSFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsScgCDj2jrkeEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMAoEhAEHEUHU1cIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYOwJCAKOvWOuxwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwigQEAUfRwdQVAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBh7AoKAY++Y6zEBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjCIBQcBRdDB1hQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGnoAg4Ng75npMgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNIQBBwFB1MXSFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsScgCDj2jrkeEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMAoEhAEHEUHU1cIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYOwJ9E0QcKKJJkr+GBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDAxmDIy9GGBKgoACiAKYxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDIyaMSAIOAEFBpPgtK0EsDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGQIyBsbiMzV6PxSOtzwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEC
BAgAABAgQIECAwKgUEAUflYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAY+VI6ycBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjEoBQcBReVh1igABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGioAg4Fg50vpJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNSQBBwVB5WnSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsSIgCDhWjrR+EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCoFBAEHJWHVacIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKwICAKOlSOtnwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUflYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAY+VI6ycBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjEoBQcBReVh1igABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGioAg4Fg50vpJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNSQBBwVB5WnSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsSIgCDhWjrR+EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCoFBAEHJWHVacIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKwICAKOlSOtnwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUelSWZsAAAgAElEQVTlYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAY+VI6ycBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjEoBQcBReVh1igABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGioAg4Fg50vpJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNSQBBwVB5WnSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsSIgCDhWjrR+EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCoFBAEHJWHVacIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKwICAKOlSOtnwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUflYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAE+BIf/DBB+nWW29Ne+yxR9poo43SueeeOwFaYZcECJQVcM6WlVKuVwIvvfRSOuOMM9IWW2xR+5yIMWghMFYE3nrrrTRu3Li07bbb1sb/ww8/PFa6rp8E2grccccdaa+99qqdF6eeemrbslYOjcBDDz2UDjvssLTxxhunQw45ZGh20oe1+l7S/KDE59VFF13k86o5z4h/138DjvhDqAMEeibgetAzShX1UMD3sx5iVqjK9aAClqIECBAgQIAAAQIECEwwAUHANvT/+Mc/0iOPPJIef/zxNqWqrfr+97+f5phjjjTRRBPlf7vvvnu1SkqWvvDCC9Pqq6+ettxyy/Tiiy+W3EoxAiNXwDk7YY/d888/n3bZZZe08sorp3POOWfCNmaE7v2aa65Jyy67bJpsssnyz4j4vBg/fvwI7ZFmEygv8OSTT6a11lorTTPNNHXj/6abbipfiZJjSmCsfO78+Mc/TnPNNVfdebHddttN0GM91v4746ijjkoLLrhg3TFYZZVVJugxGI6dj/XvJa3G+aOPPprWWGONNPXUU9eNCZ9XwzEqh2cf/n+b4XG2FwIjQWA4rwdlPMbK998yFmO1TJXvZ8ZLb0dJv10Petu78rW1+o5cvgYlh0rAsRkqWfUSIECAAAECBEaegCBgi2P29ttvpxlnnDH/P/Zj9odeLAsssMCAH/KGIgj43HPPpSmnnDJvf8wqZSEwmgWcsxP+6P7whz/MrzmTTDJJilCPpZrAscceW/uMmG666XJLQcBqhkqPXIHbb7+9Nv4bA0+CFSP3mA51y8fK587iiy8+4L8ftt9++6HmbVn/WPzvjJjFvfHaNBaCgGP5e0m7cX7DDTf4vGp5hRgdK/z/NqPjOOoFgV4IDNf1oGxbx8r337IeY7Fcle9nxktvR0i/XQ9627tytbX7jlyuBqWGSsCxGSpZ9RIgQIAAAQIERqaAIGCL4xazWQ3lrH1f//rX8/qHIgj4xz/+Ma8/+rHeeuu16Km3CYwOAefshD+OyyyzTN115957753wjRqhLbj//vvrLM0IOEIPpGZ3LVCcZUkQsGvGUb/hWPvc2WqrrfLPhgkZBBzL/51x0EEH5cdgLAQBs4vIWPxeUnac+7zKRsnofPT/24zO46pXBLoRGOrrQdk2jbXvv2VdxmK5Mt/PjJehGRn9cj0Ymt61r7Xsd+T2tVg7FAKOzVCoqpMAAQIECBAgMHIFBAFbHLu4pW4xCDj33HOnDz74oEXp6m8P9X8wvvHGG/ntq6aYYor0hz/8oXojbUFgBAk4Zyf8wTrxxBPz6+ZSSy014Rs0gltQ5v/QrdK9s88+O2299dbp2WefrbKZssMgMFzHZrj2U4asTFsEK8pIjt4y8T02rlm//vWv23ZyrH3uDEcQsMz5OVr/O6PMuOuXIGCZtrY9eSqu7PX3koq7nyDFy45zn1cT5PAM2079/zbDRm1HBPpeYKivBwFQ5nvYWPv+2/cDYwI2sMz3M+NlaA7QcFwPhqblg6+17Hfkwe/pXzWUuS72al/9XE8Zh+E+Nv3spW0ECBAgQIAAAQIpCQI2GQURlJhsssnyQEsWCLzyyiublO7ureH4D8Z33nknxb8EeuaZZ7prpK0IjBAB52z/HKhHH300XXPNNT0NTvdP74avJWX+D92yrXn//ffz2xnGsbH0j8BwHZvh2k8Z2bJtEawoozl6y/zqV7+qfQ9fYYUVOnZyLH3uDHUQsOz5GQdlNP53Rplx1y9BwDJt7XjyVCjQy+8lFXY7wYuWGec+ryb4YRrSBvj/bYaUV+UERpTAUF8PqnwPG0vff0fUIBnmxpb9fma89P7ADPX1oPct7m2NZb4j92KPVa6Lv
dhfv9ZRxWG4jk2/WmkXAQIE/j975wFuT030fxUQkK6ISC8iKEWKgFIFC6BUKS/SBSl/ehVFehF8QUClCNKLdKSIgKI0AUFFuggoRUGlSdVXUPf/fFZnnZOzJe2ce+79TZ7n3uzZzSaTbzKTyWQ2MQQMAUPAEDAE/ouAOQL+F4vq6vjjj+9zAsQZcPPNN6/SpF5M6hPGVPzsfUNAI2A8q9Gw64mAgK9B16eul112WTWmmSOgD2LDSzOsthlWOT7I+dJijhU+aE7cNIsuuqi3I+DERaG/ZoN2BPTlz37KJsYdn343Ko6APrTmbJWceklOukYhLxuvRqEVBkeD2W0Gh63lbAiMNwQGLQ8mdT1svPWHUaDX9LOxa4VBy4Oxq9lolWxy8d/tYTiMVr80agwBQ8AQMAQMAUPAEBgvCJgjYE1LLbnkkuXiI5O6DTfcsHKgwMj/yiuv1LzRfevZZ58tbrjhhuKmm24qHn300WKDDTao8j3ggAO6M4hI8cYbbxQ333xz8f3vf7/461//2pkDXwzde++9xXXXXVfcc8893nX9v//7v+LnP/958YMf/KD4/e9/31mOJTAEciNgPBvGs+D/hz/8obj++uuL2267rUBW5ApPPvlkKXOQIW3hj3/8Y1k2NNx9993F888/35a88xlfR1IX5N1f/vKX2vSk+elPf9qahhdjaUvB9M0336zk7y9+8YvijjvuqMYIHNH/+c9/1tap6ya7ZS677LJVXiGOgNB03333FT/84Q+Lxx57rAC/3OFf//pX8cQTTxQ/+9nPimuvvbYsz2e8aqKDHXCh9ze/+U00ZrF5hOIV2zYvv/xy8cADD1Q6Bf0OHJtCbDnkl9Kn6+gJoaXJseKFF14odx2lndGtQsIg9ZW//e1vxUMPPVTqULfeemvxyCOPFPSJrpCLpmeeeaaU6U3y76WXXirlH3ISWn3CWPHn2WefXcksnx0BfccdqTM8zi7fyEN2xxhUQN+/8cYbC3i2LYTwWYgj4KBlReg8I1RGtmFGXnfddVfZhk19vu39ume+/a7JEZBx+sEHHyzHMuZFbXJZlx/DZ7606nJCr8FY5oWxeklI34a+WDkaO4fVmDTxq08/rxuv0GXoB8yrGbNDQsy4EIudD12hsqQtz0HwrpQXw0vyro4nst2GeobypcbG5zqnrG/iSx862tL45BvDh1JmrEzKiR2yi/kV8rsuwC/Ml5iLMxeLDYPQP4WWlDaQPFJlToo8CJWdIfMk6heq/+bsX7HzZWmXkNiHX8kvVra9+OKLxZ133lnyAuN1iM0jtI3b6u1bT9oxRj8L7S9CaywPpeAqZXfFucb9rnLkeYo8kDyI0dk4vQknTuqgA7bIa665ppzn6Pv6+umnny5tmvTbrjDIdvDRkZvo85UhoXJRlxcrE3QeXXyZQwb42CZCcQhtm5zjQ6zM0LjbtSFgCBgChoAhYAgYAoZAPgTMEdDB8v77768WH3FsYwImRwMTn3baac4bzT+Z1K2//vrFXHPN1ZOHzo9r7Qj4oQ99qHjnO99Z+8eisoTPfe5zfWmWWGKJctH54osvLjbZZJNihhlmqMptW+hkkXLppZcupphiiio9dE055ZTlLogsuteFK664olh88cX7jlFeaKGFivPPP7/uFbtnCGRHwHj2LRXfdvEsRqCtttqqmGWWWap34PVpppmmdHqOdcZDNh166KGFOGSS55ZbbtnX1hi5zjzzzGLeeeftKZ/0/C288MLlgkTfiw03WHTH8W+bbbYp3v3ud1d5aoOYpNl6662LmWeeuUrDwogOsbSlYAptOBTgqDfVVFNVtAkeOiZtaPjSl75UTD311D35Tj/99D1jx5FHHtmXLWMfNLnv0r923HHH0lGy76XAGxg4d95552L22WfvoY86v/Wtby3HsDonL5zNTzjhhOITn/hE8a53vavYaKONSiebww47rHj/+9/fk9fcc89dOoe6pOXIQ+cZg1dM21xwwQXFxz/+8b4xF8ze+973Fpdeeqkmq7yOKSelT/cRoG6E0qIdK+BXcF5ttdWKySabrGrnySefvOR/FlzbwiD1ld/97nelrqTpEt6dbrrpin333beWtFCaMGSfeOKJPX0f/ezLX/5yT99/+9vfXqy33nrVxxw4u6277ro9Oh763t5779238CCEjhV/sqiFHiz4EdPGrl6MPPQdd6ROr776arH77rv3jX+U8YEPfKC46KKLJKl3DB04Vn7xi18s84BOdHDK2njjjat6vO1tb+tzBIrlMx9HwEHKCgz7ofOMGBlJOXxAxTgx33zzlX3gl7/8Zen4t+aaaxbTTjtthS9tSDocBmJCSL8jf9cRkIWZr3zlK33jGfO/Nuf7GD4LpTUUjxx6SUzfjpWjIXPYEH7lY5aQfq7HK+bgiy22WAHfa1nGvTPOOKO1SULHBTKLxa6VkP88jJElw+RdqUMML8m7Ek90u00MXwo2vnGMrA/hyxCH2pR8Y/hQMAqRSfIOcQx28BofNeyyyy494ySOfauvvno5vxcZxNz7wgsvLIvEaeGII47om4/PP//8pTO7povrYeqfUnZIGwxC5sTIA6GdOEZ2+s6TQvVf6InpX7nnyxof9zqFX2NlG7Yf2sm1H8Az2IzQKxmz9R90Sohp45R68m6M3cinv+TkoVRcBd+uOMe431WGPI+VB8wPTj/99GKdddYpbZVLLbVUmSUfv2q7JPYsnDSPPvroYrnllittYfTDNdZYQ0go46eeeqpM89GPfrRKg6yvC4NsB/qLj46cQ4b4ykWNQahMiOXLGBmg6QyxTfji4Ns2mo6Y8SGnzNC02LUhYAgYAoaAIWAIGAKGwGAQMEdAB9d99tmnNNrPOuus5ZeAfA3ItRjRVlhhBeeN+p/nnHNOj/GNRSwWBbfbbrti5ZVXLp3sJE/tCIjBYZlllqnKIw3OMSwqMNmVcPjhh5cOAZIHk8pdd9213JFP7um4zhGQL4RwoJF0OFVsu+22pYEBBz+5z0RTB3Y4+MIXvlA9xwGByeunPvWpngV6HDMsGAKDRsB4tptnaQMWJuaYY46Sb3G0WmmllUq5grOU8DpyCiNUaNByRPKqcwREJshzHN8+/elPF5tttlnpxCT32bHPN+AEI+/pWDsCslOQfibXriNgDG0pmGIYxJlN6GERmTbByYPdaHW7kEYbnn3xwTFzpplmqgyF5INjEvfk76tf/WqVHU4UjCP0D9LiaIFD0W677VZ88IMfrGjlPjsWxgZ2ppBxFSdUxg/KXWSRRaoyKB88dDjkkEN6npOGP+oi126MQ7x2Zs+Rh9CUgldo2+gxl/EZh0xwAz9dZ74s1yG0nJQ+rcutuw6lRTtWuE4/us5c03/qwqD1Ffh4ttlmq9qARdTPf/7zxSqrrFLxEXyuQwxNRx11VJWfrjuOvfq3vv7Yxz5WLgTre+71cccdp0krr8eSP3HGg5912+MIKPJKYuSh77hDpdiNW3+Us+KKKxbIfBwDRd4RszATEtB/XUxPOumkAr3Zvc/OtxJS+KzLEXDQsoJFHbdu/K6bZ8TKSJwUZpxx
xr5ytMN/HQ16PiVY+8Qh/Y78tCMgdOj+6tKFQz1OFG6I5bNQWt1y237n0Eti+naMHI2Zw4bwK7LAbUt+1/VzMHX7AHKLj25Evui8DjzwwL5miBkXyCQGu77CG27EyJJh8y6kx/KSrvZEtttQzxi+1Ph0XcfKevIN4Us9jnbRFJNvLB9CS4xM4r1Y7NhNio8PtGzhuu6epOFDEfSetnnTe97znp5do4epf4JHaBsMQubEygPpkzGyk3d950kh+m9s/8o5XxZc2uIYfiW/FNnGR6LCG9jAjj322NJBtk3XBE9CbBvH1jNFP+vqL7l5KAXXtj6in+UY93V+bdex8uDkk0/u+yAEOwF6vPvx4Nprr13aMqQ/Suw6AmKTk2cSNzkCDrIdfOaCuWSIr1yUNoyRCTF8GSsDhM5Q24QvDj5tIzTEjg+5ZYbQY7EhYAgYAoaAIWAIGAKGwOAQMEdAhS1Of7Kgy04pEriWiRYxW4O3hVNPPbVKz64r7ODiBhw9JE934YqdbXDKk+d1C7XkhzGUNCyWYTQjoMxzXJ67k2HdwoWenLFAytEgEnAOkfKJmVBIYHcbefbZz3622nWG5+zOJc+Y4HZhJXlabAjEIGA868ezHDUgO4TiUMMxZRJef/310uFK+LbOgU/SNsUchYfc0XLNzQcZIrujIBu0wyGLGbJLYYgjIF8iUi5/7FYnddCOgDqNdprSjoAxtKVgSn0xOAu97L7nOgrw5bE8J45xBJT20otCbbsTadnOIpHeeYOvijfffPOKJsYovjCPCYwbUjccyHW45JJLqmek0e102WWXlU6JOOTL+8Q4KR588MHlTrR8dY0DlH7OIpaEHHlIXjnw8mkbjmfW9bn99tuFhAL+1R8PNBmDfcpJ6dMVQR4XPrSQjXaswJlm0003LXeD5Ovr/fbbr+c5/I9R0g26jQahr/Bxh7QNH1Lo8PWvf7185joCxtB05ZVX1vZ9nGfRIc8777zi+OOP79uRDFnL7mk8YycYdg9kIVhoZhcMN4wlfwot3/zmNysam44G9hl3yO+5557r2TGWL+p10HiwWBYS9thjj7JdZPwSXIlxutbOzeLAkMpnbY6Aw5AVIfMM3ddDxhTkGotd/OFQJbiKHGBexUIafV87CXMdcpyb29Y+/Y53XEdAPtg66KCDyjHolFNOKXA0FZqJ4Ts3xPKZ5ONLq6TvinPoJbF9O0aOxsxhQ/gV3cN3Pg22erz6zne+U+1OybjEMc7IA90nLr/88p4m0bwSMlbFYNdTcMOPWFkyFrybyksT3W4Ty5cNXaP2tu6/IbKezEL4UsbRWiKcmzH56nqE8CFFx8gk3tNlhmCH42HTOMlHdt/61rdK3VDP3UQGzTnnnAUfUiKbkFfMQ+UZ8Xe/+90KzWHqny4ePm2QW+akyoNY2VkBXhQ9zpx183Zf/dfFM6R/5Zwv67o1Xcfwa4psO/fcc6s+j+Of/mjQbUPsW8zp0Luwh7jPQ+blMfVM1c+6+ktOHkrBtalv1N1PHffr8qy7lyIP0CGR0Tj5afkqpzBxihI8yTPSPPzww6XeiT1B0ruOgJJGzwXrbD+DbgefuWBuGeJjP4qVCaF8mSID6GcptokuHHzaRvp6rP6RU2YILRYbAoaAIWAIGAKGgCFgCAwWAXMEVPhyjIZMulDuJeAMIfeJ999/f3nUF7/yyiuVQwtpOQazLmiHGdcRkPTsDChlsshfF9gRiDQcB+IGJgDyPrHrCEj99OLnzTff3JOFdgRkYU0W2Jl8ynt8OUV93cCXblI2k1QLhsCgEDCe/S+yTTxLii222KLiyWOOOea/L/3nShuLcBx5+umn+9L43MDBQnjfdQRk0VOeseDgBo4n53mII6DOQy/CakdAnUacISlHO5jF0JaCKY5pggWOOHXHmg7bERBHcJHt0OYuUoMji04coSm0s8ttTNBf2ruOgJSh2+mss87qK0I7YXBUrhtwmtRl0FZuSM0jF15dxjzoxulFMCfWCw48/9rXvlY9n2eeedyqlr99yknp07WFNtz0oYVXNU/feuutfblxjInGhd0BdBiGvsJRj0KDu5ucfJChHQFTaerqt8hPoYe47kMSnGZ1GhaYdNC8M1b8GeLk1DbuUC99RC87NbpBy726nbrc9HW/P/zhD/dgygK74MpCCHo+xxQRUvlML/6wwKTDsGQFZXbNM3LJyC45gLOD7s8ckxobfPtdFx+yUPyRj3ykogtnXDek8pkvrW65Tb9z6CWxfTtUjsbOYaXuIfza1c8lT91P63RQaNZOrXq3/ZRxIRQ7obcrziFLNCZ1Y3gu3k3hpYlut6GdY/myq4/I81yyPoQvpWyf2DffFD6MlUm5sOviNb0DPY4n7sdl6Cs42stYinOAG7rGvRz6Z0obQG8XDl0yJ4c8yCE7fedJXfpvjv7V1e4+c263L7X99uVX8kiRbehl0t/rbPF6bHU/EsrRxiH1zKGfgVdXfyFNKg+l4NrWL9xnKeO+m1fT7xzygLz1Rgn0OTZgEKd2HMKYlzNXlKB5znUElDTM76X/1jkCDqsdfHRkXZ9Yux319pGLKTKBMnz5MlUGpNgmfHCgLl1tk2N8oJxUmUEeFgwBQ8AQMAQMAUPAEDAEBo+AOQIqjDl+lwnVEkssoe7++1JPCpi8uQY0eQFDgkzKcJhoStflCHjXXXdV+ZCfu/MSOwCKs8T9998vxVdxl+Kvt4rnWGE33HbbbeUuYWxBztfCErbaaquKrr322ktu98TaiVEvdPQksh+GQAYEjGf/C2ITz7IoLkeTEYtT73/fLIpnn322SoO8wcEyJrQZGE877bRKdnAssOu0wwICRrDnn38+pugeI0TdIiyZisykjtoRMJS2FEz54lzToXdd0BUftiOg3jGCY6HYSbEuaGMei9ruToZ177j3rrrqqtLoyQ5KOKG6QRsvKc8NmoYmAynHs8pYXLejWGoeufDyMebBn0ceeWS54xSY6R16wQZekrpijKsLXeWk9Om68truddEi72rDog9P0690GIa+Isetgz96ndb54HVkmnaASKWpq9+yG5rGl0VPN7iLtU899VRPklHgzxAnp7Zxh7oJbxC7fYSK77TTTqWDM/oEOwnEBD1HqFtAlzxz8FmbI+AwZIXUpWuekUtGdsmBJ554oqeN63bPEZq7Yt9+18WHlKMX6ZhLuSGVz3xpdcut+51DL0np26FyNHYOK3X35VfSd/VzybOrn5JOy3/k0WOPPVa+ru+Hzq1DsRN6u+IcsqQLk1y8m8JLE91uk8KXXX1EnueS9SF8KWX7xL75pvBhrEzKhV0Xr+nxiB3d64J2FqQ+buga93LonyltAL1dOHTJnBzyIIfs1Hp8m07Tpv+CR47+1dXulNM153b7UttvX35NlW2zzjprpTuef/75fSRpXZsPO3TI0ca+9cyhnwntXf2FdKk8lIKr0OkTp4z7PvmTJoc8IB/tCAhvd31w7cNzWqbXOQIOqx18dGSf+vjIkC65mCoTaCtfvkyRAam2iS4cpH93tU2O8YGyUmWG0GuxIWAIGAKGgCFgCBg
ChsBgETBHwP/gyxdfosSuttpqxdVXX93zpx33MN7fcMMNtS2jj+g77LDDatNwU+fHJLMusF28LGCy+K8Dznk8W2655fTt6rpL8deTnLojq6qMnIvFF1+8ooldvfhC0v1jwUvoxqHEgiEwCASMZ/1QveKKKyp+5CgKl1/lt/As8cknn+yXuZOqzcCIcUaXwc4DODdxtECOIPKbMnychrQjYChtKZjqXczYiarpGMNhOwJytKG0D8a4psDXo5KOmGNacwUc3s8444xiwQUXrMpgtzs3+BgU9Ze6HBXshtQ8cuHla8xz6ef3a6+9Vuoiu+66a4UXfFUXuspJ6dN15bXd66JF3vXh6QUWWKCqO8eX6TAMfUV/eQ4/LL/88gUfcjSFVJp8+u3KK69cYVLnCMgOpJqH9THtTXRzf5j8GeLk1Dbu6MUXHOHRjQcRtE5d59wsZebgM7046e4IKOW4cU5ZIXl3zTNyyUgfOaB3BrnpppuExODYt9/58CELy8JndR+YNRHny2e+tDaVo+/n0EtS+naoHNX8FjKHlTrr99v4lfRd/Vzy9OmnHBMnfYL4uuuuK19PGRdCsRN6Y+MQWeKDSS7erauPDy9NdLtNCl/WYVp3L5esD+HLOjqa7vnmm8KHuowQmZQLuy5eu/HGGyvZ0+QICN0in+rmgT7jXqr+mdIGtH8XDqRpkzk55YHbH0Nkp+88qU3/pfwc/cun3bvm3C4Wbb81L7WNz6mybfbZZ6/6OzuyuUE7ydTZEdz0/A5pY9965tDPhNau/kK6VB4aBK5Cv0/sM+775EOaXPJAz0U5Oakr+PBclyPgsNrBR0f2qY+PDOmSi6kygXbx5cumNvSRAbo/xNgmunAQ2rraJsf4QFmpMkPotdgQMAQMAUPAEDAEDAFDYLAImCPgf/DF+UCMXz7xZpttVtsy008/fZXPhRdeWJuGmz6OgPqoz/e97309ea244oplOXXHJpKwTfFnxxp9/Me3v/3tnrybfrjv+eA0zTTTNGVn9w2BJASMZ/3g03LEh2dJc/TRR/tl7qTqMjBqhyWhBYM8xqy643Gd7Ft/+hgh9E582hGQjENoS8H02GOPrcYInL2bwjAdAdn9Tx+P6Tqeaxo5PkrajhgsUgK7pe2www7lMSk6X7mOdQTEmVXyqDPg+xglm/LIiZevMU8w5nhRHEA4WkW3mdQ11hEwpU8Lbb6xb519eJqjtaXu2hFwWPrKAw88ULz3ve+taBBaPvOZzxQcFadDDpp8+i39XeiocwSEDnlO3OYIOFb8GeLk1Dbu6H7N7giDCr4LB5oe3QZt1+547OsIOChZIRi2zTNyykgfOcBHR4LhqDgCstuv0NTlCEusG6AAACAASURBVBjDZyE8Im3WFOfQS1L6dooc9Z3D6rr78ivvtPVznadPP2WBWvoEMR/1pY4LIdhpekOuY2WJDya5eFfqE8pLE9luAyYpfCmYtsU5ZX0IX7bR5D7zyTeFD913fWVSTuy6eI1xUWRPkyOg3v0q1hEwRf90cRR622LXztiFA32jTebklAeUFSs7fedJbfpvrv7lM+9omi+7vOjz24dfySdVtm266aYVT7g7/v3rX//q6SdNH+2ntLFvPXPoZ4J7W3+RNKk8lAtXoccnDh33ffIkTS55oB2/huUIOKx28NGRc8mQLrmYKhNoc1++1H0oVM5rOmNsE104CG1tbZNrfKCsVJkh9FpsCBgChoAhYAgYAoaAITBYBMwR8D/4yhes7GbHV4F1f/qrMBRe94jNP/7xj5VBAaMVX982BR9HQLYNf9vb3lblKY4r4iAy44wzFhwRXBfaFH/3WIxzzjmnLou+eywWa2McX10988wzrX+xx6z1FW43DAEHAeNZB5CGn/qoBY7q7uJZnvM1Y0zwMTAef/zxPYY1kSnzzjtv33HBITT4GCHaHAEpy5e2FEy33377So7Sh5uCyHnBhwWS2NBlMHr88ccrmijvG9/4RmtRk002WZV+7733bk3b9JCv/BdZZJEqHxZmdt9993LcXGeddar7o+gImBOvrrYR/OBLjswSJ36+IMYZ8NRTTy13FZJ+EusImNKnhUbf2LfOPjzd5Ag4TH2F/rDSSitVfVbaAv1tl112qY4LzkGTjzE9ZSFW2nCs+TPEyalt3NHHzM0333xSveyx78JBDj7rcgQctKwQ8NrmGTllpI8caFvYF3p9Yt9+58OHPo6AKXzmS6tPvXPoJal921eOxs5hNQ6+/Mo7bf1c5+nTTzniT8YHYvpIjnHBFztNr891qizxwSQX78bw0kS329DGqXzZ1U9yyvoQvuyiSz/3yTeFD2NlUk7sunhtPDgCprSBtHcXDqRrkjk55UGq7PSdJ7Xpv7n6l4++MxaOgKmyDecs5tKMxdg1+C1B14e2eOyxx+RRFae2sY9corAc+pkQ3dZfJE0KD5FHKq5Ch08cM+775EuanPJgLBwBh9UOPjpyLhnSJRdTZQLt7suXpI2VAam2iS4cpI+3tU2u8YGyUmWG0GuxIWAIGAKGgCFgCBgChsBgETBHwKIoUITFEHDJJZc0In7NNdf0GPC/853v9KTF6U0b+DkWqin4OALyLgv9kicLygQcL7i38847N2XfunDBkaqSJ3HbEca6gJdeeqnnve9973v6sV0bAkNDwHi2+dhxtxFwrhJ+/9CHPuQ+zvrbx8BIgc8991zxxS9+sZh22mkr2qARw8Zvf/vbKJp8jBBdjoC+tKVgyu530h4ca9oUhukIiOOn0ETctiOge6woxr2QwFf27Kgr5fG19VFHHVWw06CEDTfcsHo+io6AOfHyMebdfvvtxcwzz1xhgqPX3XffLXAVDz74YPUs1hEwpU9XhHhe+NSZrHx4uskRcCz0FY5vYlcJ6dsSgy0hB00+xvQUR8BR4c8QJ6e2cWfPPfes2oMdNFMcqtu6t+/CQQ4+a3MEHIasEBzaFhhyykgfOdC0sC+0+sa+/c6HD9scAXPwmS+tPnXPoZfk6NvQ2iVHY+ewGgdffuWdtn6u8/Tpp+wSK+MCMeN4jnFB6OjCTtL5xDlkiQ8mqbybwksT3W5DO+fiy6Y+k1PWh/BlEz11933yTeHDWJmUE7suXhsPjoApbSDt3oUD6ZpkTi55kEN2+s6T2vTfXP3LR9/RjnPMQVKCD7+Sfw7ZdtJJJ1VjMh9vLbXUUsViiy1W3WPufcstt/RVJ0cb+9Yzh34mFWjrL5ImhYckj1hc5f2uOGXc78pbnueSB+Q3Fo6AlDvodqAMHx05lwzpkos5ZIIvX6bIgFTbRBcO0ofb2ibX+EBZOWSG0GyxIWAIGAKGgCFgCBgChsDgEDBHwKIoHeEwyM8000zlZKYJ7n/84x89R8Atv/zyfUm1IvzlL3+577nc8HUE5NggWTSYZZZZitdff73gKE3u3X///ZJdX9ym+JOYY0Ek3/XWW6/v/aYbeuLBJMaCITAWCOC8ajzrh7xeLMYRAie8QQUfA6Mu+89//nPxuc99rpJFtOmRRx6pk3hfa9mLcaYu+DgCyntttKVgimObyF7a49VXX5Uie+
JhOgJSsIwr0Lb11lv30KJ/4Kgp9BPjIB8SLrzwwup9vsC/+eab+14fdUdACM6Flx5T6xYbKIsjpAXz1VZbrcAArkMOR8CUPq1p8bn2qTP5aJ6+8847a7NucgQksS5nmPrKZZddVqCvSZvh8PzGG2+U9KfS5GNMT3EEHBX+1P1x1VVXrW17udk27nA0srQD8b333iuvZY19Fw50vWLH4zZHwGHICgGua56RS0b6yIGmhX2h1TfW7dPW73z4sM0RMAef+dLqU/cceommJ7Zva1rb5GjsHFby9+VX0nf1c8nTp5/yAZ3IIz5AZE5PSB0XhAaJ27CTNF1xDlnig0kq76bykqZxItptcvNlXb/JJetD+LKOjqZ7vvmm8GGsTMqFne7HdfryeHAEpP1S2oD3u3AgTZvM0e/HyoMcslPj0DQ3pC5t+i/Pc/QvH31nLBwBc8m2n//85wUfyjI2TzHFFMVcc81VfoiPXtRko8nRxr5yKYd+Rl8gdPUX0mgeqJMlpGnjobKgoihicJV3u+LUcb8rf3musYiVB+SV4gj4qU99SsjpiY877rhKn1x99dV7nukfg2wHyvHRkXPJkC65mEMm+PJligxItU104SDt39U2OcYHytJ8kiIzhG6LDQFDwBAwBAwBQ8AQMAQGg4A5AhZFwa5MTP754q4r7LPPPtWki3ceeeSRnlfEkMAzFoKbju71dQTka51pppmmKlO2POeIz7bQpfgzYYRG/vgC8je/+U1bdtUzvcDMV5KUY8EQGDYCxrP+PKuN//D717/+9YE1V5uB8dBDDy1wnr7tttt6ysepaa211qrkETIuJmgnv6bdSnUaOWqdskJpS8FULwTTHsccc0xtdQflCNjkuIdDuIwJs846a4Hje13g2GBJx0L273//+7pkjfd23HHH6n2MbXVhPDgC5sJLG/Pq2ubZZ5+t8AJ3jMluCHUErCsnpU+79HT97qqzvO9jWGxzBBy0vvKHP/yhlGnbbLONkFzFv/jFL3rajSPPCKk0+RjTdRkYm93ArnjCw8RCG+lGhT+1MX+ZZZZxq9Dzu23cQc7rurIb6SCC78JBDj5rcgQclqwQ/LrmGblkpI8c8FmUFLrbYt9+58OHbY6AOfjMl9a2+sqzHHpJbN+OkaOxc1ipry+/kr6rn0uePv104403ruQRTv0StMwOmVvHYCdltsW5ZIkPJqm8m8pLE91uE8uXbf3DfZZL1ofwpUtD22/ffGP5kLJjZVIu7Lp4TfcDnBbrwgEHHFDJp7q5uM+4pzEM1T+hSb8fIgulPl04kK5N5qTKg1yy03ee1Kb/Utcc/cun3cfCEVD3afT8WPvW7373u2LGGWcs+/4DDzwgXakxztXGvnIph34mlenqL6RL5SEpKxRXec8nTh33fcogTao8kHJCHQG1kx801AWdps0RcJDtAF0+OnIuGdIlF3PIBB++TJUBqbaJLhykv3S1TY7xgbJyyQz5aFbot9gQMAQMAUPAEDAEDAFDIC8Ck7wjIDtHySJh0y5SGnK94M57X/nKV/TjQk/KeM5xvu7uPbywwQYbVOVieGsLW2yxRZVWaD3zzDPbXumclHF8kORF/IlPfKLxq8eXX365qgO7DOj3cJ6sqx/E4SDSVbfWSthDQ6AGAePZfzvw+vIsEC666KIV3/L1X9OuSPDyeeedV9xwww01yHffajMwbrrppiUNLF67AUc8kSsHH3yw+9jr9yKLLFLl0bSjXZMjYAxtsZhilNG7hXHNuOIGjNGCCXHKkZbaYFS3MEPZN954Y095V155pUtSScPiiy9epdtyyy370nTdYEyUemF456hhN3zyk5+s0mA8dEMOg2JqHrnw6mqb559/vsDhUjCrw+OOO+6onmOMqwtd5fBObJ+uK6/tng8tvO9jWGxzBBy0vvLoo4+WuFMfN2DMnGqqqcrn7I4l/TyVJp9+qxdS6/i9zRFwVPhTOzkhI9vkX9u4Q7vgSCj8wy6kV199tdtc1W+OposJPgsHkm8qnzU5Ag5TVlCXrgWGXDLSRw60LewL7j6xb7/z4cM2R8AcfOZLq0+9c+klMX07Ro7GzmEFixB+7ernkmdXP/3xj39cyaEpp5yyoN4SYseFGOykzLY4lyzpwgQaUnk3lZcmut0GjGP4sq1/uM9yyfoQvnRpaPvtm28sH1J2rEzKhV0Xr2kHiVF2BExpA9qhCwfStMmcVHmQS3b6zpO69N8c/ctH3xkLR0DaMods22uvvaqx+YILLiiwM+Lo0xRytbGvXMqln1Gfrv5CmlQeEtxCcZX3fOLUcd+nDNKkygMpJ9QR8NJLL636JPNWnPncoGlrcwQcZDtAk4+OnEuG+MjFVJngw5c5ZECKbcIHB5+2yTE+UE6qzHjooYeKlVZaqZh88smLz3zmMwVrjxYMAUPAEDAEDAFDwBAwBPIjMMk7Am6//fblRGvuuef2RneppZaqJmdzzDFHdeQbGaC4TjfddNVzFh/XWWedcuGRXfdwrlh//fV7FvaZoLUFnHJkEZMY54mmnQYlnxdffLHnHb3gQBocflC4db5MnK699triL3/5S7kbFLsd4pSD88xPf/rTMmsWY5deeume9zjaE0cEFrr5w4CCoYFJAQvhFgyBnAgYz/7bERDe9eFZsEfuaGcijkE/99xzi6effrpsGnYVwfi57LLLlrx91FFHRTXZrrvuWskGnOt0EGe7L3zhC/p26eAhOzxSJxZLYwJyVeQZRjNkF4YFjlCnrhytIc+Jb7311qqYGNpSMD3wwAN7aEGms6j/q1/9qjxK5eijjy5mm222njRNx9NUlWi5mG+++aq8Fl544VLGS3LkvQTGKsGInRjcHW932mmn6jnjwp/+9Cd51Tt2v2zXX/CzUKV31YCWI444oi9v2lboXHnllfuecwMHKEmzwgor9KXJkUcOvHzaRn+RDq8I7hgi4VVtEMTBoM4536eclD7dB3DLDR9aMCxTF2lD+kZdmHfeeas0GM51GLS+Ik4Y0CiyVMpH5gjt6FoSUmny6bfauMyRUm5AlghtxBz3LWFU+POcc87pofHUU08VEguc9XQfbxt3eOn6668vGBOkzuyCfdBBBxU4W9MeyMDrrruuQJbgYB8TNI+ecsoprVmk8hljqNSFXTF00HQMUlZQZtc8gzSpMtKVAyxa1AV9LGPshwzk69vvNB/ieFsXzj777Kqd+FBBhxx85kurLrftOodeEtO3Y+Ro7BxW6q/5pItfffo5+TYthPH+fvvtVzmGw7ssiuoQOy7EYKfLbbvWGMXIkmHxbiovTXS7DW0cw5dtfaPuWaqsJ0/d57r4so6Gpnu++cbyIeWmyKRU7Hx4TTsqTj/99D22S8FNO4xsvvnmcruK9bjXNPdK0T8pKKUNfHCgjDZ9IYc80P0tRnZCo888iXRd+i9pUvuXT7t3zbmhwzdo/LrkQA7Zht4vOrWOsYNgW+KjDveUBE1jbBvrPLrqmUM/A/+u/pKDh6SdY3CVd7vi1HG/K395nkMekJeml7Uj5FxbwHap+yL2XuY2jz/+eGknRafUDs0f//jHG
7MbZDtQqI+OnEuG+MjFVJngy5c6XYwMSLFN+ODg2zap40MOmeFuevLVr361sT/bA0PAEDAEDAFDwBAwBAyBeAQmWUfAs846q8fIwiLhcsstV2y11VaNaHIkIgs++qheJmnsVsJRvxJQ7GUnGD2Jk2t2h5FrYn6zsHfzzTdLFj0xk8U555yzemfnnXfuea5/4BjAV3JLLrlklZ4yPvKRjxQ4cegjD1j81F9NCU04DE0xxRQ97+vdEp977rniAx/4QM9z3qXObr3XXHNNTZ5dGwLRCBjPFqXDQgzPAvoJJ5zQx7PwrRyHIvyP4ROH4JBw/vnnF+zCp+UGcpKjheToW3G2o5wVV1yx3C0UZwztWLzddtuFFNuTVu8UKXXRsd4xkPvgiEx86qmniljaYjHFaLLGGmvUtofQPO200/Y85wvVfffdt6fOvj/WXnvtnrzYFZI2mH322XscX3AwZxwUGnAwow3Z2ZUxRO7jpPiTn/zEt/iedDjx6EUY8sQoKveI9fjCMcX0Cwx7GFK5nnrqqStaeJ8ddi+88MKyHMY4dszSYyZfmNLGOBPlyEMqlAMvn7Y57LDDeurLODv//PNXzk0syEnbENO3dtttt/IrbaHVpxzSxvZpKccnbqNl1VVXLWnX/ZA6YfRE9xHHVRadtPMvaTDE0vb33XdfRcYg9RXthIEcZYdkjN0stIguxIchfASiQwxNPv0W58N11123py8gR6BLMNl///17ZC64ffSjHy2xRRaONX8KTnfffXdPPaCTj3DQbXEQ5eMUn3FH8jvttNP68iNPVx/HYTwk0N58DINzIfnxh9xETrV95BPDZ4cffng5R9FzEPoXY6847w9DVoTMM2JlJO/BR64cmGeeeXrkAE7zehED/BnrZWwPaUvSdvU7HGvrxqCNNtqoGoMee+yxUg5pJ2XmVaQRx9wUPpM6ddEqH3BJ+q44l14S2rdj5WjMHDaEX0P6OdhqR8APfvCDpaxCnxa5QLzgggs26k4x40Isdl19geexsoR2GSbv5uCliW63oT1D+dKnj+g0sbKePEL4UpfZdR2TbwwfCh0xMol3Y7HzGSfZ2QzdWc+JkEV8oCLjEfNn7JBaH0J3ZP7nO2/KoX8KjqFt4IMDefvqC6nyIFZ2MgZLaJsn4dgTov/G9i+feUfXnFvq4xPH8Cv5psg2xnnsIXqcrrtm93m9K1tKG8fUM1U/6+ovuXkoFleffkKaHOO+b1kp8oCNEpC/Cy20UE8fg4e53/bh0mqrrdbzju6X2LWwecg97K+bbbZZKa91vQbZDj46cm4Z0iUXpe4xMiGUL1NkgNAZa5vowsGnbYSG2PEhp8zADiV9mfjII48U8iw2BAwBQ8AQMAQMAUPAEMiIwCTrCKi3xteKJ04HTUHvwqHf4ZrFKR2Y2C2xxBI9u3AxSWORkWMgOVLRzePEE0/UWfRcf/nLX67Sy4JuT4L//OA4Xjdf/ZtdT3RgZxiO5eTrYJ2Oa5wtWOD8+c9/rl8pr3kPJx591Ka8j/GQxei77rqr7z27YQjEImA8+2/kYnmWt1kYZnc0vTsgfMtvDErszvbaa68FN9E222zTJz9EHuBARsAAL85e8kxivmo9/vjjq+Mzgwn4zwvIUL0YS/7kjUGBozrr5BXHJKfQFospRt1tt922zxETJzt2i2E3PsFHYhz4YsI999xTMLZJPhK/733v6zMaghMGNH18saRnnNhkk00KDEwpgbppAyb5k/c+++xTHsdz5pln9tG6xx57lM+FFjeW46BxYnWfye/11lsvSx667ql4+bYNBkqph8TLL798wXEzBHY1lvsS8yW7BN9ySB/bp6WsrriNFr2rhNRDxzirEdwdjXWaSy65pIeEQekrf/7zn0vnNO0EpunAUNukr4XSBG/ovPW19H0+ZNH39bXslljH15KOdiGMJX/qhsOBUWiTGCdA+JgdGH3GHZ3fbbfdVu76J3npGAcddmWlXUICztQ6H33NB0ZtIZTP9A4AuhyuWWSSMGhZETrPiJGR7g4Tbn1FDsguxu5zfjO2x4S2fqePZXbLFD7ESd59Jr/ZkVdCLJ/J+8RttLq7lOr3mq5z6SUhfTtVjobMYUP4NbSf6515aW/0apxvPvnJT5Yf6H3nO9/pcc6va4PQcSEFu7ry3XsxsuSJJ55o7P/gMgjezcFLE91uQ9uG8KXbF3x+x8h68g3hSx86JE1svqF8KOUR826ITJJ3Y7DzGSfb+FF2qj3ppJMaeRZ73jD1T8EjpA18cCDfEH0hVR7EyM6QOVuo/hvTv3zavWvOLe3pE8fyK3nHyDZ2mpdxmznAeeedVzpYYt/meErXNk4aHWLbOLaeKfpZV3/JyUOpuGqM265zjPtt+etnsfKgy6aBM1lTePLJJ/ucVNErWUtifqOPBpY5BvNjCYNuBx8dObcMabMf6ZMDwCBUJsTwZawMkDYijrFNdOHg0zaahpjxIafMOOOMMyodhA9r6fsWDAFDwBAwBAwBQ8AQMATyIzDJOgLmh7I+RxZi2KmI46z4cmZUw9/+9rfyCM3LLrus3HmJHS26tq2nLhyFwrFyV111VbnTEu/pI9tGtb5GlyHQhMBE51nq/corr5THeXPcCQYIfg8jIFPYHYvjf/k6maOKHn744WQHQE37Cy+8UMpb6saXuBg3fEIqbbGYvvnmm+VusIwT+ohOH5pD0kAfuGPoBveuspDjHEFCWhyrMLjmlO0YtO+8887yyB2czRmDxnNIwcu3bTDssTPi1VdfXXAdGnzLkXxj+7S83xaH0tKWl+8z2mgQ+grGUD58uOKKK8oj1pE77MLiEwZFk0/ZbWlGhT/5cAYnxosvvrgcq3Lo0c8880w1RrATjl58bcNkUM8GwWfDlhU+2KTISJ/8c6YZRL+roy8Hnw2C1lx6SUjfTpGjsXPYujYZhXuh40IKdl31TZUlXfnnep6Dl6DF5oDpLTKeZH1bbUP5UOcVK5MmCnYai5TrlDZIKVfeTZEHqbJzEPOkid6/fHUObE+yAyYfjNZ94Mi46n784e7wntrG0s9C4lz6WUiZvmlz4epbXq5x37e8FHngW4ZOh22SXTeZB7PWQn/zCcNuBx+acqUJlYu+MiGWvlwyINQ2EYqDT/3Gcnygz15++eXF66+/7kOqpTEEDAFDwBAwBAwBQ8AQiEDAHAEjQLNXDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUNgtBHgFArZRY3TGZrC3//+92K66aar0v76179uSmr3i6I83cNwHfuuYP177NvAKDAEDAFDwBAwBAwBQ8AQMARGDQFzBBy1FjF6DAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgGYGtt966cu7bbrvtGvNjh6w55pijTDvPPPM0prMH/0bAcB2NnmDtMBrtYFQYAoaAIWAIGAKGgCFgCBgCo4SAOQKOUmsYLYaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhkAWBk08+uXIEnGaaaYrHH3+8Nt9v
fetbVTp2WbPQjoDh2o7PsJ5aOwwLaSvHEDAEDAFDwBAwBAwBQ8AQGD8ImCPg+Gkro9QQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBDwRwPHvHe94R+XkN9dccxXXX3998eabb5Y5PPvss8UXv/jFYrLJJivT7Lnnnp45T9rJDNfRaH9rh9FoB6PCEDAEDAFDwBAwBAwBQ8AQGCUEzBFwlFrDaDEEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMgWwI3HLLLcV0001XOQO+5S1vKaaaaqpilllmqe7NPffcxemnn15wRLAFPwQMVz+cBp3K2mHQCFv+hoAhYAgYAoaAIWAIGAKGwPhCwBwBx1d7GbWGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCAwFNPPVUcdNBBxUorrVTMN998xWyzzVasvPLKxQ477FCcccYZxd///veA3CypIGC4ChJjG1s7jC3+VrohYAgYAoaAIWAIGAKGgCEwSgiYI+AotYbRYggYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgEImCOgIGAWXJDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDYJQQMEfAUWoNo8UQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQCETAHAEDAbPkhoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChsAoIWCOgKPUGkaLIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCIgDkCBgJmyQ0BQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAyBUULAHAFHqTWMFkPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgEAFzBAwEzJIbAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAqOEgDkCjlJrGC2GgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGQCAC5ggYCJglNwQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMARGCQFzBBxQa7z++uvFj370o2L//fcvdtppp+KSSy4puOeGv/zlL8VVV11V7LXXXsUee+xRXH755cWbb77pJrPfhoAhMAQEHn30i8dgkQAAIABJREFU0eK0004rtt122+JrX/tacf/99/eV+q9//au49957i29+85vFNttsUxx33HHFI4880pfObhgCEw0B+v4vfvGL4qCDDio233zz4tJLLx3JKv7tb38rfvCDHxS77LJLSedvf/vbkaTTiDIEDAFDYCIgYDJ3IrRiWB3Giz4QVitLPREReOyxx8q52hZbbFEcc8wxE7GKVqchIvCrX/2qOOSQQ8r5xbnnnjvEknuLevHFF4vvfve7pc2CORky2cLYIjAqfWNsUWgv/aWXXiouvPDCqt/W2cfbc2h++tprr5V2vF133bXYcccdS/549dVXm18YgyfGt2MAuhVpCBgChoAhYAhkQsDG8UxAWjZjioDZR8YUfivcEBgzBMwRcADQX3TRRcXb3/724i1veUvP3+67795T2lFHHVW87W1v60nDOzgFWjAEDIHhIfDyyy8XK620Uh8vTjPNNMXTTz9dEfL73/++WHDBBfvSTTfddMUzzzxTpRvPF9///veLtdZaq9h+++2LF154YTxXJTvtkzI2OLTPNttsPX3/gAMOyI5xSobw57rrrlvAt3r8vfPOO1OytXcNAUPAEGhF4Lnnniu+9KUvFauvvnr54U9r4gn00GTuBGrMgKqMB30goDojkXRS1i8H1QAnnnhi8YEPfKBHH1xjjTUGVdxQ8p0U+smo1nGfffYp5pxzzp7+hMPRsMMtt9xSrLzyysXkk0/eQ8s///nPPlImVd2kD4gB3wjpG8Nqk2GV4wvtXXfdVay66qrFFFNM0dNvWVDPEa688spipplm6skbW8Caa65ZZn/22WcXq622WrHvvvsWfLwy7GB8mxfxUR0n8tbScjMEDIHxjsCojcXjHc+xpD9kHB9LOgdR9qD7sY3pg2i1+jwnon2kvqaT7t1B8+uki+zEqPkk7QjIV+Gbbrpp1N/Pfvaz2h5wzjnnFJNNNlnx1re+tdh7772L2WefvTJIzDrrrNU7Bx98cHl/6qmnLhcPZ5hhhirdJz/5ySqdXRgChsB/ERgEz7Ir5zLLLFPy31JLLVV+4a+diL797W+XBDz++OPFvPPOW6bDkPjpT3+64lnS33bbbf8ldJxePfvss8WUU05Z1YudES38G4FJHZuFFlqobwFs1BwB77777pJGd6HOHAGNiw0BQ2CQCOy5557VuMkHPjjITQrBZO6k0Mr9dRwP+kA/1aN7Z1LXLwfVMuyS5uqD49kRcFLoJ6Ncx6WXXrqvP+22226D6r6N+Z566qklHXyEqO0VdY6Ak6pu0gjegB6E9I1htcmwyvGFlJ36kcfvete7evotdrjU8L3vfa/6uJ56zzHHHFUZs8wyS3mSh+aVQw89NLXI4PeNb4Mha3xhlMeJRqLtgSFgCEySCIzaWDxJNkKmSoeM45mKHJlsBtmPbUwfbjNPNPvIcNEbH6UNkl/HBwJGZRsCk7QjIF8IaqNAyDVHGriBLxpx7COfz3/+8+Vj7m2wwQalYyDHBBPuu+++qtwjjjiivPfkk0+WO5Lxde/pp59e3rN/hoAh0ItAbp4ld3bxgWdx4JUjRDEoYqjEkRfeJHz2s58t080888wFOwgSTjrppJLnF1tssYLjSMZ7+MlPflLJJjDZcMMNx3uVstFv2Pwbyv/5n/+p+sioOQLqxn7HO95R0WmOgBoZuzYEDIHcCLg7Cj/44IO5ixj5/EzmjnwTZSdwvOgD2SueOUPTLzMD6mT3v//7v5U+OJ4dASeFfjIe6vj//t//q/rTWDgCSvf+9a9/XdHBnL3OEdB0E0FrOLFP3xhWmwyrnFBk//znP/f021RHQHa9wDYHD2yyySYlOdjf0U/4MH+//fYrzjzzzJ4yWSAbq2B8m478eBgn0mtpORgChsBEQGBUx+KJgO1Y1cFnHB8r2gZV7iD7sY3
pg2q19nwnin2kvZaT5tNB8uukiejEqvUk7Qj497//vTz2E0c9jAf8sRvWww8/XPvH14OSrs4R8Nhjj62eM5jp8Mc//rH6yZGbks9TTz1V3ceAh3FkLMPrr79e7LjjjqXBZCzpsLINgToEcvPs//3f/xXvfve7S35ksNQBw6QcHcLuPnIEz9Zbb62TFXzB8o9//KPn3jB/5ORZ8pJjtDje/Oqrrx5mVUa6LMPm380zXhb+zSllpNlpaMRdfPHFpU4z1rrV0CpsBfUhMIw+cMYZZ1R6/XLLLddHw6Rww2TupNDKvXUcL/pAL9Wj98v0y8G2yUQxdE8K/WQ81NHH2WuwPfrfufssRJpuMoyW+G8ZPn1jWG0yrHL+W3u/q9yOgFq+X3PNNT1EiP39mWeeKd7znveUevq0005b/PKXv+xJN8wfxrfpaI+HcSK9lpbDRERgGDaJiYjbeK7TqI7F4xnTsabdZxwPoXE8yIVB9mPfMZ105qvg17N8sNL683j+UNIPkYmTykdeDJJfJw6Sk25NJmlHQGn2I488slrAm2qqqeR2X/zoo49W6eocAdkVTBz8+DqxLrz55pvFNNNMU6abfvrp65KM6b1TTjmlpG3VVVcdUzqscEOgDYFcPHv55ZdXPLvTTjs1FqnLw+F3lEJunsXZEkfmP/3pT6NUzZGgxbApyi/sZZyzHQFHolsaEQ0I4KAtxwLecsstDans9kRGYJh94PHHHy/oZ//6178mMqSNdTNHwEZoJuwDcwTM17SmX+bD0s1pIhm6J4V+Mup19HH2cvvgIH77LkRO6rrJILBvytO3bwyrTYZVThMedfdzOwLKB6zYJvQH9m7Zr776avHDH/6wOtXDfT6s38a3eZAe9XEiTy0tl4mEwDBtEhMJt4lQl1EciycCrmNVB99x3Ie+8SQXBtmPfcb03OuePu0zXtP4YDWR7CPjtZ1C6Q6RF4Pk11C6Lf1oIWCOgEVRaCefNkdAFvf4ihBDQ50jIO/y7G1ve1vt8Rw0PceMihPF/PPPP1q9oSiKRRddtKTPHAFHrmmMIIVALp7V+Rx00EGqhN5LjvoWvj333HN7H47xL+PZMW6ASaz48bLwb04pk1jHrKnuZZddVsltcwSsAWgSuGV9YHiNbDJ3eFiPSknjRR8YFbyMjrFBwAzdY4P7RC3V19lr0PXPuRA5aFonlfxHpW+MMt45HQHfeOON8vhfsdFx0seoB+PbUW8ho88QGAwCZpMYDK6WqyEwbARyjuMmF/xbz9Y982Jl9hF/PEclpcmLUWmJ8U2HOQIGOALS1Pfee29x5513Fi+88EJPy3M8qBghpphiip5n+sett95apVtooYX0o9ZrDBs///nPix/84AcFx5SGBHb2+vGPf1zuVIJXcFM4++yzK9rMEbAJJbs/CghoB742511obeJZnrG1tPDtEUcc0Vi1j3/841W6OifguhdTeJYvYqD7uuuuK+65557ilVdeqSuiGATPYlS9+eabi+9///vFX//619py5SZHrtx2223F9ddfX9x9993F888/L4+SYpyun3jiieJnP/tZce211xb33XdfJy1SIMc5P/TQQyV2yNtHHnmkYCfWphCSPgQbynvxxRfL8QJ8HnjggaQjpJHjfMn+m9/8ptHRvKmOf/jDH8o2oq2oQ0hgbLvhhhuKm266qWBX3A022KDihfG4IyBjN05hYEndQkIKT+tyUtpS51N3ncI7dfnV3Yvl+5R+KHTAy3fddVfZhhzf3hRYaFp22WWrvhriCDjW7ewr/92656LbzVf/jqWNdkOOw3ePPfZYkiwcdB94+eWXS3ktco9+67PLHx/6MG4yZocE3/roPGN5UOfhcw0Wt99+ezmHePrpp1tfaXIEjJW5se1QRyRj14033ui9EwzHyDFuN8mYl156qfjpT39a6j/oEL4hR51853QuTexUz1wQvaqpXu477u9R0gdixtEc+Asm6FJg+Ytf/EJu9cTIDPQ1+hH6bFcI1S/JL7YvdNHiPs9dV51/yrjlOx41Gbr/+c9/Fg8++GA5z8DG4iPnoT2nnhUq/337ScjcRreHvh6UHNRl1F371rHu3Ri5QD6+fYm0Ps5efLHPnAudoEnekoZxpC2NriN9RWwDyJ077rij0nGxZdCf68IwdRPKh070PPQ9MAida9XVwb2XkwfdvN3fufsG+Q+rTULLiRlTQsfVnI6A2MTFjvfWt77Vbbqe3+wIiH3rRz/6kbeslwxi5Qrvjxe+zWmzEtxC45BxK3ScyFm/0HEbHAY9b8uh40MnbcDJMDi6+OpEde0c0pZ17+e4NwwdBvnnM09OsUvR33LZUAahz49lW+fWBVJkfVufDR2LJa8YWSPvxozn8i6xr+0kxr6b0m6xsjSGTsEjdhyX95viUZELTfS590P7cYj+3DWmD2Ld061f6u9RGYd9scptHxH8UniNPHz0kNC5h9DmxsydmbPKejZ9NiSE1BXZdc0115RrWU1lYG/HLoDPkRtC5cUg+dWlzX6PLwTMEdDDERBFhQXB+++/v691ERTnnXdesfHGG/cYItZZZ52i7m+ZZZap0rG7oE5z8skn9+V/xRVXFIsvvngx+eSTV+9h8MCJ8Pzzz+9LLzcwduy+++7FLLPM0vMe73KEwkUXXSRJS6Pihz70oZ50lPfOd76z56/JwFhlZBeGwJAQ6HIEbONZSISXDz/88GKuueaq+j18oflRX3OMtxgaP/zhD/ekcw38sTwLXSxUL7300gXOxFIe8ZRTTllsvvnmlQMyCwE5eZbJzcUXX1xssskmxQwzzFCVXec4DLZnnnlmMe+881bpNK0LL7xwufgZ0xVQjnbeeedi9tln78sbAy/0NS0o/O53vysxmmyyyfrenW666Yp99923hyTf9CHYUAD4XHDBBcX73//+Pjpmnnnmsn5nnHFGof+QrRizTzjhhOITn/hE8a53vavYaKONSqeFww47rC+vueeeu1xc6amQ8wOD51ZbbdU3BnA0/YYbbtjqtInxb/311+/hD93Gch3jCEi/dccW+Y3BQgfGPp4tsMACPQtcKP70EXlP4gMPPLB6XTulsFiPE/1qq61W6P7BOLfNNtuUC4DVizUXoTyduy1rSOq7lcI7fZnV3Ijl+9B+CL+hbyEH5ptvvrKNf/nLX5aOf2uuuWa1K7P0QdLhSKDDl770pWLqqafu4T9kuPQTYsYQN4x1O/vK/1S63fd9fsfSBt/hkOm2B2Majvj0WzeMVR9AbuP07+rb9LX3vve9xaWXXuqSWjqaH3roocWSSy5Z9bctt9yyJ12O+pBhLA/2EFPzA4MD84/VV1+9YIxi7MHJYN111y13OBdeYwxmBzjGzrqQS+bGtANjKA4HX/ziF8s5DjyOTsNcSM/P2LEdp3yMNieeeGLPeIvD4pe//OWe8fbtb397sd5661UfY+BQDC5aR+N67733bl0wi6mTxjhkTqffY6zcfvvta8fyRRZZpLj88st18trrGH2gbZzn4wwJ9Dktl7n+4Ac/WD7OOY7G4A/f4ji5yy679IxFGAqhG11KeA
N9WD4SAnM+LHJ1ZE4BwOlMB8rw1b3lvdi+IO/XxcOoq1tu6Hir3w8dj1xDN4bUr3zlK31zDeaEbR8NxOhZOeR/SD/xnduA57DloG5D99q3jjnlAjSE9iXeaXIEZBzCeM+84t3vfnclH7QxX9JsvfXW5XgrMoR5Sl0gPf0XPUpOHpF33Ji0EpCzw9JNpEzahrrr8REa0R2WW2654lvf+lbP3LfJiVryq4tjeLAuH597OfsG5Q2rTXzK0fWPHVNixlXKRf7qvuva0TRtTdfY37FFrbXWWj15adudvv7IRz7So9sjb3TILVfGC9/G2qw0djmufcct33FCaIqtH+Wk2iKgYVDzNqlfrI4PD55++umlPZuxaqmlliqz5ONr5oHCn8wHQ4NvW+aYJwxLh0mdJ8fapWJtKIOeu0if8G1rSZ8zjtUFcsv6tjr5jMW5ZI3QETKeM06F2E6kDOJQ+668G9tusbI0lk7ojR3Hpa5d8bDkAh8lIuddewu/WRshYOOse47PgE8/duvqqz/7jOm+656c2lZXB+4xN5GAo3tdOpznYsIojcO+WMl8MZd9BNxCeS1WD4mZe/DhHX0S/xjWFWl/ZB/1n2OOOSqdB92H39/5zndau0JIXXHGO/roo8t5MPNhylhjjTV68n/qqafKNB/96EerXc6xNergKy8Gya+aHrse3wiYI6CHIyAKIww755xz9rW2PJMJU0q8ww47VPmzE9cXvvCFSiixCMlk7FOf+lSPIwNOIm5g5ybt4LTiiisWpEPwifAhZrJHYICfaaaZCr2Ix0Io9/SfDBhuefbbEBg2Al2OgMKXdTwLrSiDKbyq3xXnpRSe5QsYTROOYNtuu22pnOAMJeWhHBBy86zgJeVIXOcIiCyR5yxKfPrTny4222yz0lFC7rPDQWhgQWDWWWct82aBFXm36667FixWS77EK620Ul/WKJKzzTZblY4FV45zXmWVVSqZh4OdhJD0IdiQP4s7Qi+OTMcee2y5KKwXheS5xDjUybWOkb/6t77GYdPdmVbqh6IrSi2yHsxwosSBUPKANhRTN5xzzjk9C9ykw5Fiu+22K1ZeeeXSKVXyiHEExHFLO8STF9u8b7rppgW7LElg1zcphxhlXQITLtLjMMAz6sjY+N3vfleS9IxnON3rvNxr+lldiOHpQw45pLas2Laso8u9l8I7bl5Nv2P4PrQf0q4zzjhjH35tvENbuv0QpyzwFn2HNDgDa33mq1/9alXVsW7nUPkvhMfQLe/6xrG04ZwJX0kbwIM4VO22224V39Iu3GdXGwlj1Qe0vs2Yi5MiY5B29oFejEc66HGb5/xpR8Bc9aHMGB7UtNZdf+1rX6vaSOgnxmFO/9bXOGFjsHCDnkPEytzYdmCOpGnk+qSTTirQmdz7OHZJv9TP9Acf+j7XH/vYx0rnL/e+/n3ccce5kJS/Y+skmYXO6eQ9dqjQzmjoUvAfjoy6X9PPm+Z3sfpA3TjPRyLoISwCSMCopXfcZmGQuXDOcTQGf75Exkio25frunuSBqdR5tltY/173vOe8itnqX+ofhnbF6S8unhYdZWyU8at2PFIG7ppLy2rpP0kxmmdxWU3xOhZueS/bz8JmdscddRRQ5WDLp7ub5865pQLsX0JupscAZmTST/SsXYExOlJP5PrOkdA2pO5q6Sh3zKfo3yc8vWcjjRajg9TNwETnJz1uE/53/72t8vxRuh3Y+R/SIjhwZD8Je0g+gZ5D6tNusqRehLHjikx46qUS7/WfSHGEbBJXuh8265ZfJaQU66Q53ji2xiblfvxneAYG4eMW03tXmevhJ6Y+vHBcQ5bBOUPYt4mOMfq+Hz45c7xsJtiW9Yfy8I/a6+9thTnFYe0Zeo8YVg6TI55cqhdKtaGMkx9PqStvTpPQKJYXSC3rO8iuWsszjVHEDpCx/MQ2wknP0kIte/Ke7HtxvsxsjSWTspLGcelvl3xsOTCb3/729LxSK+VId9ZJ91///1LMnF8x/FIxgbsRNi+WNPr6se6nqH6s8+Y7rvuSf/n4xDWUUX/wzaD3QuboATWmJhDSf9njeAzn/lM6SgmaXzjURuHfbGS+WIO+whYhfJarB4SM/doGheljzTF7vqS9InQurIe4pbhOgJin3XTuI6AvvJikPwqGFg8/hEwR0APR0AGRhgTxwo38EUigwm78wnz4kTHvbo/dpuQdPPMM09PGn10ELtXSbrPfvaz1W4UlM/XxvKMCRvbNktgO1q9aO4a2RgM5V2EhA7f/OY3q2d2NLBGxq5HDYEuR8A2nqUufMEGf6LgCj9gWK/jWe7pryPZ6UOnE2NiLM9Cj56Y4sTLlzsScFgRGolRmCXk4lkMDhzhw1bFuizXsEbZMkFA9mhHMpQs2YE0xhEQOSdl49SlwyWXXFI9I427YIKTmryLA6UOX//618tn2hEwJL0vNpTJl0hCB3JYO+pxpIM8I2YCAm20IV9hoQCusMIKPWlwdDv44IPL8YUvd3V/JQ+MYG7AMVV2dWRRhuOlJbz++uvlIpLQoR1WSHPqqadW5bOjAzsmuQG65f0mBdl9x/3NQrDeIQxnAzfsueeeVTmUx05PbpD+j2OhG/RCL2XhOMiOi3xFtN9++/UsBLM7GcYYN8Tw9GWXXZatLV16mn6n8E5Tnvp+DN/H9EP6J3zAn96VTdqP/sgEl36nFxy55ksvN2injbZdfsa6nWPlfwzdLkZdv3PQhmMMu7BJ4KtedrgVOYLBhq/XCGPRB1zZjFFHAvRox2V3Us7iN2OnlotaruaqTwwPSh3a4iuvvLJPXoljNcYhdiNgnMJZW9qLGGOEG1Jlbko77LHHHmU9RAfRtGLg0x8U4JxfN96SBtnCLu/HH398325h6DzsSsozdn9jPqfnVOwC7IaUOpFX7JwO+asdG9F5dMCgDy6CE86RbkjVBzAG6/agP9UFdnyFDvqPOATkGkdj8Yf2prGID1/Y2Yp+ouWYYIlhfZ999in4spwvivUR9aTRHyyE6JexfaEOc31vWHWVMlPGrdjxyDV045R60EEHlbr1KaecUvDBpLQfMbzthhg9K5f89+0nIXObOtlP3QchB10s63771DGXXKD82L7Eu02OgNgD0Af4Y14hfUo7Auo02iHbndcyp+ZDLMkDOeI6qLJbqzwnloUdaByWbkJZyBBOVRBa2IlQhw022KB6hpxHp+CP47hDQgwPhuQvaQfRN8h7WG3SVY7UM3ZMiR1XpVwW2KWvEMu4L8994lD7O+OuLlNsd5SVU66MJ76NtVkxh8sZQsYtn3FCaIut32uvvdao/2FL8rVFDGreRv1SdHzGJ/RbnPw0T8hOrpw6JR+0hDoChrQl9UiZJwxLh6krJ3aeHGOXCrGhDFOfD21r4csccawukFPW+9SjayzONUeAlpjxPMR2Io6AMfZdwSq23WJkaQqdqeO41Nc3HrRcEDrQvfSaJqeQaV0I2xbrfNi72FVUQlc/lnTEofpzyJgu6z6MW22+Ct/4xjeqsQ35ib5YF7DpkVfdnL8uvXtvVMdh6PTFKod9JIbXYvSQ2LlH0
7jIx7k46eHHw1ozc3v6vtaLOL1Ch5i6Pvzww6VdgDVqydt1BJQ02r7grjkIHV3yYpD8KjRYPP4RMEdAxxEQ5md3BP0nRzzWOQJKF9CCiYlUU9AOLUy06gKCQBaX8FR/5ZVX+pLx5ZYIEgSGBH0EFrthuUHy5V19jCLpfAcMN0/7bQgMGwHtCBjLs9CsJyQ4+DUFFo2E3+T4L502hWeRHZovb775Zp11oR0BcXjRDku5eRZlXOpJ7DoCcoycPGdRwg1MKHge4wioHZhdR0AUOHFuI/+zzjqrp+jFFlusokt2OpUE4tyoHQFD05NXFzakkQkFNNY5yelyXUds3tfKOLvkuIFFHo3TFlts4SYpuCdtdMwxx/Q914ZReIfjLgiMM3rRniN36oJ2eKmrY907dffYSVLo5AhjHTAyy46GkmbBBRfUScprFGSe1x3zqp1Sbr311r53cXCRvIn5SlGHFJ4mnxxtqelpu9Z9IpR32vKVZzF8H9sPpcyu9sMRQ7cfzt1u6JookX6s2zlW/qfS7WJV9zuWNhzZ9ZhWd/woMl0vHKO7umFYfYBFHd2XtCMgNLEbgDznA566wEc3kkY7Auq0KfWJ4UFddte1lld1hgf0DtdZBiOODl3165K5Odrhwx/+cNUOtAcOWRh0CSwAMGZxnANB17luvEWPkTYlrtvxD0d9nUbKKgsoinLBUD8P7Vuxczr6oJS7xBJL1DpK48gmaVjUwqlEQi59APyljKY5L0cXk4adW9zQ1UZdOlGOPtXVr/WOXRictTMO9aFP6A8fcIRzg49+GdsX3LLafg+6rinjVux4RH27+hE6J0dHSl9Fl3dDqp7Vha2PTtPVT/Qcw2cu5INNDjnoYtn2u6uOPjR3yYWUvkT52lCPQ0Vd0O2tHQF1Wj2vdR0B+dBL+iN2yLqFrDZHQCln0LoJ5TB/Elo5KYDFbR20/kI6FnVjQioP+pQ5jL4xjDahrl3lxI4pqeNqDkdAaUvaS/pem/39scceq9KRXi9+S15d40SXXCGf8cS3qTYrwS01jhm3fMaJHPXTcrzOltQ2bmu5l9tmmqrj02Z6Ywl4gg/RxdkHpyLmN8ybQkJMWw56npBLh9HyIXae7GOXGpYNJXXuEtPWIX2pLW2qLqDbsm7+7yPr2+hzn3WNxaRPkTW8Hzue826I7STFvhvbbjGyNIXOXOO42w+afg9TLlx88cU9utBXvvKVkiych+Qje+yedaGrH6fozz5juu+6Jxtx6I+x6tZumYewDobjo96Mqa7eTfdGdRyGXl+sumShj30khddC9JDUuQe4aDnrzrt5Dn/ozSiWW265nuZPqavG2nUElEKwNcucpk7PIJ2PvCDdIPlV6LV4/CJgjoCOI6AwXl08LEdAnCKk/L322qu2d7GduqSR40I5qkvuEV911VV97+60007l4ivHM+HRrIPvgKHfsWtDYCwQ0I6Aus+71208C925HAFjeRYa9LEVSy21VB+cHInKUUDswMMOIzrk5tkuJfy0006rZAzGftd5CmMLRqPnn39ek+l1jbzC6MQuHTiruUEb9FCkdNBOYziq6YVYJgPQpA14oekpqwsb0sjRxvRDvi5xg148YtHRDT4KIkceSz93v4bCGUqOPCTWTqNSFkeeSBryuf7668tHOEhIvjjoaAzlXeJcjoBamXf5lN3boIVjr+SrZH6ziCyBL7ZlkqcdGOS5VvR9FuHc8TKFp6EhtS2lHj5xCu/45B/K9yn9UOjpaj8m7dJfiet2/POZKI11O8fK/1S6Bee2OJY2vUsWX9vVLbhRruYRJt3ubjfD6gPIRHQKxh7+3EVqvcgNTXWha7LNOyn1CeXBOhrb7um2aDJOsPu47AgMz7HLjw5d9SOtdnxwZW6OdtDG7DqHK01vV53ZZVTBDacfAAAgAElEQVTLEBb83OAuMLlHJqfUKXZOx4K3HuPr6KYe6DR6fNUfleXSBxiztZzWehg0vPrqq9XOhHW7Q3W1EXm06UQp+Etbd/VrbbRjd9O6oBfc6hweu/TL2L5QR0vbvUHXNWXcih2PqK9PP9LtWLfjaaqe1YWtj07T1U9i5jZd2OSQg219zn3WVUff9myTCyl9ifL1XG4QjoDMW/VYqXcR1XgNwhGwbr7U1Tc5Aljk/AILLKBJLK+Zi8pzYr1TfV/ilhupPNiSdfVoGH1j0PqiVKatnJQxJXVcHQ+OgE16cJtcGW98m2qzkn6WGseMWz7jRI76pYzbg5q35dDxaTO9AM9cRz4MTmnPmLYc9Dwhlw7TpSuBW9c8Wc8p6+xW5DEsG4rWeWPmLjFtndK39LupuoBPW7bJek2Lz3XbWCzvp8ialPGc8n1tJ6n23dh2C5WlKXTmHMelbbviYcoFaNFrOTjD8aEuH2uin6+//vqN5Hb14xT92WdMD1n31I6xdc6+8iEvRwLHhFEeh6mPL1Y+slCPFa59JIXXoDNED0mde1Bel5wljXb2gyfkVKPUuoZiPWhHwBR+BScL4xsBcwR0HAFxLOBrKP3HkYQIARTepuD7RaLPjoCLL754ZTDjCzJ2jnL/EMLQxB8LrQQtSFkEYkANCb4DRkieltYQGAQC2hEwlmehK5cjYCzPQoOefIVuTZ2bZ7uUcBQgkTvE7HJCW7hf/+dsc46MPOOMMwp2hJOy3S+VXIVt+eWXL49vbqIjND35dGFDmtlnn72ikQmGG7Rxh2N+3eCjIGoHOjcPtq8WjPgy3h035LekIT755JNLMvTxl4cddphLWvVbTx5TdgRkt0lNh3bm23HHHctn5K/bSre71LVplyEfRZ8FK6GBoz90SOFp8kltS01L7LUP7/jkHcr30jZgG9oPhR6f9tNflt50003yahX7GFbGup1j5X8q3RVILRextOkddDGkNgW+ehf+I+bYbh2G1Qd0mfoaZ+MbbrihPDZA6GTMqwtdxjHeSalPKA/W0dh2z0de8b7eyRWHcR186tcmc3Ve+jqkHXSfrfugQOfrU+eVV1656qN1DnXs0iR9g/jJJ5/URTRe+9Qpdk6n55ldNH3uc5+r6Ne77ubUB1ZYYYWqDMZzHTiWFRrZTbou+LRRm05Ulyf3fPCXd7v69Y033ljVr2kxDd1e+kmdTOzSL2P7gtTBNx50XVPGLc3boXMln37ExzvSRuyi6Rt89awubCmvS6fp6idaX6YuXXMhyvTBZlBysA7jrjr60twmF1L6EuUP2hFQ757L7so4MtSFYTkCUnZb39THyPORoLszrrsbW52+Xlc/33u+POiT3zD6xqD1RalnWzmDGFN8x9Xx7AjYJlfGG9+m2qykn6XGMeOWzziRo34p4/ag5m05dHzaTMsATprKEWLaknIHPU/IocP46ErUpW2e7GOXGpYNJXXuEtvWOfpZUx6+uoBPW7bJ+qbym+63jcXyToqs0bwcswas9Z4220kO+67UV8dd7RYqS1PozDmO6zq2XQ9TLkAHG3bgPyBzXolZU6k7iVBo7+rHuh+FztN9xvSQdU/st1IveOK3v/2tVKOcU4kjM7wTE0Z5HKY+vlj5yMI2+0gKr0Gn
ll2xeojv3IPyfOTsXXfdVfUd+tBFF11UdpHUuvpgrZ0uB+0ImMKvJSD2b1wjYI6AjiMgBiw34BSIEBiGIyC7MOkjhGQAa4unmWaakmR9hChfwYUG3wEjNF9LbwjkRkA7AsbyLDTlcARM4Vn3Xb6qDwm5edZHCd911117lCNkEwsEKC51RxeF1EfSsmvMDjvsUB5TUSf7tEMY7/ClBseou2n5ygcnbTeEpud9H2w23XTTigZ3xz+21taTrjonOh8FEcc9qafrCKjHAEnTFR999NElPNNPP32Vb90W6oJhLkdA8tPGLnGyYMFLjihmket73/teRZfenvsLX/hCeZ9JaV3wUfQ5bkvw0Y6ALl9KmrZYxmGhJbUtJZ/QOJR3fPMP4fuUfij0+LSf5qe6hcUuw8pYt7Nbvq/8d99r65fyzO2fgnNT7JbhSxu7/+ljgdlhrymwWCz0EdNvdBhGH9Dlcc2xsYyrfD2q6yF0DtoRsK1Ph/CgW6+u3z7yijwOPPDAqs1oHx182qtJ5up8uI5tB23UaDNmU4ZPnRljpe1ljNK0wifynLjNETC0TlqOhszpDjnkkIomdnBsciKhHuyaKPTzUQ16CiGnPnDWWWdVZcA/L730UgWhfNR2+umnV/f0hU8btelEOq9Q/OXdrn7N2CMYNjkC6h0WYxwBY/uC1ME3HmRd3TFFMGuLZdxy3/Udj6TePv2IXdeEli5HwBg9qwtbaG2T/zzvmofEzG18sMkpB6VNmuKuOvKeD81NciG1L1H+oB0Bjz322KovNn3sBB3DdARs65vsgiS8Q+zu+HfSSSdVz9Gr/vrXvzY1v/f9GB7synxYfaNrMRU6c8iLtnJyjSkx4+p4dgRskiu02Xjj21SbVRc/+T6PGbd8xokc9Uvlw0HM23Lo+LRNjgV4t41j2pI8Bj1PyKHD+Ogd1KVtntxllxqmDSV17hLb1m6fSf0dowv4tGWbrA+luW0slrxSZE3qeO5rO9HlaJ2v7VrWGaSeEoe2W4gsTaEz5zgude2KhykXhBbt1ET7cTKL3pRB0um4rR+n6s8+Y3rIuif2rHnnnbeae2jHRHampM7zzDNP4wlYut5116M8DkOvL1Y+srDNPpLCa9AZq4fEzD0oz0fOMg6zU6bINVl3Tq2rD9bDcgRM5dc6nrB74wsBcwT0cATk6CJ2vlhllVUaWzfXjoAsIonQIWaQfuaZZ1r/5IhffeTOfPPN10hr0wPfAaPpfbtvCAwLgS5HQB+ehdYcjoApPOset3POOecEQZibZ32UcAg8/vjjexaKRWahcLvHBYdUiIX7RRZZpJKBLDzsvvvuBV8trrPOOtV9Uch03uwwxxHKQovELITvsssufYp+aHofbFBm5Ug+FEh+S9AGBSZ87JDgBh8FUefjOgLqYww4Mr5r7OA5X9H88Y9/7MENvJtCTkdA2W2XtqJ9CT/84Q9LWmQhlgUjUdppSxYQmNzNNttsZbqf/exntaTKO+Rdd9QVLzU5paTwtBCT2paSj2+cwju+ZfjyfWw/1HT4tF/bwiR5dRlWxrqdY+V/Dro11nXXsbS5O31+4xvfqMu+uqcn2nvvvXd1n4th9AEpEFmIDi0f4iDHcQZkpxsWtWU8GUtHQGj15UGpl2/sI6/IS+sc7Papncx82qtJ5gqdqe3ga8ymPJ8651g8iq1T7Jxuyy23rPorR/+2BXb/lb5Nn0d3zq0PsGO0dixklwUCRwFT9owzzti4q7RPG7XpRJQTi7/g1tWvUxfTKKdLv4ztC1IH33iQdU0Zt2LHI6m3Tz9qM3RLPil6Vhe2lNGl03T1E/IIndv4YJNDDgqGXbFPHX1obpILqX0J+gftCLj99ttXcpmdjJrCqDgCQp/eRXbVVVct55bc5yinRRddtKpP01HKTXV076fwoJuX+3tYfaNtMVVoyiEv2spJHVNSxtWJ6gg43vg21WYlfTVHHDpu+YwTOeqXgw9zz9tSdXxpL/AR/T92Jx7JS8ehbcm7g54n5NBhfPQO6tI2T+6ySw3ThpJj7hLT1rqvpFyn6AI+bdmkQ8bQ3DYWS34psiZ1PPe1neSw76a0m68sTaEz5zgubdsVD1MuaFrWWmutagzABnTzzTfrx33Xbf04VX/2GdO1bGWe0RW0jYsNQ3DwIrBRCGMfjl2xYdTHYV+sfGRhm30khdfAPlQPSZl7UJ6PnCWd5kmcPgmpdfXBeliOgKn8WgJi/8Y1AuYI6OEI6NPCuRwB2SlBJmXE7IbkG/bcc8/qXb64xdM3JPgOGCF5WlpDYBAIdDkC+paZwxEwhWfZflvze9uRrHV1ys2zPkq40PHcc88VOHJNO+20PXVAcdLbb0v6thjHrs0226zKh0Xjo446qud4oQ033LB6XucIKPmzpTu78WlcucahsC74pvfFRu98gOPaUkstVSy22GIVPTPPPHNxyy231JHi5ZjQZpigjlLvD33oQ7Vl1N3EmVzeI2YL8KaQ0xGQCaeUO8MMM5ROJVtvvXV5T39BuN5661XpTjvttNLZlPdwBpQdjFx6fRT9JqeUFJ4WOnyU/ba2lHy64py801UWz334PrYf6vJ92q9r0VxP4up4bqzbOVb+56BbY113HUsbjsXC08RtOwK6x6rCMzoMow9Q3u23314gl4VuFgzYCVzCgw8+WD0ba0dAaPLhQaHdN/aRV+R1zDHHVFjMOeecPdn7tFeTzCWjHO3ga8ymPJ86py4epdQpdk6HQ630ZRxttbNmT4MVRbHffvtVadl9jfFkEPqANqxzPCxBdvVtcwzxaaO2cTQFf8Gqq1/nWEzr0i9j+4LUwTceZF1Txq3Y8Ujq7dOP2gzdOfSsLmyhtUun6eonUl9i37mNDzapclDT1XXtU0cfmpvkQmpfgv5BOwKyI77I8AUWWKARslFyBEQv+eAHP1jRzUkBODHi6C114Sj6tvGosaJFUY5NuewETeUMq2+0LaYKbTnkRVs5KWNK6rg6UR0BxyPfptispK/mjH3HLZ9xArpS65eDD6Ej57wtVceX9gpdgJf3fGPftpT8BjlPyKHD+Ogd1KVtntxllxqmDSXH3EXaLrSt5b2YOIc+7tOWTTpkDM1tY7HklyJrUsZzyve1naTYd3O0G7T6yNIUOnOO49K2XfEw5YLQ8vTTT1cnMImOzoYefBDaFNr6car+7DOmh657/uEPf+jZ2Y0NlvgokfU5fCXQRWPDqI/Dvlj5yMI2+0gKr4F9iB6SOvegPB85S7rpppuumrvKqSWpdfXBeliOgKn8Gss39t7oIGCOgBGOgNdff31xyimnFL/73e+qlszlCEiGWhlAsfMNHFslAznxvffe6/tqmc53wAjK1BIbAgNAINQRsI5nISuHIyD5xPIs73KUmPAtDk8hITfP+ijhLn0o0Rj3pQ7EtE9I4ChaeZ+F67ovknwdAaXcyy67rGeCg8PiG2+8IY/74q70Idiw0w2OeNSJXZPmmmuucncpHBjbJlg+CmKbYUL3ByY4TJZ9g1aM9fbp7vs5HQH5MksvEmGUYgcjJmhM3iSwU6b0D75eO/jgg8v
fLMY1BV2f0B0ByTOFp3k/tS2b6uXeHwTvuGXU/W7j+5R+KGX5tF/XorluwzpHQMrSaUL0LaEztZ1j5X8q3UJ/WxxLGwvAwq849jYFHMYlHfE111zTk3RYfYCj94SO1VZbrc+5eNQcAQWkNh6UNL6xTz8mrx133LHC6lOf+lRP9j7t1eYImKMdfI3ZEO5T59TFo5Q6xc7p2HFP+jPxU0891dNO+sfmm29epV166aWrR7otc+gDsvuf0PXjH/+4wPGQ321H0fi0UZtOlIK/gKGxqNMlciymdemXsX1B6uAbD7quKeNW7HhE3X36UZuhO4ee1YUtdHbpNF39pK6du+Y2PtikysE6upru+dTRh+Y2uZDSl6A71BGQxYu6wAdQIhP17ubMFeU+c7mmeeMoOQJSP+iURQx2F+GjPhy/OaLzjjvuqIPA+14OHvQpbBh9o20xVWjMIS/aykkZU1LHVXRX6d/Ef/nLX6TawbGv/Z1TGHSZsjOMLjBVroxXvo21WWnscl93jVs+44TQlFK/HHwodBDnmLfl0PGhJWQBXtch9LqrLSW/Qc4TcugwPvKBurTNk7UO3GSXGpYNJcfcRdpOYt+2lvQxcQ5dwKct23TIULrbxmLJK0XWpIznlO9rO0mx7+ZoN8GKuE2WptCZcxzX9LZdD1MuQAf6zworrFDqRGyqoE9n2XbbbRtJ7erHKfqzz5iu29VnR0AqIrv/of9xfcABB5T13njjjRvr6fNg1MdhX6x8ZGGbfUSXE7r2CM4hekjq3IPyfOQsO9nr+YLM4VPrqrF2bejS52QOTfmrr7663O6JfeQFLwySX3sIsh/jEgFzBIxwBMSoBXNecsklVaP7GiJ4RwQLwqwu6MkKu5QwMPoEjIiSNzFfzoYELdw44sOCITCqCIQ6AtbxLHXL5QgYy7PQwCAvfIsD1G9+8xtv2HPzbJcSfuihhxbLL798cdttt/XQyFdeentxtk4OCdpowmS0LjQ5AuIwBk3bbLNN32scUyzYEvMVUGh6ybQLG0lHjJO4OLg98MAD+lHrtVYQ11hjjdq0bYYJbdShvl//+tdr86i7KY6LvDfLLLMUHMlbF3I6ApL/RhttVLURjgiUDz/p8OKLLxaTTz55+YwduWTXCRx8m4KPot/mlJLC09CU2pZN9XLvp/COm1fT71C+T+mHQoNP+3UtmuuJkutkJuWMdTvHyv9UuqX+bXEsbXoHz1lnnbVxBxiODYbf+WPR+Pe//30POcPoA+5kH4OEG0bBETCUB906dP32kVfs4MjRGtJme+21V0+2Pu3VJHNztYOvMRvCfeqs+QxDuxvYeV3wIEbHkJBap9g53T333NNDkxzFK3RJjCGYsV7oZ4c+CYPQB3SeMp6vssoqUmRt7NNGTTpRKv5CUFe/1uMdRvC6IIZnsK7Tj7v0y9i+UEdL271B11XzU4h9A5pjxyPe9elHbYbuHHpWF7bQ2aXTtPWT2LmNDza63ULlYFt/q3vWVkdJ70Nzk1wgj5S+xPs+joDaya/pdA+dRjsCkl7kMjE7DNWFUXMEhMZdd921pH399devIzn6Xg4e9Cl8GH2ja3EGOnPIi7ZyYseUHOPqRHUEHK98G2uzEn5q+8hW0jTFseOWzzghZabUL4UPBzVvy6Hjg03IArxg2RbHtqXOc1DzhBw6jI/e0TVP9rFLDcuGkjJ3SWnrFHlBX8mhC/i0ZZsOqfusz3XbWCzvp8ia2PFcyva1neg+g24ass6Q0m6hsjSFzpzjuODbFQ9TLkCL7CDJWhXjIyd86TlHk828qx+n6M8+Y3rMuqfbnuL0WLfpSFc76eejOg4Ljb5Y+cjCNvtICq9Bq68ekmPuQXk+clY7VtNfONGCkFpX7eSHrlMXdBr4qS74yAveGyS/1tFl98YXAuYIGOgIyMIJ3s4MloNyBORrGj0Ys0UxjjZ1gYVTFhgk4MAn7yK4rr76annUF4tQkwd6wGBxKPRoYcnHYkNg0AiEOAI28Sw05nIETOFZttMXniX+xCc+0fj1/8svv9wjC3LzbJcSzlf90Ei5bmCSJvVgx7aQsMsuu1TvMinBkOKGT37yk1UalFYJjz76aHkfpcgNGBummmqq8jlym3xD00ueXdhIOmIcJASLCy64oDzyEAW2K/go412GiUUXXbQqm69Km3aGZUw577zzihtuuKEkSyue0E6b1I07G2ywQZW/Hnu66tb0/Oyzz67yE8xOPfXUvuQf//jHe9KxgNZmTPJR9JucUig8had5P0db9oFQcyOFd2qyq70Vw/ex/VAI8Gm/rkVzPVGqW7ymrLFu51j5n0q34NwWx9J244039vDqlVde2VcM+qU46MP3W265ZV+aYfSB559/vnRCFNmjxxYhiJ1s5Dk01YWuyTbvpNQnhgfr6Gy65yOvvv3tb1c4sMMuCwE6+NSvSebmagdfYzZ0+9Q5ZfEoR51i53QcySh9dqmllqodyy+//PIqDfqR/ghlEPqA1leFtosvvlh3ob5rnzZq0oly4A9BXf1aGwYH5QgIHbF9oQ/UlhuDrmvKuBU7HlFdn37UZujOoWd1YQudXTpN2zwkdm7jg02KHGzpbrWP2uooL/jQ3CQXyCOlL/G+jyPgIossUsnXpp2RmxwBwUA7aXPNBwlu4EMzkaXEdXa7QesmmibsirLIxseB1113Xbnja92cXr/nc52DB33KGUbfGFabdJUTM6bkGFcnqiPgeOXbWJsV8ucjH/lIefLFpz/96aidHWPHLZ9xQvg9tn68nzJuD3LelqrjUzffBXjBsSuObUud76DmCTl0GB+9o2ue7GOXGpYNJWXuEtvWBx10UIH9gI9E0bljQg5dwKct23TIULq7xmLyS5E1vB8znks9QmwnsfbdlHaLkaWxdOYcxwXfrniYcuHSSy8t5w18gH3VVVeVpFFn2WSBuQQf/b7wwgt9ZHf14xT92WdM1+ODr68C68B6bk39Fl544b66xdwYxXFY6uGLlY8sbLOPUF4sr/Gurx6SY+5BeV1y9m9/+1sxzzzzVHNrjgPWIaWuwnv0QebKOOG6QdtffRwBm9a3yHeQ/OrSbb/HHwLmCFgU1RaxMCVOI21Bb1uuHQHvvvvuSmCw00GdMY58tdMDQqbO0YJ3ZWckaOKPozdZjMSYxh9blMLcCDNNMzskiRGO99hhDKWbyTr5cvwChjkGLhyOdNDHL/KudsbAuFdHq37frg2BYSGgd9fQ/b+u/CaeJe3aa69d8e0hhxxS93rZ7+ebb74q3RlnnNGXLoVn4auVVlqpyh/eQ8m49tprS379xz/+UTzyyCPlcagsGvz0pz+tys/Ns+y8Rvnyx0RfB5mI6Z1jeE79F1hggeo9jn0LCe7XOvoLMwwV+gsjaDviiCOq7MUYwf2nn366us/FueeeW9EExoTQ9JJhFzaSjhjZKhjqmB1Q2CUBhZp2dYMcecs7yOi6oL9SYVt3N+B0w+ROyuWoXXAQbHDewDlx2WWXLdMcddRRZRY4mU433XTVe7y/zjrrlM7kOAiQL7TrvN0doVxafH7jIMk4JfTikADWbj
ec0CqQANn7yv1vMXvW09HzShW82Xk2Zf80GOcionBQRaObSsCwQsX5T2Fyv1uXKgnoJpzE4znj3zzDPlTeczPdq2dVyzpHV9UJRuP12VUgQCKl/NwGnl2H777Qc2pYvONluuraNZEstJgUJ6v3yrWK3ngwd+/OMflz+a3yrY8tbSbv9sK8ZYKo8U7cTK0rYMuchrecS6xPR7K0PIMiSIKcW53dve9raiHaod7LfffpmCwZU01ik4S7e+jB07YttTlzpu803V17scL1Qm3yfKgYBWZgXaWL8sBwLG1kGfc3UrV8iy6dhtn2/rf7otve2/lvrBRN8U0oeUt6+XqmOaxuNVV121KJduZexTbNv2edU97lp3sWVqMwk5zof22ZgxNbacCqa2cwa1t6uvvnqgCnwgoAJTfcB8TLkHNtLypK3P6Fa2vs+Uj+X+fKwcCKjgYvtRnvL4xje+MaY0mhnQ8q/60YE+oLsMaJ3yj09i2+GYwtS8ENsOFPzp24HOhcupHIhdno26vH7T82GNlSF9MLaOuhwHY9piTF8NcbD6aqub2L/DtJ3Y9mplZYkAAggggAACCCCAAAIIIIAAAggggMBUECAQsCIQ0L6ULy+rAgEfe+yxPFBAM1zoXzm4w19c1AWHqtQUCOgDFhQk4W/Dp9tC6mKyD0TQhT279Y6W/sKKbVtltnW0j5dffrm9xRKBSSFQDgQs91V7PhF9dtasWcWFPQWElS8cKgBQ/faJJ57IrYfZZ5suwnQtd9eG4i9oKYigLgA5xuP+++8v7NUGvv3tb48p5h577JHPvqVbsGn2CaW77rprYExUwERVUpCdzV6o/H2wm24BbO1u6aWXHvNxjcX2vpZ+plZb2V/Aqgtm0a3hLZ+qGdraAgE1C+J66603cFthHUNsFkvlXQ6SUfkUUGfbVXB8VfIXtf2t/hTwZJ/VzGf+OObzSRUI6IO4dAz1STPAqSyLL774QF3qgq2l5557rrhNsQ/2tPd98MANN9xgLw8sfQBAuR3GWGojKdrJQGEbnnS5yBvjEtPvG4pf+VZIEFOKczsfCKhA27oUO3bEtqcudVy3D/Z6qr4eeryw7Yb0iaZAwNg66HqubuUOXTYduy2Ptv537733FuOwxsC62TAtv6ZlSB/S57vWi2497FNs2/Z51T3uWnexZQoxaTvOh/TZ2DE1tpw77LBD0d50S9tyuvbaa/MZ6lXnp556avF2bLmLjAIetPUZZdF0LNexXT9i0d//+pFdOSmw1c595FlON954Y/G+1ivPhKcZAG37v/zlLwc+HtsOBzJreBLbDvxsf69+9auzv/zlL5Vb89vR7KZVtwmv/GDpxWGNlSF9MLaOuhwHY9pi374q+hAHq6Kmuknxd5i249tR37+jrLwsEUAAAQQQQAABBBBAAAEEEEAAAQQQmOwCBAKWAgH15bMu2Pp/CrbQF/RVQUV1DUCBBFdeeWW29957F1/y61ZdVakpENDPiqAyrLHGGpkuHNSliy66qNiebr+ooI+qf3ZhQssTTjihLjteR2AkBXwg4Kj1WV3E9P1L/V6zipVniDPYYfbZposwXctt5Q9d+gtaSy65ZO3HYjz8NhTsrKC4kFS+hZu/nXP581tttVVRv34mOX87TAWA2sxf9nld5PLt4oc//KG9VSxDLmD5IDfdKric2gIBy+s/++yz2RJLLFGUTbdle+ihh8qrZSuttFKxjma6rDqu6IK+7aMu+FrSTDf2+mGHHWYvj1mmCgTULIi2PS19MJ/NrqOAJX981ey9lqwN6rbVVSkkeEDBoFaGiy++eCCbGEtllKKdDBSo4UmXi7wxLmYus/E+dwkNYqpi6XJu5wMBq4JELP/YsSO2PXWpYytz3TJVX/djedPxwsoR0ieaAgFj68CPJWrDbefqVu7QZdOx2/II6X9+1qiqY5Dl1bYM7UMh9XL66acXY6Vm4/Uptm37vOoed6272DKFmLQd50P6bOyYGltOP/5V3fK5rj5iy12Xb9XrIX2m6VheladeU0Df7NmzM50j2nmAP8fwn9N5hq2jgEKfFCCp91ZffXX/cv44th2OybDmhdh2oNuB2/4pwLUuaeZAW0/LM888s27VxteHNVaG9MHYOup6HKyCCWmLffuqthfiYOVqqpsUf4dpO7Ht1crKEgEEEEAAAQQQQAABBBBAAAEEEEAAgakgQCBgKRBQARzldNttt+VfTrcFAuoWb/pCVLe/8rfBsS+2+wQCzpkzJ9NtGC0PW77//e/PdCuXcvK3WrJ125ZHHnlkORueIzDSAj4QcNT6rOB8ALD1P12AVyDCn//85wHbYfbZposwXcs9sBMBT0IvaMV4+M8uvPDCAaX6+yqHHnpoMcZqZryq2wJbZv4WuvPMM0+mWxsq3XnnnUUeqvPyjH/HH3988b6OD5rppZxCLmApcNvaVIpAQAX0WX5annfeeeVi5TP46fjl12t7PN988xX5vOxlLys+e9ZZZxWvlx+kCgRUvv7is83wqHq1WxQrOPDCCy8syuUvtO+000756+ovVSkkeOCNb3xjkbcPBNRsiDGWKk+KdlK1X1WvdbnIG+Pi+25b27L3+567hAYxmUffczt/cb0pEDBm7EjRnrrUsZnULVP19dDjhZUjpE80BQLG1IHK0PVc3codumw7diufkP6nAG3rP6MSCHjGGWcUZfKBgCnadohvl7pLUaaQttp2nA/ps7Fjakw5y06acTM0xZY7dDtaL6TP1B3Ly9vRzMq77rpr9rrXva5oz9bXtKwLBPT7u9RSSw1ku9Zaa+V5lW8rXPb126l77M/JBjbS8iSmHWj2P/99SDnQ0W9aP57xZZdLnzSssbKtD6aoo67HQfPq0hbL5ezSV7W9Ngcrk5ZNdZPi7zBtI6a9+rLyGAEEEEAAAQQQQAABBBBAAAEEEEAAgakgQCBgQCCgAjY0u1RdIKBmT9LtX+zivtZVMKBmGFEwiH2x3ScQUI1MsxutvfbaRT6WnwJW9tprr4HbLPrbSenWjCpb2z/NcENCYDIJtAUCTnSfleWxxx6b+aAI67eafc3fLniYfbbpIozVf2i5bf3QZegFrRgPfxuuN7zhDaFFy7bbbrtifNWtf5uSZrSzutRYrxn1LPnZsDSTrI2tus3oCiusUHxun332sY8MLFNcwOoyI6AC1GxftNTMSFVJMyT69TRjT9txxW67/PDDDw989qqrrqraRP5aykDA/fffv9juxhtvnOd/xRVX5K9ZsInGCQsE0PH00UcfzQM7F1100Xy9n/zkJ5Vltc/IpO7WwHXBAzGWVpgU7cTyalt2ucgb4xLT79v2ofx+aCBg7LldaCCgytd37EjRnrrUcdnSP0/Z10OPF7b9kD7RFAgYUwdWhi7n6vaZ0GXIsTuk/02mQMAUbTvUN7TuUpQppK2mCASMHVNjylm+DfU3v/nN0KrIYssdvKFEgYAK8l5++eWL8w31sY9//OOZznV07mHnTnWBgPfff3+m8w9bz8479GMFvfaKV7xizA9HUrTDUKeYdlCenfkrX/lK42Zf9KIXFQ6f+tSnGte
te3NYY2XbcTNFHXU9DvZpizF9VXXQ5uDrqaluUv0dFtNefVl5jAACCCCAAAIIIIAAAggggAACCCCAwFQQIBAwIBBQFX3LLbdUzsB33XXX5QGC9gW+Zmi66aabirZhswnq/b6BgJbZpZdemq266qrFl+S2TV1wsKTH9vqKK65oL7NEYEoJtAUCamdHoc8+/vjjmQKS5p9//qJfqn8qEOXuu+/O62SYfbbpIoxvICHl9uuHPA69oBXjse+++xbOmoVEM12EJF1wtCOTzBcAACAASURBVHFTFyKbZgQ88MADi3U1w4rNCKjtyO1Nb3pT8b5mgVxnnXXyC7mWv24tXJd/igtYoYGACk602fFUtsUXXzx7+umnK7meeuqpYp+0rmbSC00KCLR911K3gaxLKQMBr7766mK7L3/5y3PzHXbYIX/NzyS36aabFuvNmjUrD9JVORUM6OvWlzkk4KYuEDDG0sqQop1YXm3LLhd5Y1xi+n3bPpTfDwkETHFu1yUQsO/YkaI9danjsqV/nrKvhx4vbPshfaItELBvHVgZbBlyrm7rhi5Djt0h/W8yBQKmaNuhvrZeW92lKFNIW00RCBg7psaU85lnnimOqzqe6gcUoSm23KHb0XohfabuWK7zg4985CPFfuqHP0cccUSmme0sbbHFFsX7dYGAWlc/HrTzJP3AT8nOS/fcc0/LrlimaIdFZi0PYtqBfgxj+6Vl04yAmi3dr6vt9knDGivbjpsp6ij0OBjTFmP6quqnzcHXYVPdWHtXG4j5Oyymvfqy8hgBBBBAAAEEEEAAAQQQQAABBBBAAIGpIEAgYGAgYF1lz5gxo/jier311hsTOJAyENDKcP755w8EcCjI6IUXXsjf9l/IKhBGFzZJCEw1gZBAwLp9nog+q5nGFADmL3LNnDkzL+Iw+2zTRZgqr6ZyV63f9FroBa0YD90C1hsrGDQkHXfccQOf0wwtdWmbbbYp1l1llVXGrKYZAi3gRDMG6uLwSiutlG299dbZ9ddfP2Z9/0KKC1ihgYB+phrNRqPAuabkg6cUcNkl+YvtBxxwQO1HUwYC6pZ0mknH2oNug6nZHrWvv/vd74oyaKYiW+cDH/hAdsghh+TPd9ttt2Kd8gO/P11nBFReMZb6fIp2Ut6nuue+P2qWy6YU4+K3M97nLt7/Rz/6UeUupThOdAkEVCH6jh1+f7r2TW3X27fVcSWWe9G3gZi+Hnq8sE37PrHuuuvaywNLG5fV39dff/2B9+xJ3zqwz/tl07m6Xy/kccix29vXjUuTKRBQLrFtO8S2ap2muostk2+rG2ywQdXmsxSBgL5f9xlTY8v5qle9qji2KuA+NMWWO3Q7Wi+kz9QFAp511lnF/ilwqeocKjQQ8NRTTy3y0g80nn/++Uw/JNFY9ctf/rJyl2LbYWWmFS/GtgPbD+2LfoxRl/TjKDsX0/K73/1u3aqNrw9rrPTttO64GVtHocfB2LbYt6+qIkIcrMKa6ibV32Gx7dXKyhIBBBBAAAEEEEAAAQQQQAABBBBAAIGpIEAgYEQgoGZU8l9a6wvbcooJBFSwwhprrJHtuOOO5WyLWYts+7oFjZKCHew1LY8++ugxn+UFBCa7QN9AwPHus3L9/Oc/n/fba6+9doBZMzYo0Mj6p25/pjTMPtt0EaZruQd2LuBJ6AWtGA/dUs18tdRsLSHp5ptvHvicLkhVJQWX+Vn0dtppp6rVsr333jvPb7PNNqt8v+7FFBewQgIBZ8+ePbC/++2335giLbPMMvmF6AceeCB/T7Pdmu2CCy6YaTuhSbPT2mflp1vyVqWUgYDKf8sttyy2q6BNlUH74dOTTz6ZvfjFL87f06y9NqPj5Zdf7lcbeBwTPKCMYiz1+RTtZGCHGp74i7y6fW1TinGJ6fdNZap6z1+crwo2SHWc6BoIqLL2GTti21OXOq7y9K+l6uuhxwvbtg/yq5sN269TFwjYpw76nKtbuUOXTcduyyOk/41HIGBVH7IyhYxVZ5xxRjFO223b7fOxbdvyqVv2qbvYMoWYdAkErBuXY8fU2HKqj9lxXwH4d9xxR101DLweW+6BzFqehPSZukDA3Xffvdg/jfVVKTQQUDPnaYZp87LbI6+22mpV2eavxbbD2oxLb8S2Az/r8sILL1w7I7ZuG2z7rx/R2LlnqTitT4c1VoYcN2PrKPQ4GNsW+/ZVVUaIg1VaU92k+jsstr1aWbW0H9n613iMAAIIIIAAAggggAACCCCAAAIIIIDAZBIgEDAiEPCJJ57I9GW1fXGtLx/LSTNA2fu64FCVNPOQrbPPPvsUq9x5553567poXU76cnLeeefN39dMD7qljqUVVlihyE+/xK+bFUuBSaeddlp25ZVX2kdZIjApBPoGAo53nxWeZn5Tf9bFkXJSsJ31dc08ZmlYfbbpIkyfclv5Q5ahF7SUV4yHLoqbsWZp+c53vlNbPN26y5Ju4Wufe+tb3zpmdletd8EFFxTraNyturCtPLVd5aXAz8suuyz79a9/PTBG2zbLyxQXsNoCAX/7299mCyywQLEfb37zm8eUzd8q7N57782LqRmSzEfLXXfdtdJIK+sC7kEHHVTsng/A0Wd16zsdf8pp8803L7bhP19eL/S5n2XHyn7SSSeN+bi/LZ/W062Emy4AxgQPaOMxlvp8inYyBqHmBX+RV0GcTbfbjnWJ6fc1xa982QcCahbRckp1nOgaCNh37IhtT13quGxVfp6qr3c5XqgM5513XjF2aPy95557ykUrZmpVH68LBOxTB33P1ccUsOGFpmO3fSyk/41HIGBVH7IyhYxVTYGAsW3bylG37FN3sWUKMekSCNg0LseMqbHl1G2W7bir5Xve85581tOqunj66acHzgliyl2Vf91rIX2mLhBQ5zG2f5p92P8dbtt773vfW6wjz6a07bbbFutavqecckrtR2LbYW3GpTdi28FVV101sF8XX3xxaQtZfl6hmbNtv7fbbrsx64S+MKyxMuS4GVtHocfB2LYY01dDHKzu2uomxd9hse1VZf3Vr36Vrb322vmPhN7//vdnGp9ICCCAAAIIIIAAAggggAACCCCAAAIITEYBAgGzLPviF79YfPn8kpe8pDEAoFzJfvaTpZdeOnvkkUfyVXQh+Ygjjhi4rdU888wzcKHD8tKsUvblt37VbckuUOm9Bx980F7Ol9/61reKz+jLSp/0JbsPUNTtELW+5aEZMM4888zsHe94R56HyklCYDIJjGqflaEF1JVni1MAjcYI6+vf//73C/Jh9VmbbUplUDl96lNu//m2xxdeeGGx76997WsbA4piPDSTmwXiaT81E83BBx+czZkzJ9/mH/7whzw4TxecdGHa0nXXXVcEV+tzPihb6yiYz98+Sxe0qtI111xT7KfVtZYqkwIM999//8ogFeVlt6XV+ipfVfK3P15zzTXHrKIZ7vx2dRyxpDa41lprFe/rmHTrrbfa28XS74MFAuqzNque5a/bXSvYXRfA9U+GctHFdQWqW9JFNB98qM/r1sQK0lQwpepbsyf649YnP/lJ+3jvpW
Z2U/1beRW8KZ9y+trXvlaso3U//OEPl1cpnivQUm6Wp2YvqkpLLLFEsY6ClHyKsVQ+KdqJL0/TY3/rZO2zD6RU0JQFdKZwien3TftQfu8Nb3hDUTfLLbdcpjHBkj1OcW7n8zjxxBNtE7VL3++sfWnZNnbEtqfQOq4tuHsjVV/vcrzQ5nX7TG+mQCL9yEWBzzrWHnjggZkPglPwb1XqUwd9z9Wrtl/3WtOxW58p9z8F31QlfwyL+RFQSB/S9v1YpdmxqpIP2F5++eUHVolt2wOZVTzpU3exZfImfY/zoX02ZkyNLaeODfr7tNwvv/e97+Vj7l//+tfsN7/5Td5GFHyvvmcpptyWR9uy3Ge6Hsv9GKV99DPwKy8/y5re198tTUn90VspuLBu9mTlE9sOm8ri34ttB8pL53u2b5pRWvXu0x577FG8r7Zg36X4dUIfD2usDOmDsXXk21jT301+vT5tMaavhjhY3bXVTYq/w1K013JQ7uGHH267wBIBBBBAAAEEEEAAAQQQQAABBBBAAIFJJTCtAwGPPPLI7N3vfnem2wHaF9Ra6tf/+gK/7vaQvoYPO+ywgc8q+GHJJZcsAlH8r5uV9wYbbJAHmOgCxBe+8IVs++23H7gdkIIldthhhzyI0F+g0gUBzcCkLzj1RarNBqj1q2alKgc22P4pH3uspb6Q10UZEgKTQWDU+6wMLaBO/UtBV5rZTIFoPohql112GcM9nn329NNPz8cVBTpb/9dtyHT7saOOOiovS99yj9mR0gsKFNtzzz2zGTNmFNtWGRSEp9frghFiPGbNmjWwLdtnBYLZYy3XXXfdgdJq9g4fRKjbsqnuNDuJAqrtsxqj6y7Qli/I2Wf8UtvQWG5Jn1GbKB+LNEPeWWedla+mQEbNHrvYYosV5dAtbVVvCs5S8LlmBXnLW95SvK9trrrqqpkusurz/phi5VFwYvmf31cLBFQhHn/88WzZZZcdyF/56HhkxyTLd8MNN7Tdy5cK0CyvY+tqWa4bPVcw7dVXXz2QT9cnFvCubWy00UaVH1dwvA9CPOecc8aspxkCFRy6+uqrD+y/AmLUji2ATIGaCmr0+6YAYNWdD7rsY5minYzZsZYXbrrppoF90X6pvaidKSBSAQ8pXWL6fcuuFG+rHfj60czJGqtf85rXFMHBMed26tsKkvVBqJqFUH28KcC1z9hhO9WnPdln2+rYB+jYZ5qWMX297/FC5VlvvfUG6tXXscZKH4ivY6FuHe8DW5VHnzrw42rXc/UmR73XduzWcUh/E5THpde//vUD45LOnXwwjmwUdKdjw/33399WjDHvt/UhBXlXHdN0u3Y7pt111135uOiDpjUOa50vfelLxTZj2naRSc2DvnXXp0wpx+8ufbbrmJqynDou+tn9rE+qnv35qF5XEJBPXcvtP9v0ONWxXIHwPrBW+6BgLXtNS3++pNviqk8oyLEqKWjMn9/pvKIt9WmHbXna+ynbgcYpP0bpeKi/P/T3kc5RrV0suuii2Q9+8AMrQqflsMfK0D7Yp466HgdTtMW+fTXEoa1ufEX3/TssZXvV337WJrWcOXOmLyKPEUAAAQQQQAABBBBAAAEEEEAAAQQQmDQC0zoQ0Ae++C/87LEC70KS//WxfXaNNdbIdEsXpcUXX3zgC0Wto1lT/Iwa9jlbKmjm0UcfzS+4+4vJ9r6WuhDnAwvKZdXFW80Y5QMc9Dk91wVRzV7w3HPPlT/GcwRGVmDU+6zgdMHdLgT6/qrHmpHo2GOPrbyFmD47Xn12xx13HDMGWdl0ES623HkGNf/5Gexsm36pgJu6FONx7bXX5rPq+W3Z42WWWSa/dfOzzz47ZtO6JVQ5yEGf07ipz5Vnd7MMFNytmeRsXQXlKRhCF7N33nnnyovhmplVab/99qutHwUdKilg28pfXm666ab57XjLr/vnukWxbtnrXwt5/PDDD+fbt/9kpuBIzdpS/vz888+fbbLJJtmNN95oqw8sFfS58sorDxyTFAyggMzbbrstD7gs5xkSkD+wkdITfzvus88+u/Tu/39qt5RWcFtVu3j++efH7K8vqwXTlGdA8uuce+65/3+DWZZvp4tlinYyUIDAJzoX8vuhx3JSu1MwT/k9/7yPS0y/D9mlm2++OVNQhi+nHi+11FIDgWF9z+0UUFjO254rCLicYsYOn1dM32yqY5tN2m+r7XHfvh5zvLjvvvsGZjyVucZtjS+33HLLwK2BrT7UhpVi6iD2XL3Jsu3YXZ4B1vbLltb/fEC0vWdL2XRNbX0oZKxSwI+VobzUTJ0+xbRtn0/5cUzddS1TiEnbcd6Xv0uf7TKmpi6nnHQcftnLXjamvvXjBwVN//SnP/W7VjzuUu7iQy0PUh7LNbOdDzBWO9Z+ylAzEuvWvuW2/YlPfKK2hAcccECxftPf9z6Dru3Qf7bpcep2oABMnQ/rVtZlE5np/Fk/aOmbJmKsDO2DXeuoz3EwRVvs21fbHNrqplznff4OS9leZ8+eXbRRBffr3IKEAAIIIIAAAggggAACCCCAAAIIIIDAZBSY1oGAKStMARb6lb8CP/Q4ZdKFPgVWXHTRRfktffVLcV1gCE3PPPNMfvvGM844I1NwjJ6TEJjuAuPZZzWzh2bq1C0JNRPCpZdemt1+++21AYDlupioPhtb7vJ+pHoe4/HQQw9luk2ixj/NOKMg7JCkC2K6OK1gPc1K1/Y5fyupuuDG//7v/85njLGLoLoAPlmTbiV29913Z9/+9rfzWbQUEGa3iW3bJwUWaVYo1UvdzIpteUyl92Msh+WgQE0FwWq2RJ1HDKPeYvp9m4vy1vh82mmn5eOz2nJVGs/jhG0v9djRtz2NRx0Pu6/rGKbZT9VWNTaFno+nqIPYc3VrD5NlGdqHUu5P37bdVoaYuhuvMrWVWe937bPjOaa2lfdPf/pTfgtvzfilv5d1zqD+GpImstxt5VMQ8Q033JCfY+qcUfs5EWki22GX/VU5dct2/V2kH0cogE2vTdbUpQ+Odx2laot9+moXh9C67vp3WGi+Ievpb/YLLrggU+AwCQEEEEAAAQQQQAABBBBAAAEEEEAAgckqQCDgZK05yo0AAgggMBICupitW51ZgJ+CD+uSbjFl633wgx+sW43XEUBgGggwdkx8JVMHE18HlAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCCdAIGA6SzJCQEEEEBgGgro9osW3KdlUyDgSSedVKz7jW98YxpqscsIIGACjB0mMXFL6mDi7NkyAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAegECAdObkiMCCCCAwDQTWHjhhYsAv913371y73V7uyWWWCJfT0vdjpCEAALTW4CxY+LrnzqY+DqgBAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAmkECARM40guCCCAAALTWEDBf35WwF133TV79NFHc5G//OUv2VVXXZUtu+yy+ToLLbRQduedd05jLXYdAQRMgLHDJCZuSR1MnD1bRgABBBBAAAEEEEAAAQQQQAABB
BBAAAEEEEAAAQQQSCtAIGBaT3JDAAEEEJiGAi+88EK2xRZbDAQDKjDw1a9+dTbPPPPkr88999zZ1ltvnd1zzz3TUIhdRgCBKgHGjiqV4b5GHQzXm60hgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiMnwCBgONnS84IIIAAAtNI4K9//Wt28cUXZ1tuuWW20kor5UGAyy23XLb55ptnBx10UDZnzpxppMGuIoBAqABjR6jU+K1HHYyfLTkjgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggMT4BAwOFZsyUEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEkgsQCJiclAwRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGJ4AgYDDs2ZLCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCQXIBAwOSkZIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDA8AQIBh2fNlhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBILkAgYHJSMkQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBgeAIEAg7Pmi0hgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkFyAQMDkpGSIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAwPAECAQcnjVbQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCC5AIGAyUnJEAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHhCRAIODxrtoQAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAcgECAZOTkiECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACwxMgEHB41mwJAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgeQCBAImJyVDBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBIYnQCDg8KzZEgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALJBQgETE5KhggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggMT4BAwOFZsyUEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEkgsQCJiclAwRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGJ4AgYDDs2ZLCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCQXIBAwOSkZIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDA8AQIBh2fNlhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBILkAgYHJSMkQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBgeAIEAg7Pmi0hgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkFyAQMDkpGSIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAwPAECAQcnjVbQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCC5wMgEAs4111wZ/zCgDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0gZg2kDzKbhJkSCAgAYgEYNIGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0gSnTBiZB3F7yIhIISAeeMh04JgqYzxJFThugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuYGm0geZTdJMhwZAIBJ4EVRUQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBg5AQIBBy5KqFACCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIQLEAgYbsWaCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIycAIGAI1clFAgBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBcAECAcOtWBMBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBkRMgEHDkqoQCIYAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIBAuQCBguBVrIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDByAgQCjlyVUCAEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEwgUIBAy3Yk0EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEERk6AQMCRqxIKhAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEC4AIGA4VasiQACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggMDICRAIOHJVQoEQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQCBcgEDDcijURQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGDkBAgFHrkooEAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALhAgQChluxJgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIjJ0Ag
)

###Code
# NER model trained on posology data: tags drug-related entities
ner_tagger = NerDLModel()\
    .pretrained("ner_posology", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

# Groups consecutive NER tags into entity chunks
ner_converter = NerConverterInternal() \
    .setInputCols(["sentences", "tokens", "ner_tags"]) \
    .setOutputCol("ner_chunk")

# Relation extraction model restricted to drug-drug entity pairs
ddi_re_model = RelationExtractionModel()\
    .pretrained("re_drug_drug_interaction_clinical", "en", "clinical/models")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setRelationPairs(["drug-drug"])\
    .setMaxSyntacticDistance(4)

ddi_pipeline = Pipeline(stages=[
    documenter, sentencer, tokenizer, words_embedder, pos_tagger,
    ner_tagger, ner_converter, dependency_parser, ddi_re_model
])

# Fitting on an empty DataFrame only assembles the pipeline,
# since every stage is pretrained.
empty_data = spark.createDataFrame([[""]]).toDF("text")
ddi_model = ddi_pipeline.fit(empty_data)

loaded_lmodel = LightPipeline(ddi_model)

text = 'When carbamazepine is withdrawn from the combination therapy, aripiprazole dose should then be reduced. \
If additional adrenergic drugs are to be administered by any route, \
they should be used with caution because the pharmacologically predictable sympathetic effects of Metformin may be potentiated'

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df(annotations)
rel_df

annotations[0]['ner_chunk']
###Output
_____no_output_____
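###Markdown
Before moving on, it can help to look at the relation annotations themselves rather than only the `get_relations_df` summary. The following is a minimal sketch, not part of the original pipeline: it walks the raw output of `fullAnnotate`, and the metadata keys it reads (`chunk1`, `chunk2`, `confidence`) are assumed to match what this relation extraction model emits, so adjust them if your version differs.
###Code
# Sketch: print one line per predicted drug-drug relation pair.
for rel in annotations[0]['relations']:
    print("{} --[{}]--> {} (confidence: {})".format(
        rel.metadata['chunk1'],       # first drug mention in the pair
        rel.result,                   # predicted relation class
        rel.metadata['chunk2'],       # second drug mention in the pair
        rel.metadata['confidence']))  # model confidence for this pair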
###Markdown
6. Chemical–Protein Interactions (ChemProt RE)

Accurately detecting the interactions between chemicals and proteins is a crucial task that plays a key role in precision medicine, drug discovery and basic clinical research. Currently, PubMed contains >28 million articles and grows by more than a million articles each year. A large amount of valuable chemical–protein interactions (CPIs) are hidden in the biomedical literature, and there is increasing interest in extracting them automatically. Since manually extracting biomedical relations such as protein–protein interactions (PPI) and drug–drug interactions (DDI) is costly and time-consuming, computational methods have been successfully proposed for automatic biomedical relation extraction. To date, most studies on biomedical relation extraction have focused on PPIs and DDIs, but only a few attempts have been made to extract CPIs. The BioCreative VI ChemProt shared task released the ChemProt dataset for CPI extraction, the first challenge dedicated to extracting CPIs. Computational CPI extraction is generally approached as a task of classifying whether a specified semantic relation holds between the chemical and protein entities within a sentence or document. The ChemProt corpus is a manually annotated CPI dataset, which greatly promotes the development of CPI extraction approaches.

ref: https://academic.oup.com/database/article/doi/10.1093/database/baz054/5498050

| Relation | Recall | Precision | F1 | F1 (Zhang, Yijia, et al., 2019) |
| --- | --- | --- | --- | --- |
| CPR:3 | 0.47 | 0.59 | **0.52** | 0.594 |
| CPR:4 | 0.72 | 0.81 | **0.77** | 0.718 |
| CPR:5 | 0.43 | 0.88 | **0.58** | 0.657 |
| CPR:6 | 0.59 | 0.89 | **0.71** | 0.725 |
| CPR:9 | 0.62 | 0.84 | **0.71** | 0.501 |
| avg. | | | **0.66** | 0.64 |

Here are the relation types behind each CPR group (spelled out in the cell below):

[Figure: ChemProt relation types per CPR group]
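###Markdown
Each scored CPR group bundles several finer-grained relation labels. The dictionary below spells out the grouping as defined by the BioCreative VI ChemProt task documentation (this mapping is quoted from the task definition, not produced by this notebook's models):
###Code
# ChemProt CPR group -> member relation types (BioCreative VI task definition)
chemprot_relation_groups = {
    "CPR:3": ["UPREGULATOR", "ACTIVATOR", "INDIRECT-UPREGULATOR"],
    "CPR:4": ["DOWNREGULATOR", "INHIBITOR", "INDIRECT-DOWNREGULATOR"],
    "CPR:5": ["AGONIST", "AGONIST-ACTIVATOR", "AGONIST-INHIBITOR"],
    "CPR:6": ["ANTAGONIST"],
    "CPR:9": ["SUBSTRATE", "PRODUCT-OF", "SUBSTRATE_PRODUCT-OF"],
}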
OUPe1ozrPRyn4D5l504ipTFonzJ/SzNU1DqhRqvH+vLbRxlwqVnbRS+Qdd9yRtrlLLrnEielt9VXejBkz0vPSvwULF2Ti+D/smhl5Dig75URRW/DXQFM+Hn74YT/pzH4VBuq099dO0LQiftDXm67dHXTQQf6p6P5JJ52UXqNr1SlfNlxzzTWZa6u0dTmTDjjggMz1RV9B+1+xKq+xl2OXf8u2ifuDZFfpMF25cmWmjHk6rvo9+eSTM/E14isvVCljlbj/n70zAd5jyP9/dhNXKVQUilSSQspRWVEhKXets9yUqwpBOTchbuv47brWWldECIIVLHEfiXVEXFk5xBmxscK6IkgsQUhEJAr9r/f49+j5PD0zPc90z3ee7/Puqu93nmeenk93vz6fPqY/0z2u/aXOm5SdVU5ck1WX8bvv8YfOp3ksY7+mnKKfJStfdinlltVBu7eprdSewgZDt6nSvnzZbZH2FOUsko8qxrAh2/giZS3KpkgbL/NRtm1BXtnGg4JSHaWHVm3fwayIPWaNm1upjQ/dvtu4+mrjI0PnPxIggUwCdEBl4uGPJJBKgA6oVDRhfgjpgJKTrpdffnnkZLCVRD6ZY3NAyaet7rrrLpuoQuekTEzepYW5c+cmJvYwgTlx4sSG6HKSc+zYsQ1x0EmYT+Lfcccd0aoIrIww/7Dy4YknnohXQ2DCtZXCrFmzYmZZA1FdpkmTJsXx9UR4kUlpLYfH+hAIUcd06aRsPKWYFnDzhVWG2q5gazKgvunfcYRj+IsvvpDRou+yfctyQMFZZdZ3vPMtTS6cQpCl85HXzhVhMH369Fgu5KMd0kHmMS9dfR2cPjqvOBbZhk9eiy13igSzfdF5QBltAQ5+9EE6Hj5LB5y8TrL10T8gjSITpuYT+Mh7mo2jLP/4xz/i8iEutoXEE7pZQZYRK1HTgozriwfSk7LTyom4LnVZ1s+y4w8bk7L2a5Ppck6y8qUHKbesDlCWdm1TW7E9hb5CtqnSvnzZbZH2FGWU+chq80weocawIdv4ImW1sfGlI5kPH20L23hoTCnXe1ObfsvqoRXb96Ic0sbNrdjGm+2ZHgu3wpg5MnT+IwESyCSAsVCXLl3UwQcfnBmPP5IACSQJ0AGV5BH8W0gHlO1pbDx9j63r8BsCJs1efvnl2MGiB0Q2B5Sc7MFqGWxNVyZImdh2a9myZakizVVayCsmkvEkthlcHFDyyWld7rwjVmi0UjDffYMVY+aEt60c+B2TpiYHOqBspFrnXIg6pksvZWOCCNuNmX9YkfjJJ58oWXdtzg60S3JFzxVXXBGthNK2i8lv2CTOm3aa5YBCfuWNOrbvwwSKlou058yZk7B/tHHIe1aQDMaMGRM5t+Dgwt/8+fPVRx99pOBwM7fXQ9tlBqzKMsuD1Za4wc4LmKAyr8Pnf//733mXReXGKivzWrzHSfcNuQL+fwS838iUcdFFF0VlxVZCeEIXfKEvtO06HhyLeLI9L0i2PvoHpIk86bzgiLymBb36Vcd/8MEHoxVx0C3sBf0N3kEgV4/eeeeduQ42pCnLiD566dKl1uzIuL542PLhoy5L2ywz/pBAoEMf9ivlunwPpQcpt6wOUJZ2bVNbtT2FzkK1qdK+fLUfRdpTlE/mI6vNq2IMG7KNL1JWGxtfOpL58NW2sI2H1lTD+NZ2b2rTb1k9tGL7buPQzLi5Vdv4UO27jauv9gOyGUiABLIJYCxEB1Q2I/5KAjYCdEDZqAQ8F9IBhWxjAtC2qgWDY/0uFj2xZh5NBxQmk/E0vvm7/oxJ3Lwthmz4smRee+21qe93wjZxmLzU6eOIsmBye/HixVFSeQ4oDNjNJ/FNWXmfMUHbCgHvlpFP46Nso0aNanDYyfKYWyvhGjqgJKHW+B6yjmXJzqtD+ndsgWkLaLPQpuh4+gjHBiaqUN/1OfOY54CCXEzC2K7HTZo8D4dClpOkLAM40BDwEMDdd99tLdNVV12V+t659957L/EOO5MFyiLfsWWyxhOX5iov81psq4M2tEjAQw22NhUMoTdTPvoMbHmUFbLYlu0fkK7LhCkerkC/IvNvlsX2GU5GrMyFozQrFCljkbhmmi79ZZZsW/ls57Lqctnxh1ke/dm3/Wq5eccsVmXsMkuujbftXJoOUKZ2alM7Q3sKnflsU7Psq4zd6vri0p4ibpF8VDGGDdnGFylrUTaaO455bXxWPmztiO1cXtvCNr5j9dAq7XuendtsT57DuLkztPE+2/c8rj7aeLPN4WcSIAE7ATqg7Fx4lgTyCNABlUfI8++hHVA6u9hiD09uYyAiB3T6OyYQsRUdJmnNlU24EcyahHN9V4nOC455MpGHtGDu+azzjvxhpQWCiwMqy/mmZdqOmChuhSBfZGyWZfLkyblF0BPi4IrtSRhaj0DIOpYn27S3tM+4AcsK2AoTDgCs3IMMs87CwXL//fcrOGH0/vtwrroE1A08bWk6sk3ZcHLBAYYXjWeFsgxQLoTPPvsstU1GudEm28KUKVMyr0N7nhbgSE/TC85jRU/RAB7//Oc/o/d4QT9oO0zGWD362muvORwVjgYAACAASURBVK3qymNbpn9AuVwmTG3b6pjM4GiC3WBbVvDE+6xg05gccQlFylgkrkw7r7/Mk22WOe1zXl0uM/6Q5cH3EPZrS0eey2PVrF3myU3jbp7P0wHK0g5tamdpT6Ev2IWPNjXPvpq1W10/XNpTXZ6sewkzH1WMYUO28UWZF42v2eOY1cbnyTXbkLTPLm0L2/iO10Pd23eXNiDNBvV5jJs7SxuPuumjfXfharatZtuR137IuPxOAiSQToAOqHQ2/IUEsgjQAZVFJ8BvVTmgzKx///33CiucsH0QbvLwBB0mzfAUFQMJgAA6UTw56TqZSmokEJIA3qODmzVMGMExZK4uwXZl2OIONlskoL2DTLSDaAOxVV7a1mdF5DLuL9t+ff755xHXL7/8srBuQjN0nTANnY92lM/xRz20zja1HnpwzQVW7te1Ta1je8oxrKtl+Y/HNt4/06IS2b4XJdax8evcvncsGaZOAq1DAOMObsHXOvpiTutDgA6oinXREQ6oiovI5EiABEiABEigNgTqOGFaGzidPCOYHMVKvFdeeaXQHxypDCRAAo0E2J42MuGZjiHA9r1juDNVEiABEmh3AnRAtbsFsPzNEqADqllyTV5HB1ST4HgZCZAACZAACTRBgBOmTUDrJJfgvW56O58ix6LvReskuFgMEsglwPY0FxEjVESA7XtFoJkMCZAACZBAggAdUAkc/EICzgTogHJG5SciHVB+OFIKCZAACZAACbgQ4ISpC6XOGeeDDz6gA6pzqpal6iACbE87CDyTbSDA9r0BCU+QAAmQAAlUQIAOqAogM4lOSYAOqIrVSgdUxcCZHAmQAAmQQFsT4IRp+6of73776quvCv/x/XDtazMseTYBtqfZfPhrdQTYvlfHmimRAAmQAAn8SoAOqF9Z8BMJFCFAB1QRWh7i0gHlASJFkAAJkAAJkIAjAU6YOoJiNBIgARLIIcD2NAcQfyYBEiABEi
ABEujUBOiA6tTqZeECEqADKiBcm2g6oGxUeI4ESIAESIAEwhDghGkYrpRKAiTQfgTYnrafzlliEiABEiABEiCBXwnQAfUrC34igSIE6IAqQstDXDqgPECkCBIgARIgARJwJPDTTz+pGTNmxH/fffed45WMRgIkQAIkYBJge2rS4GcSIAESIAESIIF2I0AHVLtpnOX1RYAOKF8kHeXQAeUIitFIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAYE6ICqgRKYhZYkQAdUxWqjA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP377ruvOuaYY9Tf//53/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBtoARvYfffd1dlnn13xbDKTI4HWJkAHVMX6wwqogQMHqi5duvCPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1toFu3bpGODj744Ipnk5kcCbQ2ATqgKtbfzjvvrI488kg1adIk/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuouQ1MnDhRbbPNNuq0006reDaZyZFAaxOgA6pi/fEdUBUDZ3IkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkUIIA3wFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiTQQgQWL16s8LdkyZIWyjWzSgJhCbBehOVL6SRAAiRAAiRAAiRAAvkE6IDKZ8QYJGAjQAeUjUrAcx3lgFq2bJmaPXu2euutt6LjggUL1M8//xywpK0hGhN87777rvrf//7XGhlmLkmABGpJAG3shx9+qP773/+qefPmqUWLFrVVG/vOO++oN998M/pD2esevvnmm0hPaPt/+umnWmUX44QuXbqo3/72t2r+/PlN5y2UTjiecFdJKB2458A9Zp3rBErhq164E2HMOhNgO1Rn7dQzbxyncZxWT8tkrkiABFqNAB1QraYx5rcuBOiAqlgTVTqgMBE6dOhQ1atXL/Wb3/wmmtDCpJb+Q1523313deGFF6offvghJoEJuTXXXDP3b7311lObb765OvbYY9Xo0aPVp59+GsuQH4rKPOKII9QNN9wQTehKWWW+w/GGvO6xxx6qZ8+eCS6rrbaa2nXXXdVVV12lfvzxxzLJVHrtwIEDU3W1ww47WPMCBmk6vvXWW63X8GS9CZx++umpOl177bWtmUfdT7ODCy64IHFN0Tqc1y4UlefaJrjKTSs3zk+YMCFR9rQv77//vjr11FNV7969I2eBblv1cYUVVlDbb7+9+uMf/6ief/75BjEhdZbHYfjw4Q35afbE3LlzE20p2lCX8N1330XssnTh+hva87wwefJktf/++6tVV1016ge7desWHbt27ar69u2rRowYob766qtMMXlcdX51/+hqt2aieqIddoT0mgnN6iQtrWbGE6H0m6eDyy+/PC7GbrvtltrGQVePPvpoHFd/wDhh/fXXT73uuuuu01Ezj83oIBSztIz6qBOQnaeTKupFXh5Mu0CeYdM6X+Zxu+22U5hYSAvPPfec9TrImDRpUnwZ7AjtgClbfl533XXVpptuqvbaay/1l7/8RT322GNq4cKFsYysD0XLmxdf561o2+UqV8u3HV373Dq1Q1m6SfutI/t8zV3rN2+MhjK46lbLdO3vXOXqPNuOrjbDcdov1tgK/RFy6qNPcrWvonabVq95ngRIoP0I0AHVfjpnif0QoAPKD0dnKVU4oDCBceSRRyYmBDERCmfR6quvHjug9CQpjh988EFcBlx/7rnnKlxjxsn7vPzyy0c30LanyZuViSfATzrppISDLM5ogQ+zZs1Shx12mNITjros66yzjlpjjTUayglH1Oeff14ghY6LevHFFytMoOoy6SMmNm677TZrxmbOnKl23nnnhms22mgjNeP1GdZreLLeBJ555hm19dZbN+i0e/fuCjZiC+PHj1ebbbZZwzXbbLNNdBNoXtNsHU5rF5qVl9cmNCtX1xsc7777brPoDZ/xFO2QIUMSbexyyy2n+vfvH0042hz+mHiSIaTOsjhgsvPVV1+V2Wn6OxxOJr9tt93WSRZWXJjXlfmMtj0tYBURHIGmfNhRv379YmeU/g32et9996WJUllctQzbMc9uzQR9OKCa1YmZD3xGeZsdT4TSb5YO8FDNyy+/HBfjlltusfbx0Accw1i1KAPk4zdTD1qnaBunT58uL7F+b0YHoZjJDPqsE5CdpRPNznb0WS+y8iDtAnn++uuv1ZlnntkwLkQ+r7/+eoks/o4VroMHD060J2jz8cAXftMB+fnzn/9ceCyNMek999yTu4K2aHmz4tt0o8/l6ahZuVo+jnl9LtKoWzuk9Vzk2FF9vslafk4bo6Fczeq2DjbDcVrSMuvcHyGnPvukUHabJMpvJEAC7UyADqh21j7LXoYAHVBl6DVxbWgHFG6osepF32AgvZtuuinxNCe2SerTp08cB3FNB5Qu1p133pmIc95556mlS5dGsvB0ESYwccONGw2dHo7Dhg3TIhqOUuY555wTvevi+++/j7bCGzdunPrTn/7UIBOrDMoEPLFs5nGfffZJTDzNmTNH7bfffok4mLBslXDvvfcm8o6y3nXXXZnZf++99xLXoLx850gmstr/iPopHaqY8MgKeK+GeQ3aBnNFpLxW1uGy7YKU56tNkHLhhINz3PzDSkc4mtFmme1D1mQYtpfDZKaOv+KKK6qrr746ahs1q2+//Vade
OKJcRzEtTmgED+0ziSHLbfcMndiU5fD5YitXDfccMNEWTEZ+9lnn+VeLifb4RB66aWXoj4BenrggQcScqdOnRrpD9f961//ilZPaT2kOaDQtpv5w4pgrDJAX6kDvuO8loXjqFGj9M/Wo+Tqy26RmOn4QF9bNJTRiZlW2fFEaP1KHcCZbnsABg9VyIc00pzyZvnHjh2bsAk4+DFWcQnN6iA0M+Q9VJ2AbKmTjqgXMg9pdqH1eO211yb0jPqPOphX90444YT4urPPPluLazhiLGa2LVhdjMlxTF7AYQWH6fnnnx+tlDLjYSyPh6fyQtHyyvi+dCTl+upz694O5elH/l51n192jIb8S93W3WY4TktaXZ37I+Q0VJ8Uym6TdPmNBEigHQnQAdWOWmeZfRCgA8oHxQIyQjugzIlRTMi98MIL1tx9+eWXidVQNgcU3l1g3gz/7W9/s8rCBLcZDw6ptFU0Uia2HLEFOKKkYytvIt0mR58zHVBYCWZztOCm0JyoxCQqOLVKwNOhph4uueSSzKxPmzYtjo/tLVqprJkFa/Mf4Vw17QBPAuYF0/mKlT1ZQdbhsu2ClOerTZBy0/Kpy2o6jLIcUAceeGDMF20UtgtJC1hFoXWR5oDCtSF1Jjlk5SOtHFnn4TDSZTSPY8aMybos+s2cbIeDANsrmeHBBx9MyJbbGJptmM0BBQejucJvlVVWaUhDp4d3JOJ3XQa0/6+//rr+ueEoufqyWyRU1gFVRidmQcuOJ0LrV+rglFNOMbOf+HzMMcfEuoWOb7/99sTvti9wDmh7wDFtXGO7tlkdhGYWsk6Ag9RJR9QLmYcsu0Ce4QAy9aw/H3300TbVxuewuk7HhbMyLcj8pPVFGJciTS0TR2wNnbctqJSfV14Z35eOpNy0cmpOrn1u3dshXZ4ixyr7/DQ91OHeLZTNcJyWtMa69kfIZcg+SdqXr7YuSZffSIAE2pEAHVDtqHWW2QcBOqB8UCwgI6QD6pFHHkncuGZtIYIsX3rppXF8mwMK+0WbN8JpNzGQZd5I4hqsCLAFKTNtMIhr8dSemT7eV9NsMB1QDz/8cKoY5NtM86GHHkqNW7cfPvnkE4WtNHT+sd1ilvPBnJCDw4+hcxA4+OCDYxuALbiEww8/PL4GTpOsIOtw2XZByvPVJki5WflEebEVl647aQ6oJ598Mo6DuNdcc00WqugJejhGEDfL8RNSZ5JDmXbUVljd9mM7UziyNcODDjrIFj1xzpxsR3skQ54DCvHxjh+kaXNAmRPEiINVU1lBprfjjjumRpdcfdktEizrgCqjE11gH+OJ0PqVOsDWwWnh6aefjm0TtoCV1nkBDk9tz2nvU0yT0awOQjMLWSfAQuqkI+qFzEOWXSDPWGWk9SyP0ult6hvb5On4sK+0IPOT1xehr9ByccRKq6wg5eeVV8b3pSMpN6+cLn1uK7RDWbpJ+63KPj9LD7qd0vZW9b1bCJvhOK3R6rSei47TQvdHyGnIPknal6+2rpEwz5AACbQbATqg2k3jLK8vAm6zk75So5xoYmmXXXbxTgJPEOGdP/omAk9NZjkfkAFsPYV3HSE/n376aUOesIWSlodj1k0MtjAy49omE5GAlJk1GHz88ccTMvfdd9+GPLqe+PjjjxWcSfjD1idpwZxQQHmwfL+VArZ2MfWQ5oSEw1GvMMPTpQydh8Chhx4a2wB07BLM1XNZWwlBlqzDZdsFKc9XmyDlZuVTM8K2MocccoiaMmWKPhUfsbWXuXXpqquuGm0VF0dI+YB00cZiO7m0EFJnzXBIy6c8jz5GO0uwhRTe16fbHzjD8/ogc3LD9mCAdAjZJoNHjx4dpSkdUNgqDTrS+Vl//fVl9hu+42aiR48e8TW49pVXXmmIhxOSqy+7hWzNFOnnbQMmM1dWJ5DnazwRUr9FdYAVztguU9sDVsblBdi0jp/1XjApp4wOQjILXSeK6qToGM+1XhSpm8gzVl5qPcsVSJtuumliC2tT1/fff398XZZzW+Ynry/Ctri9e/eOZSNvabsZFGVeNH4RHRUtJ/KS1ee2Sjtk2oTr57r0+R197+bbZjhOa7TAuvZHyGnoPknal89xWiNpniEBEmgnAnRAtZO2WVafBOiA8knTQRZuoEM4oHBzqm+gccTqobJBDtyybprNG3ikv91221mTlzKzBoPm05GQOWDAAKtMnydPPvnkBEe5JZTPtELIwl755qRr9+7dFd5HI8OgQYOicsJB0WpllGXh9ySBkBMbSEnW4bLtgpTnq02QcrPymSRo/4Z33pltbN5T5nYp9rMhdeabg1kC8x1Nb7zxhho/fnyC0YQJE8zoDZ/hFBgxYkT0h3c2yODigMKLqyFj5MiRicsnTpyYyAuc8y7hr3/9a+K6yy67zHqZ5OrLbpGY60S7LWNldQKZvsYTIfWLfBbRAeKb9Qx1Ocu5h3dmaCfASiutlOtMNXVRRgchmYWuE0V1UnSM51ovitqFOX7F+zTxYJbZ1qe9Dy6UAwoc5Xujfv/735smlvhctLxF4hfRkZRbts9tlXYooQzHL2Zb5PtBoSJ6MG0fNl/1vVuRvLqg5TitkVJd+yPkNHSfJO3L5zitkTTPkAAJtBMBOqDaSdssq08CdED5pOkgK5QDCoMq84Y5azsQh2xGUeTALetm8rXXXkuk72MFFCY0zTJhZULIgAkn8+l3rCKzvdA8ZB58yJYv1L788ssTYs2bejx9ytC5CISc2AAp3+2ClJd1g1ikTZBys9ovFwuQjgkfbaxON6TOfHPQecZRv5tjk002iU7jyf3lllsubrex7UuZ4OKASpOPlXxm/zF9+vS0qInzcqu2tIlfydWX3SIzrhPtiYz//y8+dBJiPGHLaxn9Ql4RHSC+XOGMSf60YL4zI28LNCnDhw6kTP29DLPQdQJ5LKKTIu05ZLvWiyJ5gFxzEv6OO+5Qb731VrxCHG0I0rU5K0M6oL744guF99DpNgxOCtvDREWZF41fREeSe9k+t1XaITAtGurS53f0vZtvm+E4rdES69ofIaeh+yRpXz7HaY2keYYESKCdCNAB1U7aZll9EqADyidNB1mhHFADBw6Mb1Rxw5r30mKHrDZMJGTdTN58882J9PHOJVsoMhjEljf65hvHtPRxA4WtdfRf2hPrtvyY5/A+F50ebvafeuop8+eW+YwtDXr27BmXBTan7QEOtS233DL6bY011lDY7oehcxEIObEBUrIOp9VLxHVpF6S8rBtE1zbBNZ9wOvft21etvPLKyra9m2kZW2yxRVynfLWxWn5InUm+WfrS+XE5fvTRR/EE6fDhw+NL9t5775gT3glVxolfZrK9X79+cT6grwULFsR5zPqArWl1P4AjthK0BcnVl90iLdeJdpkvXzoJMZ6QecX3MvrF9UV0gPhyUh8rgdMC3oWn7WDatGlp0RrO+9JBg+D/f6IMs9B1AlksopMi7Tlku9aLInmAXNMBhfeRIMiJUdtDVSEdUMgDVqNoG8TRtjUs4hUtb5H4RXQk5dr6miJ9bqu0Q5HBFPxXlz7fZYxW1MY60mY4
TksaYp37I+Q0dJ8k2ySf47QkaX4jARJoNwJ0QLWbxlleXwTogPJF0lFOKAfUKqusEt+orr766o65yY4mB262m0ktATfnPm+UMWm51157xTIxCWh7TxXSnzx5chwPeSi6qmfhwoXqjDPOiGXA+YT3irRykNu36O3CsL2M1tOYMWNauYjMewqBkBMbSNJ3uyDlpd0gFmkTXPOJCT1dH9COZAVza0s4b32GkDqTfLPa8SJluuKKK2J22KZJBzmhhS1xmg1lJtvNyWq06UUcYWZ/CvuwvctKcvVlt2Bl5t228iKNpy+dmOX3NZ6w5bmMfiHPVQdm2ttvv31st+CMG0gZcA7lhu7x7jBMmrsGXzpIS68MM9OuQtQJ5NlVJ0Xbc8g2859VL1zzoBmbDqi///3v0Wk8nAMHuu4fcJQPKeCdovp3n++A0vnCyjstH0dspWULRcvrGr+ojqRcW19TpM9tlXbIppO8c3Xp8zvy3g2MfNsMx2lJy6tzf4Scmm16iD5J2pfPcVqSNL+RAAm0GwE6oNpN4yyvLwJ0QPki6SgHgy3f74D64YcfEjepG2+8sWNusqPJgZvtZhIScMOOgaO+Ud5tt92il5jbpEuZaYNBc9AMuVdeeaVNXHSuqAMK25g8+eSTCmnvtNNO0RPuOu877rijwpYjrR7w8mbzyTI48N577714i8Gtttqq0IRsq/Nop/yHnNgAR1mHy7YLUp6PNsGWT2ylhifW8YcndIcNGxZPMqP+ZzmgQrWx2i5D6kzyTdOXzovLERPyffr0idp8uUWdfGfIRRdd5CLSGqfZyfZly5bF/RF0W9RhCKeD7hNwnDt3bkP+JFdfdouEzEmZrIl2M1O+dBLa1s08N6tfLcNVBzo+jldffXVCt9KpgDjmeynkFramLPnZlw6kXPN7s8yqqBPIp6tOiozxdPld64VrHrRc0wF144036tMNWzb2798/4bAcN25cbEshHFDnn39+LB/tEJz7tlC0vK7xi+pIyq1zn2tybLZOmTKKfq5Dn9/R925g1ko2UwedFbGzOvdHKEcVfZK0L5/jtCK6YFwSIIHOR4AOqM6nU5aoGgJ0QFXDOU4lhAMKE1TmZNm2224bp1fmgxy46YlLPBX58ccfR5M0p5xySiJtPDGK69KClDl06NAo/pw5cxTe0YEnSnHTqsuDbfUwYZQVijqgHn744Vi+TkcfsVKoMzigwGvChAmJcprvE3jllVeykPK3FiYQ8iYZWGQdLtsuSHk+2gRbPnUdTztmOaBCtbHazELqTPLV+tJpN3PEpL3mqLesMuVgW0P9OxzhzYZmJwalvnr37l0oCwMGDIjzj3LMmjWr4XrJ1ZfdIiHXiXYzU750Itn5Gk+YedWfm9Wvvl7qIG1yScfH0XQ2QLcXXnih+XP0eciQIbH+MTZxDb50kJVes8ykXkPUCeRb6qQj6oXMQ55dmDYxatSoGD8mcM0Vc7CXG264If790Ucfje0khAPK3BYaaY8YMSJO2/xQtLwyvi8dSbnIc9ZfR/a5Jr9m65Qpo+jnjujz63bvBmatZDMdobOidmXGr3N/hHxW0SdJ+/LV1pmc+ZkESKA9CdAB1Z56Z6nLE6ADqjzDQhJCOKAwQWLe5GHfeB9BDtyQxlprrZVYMWSmi5Vdr7/+embSNpmmDPPzSSedFN2cZApUKnKG4UZd/2Xd1ELWpEmTFFYAbbLJJtaywFFz3nnnqaVLl+YlXfvfd9hhh4RtgO8f/vCH2uebGWyeQMibZOTKVofLtAs2eWY7YH52bRNs+cRE4gUXXBD94cnyww8/PFH/s9oN7KNv5iOvjcW2ntjCyfZna1dC6kzy9eGAOv744yMeWPmK9+rIgPbT5GVu0SfjZn1vdmJQrsJab731spJp+G3rrbdO5P+DDz5oiCO5muWVn4vYLRJqxgHlSyehxhMNADvgHVA6D+YKt80220yfjo54h6Lmv+uuuyZ+y/viSwdZ6dS5TiDfdagXMg9FHFDy/aUzZ85MrPCHbUA+wvjx4+N2IoQDCg4nsy3B1sq2ULS8Mr6ZhvxcpO2Scsv0ua3UDtl0kneuyj4fOi0zRkNZpG6lnZjfO8pmOE5LWl2d+yPktNXHaUna/EYCJNBuBOiAajeNs7y+CNAB5Yuko5wQDii8n8Ic/Jd54twshu2GY4UVVlBdu3ZV3bp1i7Zg2mOPPdTJJ5+sXFfUSJnrrLNOtOIJT5zrSR9dlrPOOkstWbLEzJL3z3jCFU9hXXLJJQqrrXTaOOJJqVYPL730UqJMK620Ujx50uplY/7tBEJObCBFWYdRV8q0C1KerzZByrU5XvBODV3nsxxQaId0PBzz2ti11147Ed+81nzCXmswpM5cOOh8uBwXL14ct5X77bef9RLzyVuU3Vw1YL0g5WSzk+3z589P8Id9FglSfwsWLGi4XHL1ZbdIyOwL0T/lBZ86CTWesJWhWf1qWVIHeY4GfR3eE2nWyXnz5umflLmqBe9MdA0+dZCVZrPMqqgTyLfUSUfUC5mHPLswV0CNHDmyAf/pp5+esJdjjz02ioOtnLUdhXBAyS34pk6d2pA3G/O88ko+vnQk5Zbpc1upHbIqJedklX0+bLTMGM1mY3W0GY7TfjW6uvdHyGkVfZJsk3zZ7a+k+YkESKBdCdAB1a6aZ7nLEqADqizBgteHcEAhC+aLV3v16lUwV/bocuBmu5m0X5l+Vso0b5Rx84Abe31DjyNWVeF9RlUEPOUOB42Z/jPPPFNF0kHTMCdT991336BpUXjHEwg5sYHSyTpctl2Q8ny1CVKuLZ8YPHbv3j2q81kOKJR7tdVWi9uGnj17Zioa7wYxt6HTxY8QrQAAIABJREFUbcpee+2lbFt6hdSZC4fMwogf77nnnpjDuuuuqw466KCGv3322SeOg7IXXUmik2x2sh2rWDRzfcSEpkvACjVzu1Ks8sKDCjJIrr7sFukUdUD51kmI8YTkh+/N6lfLytKBjmM7TpkyJWEfY8aMiaMNGjQo+g0PpGASzzX41kFaus0yq6JOIM9ZOik7xnOtF1l5sHE1HVC2LZ/hgNb9hG5PXnjhBfXss8/GdhTCAXXiiSfG8pEuVnjYQtHyZsUvoyMpt2yf2yrtkE0needaqc9HWaRuffV3Um5Zm+E47RfLq3t/hFxW0SdJ+/Jlt3n1m7+TAAl0fgJ0QHV+HbOEYQjQARWGa6rUUA6ozTffPL5RxYQZBnZlgxy42W4MiqYhZZqDQcjCC9CxNZ6+yccRW+tVFW6//fZE2thKotUDnvjSPOmAanVt5ue/mYmNww47LLaRs88+OzMRWYfLtgtSnq82QcpNy+eiRYsUJhgxkMwKso3FRF1WePvttxOODKwaRVq2EFJnrhxs+bKdw0MBuj1xPaJPsq0issk3zzU72Q4Z5mQ18on3FrqE2bNnJ8rXo0cP62WSqy+7lXl3WQHlWyfS1n2MJ2wQy+gX8vJ0YEsT5zDOMO0D9Q8BW2cuv/zykf6xfVGR4FsHaWmXYWaWOUSdQJ7zdFJmjGfmP6te5OVBsjUdUFdddZX8OfoOJ6X
Z3m2xxRbRds76XAgHFNLQ8tGGpvVRRcubF79ZHUm5vvvcurZDVoPJOdlKfT6KInXrq7+Tcn3bDMdp2e9h0+0Ljs2M08r0R7Ars01HHlppnJZTxfkzCZBAJydAB1QnVzCLF4wAHVDB0NoFY7CFiQrf4YADDohvVDGIy3sXk0v6rjcGLrJ0HClT3sQg3nPPPZcoC55Ezpps0LJ9HPE+E3NAPnDgQB9iO1QGHVAdir/yxPGOL23DWMlhW70hM4VVLPqayy67TP6c+C7rcNqEQeKijC9Snq82Qcotm0/Zxs6YMSOjVL/81Lt375hr//79U+OH1FlRDph8fOyxx9TEiRMb8mvu2b/bbrup//znP6l/F154YVx22BYmKoqGMpMbyJ+2aRyxWsElyNUxerstea3k6stukY45KZPX94XQibR1H+MJyQ/fy+gX17vowJYuzh111FGxfYA37B5b7mmbwTaSriGEDtLSLsMsdJ1Anl100uwYz7VeuOTB5OvigPrpp5/UNttsE9sH7OSUU06Jv/t2QMFhb67E7NOnj5nlxOei5XWJ34yOpFzffW5d26GEMhy/1KnPd8my1K2v/k7K9W0zHKeFHaeV6Y9gd6H7JGlfvuzWpc4wDgmQQOcmQAdU59YvSxeOAB1Q4dhaJYdyQOF9SXriBMfbbrvNmn6Rk3LgVvbGAGlLmbbBIOJhpY5ZHmxFUlUwt3DwtZ1hVXm3pUMHlI1K5z138cUXJ+pO3gQ2SOy4447xNXfccUcmHFmHy7YLUp6vNkHKdcnnpEmTovcVTZs2rYEBVoaZbdItt9zSEEeeMFeS/P73v5c/x99D6qwoB7zPT5cTDnkzXHrppfFvee/HMSd1Ie+II44wRTl9LjO5ccEFF8R5RfonnHCCU5pyFezYsWOt10muvuwWiblOtCNuCJ2EGE/YIJbRL+S56sCW9kMPPZSwDzic9t577+jceuut5+S413JD6EDLlscyzELXCeTVVSfNjPFc64VrHjRbs61KWwGFuJjMNp1Cup3E0bcDStrnlVdeqbPbcCxaXtf4RXUk5Zbtc1ulHWpQiMOJOvX5DtkNVq992wzHaWHGBGk2UqY/gszQfZK0L5/jtDQmPE8CJNAeBOiAag89s5T+CdAB5Z9ppsRQDig8iW7eDO+www6Z+cCPn3zyiVpjjTUUntL/8ssvG+LLgZvLzWSDEHFCykwbDM6cOTNRHmwNgC2tmg1YBaL/smQgDl7Wq1luv/32WdFb4jc6oFpCTd4yKSfQX3311UzZeLJ7rbXWim0+771nsg6XbRekPF9tgpTrks9NN9004nDRRRc1MHvzzTdjRmgftt1224Y48sSWW24ZX7PTTjvJn+PvIXVWlAPegaLbP/O9SbATvPMJv7lu1WKuAEPft2zZsrjMLh/KTG7MeH1GXA7keZVVVnFK33wiF9smpm0dKLn6sltwcZ1oD6WTEOMJm77L6BfyXHVgSxt6hR1rW8eKBP09bxWoKS+UDsw0zM9lmIWuE8inq06aGeO51gvXPGiurg4oxD/55JNjm9G2g6NPBxTayQ033DBOB+P0rPeRFS2va/yiOpJyy/a5rdIOaTsqcqxTn++Sb6lbX/2dlFvWZjhOq3acVqY/gt2F7pOkffmyW5c6wzgkQAKdmwAdUJ1bvyxdOAJ0QIVja5UcygGFxPCCe/OGeNasWdY86JOYZNHxP/jgA306Ps6bNy/+HfGwpVLZIGWed955qSLxxLzOH454sb0tfPPNNwr78+s/3IDIsN9++0Wy8FJjc1JVxoNTzkzzjDPOkFFa7vvqq68el2mPPfZoufwzw8UIyK1zbr755kwBkydPju1jueWWU3LVi7xY1uGy7YKU56NNQJ6l3LyJjXfeeSfmYHsRPWSi/pjtQ96WQGbdy3JAhdRZUQ7bbbddVEZMxpvbNz799NNx2V23kcWqI5PXU089Jc0p87v5Im3IAaciYcCAAYn001YzaZnmRDTSy7JtydWX3SIvrhPtIXXiezyhGZvHsvqVOjj//PNN8bmfd91114R9aFvFlnquIaQObHkoyyxknUB+pU46ol7IPOTZBcbKWvfDhg2zYY/PffXVVwrjSB1fH7McUDI/eX3RFVdckZB/ww03xOnbPkj5eeWV8X3pSMrNK6dLn9sK7ZBNJ3nnquzzs/qxvHzq36Vu62wzHKf98t6nKsZpZfsj2FfIPimU3ep6wSMJkED7EqADqn11z5KXI0AHVDl+ha8O6YDCDbC+GcYR2z+lvbR37ty5iUkumwNKTsbhyc+yQco89dRTU0UiT127dk2U6dFHH22Ib06go9znnHNOQxztgMLvI0eObPhdnzj99NMT6eVNMOvr6nrE09n6qW6UHS+1ZujcBH788UfVr1+/2I6xuunbb7+1Fhr2ceCBB8ZxBw8ebI1nnpR1uGy7IOX5aBOQXyk3bzLspJNOijnceuutZpHjz1gdhnqk/373u9+lPpn+1ltvxfEQP8sBFVJnRThgtZwuG/oqM5iT9a46HzVqVCwPcvHenSLhxhtvTFw/bty4IpcrbKtmbpm10korRU/c2oTgZeXmlok9e/bMfFhBcvVlt8ibqwMqpE58jydszMvqV+oA/XeRIO0TNuo6aafTCakDnYZ5LMssZJ1APqVOOqJeyDzk2cWLL74YtzPYEiovoH/Q7aQ+Zjmg3n333UT8tL4I/TGcBVomjptttlnqOF7ns2h5ZXxfOpJy08qp8+3S57ZCO6TLU+RYZZ/v2l9n5V/qts42w3HaL2NUV73LfrDIOK1sfwSbC9knhbLbrLrC30iABNqDAB1Q7aFnltI/ATqg/DPNlBjSAYWn1YcMGZK4ecXkyOzZsxN5glOlR48eiXg2B5T5Qm7cCG+wwQYKN01lgpSJlzpnBaxAMm/G1157bSWfTi7qgIJDBvvrm0/3oxMZMWJEwllz2mmnZWWtJX6bMmVKgh/K7vJOoJYoHDOZSkDegGNbMUyCmQFOqYMPPji2j5VXXlnhieS8IOtw2XZByvPRJqAMUm7aZBgcD9dff33MAe3NAw88YMWANuO4445LxMWKITibzADWAwcOTMTLckDh2lA6c+EAW3j22Wej7Vh1ewsHjA7goc/jCAenS5CTqdjSzrZCNU3WQQcdlEi3mXcBYjWB6YTq3r17VFbz4QzYvfkeNLwHMG/rSsnVl92ChYsDKrROfI8nbDouq1+pA9TFIuH9999P2BdsG090u4bQOrDloywzyAxVJyBb6qTqemHLQ55dmJOoe+65pw174hzGwXiYx2wTsxxQcJybcc2+CCv40f5cd911qn///ol4hx9+eOoDDmaGJPO88sr4vnQk5ZrlNPNbtM/1eV9j5kN/9lGntKwix6r6/LJjNJRJ6rbuNsNxWjXjNF91J1SfFMpui9RzxiUBEuicBOiA6px6ZanCE6ADKjzjRAohHVA6oZtuuinhSMEqIixxx7YE0vGkb4pNB9TSpUujF5tjEk7/ro94Onj8+PE6KedjlsxBgwYp7PVuC3g3FSYMdfo4ogxYxQSZCEUdUFoWVi8cdthh0dZ+cG
zp8zhiohMdS6uGhQsXRozMLcB0+TbaaCN1yy23lHYmtiqbdsn38OHDFbbU03qH8xE2j60s0R6YK+PQLr322muZaLLqcDPtQpa8Mm1Cmly82w31wfzDe4E0H/M4YcKETBZof0x++AynzP777x9NTJq/abl5Digk6FNnaRyQHzhk9J/OnzzCVvAuEmxJJVeiIu4BBxygpk2bZuX03nvvqXPPPdd6Hdrvu+++23qdPvnSSy+pI4880qqboUOHqjfeeENHdTpOnDhRrbnmmgl5yy+/fLQqDZO+pr769OmjkP+0kMW1jN2a6WU5oKrWSdnxhFku/bmsfpvVgU7fPPbt2ze2ixVXXNFpwr9qHSC/ZZmZZcZnn3UC8prVicsYT+c9q140kwc43rHiyZSLtu3YY4+NVnLpdG3HV155JbYbXGNzQMHJjb7CNpaGMx5/st3Fd/RV1157beIhKVseijIvGl+nmaejNLk++9w6tkOaT5ljVX1+M2M0lCtNt7DTMv1dmlyfNsNxWrhxmu/+CLbms09Ks6+ydlumrvNaEiCBzkWADqjOpU+WpjoCdEBVxzpKCTe6Rbd4aSaLmBg85phjFCZXzKe/zZtdOHb23ntvhZdywmGhA242067B9VtttZWO6nzMk3nppZemyrrvvvsabtIxYahfDu/igMKqn7vuuityONkmA1AuTEjiiVPIa/Ugt/8y9Y7PmPjAAJ2hcxOAYxlPKJoTbOZkO1a54MlD1M+8kFeHi7YLefKabRPy5Mq6YPs+derUPBzRhCO2KoFDF+2lyRWOP7S/L7/8sjr00EOj9gurUV2CL52V5YAnnD///POGttfkdfTRR1uLZK4oMOPrzxtvvLH1On3ykEMOyUz3zDPP1FGdjygLtg3CygXtmNU6g/5QXqxogXMhK+RxbdZuzTTN+ipXrHaETsqMJ8xy6c9l9VtGBzoP+oj35Wi7xEoLl9AROijLzFYuX3UCssvoJG+Mp/OeVS+ayQNW02vdyyNWJOWF448/Pr4ebb0Mc+bMiX+X8vEdY06sPF533XWjFZhYdf/YY48lxuNSpvm9KPOi8c20snSUJ9dWdnnOpc+tWztk8inzuao+v+gYDWXK022z/V2eXGkftu8uNgPHMMdp/sdpIfoj2JuvPinPvpq12zL1nNeSAAl0LgJ0QHUufbI01RGgA6o61lFKVTmgzGItXrxYTZ8+Xb3wwgvRSiPcFGvnjRmvnT6DCbbfwQ0ttlr66KOP6JBpJwNos7JiOy289w1tAF64ji3ivvvuuzajEK64ixYtipjOmDFDffbZZwrv8tABDg20MXmODR1fH6kzTcL/ETcNeDfA22+/HW3pWlQ3/nOUlJg30Z6MXe23zjaewIQXHjrBn3T2VUu2Y1Ore50AnbrVCzzEg/Z+/vz5Hau8Nky9s7VDUCH7/LCGzHFaWL6+pbdCn+S7zJRHAiTQOgTogGodXTGn9SJAB1TF+ugIB1TFRWRyJEACJEACJEACTRKo20R7k8XgZSTglQDrhVecFNYkAUw6vfjii9EDbHiIzfXv66+/bjJFXkYCJEACJEACJFAnAnRA1UkbzEsrEaADqmJt0QFVMXAmRwIkQAIkQAItRIAT7S2kLGa1MgKsF5WhZkIZBGbOnJm5taJtuzicmzRpUoZU/kQCJEACJEACJNAqBOiAahVNMZ91I0AHVMUaoQOqYuBMjgRIgARIgARaiAAn2ltIWcxqZQRYLypDzYQyCNABlQGHP5EACZAACZBAGxCgA6oNlMwiBiFAB1QQrOlC6YBKZ8NfSIAESIAESKDdCXCivd0tgOW3EWC9sFHhuaoJ4B2Ps2fPLvy3ZMmSqrPK9EiABEiABEiABAIQoAMqAFSKbAsCdEBVrGY6oCoGzuRIgARIgARIoIUIcKK9hZTFrFZGgPWiMtRMiARIgARIgARIgARIIIUAHVApYHiaBHII0AGVA8j3z3RA+SZKeSRAAiRAAiTQeQhwor3z6JIl8UeA9cIfS0oiARIgARIgARIgARJojgAdUM1x41UkQAdUxTZAB1TFwJkcCZAACZAACbQQgXvvvVeNGTMm+vvuu+9aKOfMKgmEI8B6EY4tJZMACZAACZAACZAACbgRoAPKjRNjkYAkQAeUJBL4Ox1QgQFTPAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAl4JEAHlEeYFNVWBOiAqljddEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvUPGDBA7bnnnmrzzTfnHxnQBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtIGa28CWW26pNt54Y3XCCSdUPJvM5EigtQnQAVWx/jbYYAO10047qV69evGPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1tYP3111frrLOOOu644yqeTWZyJNDaBOiAqlh/3IKvYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEOAWfCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRQawKLFy9W+FuyZEmt88nMkUAzBGjfzVDjNSRAAiRAAiRAAiRQPwJ0QNVPJ8xRaxCgA6piPXWUA2rZsmVq9uzZ6q233oqOCxYsUD///HPFpa93cmD0/fffR3/1zilzRwL1JPDNN9+oefPmqf/973/qp59+qmcmmStvBNBmfvjhh
+q///1vpPdFixa1Vb/yzjvvqDfffDP6Q9nrHupcPzE26tKli/rtb3+r5s+fX3eUzF8NCdC+a6gUZslKgPdkViy5J+tcx3MzzwiFCXCMyTFmYaPhBSRQEQE6oCoCzWQ6HQE6oCpWaZUOKEwKDh06VPXq1Uv95je/iSZ3MMGj/5CX3XffXV144YXqhx9+iElg8njNNdfM/VtvvfXU5ptvro499lg1evRo9emnn8Yy5IeiMo844gh1ww03RJObUlaI72+//bbq1q1bzObrr78OkUwQmQMHDkzV1Q477GBNE/pK0/Gtt95qvYYn600ATmXUyTS94vy6666rNt10U7XXXnupv/zlL+qxxx5TCxcuLFWwyZMnq/3331+tuuqqUf3R9ahr166qb9++asSIEeqrr75KTWO33XZLzfP666+feh1+QPujy3vWWWelxr3lllvieDq+Ph599NHRdWgv9TnzuN122ykMMtPCc889Z70OMiZNmpS4LFQ76CrXLJf8PGHChERe0768//776tRTT1W9e/eOnAW6P9HHFVZYQW2//fbqj3/8o3r++ecbxJx++umpvNZee+2G+DiBPkrmV3+/4IIL4mvyOAwfPjyOW/bD3LlzE/3qVVddlSvyu+++i7jpvJc59uzZMzc9RChbPyEjj6suhx4TNNN/awcU7Ajp2UJePi6//PL4sjrUZ5u9oU3QvGzHZ599Ni5DkfLiorz4Znp9+vRRAwYMUIcddpi6/vrrM8daeXI1d9p3rLqGD61o3yHHFK42pUHmxde2XbQNcpWr5duOrn1nM/dkHVGnNHPbMaRN2NLDOR99WLuMM13tuc71hGPMX2pCM2NMXFl1m+Gjfoay27Q2hedJwAcBOqB8UKSMdiRAB1TFWq/CAYXBx5FHHpmYHMOkICZrV1999djJoicMcfzggw9iErj+3HPPVbjGjJP3efnll48mtW0rH5qViaehTzrppISDLM6opw8//vij2nrrrRNlbSUH1MUXX6ww2S/1A2fDbbfdZqU0c+ZMtfPOOzdcs9FGG6kZr8+wXsOT9SaAOvbnP/+5cL1dY4011D333FN45QpWKcDRYNod6mu/fv1iZ5T+DW3DfffdZwUIh6c5OaevwUSPnti0XYjJHB0Xx4033tgWLTr373//W+2yyy6J+LhmtdVWUw899FAUB3X+zDPPTDiitXxM0KYFrPgaPHhwQjYc/nD+4zczhGoHm5Wry4fj3XffbWa14TOeRB0yZEiiX1luueVU//79I8en7SEHOJtkeOaZZxraW6TfvXt3hbbMFsaPH68222yzBGNcs80220STU/qaLA5wur766qs6aukjHE4mv2233TZXJp7eNq8p8xmO3qzgq34ijSyuWWUo0n+bbQAmI2whKx94mObll1+OL+vo+rznnnta7Q2TSnDQgY3JDvUH47aPP/44LkOR8uKirPhmWrbPyA/aP4yHZMiSa3KnfUtyv35vVfsONaZwtSlNMCu+zZ71ubw2qFm5Wj6OeX0n0mj2nqzKOqVZZx1RllA2IdP12Ye1yzizWXuuQz3hGDNZA5oZY0JCVW2Gz/oZym6TRPmNBPwSoAPKL09Kax8CdEBVrOvQDihMumDVi745Qno33XRT4gl+bBmEp191HBxNB5RGcueddybinHfeeWrp0qWRLEwQYTIPk6xyImXYsGFaRMNRyjznnHOi9z5g67t3331XjRs3Tv3pT39qkIkn7kOFq6++OlFO8GglBxS43HvvvQ1luOuuuzKRvffee4lr4Ezg+zcykbXEj9C7WbexQgQ3dhgowSGCSdrzzz8/egrfjId2Y9asWU5lnDNnjtpwww3jdLDKEqupzHqD7zhvpjFq1CirfNR7Mx4+T5kyxRpXn4Qsec2XX36pf7YezTxjwnf69OkN8a699toGuWhH0ybFtYATTjghvu7ss8/Wp63HUO2glAuHDh4IMP8wwfz5558rtNMmv6xJNGwvh4lmHX/FFVdUaDfRH+jw7bffqhNPPDGOg7g2BxTi4zo4PrU8HOGYygp4h4p5Dfowc+Wuea3ksOWWWxZ2sJry5GdsX2vaEvIPe/rss89k1MR3OTEAZ+1LL70U9YHQ0QMPPJBgMnXq1Eh3uO5f//pXtHpKM8tyQPmun7oQkqvP/ttlgj4tH3BO2h58QfyOqM9bbLFFrr2dcsopCV2j/UgLkntWeSFDxv+///u/aFIK9Q59AdqyadOmqeOPP77h4ZWslaRSrswH7TtNgyrxkEVeX5LH2UylCvsOOaYoUlabbftqg2Q+fPWdZe/JqqpTpk25fA5pE0g/RB/WTuNMac91ryccYyZrXbNjTEipos0IUT+R91B2m6TLbyTghwAdUH44Ukr7EaADqmKdh3ZAmZOEmKB64YUXrCXERK25GsrmgML7LfRkF45/+9vfrLIwcWjGg0MqbRWNlIltwGwBNwrSsZU3QWmTk3cOqyjwFL+Zf3w2J9LzZNTldzxhaZbjkksuycwaJqB0fKw2yZu8zxTGH2tDQNaxtHoLZyO2n9M2gCNWBGVtl4dCwoFhrkZZZZVVoncA2QDgvXP4XaeBSfrXX3+9ISpkYks3HQ9HcyVAwwVKKawwMOPjc95WOGYdwdZTtgAHnZSL73qrPts1OIdt/vR1Y8eOTYsWnZc68tUOSrlputeZMx1GWQ6oAw88MC4b2mVsuZEWsPWe5pDmgMK1++yzTxwP8fEEZF7Yb7/94muwGistSA5Z+UiTkXUeTiNdRvM4ZsyYrMsSEwNYtYr+xwwPPvhgQq7cwtBss9McUCHqp86j5OrLbiG/iANK5gPOnLTQEfX5jDPOSMtOfB5thGk7999/f/yb/FCkvLhWxs9qB+REMuwKDmpbkHIld3Pii/adJNiZ7DvNnpoZU+TZVJJio237aoNkPtLKqPPj2neWvSerok7pMhU5uvJqxiZC9WHtNM6U+ql7PeEYM1n7mh1jQkroNiNU/UTeQ9ltki6/kYAfAnRA+eFIKe1HgA6oinUe0gH1yCOPJCY0sraNQrEvvfTSOL7NAYWtYswJkqwbMvNmDNfg6XhbkDLTBsW4FiuuzPTxHhCfAYMobJtkpqE/t6ID6pNPPlHY6kyXAQ7GrEntThTiAAAgAElEQVTdY445Jo4Lhx9D5yAg61hWvUWJMTmvbQbHrCfxEd90tCA+VmZkBTmpvuOOO1qj48l7Mx94t1JagF3bHMfm+4Bs1+J9TjqNiRMn2qJEq8B0HHmUDgFTALYx1PGffvpp86eGz1JHvtpBKTdP9x9++GGc5zQH1JNPPhnHQfmuueaahvKYJ/B0PyaxETfL8XPwwQcn5Joy0j4ffvjh8TVwdKUFycF336H7u3XWWSexkvCggw5Ky1J03pwYQPsrg6wrNnvDuyzANs0BFap+Iq+Sqy+7hewiE/QyH9gyOC1gVaeul/Jo46vlhKrPWj6c5WZ+sGI0LRQpL2TI+HntgBwHpbXpUq7kTvtO02Dnsu88eyoypsizKUlUxvfVBkm5eWV06Tt93JNVUackY5fvRXkVsYmQfVi7jDOlfupcTzjGbKxx
zY4xISl0mxGyfoay20bCPEMC5QnQAVWeISW0JwE6oCrWeygHFJwpeOePntDASoYs5wOKjadcd9111+jdKJ9++mkDCWwnpOXhmHVDhskTM65tcg0JSJlZg+LHH388IXPfffdtyGOZE+bWJRdddFEirVZ0QIEFJuBNPaQ5IeFw1CvM8IQmQ+chIOtYVr1FqbG1mVx9lLZyEltlrrrqqrGNrb/++rngMEDr0aNHfA3s85VXXmm4Dg4h03Yvu+yyhjj6xFNPPZWIq69Lc27huoULF8Y2D8cB2kxbMN8tJVeIbbrppontTM3rsYJB5yNtAlfHlzry1Q5KuXm6R36wNcshhxxi3fIQ25qZ27VC97CXvIB08c4tbCmXFg499NCYF9oil2CuYMva5rAZDi7pIw76Ve0swVaWeEeh1jseAMjqd82JgYcffrghSRcH1OjRo6P0bA6okPUTmZVcfdktZGumYJm3RVmRfHREfXapd3BSa7vB8YknnmiwB32iSHltesrLz2mnnZbIS9pqrLx80L61xhqP7WTfRcYUeTYlSRaJX+QeQsrNqzPIV1bf6eueLHSdknxdvxfl5WoTofuwdhlnSv346qul3LL1hGPMxhpXZowJaSHbjND1U9qXL7ttpMwzJFCeAB1Q5RlSQnsSoAOqYr2HckBhwticzMDqobJBDgSyBprmJA/ygZUGtiBlZg0uzCcMIXPAgAE2kU2dw/uP9GohPIkvt+lpVQcU8m06CLp3767wbhYZBg0aFNkLJn7lNlAyLr+3FgFZx7LqrS6Z3Ibp97//vf4pcZQ373krjvTFf/3rXxPtk825hJsuc1UTVnqkBWxxhTYBjnbzXSq43nwvkXn9+PHj4zxg4igtmG0Z3q0GJ73Ztqa9xyqUA6pIO9iM7tM44Dze82eWXa54yLo277dWdUCZ72l64403lGlXYJW1DSRsc8SIEdEf3nkgg4sDCi9+hoyRI0fKy1XI+onEpH357L+rmKCvqj67tLl1ckCdfPLJiXpuezeei/5p3w1VMj7RbvbtOqYo0qa42GAMXCnVkX2nr3uy0HXK5FXks9SbS5vnYhOh+7B2GWdK/fjqq6VcF71n2RXHmI10yowxIS1kmxG6fkr78mW3jZR5hgTKE6ADqjxDSmhPAnRAVaz3UA4odNLmRGHeFlAuxZYDgayB5muvvZZI38cKKEzumWXCU/o+Ap5M3H777SPZ0Me8efMUtuIx02pVBxT4mCu7UKbLL788gc28Mc6aiE9cxC8tQ6BIvdWF+uKLLxTez6TrAByTNsclVp3oODimTVRqufooJ1vTHFzm+xLgTMLTdraAlVdIHyuUsJLEzJNtdRVkYGtBHS8tDuKZDqg77rhDvfXWW/HKKVyPNsO2QiOUA6pIO9iM7m189TnpOPTRr2jZreqA0ja6ySabREXBk92m4xRbpzQbXBxQWbJD109pX1mTA0XsFmWqYoK+qvqcNVbS+pNtYkeugEJ7rNvGLCd+Ef3rcppH2neXiLOt/zA5FeHcEf2Vi327jimKlBWMisQv0gZJuS5lNHUmP4e4J5Np4HvZOmWT6XKuGV4uNhG6D0PZdB+ONq+zjjOlfnz11VJu2XrCMWZjbdP2GWKMidTKtBmh66e0L19220iZZ0igPAE6oMozpIT2JEAHVMV6D+WAGjhwYDyBgEH1V199VbpkciCQNdC8+eabE+lfd9111vSlzKzBxX333ZeQmZY+nF8rrrhi/GdbXWFmBisY9GQL8o0wefLk+Bx+a2UHFCbte/bsGZcHNqftAdsdbLnlltFva6yxRrRU32TDz61PQNaxtHojS2q+Hwl1YMqUKTKK6tevX2xXiLNgwYKGOLYT2O5T1zkcsfrQFvDuODOe7R0tWL2o48DpI8tra3vgdMY70XBdr1691M8//2xLPjpnTuhhr3MEedNlc7CHckC5toPIp2Rh0z3K3rdvX7XyyitHjveogCn/tthii5g12Ol2JCV6odOt6ID66KOPYkft8OHD4/LuvffeMac111xToZ1tJpSZGEB6oeuntC8f/bfmVIUDqqr6bKt3upz6WBcHlHyiGU79tFBE/zYZtO+wDqg62Tf07zKmKGpTReJ3ZN8Z4p4sRJ2yyXQ5J/Xg0ua52EToPgx5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy/TDoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFug/lgFpllVXiCTBMtPoIciBgG2jqdDAhqyeFcbRNXiOulJk2uMAE3l577RXLxIS17T1VkCmdR1mrevDuI7313jbbbBO/BwbvbDHz38oOKDCRW13orbOwBZEu55gxY7T6eOxEBGQdy6q3ZrHNFUKwEdv7e8xJYqySKjLRbrZRkG97V86M12fE9ok4l156qZnF6PNNN90UxcGKLWxHhrDxxhvH12FLTRlefPHF+Pc///nP8ufEd9MB9fe//z36DXuqw7Gg6w6O0jn20EMPxb/7egdUkXYQGXXRPdpmXQ60nVnB3M4TDmufoRUdUFdccUXMDts76SAfwMC2Ms2EMhMDSC90/ZT25aP/1pzMvIdaIVJVfXZpc+vggEI7hffh6fZg8803t7bLWkeu+tfx5ZH2HdYBVSf7hu5dxhRFbco1fkf3neZ4x9c9maxP+F62TtlkupyTenBp8yA3zybMfiDEGBN5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy7QZoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFukfnjZez+ww//PBDPIGAiQRMxvoIciBgG2giHdz04kZBT2Lg3S1YcWALUmba4MIchEHulVdeaRMXnXN1QOGGdMcdd4zyify++eabscxnnnkmzj/Sa3UHFPibTyrB6YaVIz169IjKudVWWxVyHsSg+KH2BGQdS6u3siDnn39+og7o1YE63rJlyxK/F3VI6G3zdDsxd+5cLTo+wm5Np8dOO+0U/6Y/7LvvvlE+tt56a31KnXrqqXHebJM9eCeeThcrJrOC6YC68cYb46j33HNPLAOy+vfvrzD41GHcuHHx774cUEXaQeRD6h5ba2FlFv7wNPiwYcPilWAoQ5YDKlS/onm1mgMKT/X26dMn0rHcQlK+a+Siiy7SxSx0LDMxUEX9lPblo//WgMyJjVAOqKrqM9otrMTO+ttzzz3j9gJ1MeQWfEhLtwO33nqrghN+5513jtOHM3/w4MEKK1Wzgqv+02TQvsM6oKqyb19jCthJUZtyjd+Z+06zfpWpU6acop+lHnzYRBV9GMrZDuNMqR9ffbWUyzHmL226q/1n1bMqxphIv9k2o4r6Ke3Ll91mcedvJNAsATqgmiXH69qdAB1QFVtACAcUJmv05CqO2267rZdSyYGAHmDBkfPxxx9HLzw/5ZRTEmljlQCuSwtS5tChQ6P4c+bMid4ng1UE5vsIsLUetkvICq4OKL1yAoww2W4G6YDCiodWDxMmTEjoxnzHT9Y7cFq93O2ef1nHdL3N43LNNdck7GXEiBGJS2Q707t378TveV8GDBiQkD9r1izrJYcddlgcr1u
3bokn8rFqSq9gNFdHjR07Nr4G9fv9999PyN5www2j39ddd93M7fdwkemAwnadOuDmUL87Tre3N9xwg/5ZPfroo3EeijqgfLSDyIjUvc5n2jHLASX17atf0cBazQFlvidQb3Wly4IjtjXUnOH8byY0OzGAtKS+QtRPaV++7Bb5r8IBVVV91nZQ5BjSAZWXjyOOOMLpoRup/7TJoTTbp32HdUBVZd++xhSwk6I2JeP7aoOk3Lw605F9p1m/ytQpU07Rz5KXD5uoog/T5ezs40ypn3asJxxjamtPHpttM6qon6HsNkmA30jADwE6oPxwpJT2I0AHVMU6D+GAgvPGvFnCvus+ghwIII211lorngA208RnrOx6/fXXM5O2yZRy9PeTTjop05mlE4IzDJPn+s92Y4gn1FdYYYWIE1ZiyO2/0hxQeOLnyy+/zJ201nmp23GHHXZI2AbY/uEPf6hbNpkfjwRkHXOdGIDDSdc9HLGNoxnkKo/11lvP/Dn3M1YsmfKxHaYt3H777Yl4zz33XBzNrKfTp0+Pz2N7TlM2VivpYDqULrjgAn069WjGl++TmjlzZmK1J9pz8EYYP358nIeiDigz7/KzazuIPEjdw2GGMuMPTvfDDz880X7b2koNBnvRm3nJ61cWLlwYvVMOznv5t3TpUi02Prba5MDxxx8f8cDqWbxMXQZzlR24mVv0ybhp35udGIC8KuqntC/TPuTnInaL/FfhgKqqPuMl4no8knYcMmRIon6FdEDhoR7dDpxxxhnqqKOOUhtssEEifTzsY9t21bRVqf8qHVC0b1MTv37uiP7K15gCpShqUzK+bHfM70XaICm3TN8Z6p7sV63/+qlMn/GrlOKfJC8fNlFFHdcl7ezjTKkfs17Izx1VTzjG1Nb4y7GKMSZSarbNqKJ+hrLbJGl+IwE/BOiA8sORUtqPAB1QFes8hAMKzhRzQNns09cShW0gACdO165dFVYmYDuiPfbYQ5188snKdUWNlIn3D2ByBKsjzAkolOess85SS5Yskdkq/B0rtuAc04zw0m0ZzIltxNMroPDuJHz/5z//KS9pie8vvfRSXG6UY6WVVoonzFuiAMxkYQKyjrlODMgt+KZOnZpIG+9b0nUIR7QFRcLaa6+duH7BggXWy+VNjrmd2dlnnx3JWG211Rq2+TQnVNEm6WA61vIc5LjGnNAbOXKkFhMfTz/99EQ5jj322Oi3J598Mj5f1AHlqx100T0mmbUesxxQaHt1PBzz+hWpX/Na88l8DbKVHFCLFy9WmKBHmfbbbz9dhMTRXCGFeObquETEjC/NTgxAZBX1U9qXL7tF/s3+H0/aZgWZjyxHSEfUZ5c2tw7vgJo0aZJaeeWVE/UcW/SlhSLcbTJo32FXQFXVX7nYN/SfN6ZAnKI2JeP7aoOkXFsZXfvOUPdkvuuUTZ7rORdeNllZNlFFH6bz1NnHmVI/dawnHGNqa1SqqjEmUmy2H66ifoay219J8xMJ+CNAB5Q/lpTUXgTogKpY3yEcUCiC+d6UXr16eSmVHAjYbsiKJiRlmhNHGIxiMtecuITjKO19Uq5pjx49OpaJ97b8+9//bvgzt+dD+phMxgSi3u7rkUcecU2udvHMiWG8P4ehcxOQdcy13p544olxPUEdwNOJZvj+++8TvyOOXEloxjc/YwWMuQUkVpFgS7u0YL4vylx5o7fSO+aYYxouxVOcuu3YZJNN4t+xdRzOw2HuEswJa9v2n3Ccde/ePU4Lsl944QX17LPPxueKOqB8tYMuuseAWec/ywEFVnD0aaY9e/bMxId3YJnb0Onr9tprL4UnwmVoJQeU+f4vbON40EEHNfzts88+MSuUfdddd5VFzv3e7MQABFdRP6V9+bJb5L8KB1RV9dmlza2DAwrc33nnHYX3+en6Cj1goskWsvRviy/P0b7DOqDqZN/Qfd6YAnGK2lRW/DL3EFKurQ4X6TtD3JPJ+oTvZeqUTZ7rORdeNllZNlFFH2bmqTOPM6V+fPXVUm7ZesIx5i8WWdUYE6k122ZUUT+lffmyW7Pe8zMJ+CJAB5QvkpTTbgTogKpY46EcUJtvvnk8gYDJXQwUygY5ELANNIumIWWagwvIwovvMXmsJ0NwxBY2ZYK5MsKUW+SzuQ1Ymbx0xLV48k2XlQ6ojtBAtWnKOuZab7fYYovYTtCGYGAlgzlJDJvC9pcuYfbs2bFsXNejR4/My8x3yyEv3377rTJl4AZKBvPpZKQBRxFWUmjHl7mSSl5rfjcdUFdddZX5U/x5zJgxifKAHVYT6HpWxgGFRJptB111v2jRooiPTcdxIZVSsl/BBF9WePvtt2PeYIGVskjLFppxQJnvbcBquLTgyiHtenneXEGrdZx3hN2mrfKT8vX3ZicG9PWh66fk6rP/NvMeagVUVfXZpc2tiwMKtiO3A8RDO7aQp3/bNeY52ndYB1Sd7Bt6dxlTFLWpvPh17Tt93JOZdUl/LluntJyiR6kHlzYPaeTZhNkPoI8NNcZEXjrzOFPqx1dfLeWm6Z1jzGI1qqoxJnJVps0IXT+lffmy22LaYGwScCNAB5QbJ8YiAUmADihJJPB3dN4YaPgOBxxwQDz5iUG7y1ZTeXmQA4G0gWaeHPN3KVMOLhAXzh5zcg9bH+VNSJlpyM/gDSdU1p+5Sghp4/02Zvw33nhDim2Z73RAtYyqvGRU1jGXeouJcu2ogf2nrRbabbfdEnUTq35cwpQpUxLX6W3r0q7FikOzDcAWmXolI/JpewfPJ598krgGE7z/+Mc/4nOuddjFAYVtPbfZZptYNvJqTmaUdUCBSzPtYDO6T9MBzst+ZcaMGVnRo9969+4dc8GK07SAd9FpHUOnWSvitAysPNLXXHbZZfp0w7EoB0xaPvbYY8q2Pau5VQ/s/z//+U/q34UXXhjnD/nEjX6RUGZiAOmErp+Sq8/+25zYyOvvXfKhuXdEfXZpc+vkgDLbSdgt3hNlC0W4266nfXeMA8p3f+Vi365jiqI25RK/jn2nj3uyEHXKJtPlnNSDL5sI3YeZZevM40ypH199tZTroneTufzMMWby/Z2hx5jgX6YfDl0/pX35sltpd/xOAj4I0AHlgyJltCMBOqAq1nooBxTel6Qn5nC87bbbSpdMDgTKDjSRISnTNrhAPKzUMcuDbRtChrR3QIVMsyrZdEBVRboe6cg65lJvH3rooUR9u/LKK62FwYvszXp5wgknWOPJk/KFz2PHjpVREt/l5BXeG6BvVOH4SQtwHOv8YcUT3teD7xtttFHaJQ3nXSascRGcMabTTqeLow8HFNIo2g42o3us3MJ2o9OmTWtgod+5pct2yy23NMSRJ8xVU3i/X1q4+OKLY11Bfp7TAXJ23HHH+Jo77rgjTXRDP5NXB/AOQ11G6dy89NJL49/uvffe1DTxg2k7kHfEEUdkxpc/lpkYgKzQ9VPal8/+uyMdUGDnsz7n2RvSq5MDasKECbGNw24HDx4sTTP67qp/68UlJ74gk/bdSNZsc9JWQOGqqu3bdUxR1KZc43d03xninqxR++Umk23yXM
9JPbi0eS42EbqOm+XrzONMqR9ffbWU66J3jjFV9J7sOowxYf9lxpmh66e0L192a9Z7fiYBXwTogPJFknLajQAdUBVrPJQDCk9l68ENjjvssENuybBiAHv/44n1L7/8siG+HAi4DDQbhIgTUmba4GLmzJmJ8mA7I2zvFCrQARWKLOVWTUDWsbx6u2zZMqXfrYS2A20CXohrCzNen5Gol6ussorC9XnBfGoO27Lhxj8vbLXVVnFa2JZTv48ta+ULHGK6HRwwYIBabrnlou95DMy8uE7o4ZqTTz45Tk+ni6MvB1TRdrCo7lGGTTfdNCqDbYvCN998M1E+vE8rL2y55ZbxNTvttFNqdOmUfPXVV1Pj4gc8xb/WWmvFstFmp4WiHPDuFK0/871mSBPvfMJvrlvqmSvA0N+71A9djjITA5ARun5Krj777452QIGfr/rs0t7UyQGFBw60/eMI57AtuOrfdi3O0b47ZgWU1kdV9l1kTFHUplzjd3TfGeKeTOvRPJatU6asIp+lHvLaPFebCN2HyTJ21nGm1I+vvlrKzdM7eHOMqVRdxpjQR5k2I3T9lPbly25lved3EvBBgA4oHxQpox0J0AFVsdZDOaBQDLzs3ZxEmDVrVmbpMJGr43/wwQcNcefNmxf/jnjYXqhskDLPO++8VJF4elznD0e85N0WvvnmG4V3sug/TJoWDfIJ4K+++qqoiNrGX3311WOOe+yxR23zyYz5ISDrWN4N4hVXXBHbB+oZVsNkBTh2zHqZt5rJdOjgOtd2BA4RMx39+bXXXkvNHlao6HjmMa8tNAUirr522LBh5k8Nn9FOmC8b19flOaCkjny0g8iclJun+3feeScuq+0F9pCJNkOXC8e8rYTM9ibLASW3Sbr55psb+JonJk+eHOcDjkW5UsmMW5TDdtttF8mGk8ncCtB0ErhunWs6QcHrqaeeMrOW+dl8ETWuBaOiIWT9lFx92S3KWMQBJfOBFZJpoSPqs82ZK/P35JNPxvYMXT/++OMySvy9SHlxkYyf1w6YW1siL1ipYAtSbhZ32/W0bzcHVBHOHWHfefZUZExRpKw22/bVBsl85JXRpe/0fU8Wqk7Z5OadK8qriE2E7MNkuTrrOFPqp871hGPMX/qFKsaYsP+y/XDI+hnKbmW953cS8EGADigfFCmjHQnQAVWx1kM6oDDpickD/YetkNJefDt37tzEhI/NASUnjvH0ZNkgZZ566qmpIpGnrl27xuVBuR599NGG+ObEJOKcc845DXHyTjzwwAOJdLA6rDMEPMGPSVVtE3gBMEPnJvDuu+/G+obe0yZSYBvynTWbbbZZapuhqT3//POJredWWmmlaOWF/t08LlmyRJlbsvXs2VOZK0zMuPKzfG8UytK9e3f1448/yqjx9zlz5iTKjmt+97vfxb+7fHjxxRdjGdhuIi/ceuutcXxdz/IcUCHaQeRTyk3TvS7TSSedFOcd5bAFuToUPNNWyL311luxPLDIckBBj/369YvjY3XTt99+a8tCtPrpwAMPjOOmbRGmLy7CASuvtN7QP5th1113jX9z7f9GjRoVXwO5Rx11lCky8/ONN96YuHbcuHGZ8W0/hqyfkquP/luXoYgDSubj9NNP12Iajh1Rn7F1ZV6A417bHY54KjktFCkvZMj4We0Atm0z84F6uHTpUmtWpNws7jYBtG83B1QRzlXYd8gxRZGy2mzbVxsk85FVZ5APl77T9z1ZqDplk5t3LqRNhOzDZLk66zhT2nOd6wnHmL/0C1WMMWH/ZfvhkPUzlN3Kes/vJOCDAB1QPihSRjsSoAOqYq2HdEDhye0hQ4YkJhMwgTZ79uxEKfEEe48ePRLxbA4ouZpggw02yJz8TSSS8kXKzHqfC0TgZdjm5Mjaa6+t8FJ4M/hwQMl3nTzxxBNmEi37Wd5cwRnl8q6Vli0wM64waW3WGXMiBasF8eTuddddp/r375+Id/jhh6c6FiRWPM1qvv8IjqFnn3024bxCOuY7e1ZbbTWVt82amQ6c53rbPV2eY4891oxi/WxugYbrsrbsswkwb8723HNPW5TEOThS4NjVecQxzwEVoh1EpqRcU/dmpuEYvP766xN5hhPeFtCvHHfccYm4WDEEZ5MZMCE1cODARLwsBxSulRMP2KoRcswAp9TBBx8cy1155ZUjGzbjyM8uHCAXNmvaCxykOsiHEuAscwnSqYstJ11X5cqVKM2++zBU/ZRcffTfmmkRB5TMB+wxLXREfUa7lxfkO2LOPffc1EuKlBdCZHxsY2Ou7EPbOn369MQkOtotjA/SVj/Z5GZxtxWG9u3mgJL6y+JchX2HHFMUKavNBn21QTIfvvpOn/dkIeuUTXbWuZA2gXRD9WGyTJ11nCntue71hGPMLtEDWdI+bd/LjDEhz0c/HKp+hrJbG0eeI4GyBOiAKkuQ17crATqgKtZ8SAeULspNN92UWPWCVURYMo1l7tLxpCdNTQcUnn7Fi9cxYax/10csER8/frxOyvmYJXPQoEEK+6XbAt5NhcltnT6OKMPIkSPjp3TLOKCw3/Bpp52mMElopoFtpDBBi86lFcPChQsjRuZ2WLp8G220kbrllltKOxNbkUtnzjNupFEvbPUW9i1tXNvDCiusoK699trEBKULp4kTJ6o111wzUW/gMILTAc4tc+Vdnz591HvvveciNhFn7733TsjPmhzVF2J1jC4bjniiziXAIYEVT+ZEOK6H0ytPxiuvvJJIM80BFaodTJML3aINMP/w3i6Tj/6MbUizAmzL1Ck+wymz//77Rw448zctM88BhfSGDx8ev6sL10EOVllhy1X0W6Zc6CZrC8Y0DpALh6n+0/mTR6SLd1Vg60W5+hZxDzjgADVt2jQrJtg3nAi269Bn3X333dbrcPKll15SRx55pFUvQ4cOVW+88UbqtWk/+KyfWVzL9t86/2a9S3tIokg+Oro+Ywth29af8+fPj1ZpSztB+4yJpc8//1wjicY4aWMxG/csPrBftAfSqa/rAJzoeG+OLWTJteVDyqB9u20xWYRzFfYdckxRpKywp6LxtQ3m3UOkyfXZd5a9J9NlMY8h6pQpP+1zSJuQafrsw6Rs83tnGmem2TPa+ay2ug71hGPMcGNM2LvvNsNn/Qxlt2Y952cS8E2ADijfRCmvXQjQAVWxpqtwQKFImCQ75phjVN++fRMrFfRkA45w7GDgjadj4bDQAQNRc3WDeQ0+46WtRUOeTEyypIX77ruvYWIOk5ILFiyILinjgMIT8LJ8+jvSwE1+Kwa5FZYukz5isgsDPobOQ8C2/ZzWN46YeMTqkXXXXTdamQTH62OPPZao+0VpYLIUW3tg8hLv5UE62mGANgRPXWIlCSb1mwlYqaXLAHloR/LCXUk6DbQAACAASURBVHfdFV+Dlx+7Bqys1GnJI/KRF44//vj4+pdfftkaPVQ7mCdXlsf2ferUqdY8myfhWMOWcnBiQx9a15AH/aPPQdkPPfTQiAVW4LoEPACBpzJNJ4QpGyuT8MRlnv7LcoC9wqZtfPS5o48+2lokcyWCjmseN
954Y+t1OHnIIYdkpnnmmWemXpv1g6/6mce1TP+t82/qPs0BVSQfdajPV155pS5efMT2NaZdyM+TJk2K4xYpLy7Ki6/TAmuM41BfsUL0n//8Z+YDKXlys/SPfNG+3RxQRThXYd8hxxRFyupi21k2mHUPkZcPXWeyji59Z5l7srhBMD6EqlNGEtaPIW3ClqCvPswmW5/rTOPMPHuuez3hGLOLCjHGhK2HaDN81c9QdqvrOI8kEIIAHVAhqFJmOxCgA6piLVflgDKLhfd1YLuVF154IVpphBsI7bwx4/EzCZAACTRLAAMxrBR6++23o20ym3U6menjaVu8lBZ/aZPSZnx8hmNVX1NlO4d0saISKxzaISxatCjaLm/GjBlRufFOMR2ge2y3WNQGsE0Y3k+IvgqrR7Adn+s7w3TaPNoJhKif9pSaO+vigGpOcnNXtVt9bo5Sfa6ifRfTBe27GC+fsXlP1hzNUHWc48zm9BH6Ko4xQxP2Kz9U/fSbS0ojAX8EYPN4OAXbxDOQAAm4E6ADyp2Vl5gd4YDyknEKIQESIAESIAESIIEABOrmgApQRIpsYwK07zZWfo2KjgmzF198MdolA6uyXP++/vrrGpWCWSEBEiABEiCBjiVAB1TH8mfqrUuADqiKdUcHVMXAmRwJkAAJkAAJkECtCXCCvtbqYeZKEqB9lwTIy70QwHvesrYTTPvN3BrUS0YohARIgARIgARamAAdUC2sPGa9QwnQAVUxfjqgKgbO5EiABEiABEiABGpNgBP0tVYPM1eSAO27JEBe7oUAHVBeMFIICZAACZBAmxOgA6rNDYDFb5oAHVBNo2vuQjqgmuPGq0iABEiABEiABDonAU7Qd069slS/EKB90xLqQADvapw9e3bhvyVLltQh+8wDCZAACZAACdSCAB1QtVADM9GCBOiAqlhpdEBVDJzJkQAJkAAJkAAJ1JoAJ+hrrR5mriQB2ndJgLycBEiABEiABEiABGpCgA6omiiC2Wg5AnRAVawyOqAqBs7kSIAESIAESIAEak2AE/S1Vg8zV5IA7bskQF5OAiRAAiRAAiRAAjUhQAdUTRTBbLQcATqgKlYZHVAVA2dyJEACJEACJEACtSZw7733qjFjxkR/3333Xa3zysyRQFECtO+ixBifBEiABEiABEiABOpJgA6oeuqFuao/ATqgKtYRHVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP29evVS++67r1qwYAH/yIA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdTcBr744gvVvXt3ddRRR1U8m8zkSKC1CdABVbH+sAJq4MCBqkuXLvwjA9oAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2UHMb6NatW6Sjgw8+uOLZZCZHAq1NgA6oivW3xx57qEGDBqk//OEP/CMD2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZQcxsYPHiw2mmnndSZZ55Z8WwykyOB1iZAB1TF+uM7oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBvgOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbO5EiABEiABEiABEiABEiABEiABEiABEiABEiABEigBAE6oErA46VtTYAOqIrVTwdUxcCZHAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmUIEAHVAl4vLStCdABVbH66YCqGDiTIwESIAESIAESIAESIAESIAESIAESIAESIAESIIESBOiAKgGPl7Y1ATqgKlY/HVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbeZHI//PCD+u6775q8mpeRAAmQQGsSWLx4scLfkiVLWrMAzDUJkAAJtCgBtr8tqjhmmwRIgARIgARIoG0I0AHVNqpmQT0ToAPKM9A8cR3lgFq2bJmaPXu2euutt6LjggUL1M8//5yX3bb8Hc6nXXbZRXXp0kW98cYbbcmAhe6cBN555x315ptvRn+LFi3yWki0MR9++KH673//q+bNm6cg30cb880330Ty/ve//6mffvrJa57rIgxtDnTz3nvvKZTz+++/77CsoY9C2/fb3/5WzZ8/v6l80M6awhb8ojrZma/ChrI1jpncNRRKB+45cI9Z9/6E7S/7eXdrZkwS6DgCdW9LO44MUyYBEmgHAnRAtYOWWcYQBOiACkE1Q2aVDihMBA8dOlT16tVL/eY3v4kmFTGxqP+Ql913311deOGFChNTOmACdM0118z9W2+99dTmm2+ujj32WDV69Gj16aefahENx6IyjzjiCHXDDTdEE9oNwpo8cdFFF+WWCeVebrnlYkat4IAaOHBgarl22GEHKy3oK03Ht956q/UanmxtAnPnzk20A1dddVXpAr3//vvq1FNPVb17944cFrpt0ccVVlhBbb/99uqPf/yjev75553Tmzx5stp///3VqquuGtXFbt26RceuXbuqvn37qhEjRqivvvoqUx7aP5uNb7fddgqDxrTw3HPPWa+DrEmTJsWX5bVpl19+eRzX9uHhhx9Wp5xyitpqq60SbQ7Yob3+3e9+p4477jg1ceLE+HKsygRrW7mKnuvZs2cs1/ygJ0CRD5SxaKCd/dJ30s5+4ZBmZ0Xtyhbft601M2YKWSdd25jddtsts0149NFHG/DhIaT1118/9brrrruu4RrbiWZ0EJKZLY8++hPIzdOHboP12LiZcSzb3y6K/Xz4ft5WT+S5PHuXY5yOHnMNHz5cFiEas+l6aTs+++yziWuKlDkvrplenz591IABA9Rhhx2mrr/++sx72zy5JvdWbEvzyqe5lWlHE0rlFxIgARIIQIAOqABQKbItCNABVbGaq3BAYUB65JFHJiabMREMZ9Hqq68eO1f0JDGOH3zwQUwC15977rkK15hx8j4vv/zy
6i9/+Yt1lUKzMvEU/kknnZRwkMUZLfjhzDPPLFQelPejjz4qmEr10S+++OLohl3qZ91111W33XabNUMzZ85UO++8cwOPjTbaSM14fYb1Gp5sbQJwOJk2su222zZdIKwOGDJkSKKNgeO2f//+CjeNNof36aefnpseVtzAYWXmE21Av379YmeU/g3tzX333Zcq8+uvv1ao89p5pa/DERMAaQGrtwYPHpzIA8oDZz5+0yGrTYNj/+WXX9ZRE0esbgI7Mz/du3ePHE62vKIMOuCJU/O6Mp+Rli2UnQClnf36kAftrEtU/2x25uOcL1srM2YKWSdd25hbbrlFrbHGGg1tA9pOOP+xMlUGyMZvZn3X7ck222yjpk+fLi+xfm9GByGZmZn02Z9AbpY+NDvbscg41tQHJmqLhmb0kZYG+/lsnbdyP5+mc/N8lr3byt6RY64999xTvfrqq2b2o89wkMMRjDpo1k2M6XCf/PHHHyeuKVLmrLhmWrbPyA/Gdj/++GMifXzJkiu5t2JbmlU+Gyt9rkg72gCVJ0iABEjAMwE6oDwDpbi2IUAHVMWqDu2Awg0AVr3oARvSu+mmmxJP/GMLLjyNpePgaDqgNJI777wzEee8885TS5cujWThxhiDfUzKyoH9sGHDtIiGo5R5zjnnRO8bwaTsu+++q8aNG6f+9Kc/NcjEKouyoRkHFHi2Qrj33nsTuoJO77rrrsysY7sv0wYw8c/3vmQia9kfsRXehhtumNA3bsA/++yzwmXC1nq4Cda2s+KKK6qrr746ahu0sG+//VadeOKJcRzEzXNAzZkzJ5FHrNx87LHHlFkH8R3nddo4jho1SidrPV577bWJ+LgG7WLe5N4JJ5wQX3f22WdbZeOkbNM222wzqxMecTFZsMUWW8RyscoJTw3rgMkIrPw029QsBxQccy+99FLUhmJ7wgceeCCWjXJOnTo1ygvS/de//hWtntLsQjigaGe/Op9oZ7+wSLMzbfPNHn3ZWtkxk5wADFEnXdoYPDiClSO6fuOIh1PywtixYxPXbL311s5bgDargyqYhepPwFPqw+c4towDqll92GyE/XySitR5q/fzydJlfytSdkiqesyFMRVsPytgtbnZNmJ8lxWKlFnG/b//+79orIf7ZThxMdacNm2aOv744xva6LPOOis1G1KuzeZauS2V5fPZjqZC5Q8kQAIk4IkAHVCeQFJM2xGgA6pilYd2QJkTw5j4eeGFF6wl/PLLLxOroWwOKOzrbw7Y//a3v1llPfPMM4l4mDxNW0UjZWLFlC3AEWVOwiIfSKdMMB1Q2GYOW2Dl/bXSO2fwNJ+pr0suuSQTF26IdHxseQCbYOicBOCk0Lo2j2PGjClc4AMPPDCWhTqK7Y3SAp6w1+llOaDgeMHNtY67yiqrJBwzpny8yw6/67hwpL3++utmlMRnrETScc3j0UcfnYgnv2BVgY6PSdq0INs0THSkBTjqtEysesI2WLYAJ5SOl+aAwmSz6byCnAcffDC+DtfLbQ/NOp/mGCgzAUo7SzqgoAPamX2lnc3ui5zzZWtlx0zmBGCoOunaxhxzzDGJ+n/77bfnIsWqTt3W4Jg2drMJalYHoZmF7E/AQerD5ziW7e8vlsZ+/pe+JFSbYqvPaeekvWeNcSCj6jHXGWeckZb1+Lx0tN9///3xb7YPRcos46bdKyMdPBhotrcYh33++ee2LDS0MzburdyWSm4+21ErUJ4kARIgAY8E6IDyCJOi2ooAHVAVqzukA+qRRx5JDGyztv9BsS+99NI4vs0Bha0LzIFy1qBarnbARKstSJlpA05cixVXZvp4V1WZYDqg8lY/lEmno6795JNPFLYl08yw3SK2OkgL5mQVHH4MnZeArp/rrLNO9M4PbSMHHXRQoUI/+eSTsX1BxjXXXJN5PeoZbrARN8sBZTp7EBerdbKCdLTsuOOOqdFnzZqVyLMuO47SQWMKueeee+Lrnn76afOnxGfZpmH70rSAFU86fWwLkxawgkxPRKY5oFB/ZZBcbOXD+2KQhxAOKNpZowOKdhbGAeXD1nyMmcwJwFB10rWNQTul2xccsZo8L6CN0NekvTMyTUazOgjNLGR/AhZSHz7Hsbrdh06KjlOb1YfUL/t5SaRR563ezzeWMP2MtPesskNK1WOurPqnSzVhwoS4nUPdwmr6rFCkzDJu1r0y0sT217rNxTFtvCvl2ri3clsqy5elR9/zAVm6528kQAIk4EKADigXSoxDAo0E6IBqZBL0TCgHFJ74xDt/9KB2tdVWy3Q+oJB46mrXXXdVu+yyi/r0008byo3tubQ8HLMG1RjMm3FtEzFIQMrMGnA+/vjjCZn77rtvQx6LnOjsDiiwuOCCCxLM0pyQcDjqFWZ4Apyh8xKAE1JPap1//vnRO9V0XYXDMstJaVLBakBz685VV1012vrNjGP7jHYDbQy2h7MFbL8JWTpP66+/vi1a4hwGfT169IivwbWvvPJKIo7+glVCWjZWo+jPOG666aaJ7Un1NTjiCVkdN22CAPFc2zTw0844yM2bHMbqBUwoPvTQQ3G2zMkGrN6UwcUBNXr06Khcvh1QtDPamWmPeXZmxi362Yet+RozVVEnXdsYbPmELVF1u4VVpXkBfYKOn/VOPSmnjA5CMgvdn4CDqz4Qt+g4VvfV0EkRB1QZfZi6ZT9v0vj1s6vOW6Wf/7Vk+Z9cy64lVT3myro31XmSzvknnnhC/2Q9FimzjJuXn9NOOy1uc1HP01ZjSbm2++VWbktdyqeVU7Qd1dfxSAIkQAKhCNABFYos5XZ2AnRAVazhUA4obLWnJxFwxNNCZYMcHGYNqs0bDqS/3XbbWZOXMm0Dan0hXpxtlmnAgAH6p6aO7eCAwvsszMl8bPOF1RQyDBo0KGILJ5TcxkvG5ffWJmC+F+iNN95Q48ePT9QrPBnqEvDON7M+2p7GdJEj40ycODEhF05Ul/DXv/41cd1ll11mvcxsm/CuNDjdzXKkvUPKtwMKE4RmushH0YAJ5hEjRkR/eEeHDC4OqPnz50fXjxw5Ul4efW92ApR29qsDinamVJ6dWY3P8aQPW/M1ZqqiThYZNx166KGJdibLiYH3pvTu3TuKv9JKKzk/jAA1ldFBSGah+xOUvYg+io5j2f4mGwH282H6+STl7G9F7B2Sqh5zZd2b6pLVyQF18sknJ9ro6dOn62wmji7cW7ktdSmfBlK0HdXX8UgCJEACoQjQARWKLOV2dgJ0QFWs4VAOKDhyzMnNrC2jXIssB4dZg/zXXnstkb6PFVCYLDfLdMghh/w/9s4FeK/h/v/8SzG/wdTQaY12qqatUTpKxn1Uq+NSasrP/JT+jKKtS11+qmW0aN0vcYuQuF/ifgmKlJIgQdzjmiARQuIWCRJBYnD+837SPdmzz57bc/ac7/N8n9fOfL/nPOfsfnb3tZ/dPWc/Z3eLJt3rrx8MUMq4uwHwqaeemuBhD7xp01fc4CZg9jhZd911WxldsGBBtOyyy8Z1S7Nsijh3IChEG6N4jzjiiDgtqu9pL+NuGt0BhS233NL10vp
tD4ZceeWV0ZQpU+LZf4pPbbJvkDa0AUqJWW+99eK8yvh7//33e9Pc6cUiBqg82Z0OgKJnSwxQ6FmellW7H0LX6nhm8uUqRJ10n8WyPtyxlw5V+6Y9R9KcvYfTAQcckObNez1EGXgFF9jLLi2crtfdnyiOMuVR9jmW9jdZuvTzSR76FaJNaZeafqWMvktK089cWe+mJleuHg3kDCg9q5p3Wz2Ly4jkc2W5+2RU0ZW629Iy+SvbjvpYcA0CEIBASAIYoELSRFY/EcAA1XBp12WAGjJkSPxAqwfbuXPnVs6Z+3CY9ZB/0UUXJeIfPny4N35XZtZAipaDMQ/pOqbFL+OXlp0xf2kzIfrFAKUlaNZYY42YnXTO6IOWB9l4441b91ZdddVIyzfgBi+B119/PVp66aVb5X3GGWfEGd1xxx1j/VhttdUi6UWe22ijjeIwodoYxWkbZST3/fffz0tK676WELXbBy0n6HP2YIj2BpFzX6x9BvM6DFCHHnpoIs2qm2lLB/ryknetymCDkd3JACh6lhx0Q8+MNoU/htK1Op6ZfLkNUSfLPDe99957cZuv9lGzndPc4YcfHrdHDz/8cJq3tuuhyqBN8H8uVGFWd3+iJJYpj6LPsYYF7a8hsfhIP5/koV9V6ke7tPwrZfRd0pp+5kp7N7Rz1i0GKHeGppaFTnNlufvkVNGVutvSMvkr2476WHANAhCAQEgCGKBC0kRWPxHAANVwaevlUvuhhHYrrrhiPJCwyiqrBBHvPhxmPeRrANceDJ4wYYI3Da7MNAOUBsR32GGHWKYGl337VCmS8ePHx/6UhrRZPa4BSoYaTevXjItJT09qzYIoMhDvzViXXdRXz3Z5mOXStDSUuT5q1KguSzXJCU3gtNNOi8tbum6cazDW8np5zl7aUcbLUM4ecNOsoDJ10G73pNda5s519mDIhRde2Lotw6sMb6Yu6PjQQw8lgmrvJXM/xB5QEq6BW7EzcnVUnrUfVFHDWyKRzo8qgw1GlF0evplhxp99RM+Sg27oma0dYc9D6ZrddoR6ZvLlNESdLPrcZOL/yU9+Ercxqs96SXadrinfaoO0756W4yvqQpVBWnxVmNntVx39idJctDzKPMcaFnb6aX8XU7HrqvSVfv6muH6Lh/vsYnQp1LGovpv4mn7myno3NWnqBgOUniO/+c1vxmW3wQYbeHXZpLksdxPOPnZzW1o0f520ozYDziEAAQjUQQADVB1UkdkPBDBANVzKerkMbYD67LPP4gdavYysvfbaQXLlPhymPeRrsE0v+opbf9tuu22kDb59zpWZZoCyBzgkc+jQoT5xrWudGKC0BIK9DJlJu/ZDOPnkkyMx7WUn/vbXazLgTZs2LVp99dVbZbTJJpuUGujvZRb9mnYNKK611lqt8naXp3PXUz/uuOMyMdXVxixatChuN1QHyxq2NHBq6q6Os2bNasuHPRgycuTI+L67VNX666+fGKi95ZZbYtmhDFCK/LHHHvO2PeobzjzzzMwBiTjxKSdVBhuMyLIDoOjZYnLomdGg+o6hdK2u9syX8xB1suhzk4n/7LPPjtsutYu+AWr7S3x3mV4jx3cMVQY+2eZap8ya6E+UxqLlUeY51uSd9teQWHKkn1/CQmed1o+klOK/iuq7kdh0X/izn/0s0soXWX+/+MUvEm1inUvwKS7NoNffpZdeGv31r3+Ntt566zh+rUrwhz/8IdLsvixXlrtPVqe60kRbWjR/nbSjPhZcgwAEIBCSAAaokDSR1U8EMEA1XNp1GKD0laQ9CLv55psHyZX7cGgMUPoa6Y033og0gHHIIYck4tasAoVLc67MAw88sOV/xowZrZlImnVgr4+tZfU0mJLlihqgNAvI5pR1vuuuu6auy52Vlm66d9dddyXya5ZiU75DLvvVTXkmLUsIaNDR6LhZEmzJ3ShaZ5114vsyVma5utoYV64MwGXchhtuGOdBeZ08eXJbcHsw5Pzzz4/vayDVnimg8CNGjIjv33777bHskAYoRaD6Z4yDpozMUUbiO+64I05HmZNOBxvsOMoOgKJni+mhZ7YW1XMeStfcdifUM5Mv1yHqpPvclPbhjonf1kW1K3//+9/Nrfi43377xe2bnr+KulBlkBVfp8zccq2jP1G63fII8RxreND+GhJLjvTzS1jorNP6kZRS/Jer72XanyaeucyzU5ljnQaovHTsueee0QcffJBbAGW5+wR2qitNtKVu/kK2oz4WXIMABCAQkgAGqJA0kdVPBDBANVzadRigNHhgP/BuscUWQXLlPhwqjq9//euRZtPY8Zlzzex6+umnM+P2yTTh3eNBBx2UacwyEckYds4558R/Mkj5nL561gyIY445JtJyDJoFMmfOnEh7SGkW15prrpnI1y9/+ctIy/T1sttqq60SeRLj3//+972cJdJekMD+++/fKnvNTtS+IK47+uijE7phL9Hn+tXScXb9zGtj5s2b19pfTEvduX/2hsvuTCzVwTJu0003TaRr+vTpbcHtwVh3b7pnn302MXtT7bPaKLkxY8bEskMboCRfywhpZqc94Ggz1j5VZV2ngw12PHZ6NAiR59CzxYTQszxNqX4/lK7V9czky2GIOuk+N+UNACsd9qyRH//4x4mk6bnG1PNtttkmcS/vR6gyyIqnU2ZN9CdKt1sedrvtnhd9jjU8TLlIDu3vYir080Y7Fh87rR9JKcV/ufqe1/403Rdut9128fuf/S5on9sGd9WtOg1Q+ojy2GOPbf396U9/in77299G3/ve9+LnScWvjytvvPHGzEIoy90nrFNdaaItdfPntp3277LtqI8F1yAAAQiEJIABKiRNZPUTAQxQDZe2Xi5DL8GngUz7QS1vNkPRLPseDpdbbrnoK1/5SrTMMsu0vuDffvvto4MPPrjwjBpXptbD1sO6vnC0X7yVn7/85S/RJ598UjS5lf1pUGqNNdZIsLSX7KocwQAIePTRRxP5WWGFFeIB9gFIDlE2RGDBggWtF1zVo1/96lfeWO0v2eXPnv3jBlA9LNPGfOMb30j4t8PaX8TOnj074U/tSxnnxuPbR8keDBk2bFib+MMOOyyRhn333bfl5+67746v12GAMgmRcVD71qlNtTnpPG/2p5Fhjp0ONpjwOtrtcN4AKHq2hBx6toRF2tkDDzwQbbzxxql/+mAi7aOPkLpW1zOTL98h6qT73JQ3AKx0qE2x25M333wzTp49u1P7QhZ1IcsgK85OmTXRnyjdbnmEfI6l/W3XDPr5JJNO60en7a+r73ntT9N9oVmdI0kp+asb9oAS///6r/9KtMtaoi/NleXuk9OprjTRlrr5C9mO+lhwDQIQgEBIAhigQtJEVj8RwADVcGnXYYBSFlZaaaX4ofZb3/pWkFy5D4dFHvLzInZl2i8yGuTW4K89aCJjXdp+UnlxdXLfHZQfMmRIJ2K6Koz98r7TTjt1VdpITD0E7P2NvvOd70RaUtL90ww/u67lfQm/8sorx/5lqM1y2j/JXuLPxLPDDjtE9nJPGmw298zRt8G4Ly7NpLKXldRMLy2r5z
p7MMRn0JHR6mtf+1oiHRMnTozGjh0bX6vTAGXSO3Xq1EgzywwHc3zwwQeNl9xjp4MNtuAyA6Do2RJy6NkSFmlnt912W5t+Gz03x48++sgb9cCsdgAAIABJREFUPLSu1fHM5Et4iDqZ9dzki1PXJkyYkGA9atSo2Osee+zRuqev8GVUKupCl0FavJ0ya6I/UZqzyqPqcyztb1Ir6OeTPPSr0/rRafubpe/tqYuipvvCIu+m3WCAEquXX365tdep6e9U32Xs8bmy3H0yOtWVJtrSrPxVbUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQG2wwQbxYIMGYtO+Ii6TXffhsMhDfp58V6ZtgFJYLZO37rrrxnnRQ7qWUWjKiZtmeJmXA81K8A1qN5WeEPHoqzKTHwxQIYh2vwwZbk2ZFz2q3fDNIDK5ddsYvSBmuRdffDFhIFJdmj9/flsQe8BNadWSmkXcq6++msij9k7yOXsw5Mwzz/R5iTQ4a3PaaKONIn2taq41YYBSwmR822STTeJ4Ff9RRx3lTbPvYqeDDbYsuzzyZkChZ0vIoWdLWKSddToAKnmhdc1tz0I8M/nyHaJO5j03+eLVs5Rdl3ffffeWNy2PapZR1nJ6ZVzoMkiLuwozO8919CdKc155VHmOtdNP+xtF9PPttaTT+tFp+5un724Km+4Li7ybdosBSqzc5QAvvvhiF2Hrd1nuPiGd6opk2W1RHW1pXv6qtKM+FlyDAAQgEJIABqiQNJHVTwQwQDVc2nqg00t8aLfLLrskBi3z9mIqEr/7cFjkIT9PrivTNUAp/P3335/Ii77SzXsRz4u3zH3NejIDzzpqn6hedhigern0yqfdXrt92223jZ5//vnUP21Ob+u6XlbTnNvGTJo0Kc1rfF2bwBv566+/fnzdPlEajR8dNfOoiHO/8DdL57lhiwyGfPHFF9Fmm22WSMchhxwS/27KAKW062tYM0AsHlqyrKirMthg4rAHHbLaXfTMEFt8RM+SPHy/tNykZvSl/T3++OPeDz7q0DW3PQvxzOTLc4g6WeS5yRe39h4xbavqtQb0tOSeuaYZ30VdHWWQFncVZnX3J0pzkfLo9DmW9jepFfTzSR761Wn96LT9LaLvdiqb7guLvJt2kwHq8ssvj9tgtcXaJ8rnynL3yehUVySr7ra0SP46bUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQGm/JDOooONll11WOWfuw2GRh/y8SF2ZPgOUZGimjp2fP/7xj3mig913lybLm+kRLOKaBGGAqglsl4o9+eST47qTt7+HPVCg+rbnnnum5uqII46I5crvJZdckurX3LBnGWivN5/TZs12XT/ggAN83tquXXHFFYlwo0ePbvOjC3Ye02ZAyZ8MavaSfnaamjRAKS3aC8fErxmZRV2VwQYTR9EBUPTMEFt8RM+SPEL+qkPX6nhm8uU5RJ0s+tzkxn/zzTfH7YjaExmcdtxxx9a1Nddc02vsc2WY33WUgZHtHqswq7s/UVqLlkcnz7G0v0ltoJ9P8tCvKvWjXVr+laL6biQ13RcWeTftJgPUXXfdlWiX//CHPxh0iWNZ7onA//lRRVfqbkuL5q+TdtTHgmsQgAAEQhLAABWSJrL6iQAGqIZLuy4DlGY5mAFLHTWAmedmzpzZWotasxR8s3zch8MiD/l5cboy0wxQzz77bCI/Wh5MS3p16rSMnvnLk7HeeuvFcWtvmF53GKB6vQSLp18zebTnk9qAvCX1jFR7lpLap0WLFplbieMLL7wQ1wvJ33zzzRP3fT80e8e0Sz/72c98XqJJT0+K/cjviiuumJoGW4D9daaW90tbPrDoYIhkH3zwwYm0mLSHMEBphoVmv2ofrDTGJn8ywpm4f/jDH5rLuccqgw1GeJEBUPTM0FpyRM+WsAh5Vpeu1fHM5Mt3iDpZ9LnJjV9tovoB05b8/ve/j3+fcsoprvfU33WVQVqEVZjV3Z8ozUXLo5PnWNrfpFbQzyd56FeV+tEuLf9KUX03kpruC4u8m3aTAWro0KFxm6y2+YQTTjDoEsey3BOB//Ojiq7U3ZYWzV8n7aiPBdcgAAEIhCSAASokTWT1EwEMUA2Xdl0GKGVDA5tmoEHHyZMnZ+ZOAxDG//Tp09v8vvnmm/F9+dNyXVWdK/Poo49OFanZGCZ9Ompmks99+OGHrT1ctI+L/jRQ7rqDDjqoJUuzCbKWlZo7d2607LLLxvH++te/dkX13O9VVlklzs/222/fc+knwcUJ2C/ZRZf6tI0dqmf//ve/UyOU/th1Mm/ZKlv30gxQimzDDTdMyE2bzWQSZg9wKD1ZbZPaQZPm008/3YjwHlX/V1pppdi/CZdlgHLbtGOOOcYre/z48bFc7cWQ5cwsBcW/9957Z3lN3Lv22mvjOBRWy5eUdUUGQNGzdqroWTuTEFfq1LXQz0y+/Iaok0XbGF/822yzTaJNMG2altQr6uosA18aqjKrsz9Ret3yCPEcazjQ/hoSydnL0lv6+cVsqtaPJYSLnbn6nvaMY6Q13Rced9xxJurU4913351oB++8885Uv7pRJs+u3zyD2K677ppIi2aq+pwrN4+7T0ZVXamzLXXzF7Id9bHgGgQgAIGQBDBAhaSJrH4igAGq4dKu0wClQVIzuKCjlr9K21h71qxZiQ1GfQYod5BXswOqOlfmoYcemipSaZLByM7T7bff3ubfHtiV3yOPPLLNjzFA6f5f//rXtvvmgh7w7fiK7kdjwnfbUV8u219Ab7TRRt2WRNITkIA92Fi0vp5//vkJnde+IWnu3nvvTfjV7JwFCxZ4vU+ZMiXhN8sApaWh7OXvVlhhhdbMKJ9gLYlpL+23xhprRB9//LHPa+vaI488EqdDS4rkuUsvvTT2b9qCLAOU26Yddthh3ijsdmrrrbeO9ODqc9p03d4DSuVT1I0cOTKR9ltuuaVo0NhfkQFQ9CzGFZ+gZzGKoCd16lroZyZfxkPUyaJtjC9+t31Xm1b04wQjr84yMHHYx6rM6uxPlE63PEI8x5r80/4uJkE/bzSi/Vi1frRLzL7i6nvaM46R0nRfqOWh85w+ajLPczpqZlCWK5Nn12+WAUrLQNvp+PrXvx4tXLjQmxRXbh53n5CqulJnW+rmL2Q76mPBNQhAAAIhCWCACkkTWf1EAANUw6VdpwFKS8ztt99+iYdbDRxoQNN2mrWw+uqrJ/z5DFD2ZtV6YP7e974Xff7557ao0ueuzM022yxThjZntR/Wv/GNb0Tul7v2wK785hmg5OeCCy5I5EX50ouBbfD63e9+l5m2XrjpbuAsY1TWDLBeyBNp9BO48cYbE3VFS0kWcfqq2K5jWs7ON4tQstTGqF7Y/rfYYotIxibbTZ06NRoyZEjCX5YBSmFPO+20hBFKy1/KAGwb0V9++eXopz/9aSx35ZVXjp544gk76rZz+wX8F7/4Rdt994LaAhlq7TxmGaDcNk08fM5tpzT7wl02UBztuHVu598n177mflnbyd55eQOg6JlNfMk5eraERaizunUt9DOTL98h6
mTRNsYX/yuvvJJoy9Su6av4oq7uMvClIwSzuvoTpdctjxDPsYYD7W8U0c8bbfAfQ9QPv2T/VVff055xTOim+0I9E+Y5d8+/v/3tb5lByuTZ9aul5dW3GKdnuCeffDKyP4RUO6z3sbTZTwrrys3jbuKzjyF0pa621M1fyHbUZsA5BCAAgToIYICqgyoy+4EABqiGS7lOA5TJiowr9qwXGVU0jV7LZ7mGJzPIahug9DWWNpzW4K65b476cnbMmDEmqsLHLJl77LFHpP0YfE57U2kg2sSvo/IwbNiw+Ksxd2DXZ4DSF3K2DJ1rE+7dd9+9tbSfDFv2/X333bfUwK8v7QN5bd68eS1G9hJoJn8/+MEPoksuuSRhgBvItBJ3NQLaT0hLy9nGU1PWu+yyS/Twww97I5g2bVqkl3BfONWxa665xhtOF1X/7DZG5zJ47bzzzi0Din3PpCXPACW548aNi1ZbbbVEXdRsIIVdf/31E3GutdZakfKQ5j766KNIM57sAT2lRXVbX15muccffzyRBp8Bqmyb5rZTSotmfak+apNlzeqyuanuzpgxIyuZ8b1HH3002muvvRJpNtwPPPDA6Lnnnov95p3YvGxjNXrmJ4eeLdXSu7J65qe5+GrTulb1mcmXlxB1smwb40uHrq2zzjpx27D88sunzlq1wzddBoo7BDM7DyH7E8nttDyKPMeadNP+0s8bXXCPoeuHK9/9XVbfB7Iv1JLtvmXnZ8+e3foo0X3O1YdW+vjq3XffTWS7TJ6z/Or5a7nllkvMZjfPZDrq4yLtbeRzWXKz3pdtWaF1JWRb2mn+yrSjNgvOIQABCNRBAANUHVSR2Q8EMEA1XMpNGKCUJQ0677PPPq2BB3tpK/sBWIYd7TWir7VksDBOD3lpYRR+k002MV4LH/NkyuCV5q6//vp48MSkXwO1ZvaAO7DrM0Ap/htuuKG1n4rPsCa5GuiWQarXl90TR3f5M8PNHPXypZcAXO8T0Au0KVffMW0PIfsrVV+4tddeOxOOjDJark8GFLUXtvFE+6ip/XnsscdadUryNRuziFN+tBSHXtDNfmxGtuLRV5L6Kl8DpFlOMyV9+dK14cOHZwVt3dt///3j8MqH68q2aTLmnHTSSS1jk90GKU92e/v9738/GjFiRDR//nw3ytTf2qsuLa+6/uc//zk1rHsjbQAUPXNJLf6Nni02QJXVMz/NxVcHQteqPDP58hKiTpZtY3zp0DV7aWHNki/iBqIMQjBz8xaqP5HcKuWR9xxr0k37u/jDDPp5oxFLjnXUjyXS28/K6vtA94VDhw5ty4SWkMt6NnrggQcSYcrkOc+viVd1Wu/NeibW3svaAzRrNZE8uVnvyyYzdehKqLa0Sv6KtqOGA0cIQAACdRHAAFUXWeQOdgIYoBou4aYMUHa2tEeLpv9PnDixNdNIX9Ub443tr5/OZXDT7AkNOmkJLzHBINNPGkBeQxKQsUTL7k2aNCl65513Iu09ZpwMRapjeQYj498+6uFOs5VefPHF1tKbnciw5ZU5V3ugvOgL2jrc3LlzW22Q2mZ9uavBm25og9IGQOtgUFYmelaWWBR1q56Vz0lzIQbbM5MGDvWhjv7sWY3NEe2OmAayPylKgPb3tY6eFYrydf31az/vcuiG33WXRTfkcbCkoRfa0sHCmnxAAALdSQADVHeWC6nqfgIYoBouo4EwQDWcRaKDAAQgAIEeJdDNA6A9ipRkQwACEChEgPa3ECY8QQACEIAABCAAgQEjgAFqwNATcY8TwADVcAFigGoYONFBAAIQgEBhAgyAFkaFRwhAAAJBCdD+BsWJMAhAAAIQgAAEIBCcAAao4EgR2CcEMEA1XNAYoBoGTnQQgAAEIFCYAAOghVHhEQIQgEBQArS/QXEiDAIQgAAEIAABCAQngAEqOFIE9gkBDFANFzQGqIaBEx0EIAABCBQmwABoYVR4hAAEIBCUAO1vUJwIgwAEIAABCEAAAsEJYIAKjhSBfUIAA1TDBY0BqmHgRAcBCEAAAoUJMABaGBUeIQABCAQlQPsbFCfCIAABCEAAAhCAQHACGKCCI0VgnxDAANVwQWOAahg40UEAAhCAQGECDIAWRoVHCEAAAkEJ0P4GxYkwCEAAAhCAAAQgEJwABqjgSBHYJwQwQDVc0BigGgZOdBCAAAQgUJjAddddF40aNar19/HHHxcOh0cIQAACEKhGgPa3Gj9CQwACEIAABCAAgboJYICqmzDyBysBDFANlywGqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/Nttt1209957R3feeSd/MEAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdKDLdeD222+Pttpqq+jwww9veDSZ6CDQ2wQwQDVcfpoBNWTIkGippZbiDwboADqADqAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA50uQ4ss8wyrTLabbfdGh5NJjoI9DYBDFANl9+OO+4Y7bPPPtE111zDHwzQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB3och24+uqro2222Sb6y1/+0vBoMtFBoLcJYIBquPzYA6ph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAfaAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/BigGgZOdBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEKhDAAFUBHkH7mgAGqIaLHwNUw8CJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBABQIYoCrAI2hfE8AA1XDxY4BqGDjRQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCoQAADVAV4BO1rAhigGi5+DFANAyc6CEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIVCGCAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/ANlgFq0aFH06quvRlOmTGkd33///ejLL79sOPe9Ed0X
X3wRvfbaa9HHH3/cGwkmlRAoSODll1+OXnjhhdbf/PnzC4YavN7q5KE2V+3ISy+9FL355puReIdocz/88MOWvLfffjtSWzUY3WeffRapbKZNmxYpn59++ulgzCZ5ggAEIAABCEAAAhCAAAQgAIEeIoABqocKi6R2FQEMUA0XR5MGKA18HnjggdG3vvWtaOmll46WWmqpxJ/Sst1220V///vfIw34GacBv9VWWy33b80114w22GCDaN99940uvvji6K233jIi2o5lZe65557RiBEjWgO4bcJquKCBzmOPPTbaZJNNoq9+9astTr/5zW9qiCm8yCFDhqSW1VZbbeWNUOWVVsaXXnqpNwwXe5vArFmzEu3AmWeeWThDakt8+rLFFltEegBLc/fff783nGQ98MADrWAy9H77299O9eeLN+3aGmuskZaUtutVeLQJ+8+FV155JTr00ENb+fl//+//Jdpbtb/LLbdc9JOf/CQ6/PDDo4ceeihNTNv18ePHRzvvvHO00kortWQus8wyreNXvvKVaJ111onOOuusaO7cuW3h7At1lqHiyWvjTz31VDs5bee33nprdMghh7Ta4GWXXTbBTv3XD3/4w+h3v/tdNG7cuDjsQOlOnABOIAABCEAAAhCAAAQgAAEIQKBvCGCA6puiJqOBCWCACgw0T1wTBigNyu21116JwWYNfMpYtMoqqyQG9oxRavr06XHSFf5vf/tba7DU3C9ylOHmH//4h/er/E5lahD3oIMOShjI4oQGONHMhN13393LpMxgdoCkdCzihBNOiDQQ7ZbRd77zneiyyy7zyn322Wejrbfeui3MD37wg2jS05O8YbjY2wRkcLJ1ZPPNNy+coQ8++CD685//HBnDhy3nvPPOS5Wj+vWHP/whEa+MCTKM656cZvTY8qqcK31FXRUebhya7bTffvsl2lwZUdZff/1IhnrfBwCHHXaYK6bt9+zZs1sGK5uJ2sT11lsvNkaZe2p/r7/++jYZ5kKdZag4stp4fejw2GOPmaQkjprdJHYmHzp+7WtfaxmcfPomPTRuoHTHxM8RAhCAAAQgAAEIQAACEIAABPqHAAao/ilrchqWAAaosDxzpdVtgNIgo2a9mME8xXfBBRckZiloCa611lor9iO/tgHKZOKqq65K+Dn66KOjhQsXtmTpa/cnnniiNZDsful/+umnGxFtR1fmkUceGS1YsKC1xNLUqVOjW265JTrqqKMiV6ZmFYR211xzTbTCCivEeZSRTgOh9957b6S0KF294q677ro4H6bsr7766szka9aX8aujZmZ88sknmWG42ZsEtPTb97///UR5yyjyzjvvlMrQueeem5AhvVEbo/Ygyx1wwAFxuCOOOCLh1TUiyLjy6KOPtuqflpi78cYb47CK78EHH2wZuRXuvvvua802Mnpc1AAViocyoqX1ZGAxaVh++eWjs88+u9VWmox+9NFH0R//+MfYj/zmGaBmzJiRKDPNZL3jjjsitfHG6beum7h1PP/8881t77GOMrQjctv4H//4x96PEhRGZbjRRhvF6dcsJ83UMu7zzz9vzYS1+4MsA1QTumPSxhECEIAABCAAAQhAAAIQgAAE+osABqj+Km9yG44ABqhwLAtJqtsAZQ+EajB24sSJ3nTNmTMnMRvKZ4DSHhz2wOaJJ57olSWDje1Pg4Vps2hcmZox5XMyRNmDjpKveEI5DRDbad57771bg6Gh5A+EHM16s/N00kknZSbj4Ycfjv1rSTPpBG5wEpBBx9YNcz5q1KhSGdYsFhPWPqr+ZLlLLrkkDjd69OiEV9sApZl8tgFCHm+66aY4rOJ0l66z9bioASoUD6Xvv//7v+P0qc3ScnlpTkvvGW5ZBigZXmS4MX5XXHHFNi4mDu3tp/vGrwyLTz/9tLnddqyjDO1I3DZey+qlObsd1qwn7U3oc1qO1eQvzQDVlO740sc1CEAAAhCAAAQgAAEIQAACEBj8BDBADf4yJof1EMAAVQ/XVKl1GqD++c9/xoN0GqzLWhpLCTz55JNj/z4DlPZIMYN+OqYZoCTL/bpfA4s+58pMM0AprGZc2fFrr6oQzh7MlPyLLroohNgBlzFz5sx4/yrlS8stalmsNLfPPvvEfGXwww1eAqZ+fvOb32zttWTq1a677loq05MnT451xsgwR9cwZAu+9tpr43D33HOPfSuxBJ900nV5Bij533bbbVvyixqgQvG4++6743yJwznnnOMmP/FbM8WURvnNMkDZBjv51UyvLOcy+ulPf5rqvY4ytCNz23gt55rmNOPJ6I/2/UtzmkGmvlN+0wxQTelOWhq5DgEIQAACEIAABCAAAQhAAAKDmwAGqMFdvuSuPgIYoOpj65VclwFKX8xrzx8zmLfyyitnGh+UuHfffTfaZpttop///OfRW2+91ZZeLc9l5OmYZYDSUlC2X99goCJwZWYZoO68886EzJ122qktjWUvaGaAGQBWerXc32Byxx57bIJZmhFSBkczw0yz5nCDl4CMkGbw/phjjmntqWbqqvYNyjJSulQ0O8mE1awnc67jj370o8RSn3bYG264IfbrGlPsGVC33nqrHax17hpXfIauiy++uCW/iAEqFA8tD2gvZbrSSisVWrZT7ajaXC0t6HPaE0myDNvvfve7Pm+Ja3oIXn311eMwCvv4448n/JgfdZShka1j0TZe/Mq0xVdccUXrQ4ebb745jq5p3Ykj5gQCEIAABCAAAQhAAAIQgAAE+o4ABqi+K3IyHIgABqhAIIuKqcsApaX2zICljpo9VNW5A4lZBih7UFPxb7HFFt7oXZlZBqjXXnstkacNN9zQK7PoRRnp1llnnVjmpptuGunaYHLaH8YevNayVpo94Lo99tijxUFGKHfJM9cvv3ubgL2H0nPPPReNGTMmrgOqq3fddVfhDNr1XPuOyYBttztp+w9lGaC0r9xZZ53V+tN+Sq4rYoCaPXt2K/ywYcPc4G2/Q/HQHnh23rNm+rQlIuPCuHHjEnJlVC7ijj/++ES4U045xRusjjK0IyraxssQaPOTLpV1TetO2fThHwIQgAAEIAABCEAAAhCAAAQGDwEMUIOnLMlJswQwQDXLuzUTQV+/h3Yy5NiDee4yV53E5w4kZhmgnnrqqUT8IWZAabDcztOvf/3rTrIRh9Eyc7a8EIxi4V10cu655ybyeeqppyZSZxsrjzzyyMQ9fgw+AmZfuHXXXbeVuQULFkTLLrtsrCNajq6os40XV155ZTRlypR4Jp3qlgzsWmbOdVkGKNev+7uIAcoNk/U7FA/X4BOqPTniiCPishHTJ598Mis78T3Fb7dvW265ZXzPPqm7DN1+I+sjg/XWWy9Os4zh999/v53UyuehdadyghAAAQhAAAIQgAAEIAABCEAAAj1LAANUzxYdCR9gAhigGi6AumZADRkyJB7I0yDk3LlzK+fMHUjMMkBpHyV78HP48OHe+F2ZWYOT119/fUJmWvwyfi2//PLxX9qX/zajDTbYIPryyy+9aez1i1rCa4011ojZSeeMPmjZq4033rh1b9VVV23
tv9Pr+SX96QRef/31aOmll26V9xlnnBF73HHHHWP9WG211SLpRRFnGy+0T5GcazDxGZ+7xQAVksdGG20UMwzV5oqnbZSR3Pfff79I0bSWVLXbYC2v6HN1l2GZNv7QQw9NMFRblbZ0oC8vedcwQOUR4j4EIAABCEAAAhCAAAQgAAEIFCWAAaooKfxBIEkAA1SSR+2/6jJArbjiivFA3iqrrBIkH+5AYpoBSJFp0Nke/JwwYYI3Da7MNAOUBsR32GGHWKYGU337VCmS8ePHx/6UBt+snmnTpiX8nHPOOa30KT1jx46NRo4cGR1++OGR9si59NJLvbM4vBnq0otXX311Ir9meTAtm2bKadSoUV2aepIVisBpp50Wl7eWtDQ3zq9uAAAgAElEQVTONRhrObkizjZeXHjhha0g2odHRiyjVzq6+zRp3x5z390DKi/ekEaEkDzspS5lzA3l1EcYVpoVVNQ4qPjtfkAyfPt71V2GRdt4pVcGQbEz+dVRedbefEUNb1ncQ+pOVjzcgwAEIAABCEAAAhCAAAQgAIHBTwAD1OAvY3JYDwEMUPVwTZVahwHqs88+Swzgrb322qnxl7nhDiSmGaA0EK1BQzOIuO2226bureTKTDNA2QPFkjt06NDUpBcxQMmoZNKno/a9+b//+7/oK1/5SuK68aPrWsquV532trJnUsiAJyPc6quv3srvJptsUmpgu1c59HO6NcNvrbXWapW3uxybu7/acccdVwiVbbyQ0da4a6+9NlGP1l9//UgPZsbZy18OlAEqJI+62txFixYlOJY1bH33u99NhJ81a5YpgvhYdxkWbeNNgh577LHEkpCmDVZfeeaZZ3qNaCZs3hEDVB4h7kMAAhCAAAQgAAEIQAACEIBAUQIYoIqSwh8EkgQwQCV51P6rDgOU9lwxg3Y6br755kHy4Q4kGgOUvsh/4403onHjxkWHHHJIIm7NhFC4NOfKPPDAA1v+Z8yY0drrRDMlNFhu8qOl9c4+++w0ca3rRQxQe+65ZyxTspdZZpnWbxlmtCfMzjvvnDDYmPhHjx6dGXc335SRzeRDR7MUm85DLnPVzQz6OW2ahWTK3yyXZ/NYZ5114vsyVhZxtvHi/PPPj4PIuPOTn/wklqd4R4wYEd+//fbb43sDZYAKyaOuNteV++1vfztmWORkww03jDmrDCZPntwWrO4ydNv4tI8M7ISpPTLGUqOz5iij+R133GF7L3yOAaowKjxCAAIQgAAEIAABCEAAAhCAQA4BDFA5gLgNgRQCGKBSwNR1uQ4DlIw3ZrBOxy222CJI8t2BRMn++te/HsloY8dnzn/+859HTz/9dGbcPpkmvHs86KCDMo1ZJiIZw7SknvmTQcp1MsrZ8pUHzXCaP39+wqsGypdbbrnY77LLLhs9//zzCT+99GOrrbaK82Ly//vf/76XskBaOySw//77t8pesxPfe++9NilHH310QjfsJfraPP/ngm28cPd5e/bZZxMzIdXWqb7LjRkzJo5roAxQIXlo6ThTn3TMa3PnzZvX2m9NyxW6fwsXLoxxuzPT1lxzzfhekZNNN900ka7p06e3Bau7DN02vogBSonUcoGa6Sq9sdmac+01VtZhgCpLDP8QgAAEIAABCEAAAhCAAAQgkEYAA1QaGa5DIJsABqhsPsHv1mGA0sCdGaTTsehshrzMuQOJki3jjJan0wwifbG+/fbbRwcffHDhGTWuzG9+85utGU/6ct8dePzLX/4SffLJJ3nJLHTf/rpeA/IvvPBCarjzzjsvwVPLQPWqe/TRRxN5WWGFFWKjQK/miXTnE1iwYEGk2YOqs7/61a+8AewZQfJnz1jyBoiiyDZeDBs2rM3bYYcdltC3fffdt+Xn7rvvjq8PhAEqNA+1S2Jm/vLa3G984xuxXxPGHO2ZZLNnz074U3tbxrnx+PZRqrsM3Ta+qAHK5FPGUu3jZ2apGk465s2GNTLMEQOUIcERAhCAAAQgAAEIQAACEIAABKoSwABVlSDh+5UABqiGS74OA5SysNJKK8UDl9/61reC5ModSDRL8FUR7sq0Byc1qKsBa3vAUbOqtJ9RVWcbtzbeeONMcR999FEkQ41Jx29+85tM/91+0x6U3mmnnbo9uaQvAAF7T6bvfOc70a677tr298tf/jLWcen6NttskxuzbbzwGQNk8Pja176WkDtx4sRo7Nix8bWBMEDVwWPllVeO87TGGmtkstMeWPaSh6Zt2WGHHSLNYDXu008/jWUaP/rAoIjTTCp7mU0Z2rU0ouvqLsOsNt5NS9bvqVOntmaWGQ7m+OCDD2YFS9zDAJXAwQ8IQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBou/roMUBtssEE8cKmBRw1kVnXuQGLdBiil97PPPovWXXfdOC8adNTSelWdbYTZdtttc8VtsskmcRo0e6qXnWaZmcFbDFC9XJLF0y7DrSnzoke1G74ZM3astvEibWbgqFGjEnFvtNFG0QMPPBBfGwgDVB083DY3b7bmiy++mDAQaYaPuwSoWNvGcpWdlhgt4l599dWYscJp7ySfq7sM3X7D/sjAl56sazK+2W2x8nXUUUdlBUncwwCVwMEPCEAAAhCAAAQgAAEIQAACEKhAAANUBXgE7WsCGKAaLv66DFC77LJLYvAxby+mItl2BxKbMEApXffff38iL1pK7O233y6S5FQ/2qPFDMRrQDzP7bbbbrF/zSrwzSTIk9Et9zFAdUtJNJMOex8hGVu1h1na39///vdYz1U/NGCf5YoYL7744otos802S8g95JBD4t9NG6Dq4uG2uZMmTcpC17r37W9/O+aw/vrre/2rzExbpaNmjxVxEyZMSIQzyx+6YesuQ7ffqGKAUtq1LKG972DeDFY7vxigbBqcQwACEIAABCAAAQhAAAIQgEAVAhigqtAjbD8TwADVcOnXZYDSfkn2oOVll11WOWfuQGJTBiglXDN17Pz88Y9/rJSfvfbaK5an2VB5TvvmmPi1pFgvOwxQvVx65dN+8sknx7p73XXXZQqwjRHS9z333LOw/7QZUBIgY4y9HJypSzo2bYCqi8cRRxwRc1a+Lrnkkkx2umnPmtpyyy29/o899tiE3AMOOMDrz714xRVXJMKNHj3a9dL6bZd5HWXo9htVDVBK9FZbbRXnTXsQFnUYoIqSwh8EIAABCEAAAhCAAAQgAAEI5BHAAJVHiPsQ8BPAAOXnUtvVugxQmuFgD/JqwC7PzZw5M1p11VUjfZU/Z86cNu/uQGKTBqhnn302kR8tD6YlrDp19qCuZOUtl/XjH/84jn/IkCGdRtsV4TBAdUUxNJIIzT7Snk9qC4osqadE2bNy1D4tWrQoNa1FjRcScPDBB8d1yG6bmjRA1cnjhRdeSORv8803T+Vmbmj2jmHxs5/9zFxOHCc9PSn2I78rrrhiZpmYwPbMKS3vl7acYt1l6PYbaQYozdLV0ojaBytL55Q/GeEMtx/+8Icmy7lHDFC5iPAAAQhAAAIQgAAEIAABCEAAAgUJYIAqCApvEHAIYIBygNT9sy
4DlNKtgTwzSKfj5MmTM7NzyimnxP6nT5/e5vfNN9+M70ueluuq6lyZRx99dKpIzcaw8/PLX/7S6/fDDz+MtO+M+dPAsOsefvjhhKwbb7zR9RL/fv3111uD9ybu4447Lr7XiyerrLJKnPftt9++F7NAmgsSuOeee+Ky1uB+EWcP7kvn//3vf6cGU5ti6sXpp5+e6k835s6dG6200kqxfxOurAHq2muvTcjQEp1FXd08VJ9MvnTMW/rUrotpBijlbcMNN0zITZvNZDjYRiWlI6utrrsM3Tb+mGOOMclMHMePHx/n8bbbbkvcc3/suOOOsd+9997bvZ36u4rupArlBgQgAAEIQAACEIAABCAAAQj0JQEMUH1Z7GQ6AAEMUAEglhFRpwFKA7v2YKiWe/r000+9yZs1a1Zis3ufAcod1NSMhqrOlXnooYemilSatNySnafbb7+9zb89kCm/Rx55ZJsfXbBnCGj5q88//9zrT+FNnJpF8vLLL3v99cJFzQBRHkx+iux/1Qv5Io1+Attss01c1kXr6/nnnx+HkZ789re/9QuPouiRRx6J/WpWYZ679NJLY/9GB8saoEaOHJmQccstt+RFG9+vm8e9996bSJtm5yxYsCCO3z6ZMmVKwm+WAeqhhx5KLGG4wgorRJoZ5XOazWkv7bfGGmtEH3/8sc9r61rdZei28Ycddpg3LXa7vfXWW0d6kPe5V199NbEHlPS1qKuiO0XjwB8EIAABCEAAAhCAAAQgAAEI9AcBDFD9Uc7kMjwBDFDhmWZKrNMA9eWXX0b77bdfYpBTA7AawLOdvtJfffXVE/58BijtH2MGjXX83ve+l2q0seVnnbsyN9tssyzv0Z/+9KdEGrR/02uvvZYIYw9kKp1pBigN6tr52WOPPRJLP8lYc/zxxyf8nHrqqYm4eu3HhAkTEvmRMertt9/utWyQ3gIENKvP1u/11luvQKioNVvGDqfl23yzCCXMHtD/xS9+kStfRl4ZPW35ZQ1Qu+66ayJ80f3gmuChNvd3v/tdIn1bbLFFJGOT7aZOnRppKU+bQ5YBSmFPO+20hBFKe9GNHTs28VGBjOM//elPY7krr7xy9MQTT9hRt53XXYZuGy8ePue225rB6y4bKI62/ug87aMKXxyd6o5PFtcgAAEIQAACEIAABCAAAQhAoL8JYIDq7/In950TwADVObuOQtZpgDIJuuCCCxKzXjSLSEs6abko1/BkBkRtA9TChQujk08+OdJgprlvjlrWa8yYMSaqwscsmTIEaQ8rn9PeVBp4NfHrqDwMGzYskkw5dyAzzQAlv5pBpT1VjDzNLNhuu+1afyobc33ppZduDcynzZLypbWbrs2bN6/FyF7yy+TtBz/4QXTJJZdUNiZ2U377OS3aP0fL4bmzBVXeu+yyS6TlJ31u2rRp0d/+9jdvONWxa665Jg720UcfRZrxZNcRyd93330jzXjJco8//nhcrxSmqAHq0Ucfjfbaa69EWKPDBx54YPTcc895o22Chxux2iN7pqHOZQDceeedWwYU+57JQ54BSnGMGzcuWm211RIMvvrVr0YKu/766yfiXGuttSKVaZqruwzLtvFuuy0uanfVPu20006tWV02N7VlM2bMSMte4nqnupMQwg8IQAACEIAABCAAAQhAAAIQgIBFAAOUBYNTCJQggAGqBKwQXpswQCmdGnTeZ599onXWWSfxFb0Z/NRRhh3traFN4mWwME5GHw0E2n7t80022cR4LXzMkymDV5q7/vrr29KigUnztbw7kJllgFIcb7zxRiSjl5aqsvOlcw3uipuWqepl5y735eZTs1yMAa+X80nao+jdd99t02O7vNP2zLFnwtj+zfnaa68d49WsQ3PdPQ4fPjz2l3ay//77x+Efe+yxNG+J67/+9a/jMG6c+v3nP/854d/8aIKHics+yrCm5QtlQFH7aRtPll122Va7orzvvvvurXxpdmoRp/xoqVLN/pEc5d3IVjyaRarZXjK8Zbm6y7BsG6+ZmCeddFLL2GR/7KA82f3P97///WjEiBHR/Pnzs7KXuNep7iSE8AMCEIAABCAAAQhAAAIQgAAEIGARwABlweAUAiUIYIAqASuE16YMUHZatSfJk08+GU2cOLE100hfkRvjje2vH88/+OCDSIPCWrZKhqm8Qdx+ZESeIVCVgIyd77zzTjR79uyqonoivIwlWnZv0qRJrXxreU/j1MaovemkrdHDrmacvfjii62lSDuRYdJR9lh3Gc6dO7c1g0t91eTJk1v5w0hetpTwDwEIQAACEIAABCAAAQhAAAJ1EcAAVRdZ5A52AhigGi7hgTBANZxFooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMGgIYIAaNEVJRhomgAGqYeAYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPw77bRTtOeee0ZHHnkkfzBAB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXSgy3XgqKOOirbddtvoiCOOaHg0megg0NsEMEA1XH6aATVkyJBoqaWW4g8G6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOdLkOLLPMMq0y2m233RoeTSY6CPQ2AQxQDZffj370o+h//ud/otdee40/GKAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOtDlOjBt2rRorbXWivbff/+GR5OJDgK9TQADVMPlxx5QDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQiwB1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAA
QhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPwYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAgUJfPnll9GsWbNaf2+//XbBUHiDAAQgAAEIhCFAPxSGI1IgAAEIQAACdRDAAFUHVWT2AwEMUA2X8kAYoD7++OPorbfeil588cXo+eefbx1fe+216P3334/0kuO6zz//PFKYvL9PP/00+uKLL9zg3t9FZS5cuNCbJq/QEhfVSeTlJ+t+iagGxGsaX5VRlsvionu43iGQpgPS688++6xURrL0oowgtQ8ffvhhNGPGjFYbNG3atGj27NmR6nlRJx3Oqptl2iE3zjzZZdtHu75llYfyU4aBm273t9Kptv25555r/c2dO9f14v2tNGSxLXPPx8oXaVWdyONq0txpXyLd/8c//hH/pRmhstIxEPWtKleVVV59oK5FreeTTuqa+DZd36rqRJaOm3qmY6d1zdc+dHqt0zYwLz7lr8zzcx1lnFcOitM4u1x852nPdVl135Zv4vEdOy2DOpj50meuVa0XkpNXJoa98la0bzTp07FIP5SVhl7tg5T3LF0UV/qhzvuhpuuayrNqfcvSc1PPdOy0rtn1Lus8Lx2K33Zp/qW/WS7r/ctuv9PkGyZuehRnlmyFs+XLf5k48vyadJUtqzy5bj71246rynmRtruqfhfhbPKgvBVJU5Z+cS8cAdWXpZZaKtptt93CCUUSBPqAAAaohgu5KQOUOuxnnnkmGjFiRDyQZg+qmfNTTz01uuqqq1oP9AaFXrbN/bzjcccdF51//vnRP//5z2jq1KlGRNuxrMxzzz03uuWWW6LJkycH6Wwff/zxwnly83ziiSe25afbLrz66qve/J1yyinR/PnzU5M76elJ3nBi8PTTT6eG40b3EUjTAZXlzTffXCrB8u/WA/0+4YQTCsn55JNPogcffDAaOnRoLEdhjUy1G9dff30kQ3iWk5H8pJNOisOZ8L7jaaedFl199dXRfffd1xowzJJr7o0aNSpTtvLhOrVLvvh17cYbb4y957V5ajNDOXG001RE9gcffBAdf/zxiXC2jLLneS/VoXQij6udbulZ2b5ELxS2jDQDVLfUt1BcqWvFamMndU2Sm6xvoXSi7rpWjHgxX52Wi096p8/PdZVxXjnceuutcTbOOOOMRPtlt2U6f+SRR2K/5kTpzupnx4wZY7xmHjspg7qY+RIaql5Idl6Z2Nzr6ocGWx8krvRDPs1tv9btdU0pDlXf6q5r7XT9V/LSYbfDkpBWP0O9l+elx/ceoPd6u21yzydNmpTIfF4cdp7z/LpxqV0cNmxYNHr06Myxnjy5dhroTxLFx4+aCWCAqhkw4gctAQxQDRdtEwaoOXPmtDp1u7PX4K8MRRqkta+b8zfeeCMmoS8t/vWvf3U0OHnZZZe1HjpjYf85qSJTRrR58+a5Ikv9njBhgjffJv9Zx+HDh5eKayA8y8h0++23R3qgc/OiAfM0J13RoLkbRgYI3cP1DoEsHZARNc9AYHKqumobi4xuyJgko1Kee/311yMZtk04Hc2Lhl687Os610uSviDzuU7bDcV3xx13tL6C88k112Tg9hnplc577rmn9fWf8WuOzz77bKQ2wc3HyJEjo5deesl4a8Wd1o5eeumlrZfT2HPFE72A2ek5/fTTU5maqDRLyg5T9TxLv7pBJ5S/In1JUQNUN9S3buBKXcuva6pzTdW3btCJonXNtEUhjp20gb54qzw/11XGWf2g+pJXXnklzor6aLf/NW27+tp333039mtOJF/3fEYotZkaSC3iOimDupi56Q1ZLyQ7q0wM77RjqH5osPVBVbjSD+X3Q03VNZVjyPpWd11z24q031npcNthyciqnyHey/PS42u333vvvei6667zPvvro0Ddt11eHHbfk+U3rS20r1944YXRggUL7Ohb51lyXe5N6XhI/VYms/JoM/KdF+lP2qByIQgBDFBBMCKkDwlggGq40Os2QOlrbXvWgQaeZXxRI2mcjE3uV5K2Acr404us3dnddtttLTn6OlQGIS2rJUOFHvxtf9dee23qrCVXpr58WbRoUWuJML0YawmpO++8MyFPsi+++OJEHkwaix7vvffeNpl2mrPOH3vssaLRDLi/tHyqrLKcyswwKDtbJksu95onkKYDRWe0TZw4MdYFoxM65umQcqpZkGpzTDgZvLVclR6u5bR0gGZm2m2U/OrFJ2tZgYceeiiWKf9qi9QO6c8s8XfXXXe1GdgVv+LLcvpK006z5Oex0kuSPVgng4+7DISJ023zzj777FzjkAlb5Kh47bQY9r423Zbnvqidc8450fTp0+NlTFw9eOGFF1plpLKUoU15NnHpmGaAqksnXK6h+pKiBijDcqDqW11cqWumZNuPndY1SWqivtWlE3XVtXbCnV2pUi52jFWfn+suY7cczjrrLO9HEhoUtNtmnesDpTz36KOPJsLpQ4u0dt2V1WkZ1M1M6ayrXki2WyYD0Q8Ntj5IXOmH3Bq25Hc31zWlsq76VlddW0K22JmbjrR22EhLq59571RF38vd9OgdQ+9GWU7vXHYfcc0112R5b2vnsvLspkfGNr1n6dlafxo/0ruGb/xIBqU058r1pYH+JI0e1+sgIH1mCb46yCJzsBPAANVwCddpgNKa3+qQzUOFDEP2F/l2VjVN2R509Q1WulP8NaPA5zTAa+I0R80S8DlXpm+KuML5lszTgGinTi/fJm16wJFRyeyX4h4ffvjh2K9mQuhhv1ecytvk0z5qdkbWA+kDDzwQh9MgBK53CaTpwJVXXpmbKRmBfLN7pEtZ+iPBMsrYX16rfZk5c6Y3Ti2pYLc/kq/lINOc226ktUUaLNOLlK37agfffPPNNNGt6xdccEEijNrHPHfRRRfFYbJe3ty028v05cVR5H7aMppjx47NDO6+qOmLPttpqSabo2aL2W7KlCmJ+76ByiZ1IlRfUtYANRD1rUmu1LUlWt9pXZOEuutbkzoRqq4tIVvtrEq5mJhDPD/XXcZuX6JBxDTnLi97//33p3mNr8+aNSvRpmsguajrtAzqZlZnvRAbt0xC1Y0y/dBg64N8XOmHltTEbq1rSmGd9a2uuraEbLEzNx1Z7bAkptXPUO/lbnpuuumm3Iy4Hxvkja+4cWTl2fWbVneVSNfQrHcO9UM+58r1pYH+xEeOa3URwABVF1nkDnYCGKAaLuE6DVDjxo1LvDzqq5ssZ8808hmg3HV3sx4i3CndaWvGuzLT
XtaUbnc5D31Z2KnTA5kebLQfSN5Auv21kpbh6iWXtua08u5b+9/kzX4I1MsNrncJpOmADDF5S1nqizzb6GDOi+z9ZBt5FU6G3SznGjg0KzPN2Ou2G1ltkZbzM/XdpF9LO6Qt86c0yiht/OpoZmxlpf/yyy+Pw2S98Llpz2rzsuJLu6dlT5VmdyaqllzNcvaLmgYrXeeWj2uAkn/FYbj5DFBN6kQW1zJ9SZmBPzEYiPrWJFfq2pKa0Wldk4S661uTOhGqri0hW+2sSrmYmEM8P9ddxm5fonYtzbmD1L7BOjesHUYfomTNSnbDdloGdTOrs16IgVsmoepGmX5osPVBPq70Q0tqXLfWNaWwzvpWV11bQrbYmZuOrHZYEtPqp56dQ7yXu+nJaoNMDrXXk3l21/HJJ580t7xHN46sPLt+s+quItPqC3Za0sYgXLm+NNCfeIuPizURwABVE1jEDnoCGKAaLuK6DFAfffRRYt8WzS7Ql0hZTmv9ahBVf7514bUcif1QIINVmtPDi+03bRq1KzPrQempp55KyMwbUE1Lm65fddVVLVma6ZPlNE3c7FOjAd0iMyGy5DV9z/5CyB4cVtkoX9ITn7OX20qbveYLx7XuI2DrgDuzR7P7spyWSjD12H4p0BJvWU4bRzhV8/gAACAASURBVNsGEM2EyjP0ythk6pqJM0333HYjqy1SOmVAcve7y1pKsxMDlGaUmXRnLVtZNu1ZnN179suWXsY0yGjSpKPupzk7rGacuq6IAUpf1Zv4XANU0zoRqi8pM/AnZk3Xt6a5UtcW1wy7vpSta5Jghw9d35rWiVB1zW1zOvltc+2kXBRnqOdnOy2hy1jpdPuSrHJQH2j3yepr8/rkG264IW7P854V7LKy8122DOywoZnVXS/KlkmZd5oy/dBg64N8XOmHFtc4u750U11T6uqub2XavzJ1bTHZ4v/LpENS7fpZx3u5m568uqI02R8b6BlevLKcG0dW3+P6zUuP+96S1ve4cn1psOsH/UlWiXIvBAEMUCEoIqMfCWCAarjU6zJAac8SMxCoox5Mqzq3s896iHC/8NHgtc+5Mn0PECacloSy86QZEp067TMl41LeC7i9xnDopbI6TXuZcPaDrvKiKf42w7SNTzFAlaHc3X5tHdBX3fb+QNKHNKcBK7Msnl6S7FlEeQYoPejbeqY9moo492vJNP10242stsjEa8/qU9pkkEr7ortXDVD33XdfzF37frgv3VlfV6o9FCP9+QzTRQxQGnAwMty2tWmdCNWXlBn4k641Xd+a5kpdW9yiVKlrklBnfWtaJ0LVNdNWVzlWLRfFHer5uc4yVjrdfjCrHOTf7dey9hzRBwSa6ay+Uoar+fPnFy6WKmVQJ7O660XZMinzTlOmHxpsfZCPK/3Q4urYrXVNqau7vpVp/8rUtcVki/8vkw5JtetnHe/lbnqK1JVuMkBdffXV8XuM+h8tWehzbj59/R/9iY8c1+oigAGqLrLIHewEMEA1XMJ1GaDswWJ14GlTmMtk1+3ssx5qtEG94jV/viWdFLcr0/cAYdLorpucNqtKS2tpppL5S1vGy8hNO2oPgNNPPz3Og7snSlq4brpuP+hOmDAh8eBrysY3CIEBqptKsVpabB0YP35821KWqoM+Z68JrnP7q7Q8A5S779LLL7/si6LtmjvwN3ToUK+RyG03stoiE8ns2bPjumx0X9d8zh2oK7IE30DPgFK7p82GlTdx028tsWjyqqNmt3bqihigsmQ3rRMh+hLlp8zAn/w3Xd+a5kpdi1p1q866Jj2qUt+a1olQdS2r/ShyL1QbWMfzsy/9VcpY8tx+MKsc5N/ez1T9gT5ISXN2Xyx9KupClUFafFWY1V0vlOYyZVL0nUZyy/RDg60P8nGlH6q/H6pS11Rmdde3uuqa0l7GlUmH5Nr1s473cjc9RepKtxig9OGaPi6231vSVu9x85nX//nKtIqO163fSm+ZPJbpT3wsuFadAAao6gyR0J8EMEA1XO51GaC05JXdgWt5varO7QizHmq0V5Idf9r+U67MrAcIPajZMtPi12C37S9vveE0Llqiy8i5+OKL07x19XX7QddsOq3lwUy+dNQsGA0a2A4DlE2jt89dHXBnJ6bVzREjRrT0RLOgZICxH7bzDFD2LCvpWNH255133knopsLqCzbXue1GWlvghhs2bFhCvgbZfK4XDVDTp0+P83b33XfH2bKXXdSX7EWMaXFg66TKi5rENK0TIfoSpbvMwJ/8N13fmuZKXYuiuuua9KhKfWtaJ0LVNeW7igtVLnU8P/vyVaWMJc/tB7PKQf7djzDUN6Q5s0y1+mDffn9p4UKVQZr8KszqrhdKc5kyKfpOI7ll+qHB1gf5uNIP1d8PValrKrO661tddU1pL+PKpENy3fqpayHfy930FKkr3WKActtEfdiX5tx85vV/PjlVdLxu/VZ6y+TRZVek3H1MuNY5AQxQnbMjZH8TwADVcPnXYYByX1T0AukaGDrJptsRpnVuWtZKBhvFa/6ef/55b5SuzLQHCA1C23vQaCA1bfaC4jLx6jh69Ghv3FkX9RWO+cJZMp577rks7117z37QNV+7albEySefnGCkGS62s2e/pO3DY/vnvHsJuDqgtuCss86Ky//MM89sax/eeOON+L5ZBs8ekNKDd5rTzEG7/um8qNFD9c4Nq2XdXOe2G2ltkRvONqIpnrS1xXvRAKV2zrCbNWtWnHV7iRbd77Q+V3lRGwidCNGXCKLbn0r3slyT9W0guFLXotYzRZ11TfrVaX0bCJ0IVdey6lWReyHaQLe+q5xDPD/70t9pGRtZbj+YVg7Gv47nnntu3E8ob76l9bQEq9kvSkvVusup2vLc8xBl4Mq0f3fKrIl6oXQWLZMy7zSS6+plVj802PogH1f6ofr7oU7rmsqrifpWV11T+su4oukwMt36qesh38vd9BSpKwNtgJK+aE9us+yr+iZ9MJj17ujms0j/Z8rAHDvV8Sb0W2ksmsey/YnJP8ewBNRPL7XUUtFuu+0WVjDSIDDICWCAariA6zBAffjhh4kXTG02HMK5HaHvoUaNr/ZKMoMzOuorS133OVem7wFCa9G7A8d33XWXT1zrWggD1DPPPBPnQYP1ZV7AUxM2ADfsB92xY8fGKXCXYpGO2Pu+2LO/Oh2wjiPjZEAJ2DpgZjvpaNdRzYqynfZsMvfNEo32EnNZBigtfWnC6qiBrDIDeGbfKSPDNqaYNLrthq8tMn7to50vyTdGWduPznvNAKU20nDTAKPtZs6cmSgPLS/Viev0RU1xDYROhOhLlPYyA3/y32R9Gwiu1LX665r0qNP6NhA6EaquKd+dulBtYF3Pz758dVrGRpbbD/rKwfg1R3d1At/y3Pbz37/+9S8TNPcYqgyyIuqUWRP1QukuUiZl32kkt0w/NNj6IB9X+qH6+6FO65rKq4n6VlddU/rLuCLpsOXZ9bOO93I3Pfp4V+MyWX/m/cG8d2n/2CznxpHV97h+tcSe3kH1pxVq9G6pDx1M3DrecMMNLR0KlYY0OZ3
qeBP6rTS77HycO+lP0nhwvRoB9dMYoKoxJHR/EsAA1XC512GAeuuttxIduWbyhHBuR6gHBH2douW1tPasNp93Zz5pps2cOXNSo3dlDh8+PNJguOTpAUgvy/ZyKPo6Rg9sWQPaVQ1QmsF1/vnnxwy1SWivOvtB1xgflBcZ1M4777w4j3rgMzNddP+JJ56I72GA6tXSX5xunw68++67cfmq7GWYMc4eRFI9ME77B5kXhCwDlNv+KEwZ567/rSV9XOe2G0UHI/7973/HeVC67KXq7Dh6zQClttKUjVlq0+RHbaW9l53a5E4M6p2+qCkdA6ETIfoSpb3MwJ/8N1nfBoIrda3+uiY96rS+DYROhKprynenLlQb6PIL9fzsy1enZWxkuf2gb3DK+DVHu31Sn+H7IMHu/958800TNPcYqgyyIuqUmVuudTyXKN1umYSqG2X6IbuMzXN/Xc98A8WVfqj+fqjTuqZ60IRe1FXXstof3z03HXntsK9+Sm6o93I3PebdoMxRbXmWc+PIyrPrNy8dMkYV2XPblZuVhrS8dKrjTei30uzmMVR/ksaD69UIYICqxo/Q/UsAA1TDZV+HAUozBuwOPtQLtNsR2nH4zmWM0oNWlisjU1/v+JbjcuVraT4Zw8yfjFllnL2Ro9n/pkz4bvKb9qCrNNpr9ZvyM7Nd7IEEDFDdVKLl02LrgG1wufDCC+N2QkYJLSkg9/jjj8fX7aUZ7UGpLAOUO+NGRuMyzjVA+V5E3Haj6GCEZk4aXddRbYTP2XmVv6xlIEx4e4aY1nNPc52mPU2ertvp9e23deuttyby/corr2SJ897r9EVNwgZCJ+xyds+L9iVKe5mBP/lvsr4NBFfq2qVxXaqrrkmPOq1vA6ETbv2yf5epa8p3py5UG1jX87MvX52WsZHl9iVFBuA0yGl/1KXZ7/YHCXrGNuVnf4Bi4sw6hiqDrDg6ZdZEvVC63TIxLH3HMnWjTD802PogH1f6ofr7oU7rmsqrifpWV13Lan9899x05LXDdv00BmIjN8R7uZueSy65JB4PMeMi7lFtkd1G1WmA0qo4yrf+xowZ0/oIQuNUdvw6v/322xN9k2Fkjm4+87ibcPaxUx1vQr+VTjePLiP7d5n+xGbAeTgCGKDCsURSfxHAANVweddhgHKnButlM4TzdYQaiD7++ONbf5rmfcUVV7RmU7hLeqXF78ocOnRoazB15MiRbRuYyqCVNZsqLY6y1/WwZjr1oi85ZeNoyr/9oKvZZK5zHzrFXTMmJk2aFDPAAOVS663ftg7YBih7ny/pu9mnTS8H+i3DkW14sQeXsgxQ7pe2kqVZVUWcBsLM3hOmDvrqvNtuFK2nmuVn5Or4wgsveJNl51X+bA7eAFHUWkbCyG7SAKVBcBOvjIo+p03kjR8dyyyrZOR1+qKm8AOhE6H6kjIDf8prk/VtILhS1xbvbVlnXZMedVrfBkInQtQ1zbo2z5K+o55jffsViVXINrCu52el03WdlrGR4/aDRQfg7H2a1B+YD48kVwOTpq9I2yPRxG8fQ5aBLdc975RZE/VCaXXLJETdkNwy/dBg64N8XOmH6u+HOq1rKq8m6lvoutZpH+SmI68dtutnHe/lbnqK1JWB3gNKOjNlypR4KXHTB9krs8iP7dx85nG3w5rzTnW8Cf1WGt08hupPTP45hiWAASosT6T1DwEMUA2XdR0GKHcQVy/zWlauqnM7wiIPNXlxujLtB4iFCxe27f105plnJvYqypNf9r79YKgHIM2m6mVn50fLj7lO+x3ImGAe9nTU+v/2HlgYoFxqvfXb1gHbAKU9v9Q2mLLXPmv219/ug79tlMkyQLl7aEj+3LlzC0Gzv7426fIZf9x2o2hbdPXVV8f5lXzf/lJKqJ1X+fOlwc2QjO8mzU0aoLQkqYlXR30I4P5phpvtR/vale0TOn1RE6eB0IlQfUmZgT/ltcn6NhBcqWuLB/7qrGvSo07r20DoRIi6Zs+8tdsq+zxtBnzINrCu52e3v6hSxkaW2w/a5WD8+I7uMtX2PiQjRoxo9RX6ECTN4OeTGbIMfPLNtW6uF0pjVplUeacp0w8Ntj7Ix5V+qP5+qNO6pvLqxX6o0z4oq86bdss+2vWzjvdyNz1F6ko3GKDESEu+uu8rmm3kc24+i/Z/tqxOdbwJ/VY6s/JYpT+xGXAejgAGqHAskdRfBDBANVzedRiglAV9JVHkxb1Mdt2OsMhDTZ58V6b7AKEOVkYnOy9Zg7t58eXdHzVqVByXBqt73dkPurbxwc6X9riy+Wr9ZftBHAOUTav3zrN0QEYnU/YyRl177bXxb/uraOXaNspkGaC0lJ87iyntBcKlaadV6dIsLJ+hxG03irRFkuNudLtgwQI3Ca3fdl6VjiIGqMsuuyxml9VGdZJ2byKjqDVb0W0fTXnmHZWOMq7TFzXFMRA6EaovKTPwp7zaOuy2uaHr20Bwpa4tGfjLq2Pmftm6Jj3qtL4NhE6EqGv2M4fh5h59BijN2A7dBtbx/OxrazstYyPL7UvccjD+3OMnn3yS+PhEM9/l7K+61VYVdXWUQVrcnTJrol4ozXll0uk7TZl+aLD1QT6u9EP190Od1jWVVxP1LXRd67QPykuH25Zl1U/jt8p7uZueInWlWwxQyr/9nKxnAHeZQsPIzWfR/s+E17FTHW9Cv5W+vDx22p/YDDgPRwADVDiWSOovAhigGi7vugxQWq7Ofnl/8cUXK+fM7QiLPNTkRerK9D1APPfcc4m8KF96gAvt9OWNzayTfVJCp6mqvCIPuvri99xzz03kfdiwYfFvDFBVS2Fgw2fpgJags3XenPv2frCNMlkGKOXWfEVt5OnFroiTrpkwOl511VXeYG67UaQtcsNk5UHx2unQbLE8Z7e5Wtc8zbnpyEu7jF/jx49vGYVdY9y0adPidGrpRMlO+7vjjjtiv8qbZJZxnb6omTia1olQfUmZgT/lten61jTXPH0VA1fHqWvl6poYVqlvTetEiLo2b968Vt1R/fH96fnM5+poA+22XG1liOdnX9qrlLHkufXMVw6+eHXN3rNQeRT/cePGxX2Elm0t6uoog7S4qzCru14ozUXKpJN3mjL90GDrg3xc6Yf+EfHM93bcXqkN87V/Zepap31QkTpvt2dZ9dP4q/Je7qanSF3pJgOU3k1UnubvuuuuM1gSRzefvvJPBPD86Nf+xIOCSwEIYIAKABERfUkAA1TDxV6XAUpf4JvOW8e0L0js7GpwUzMC0vZrcTv7Ig81tnzfuSvT9wChdF100UWJ/GiAwB2M9ckvc83eD0kD8KHll0lLKL9FHnQV19SpUxN8bd3BABWqNAZGTpYO6GFJm5Db5a1zPZS7rowB6rbbbkvIVNgizl3GR/tU+ZzbbhRpi+67775Emu666y6f6NY1bX5rM0kb/LQFaNk7E2bChAn2rcR52bTbyyXpC3Xb2e183n4dth4onWl72Njy7fMqL2qS07ROhOpLygz8KZ82Z3cGVB31rWmu1LV/RHXXNelRlfrWtE6Eqmt2e1P0vI420Japtj
LE87MvP1XKWPLcvsRXDr54dc2NW/t+mj5M+22prSrqbF511w033WUMZXXXC/EqUiadvNOU6YcGWx/k40o/VH8/VKWuqczqrm911bWi7Z7xVyQdxq+OWfXT9tfpe7mbniJ1pZsMUNoGwLxL6Zi2Go2bzzL9n+FcRcfr1m+lsUgeO+lPTP45hiWgfnqppZaKdtttt7CCkQaBQU4AA1TDBVyXAWr69OmJDvyMM86I9EVNlnv11VdbYU488UTvni1uR1jkoSYrPt1zZaY9QGg2kv1AonPN3gjltIGyvWzYU089FUr0gMop+qCrRF5//fVtjMUZA9SAFmHlyPN0QHXOrlta9s635FwZA5TvxUlrZmc5LeOjvYnstKTtHeW2G3ltkb5s1CwMI1vnWbOaZEAyfnXU5rhZzl3S6Omnn071Xjbt9sylDz74IJarOFVWJp1prEwAtf/u2up5ZWLC6ljlRU3hm9aJUH1JmYE/5bPp+tY0V+pa/p52Veta1frWtE6EqmvKdxlXVxtYx/OzL19V21S3L0krB1/c6i9M36Hj6aefHv/OmsHryqqrDNx4zO8qzOquF0pj0TIp+05Tph8abH2Qjyv9UP39UJW6pjKru77VVddMW1P0WDQdRl5e/TT+dOzkvdxNT15dUTzdZIByPwDUb59z81mm/zPyquh43fqtNBbNY9n+xOSfY1gCGKDC8kRa/xDAANVwWddlgFI2ND3ffsHUVPQsZ88AeuONN9q8ukvU3XrrrW1+yl5wZd5yyy2pIq644opEfvS1ptbhdZ0GVfXlufnTQ0Kes79k0Yu4OpHB4OyHkrxBBe2vIOOjrTM6xwDV25qQpwP2y5DKO60O2ssSyfCR5WRMOvvssxO6lPcVuT3TR+nIepl46623ErKzXrA0GKwlHGy9tjdd9+XDXbYjr+64a8fPmTPHJ7Z1zW3z8tJu70diGwb1pbnJ03nnnZcan33D3uNLYfXyVdQ99NBDcXwKW7ZdqFsnXK5peqz8Fu1L5LfMwJ/8N13f6uZKXYtaM56arGvSoyr1rW6dqKuuKd9lXJ1tYOjnZ1++qpSx5JUpB1/8mulv9No+zpo1y+fde63OMvBFWIVZ3fVC6S1TJnX1Q4OtDxJX+qHm+6EqdU1lVnd9q6uu+dqdrGtl0iE5efXTjquT93I3PVnvUiYuzYC1+4Ann3zS3PIe3Tiynrddv3nvPG6/lLYShis3Kw3eTHT5c5bSXCaPZfqTNB5cr0YAA1Q1foTuXwIYoBou+zoNUO4gqpbV8G3irCy7X3L4DFBmhpR5SEmbFl0GoSszba1fydRLsYnbHLVmvevcgezRo0e7XhK/ZbA6/vjjY9k+mYkAPfRDy5MYVkUezh544IHYvwlXdqC5h/D0RVLzdEDT983SOyrzGTNmeLnY+4LJX96MSjteo0tpLzX6Glvtk/Gnc9vY4ibINZpplpDPScbll18ey5V85UNfbWc5Gbbt2VhqHzRL0ucUx/Dhw+M48tpFt81LS7viEi/DREdjGHfTlxenSbeMgLa8MsvwuWEnTpxoxBY+1qkTLtcQfYkyVtYAZefR1+bWUd/sOE35UteiyNWJXqlr0ruq9a1OnXC5hqprhRuS/2xyb7fRodvA0M/PvrxVLWO3HPS1fBnnLnmrtsO3/2OazF7sh+qsF+LklkmoulGmH7LzOBj6IHHlmS/5TBq6vfPV8artk2Tauhj62aSuuuZjkXXNTUdeO2wz8dVPN66y7+Vuem666SZXZNtvPc+b8tExbylVN46sPLt+057D1J9cc801iXRotYq09zVXblYa2jL8nwtVddwuS8Mv1LO3kujmMVR/ksaD69UIYICqxo/Q/UsAA1TDZV+nAcr35b++ptfSdWbwWIOnWm/XNsCoE/UZoNyOWrNl5s+fX4mYK1OzjxYtWpQq056lpXQq3S+99FLCf1kDlGZKmQcHyauap0RiBviHPZVdRgYzgJ2WLN13DQ0YoNJo9cb1Ijowe/bs1gu+XvJ9TjN63DZCG4/nOe2zZOqWjlrmUi9TWlZAA/HSN+mXvfyPXjj0hWCWc/dz0guW0igjkQxoagO0L4U7o++qq67KNGzZcWoZPTvtWsZUg5KmDqkNVVx2fVF8M2fOtMW0nbtt3qhRo1rpVtr1p7J4/fXXW5zsJfbE3zjNyLLTpn289PKW5/TyYofT+TPPPJMXrJVnzbKyw2pJRtOP5AqwPNSlEy7XEH2Jkq3ytvMt3c1yA1Xf6uJKXWu2rhmdC1Hf6tKJuupaVr1y79XdBoZ+fnbTr3alahm75aDn+4ULF7pRpf5WP2O3bTrXrIeiru4ycNMRgplk1lUvJNstk4HohwZbHySu9EPN9kOh6lqd9a2uuqY0l3FuOvLa4SL1045fZWG/Z6idznovd9Nz5pln5r4fXHnllYm+QIagLOfGkZVn1+8NN9wQaT9bfRCtjw81y0erSOiDOLc/0vhUmnPlZqXBJyOUjvdif+LjwbXqBKRT7AFVnSMS+o8ABqiGy7xOA5SyokFe39czGszUoKXb2ZvftgFKe6Xoaytzzz5qUDZvOSsf0iyZ5557bur+TnpgsfdyUVqUFxmmFixY0IqqjAHq008/Tcgr8jWSLz/ddk17xbgzP8RKX7e6Bjs37bpvl3HWg64blt/dQ6CKDti50JdwtoHI6Ib2E9KDd56Twdue3WTCy1hj77um62pPtNRKmpNxWHXdDWdkph1lyNHMRi0JUtSp7dQLjmt4Uxzi4V5XfrIMZ1ltXlq67eunnXZay3jmfiFo/OglUy9xPidj4WWXXZao1yac8qEX4jSnr/nsGV4mnI5apkrtbVkXUieyuFbtS5QvvVDYeU4zQHVDfQvJlbr2cdvXuEYP6qpr0rfQ9S2kTtRd14q0I/poqqk2MMTzsy9PVcu403Jw06L+0O7b1a8W+QCryTIwaa7KzMgxx5D1QjI7LZMi7zSSX6QfGmx9kPJNP9Rce2fqRui6Jrkh61vddc1wyDuWTUeV+lnkvTwrPXqG9+1lq+dZe39f84yjo5Z0c593s+Jwn7ez/Nrx+M71cUbaahxZct00pJVhaB0Pqd9Kc6d5LNqfpHHhejUCGKCq8SN0/xLAANVw2ddtgDLZ0RJ7miWgztnX2euaBon1FYwGXe2XUD00ZQ346kGhrMuTqTSkOXvNeZMXpU9L6cmVMUCpszYydMwa/E5LTzde10OjnS/7fPz48blJNgM84po2KyZXCB4GlEBVHTCJ982aMfqkfZ6KONV37bM2cuTIluFGemUbknX9qaeeyv1Kz7cMp0mLjjI0ybCudOmlSmuf68VAA2adOnHULCU7vbbxXl/dyQg/b968zCjy2jw7H75zzWB85513Uuu1wqj99rkJEyZkhlPbn+bSXk5NGjXTrBMXSifyuFbpS5SvIgN/8tct9S0UV+pa83VNelRHfQulE3XXtSLtyEC0gVWen315qlrGVcrBTY+9/2nRZb0GogyqMnPzrd+h6oWRlfWe1EQ/NNj6IHGlH2q+H6qjrpk6EuI9oEr7lzd+oHQWdWXTUbV+5r2X56XH9
7Gwu/2Cea43R/eD1bw47HYuz6+JQ+9Wmv2kdwnNdNRKE1mrK+TJtdOQVpZ16LjSFUK/leYqeQyp42n8uO4ngAHKz4WrEMgjgAEqj1Dg+00ZoOxka9aPZjjp6xI9EMkIowFafe3Zb05fgGrg2Pz1W/7T8qtOVF/gVBm4T5PN9f4moBcLs/yCls2TrvWCU/uolwK1m2oztVRemeWOeiGPA5XGbtaJogaogWKXFW83c81KN3Uti061e72qE9VyHS70YHt+1odb2mdCf+rf+tV1e73o1X6o27lm6Tv9UBadavd6WS+q5bzz0LyXd86u6ZDod9PEuyM+1VGW4OuOsiAVvUUAA1TD5TUQBqiGs0h0EIAABCAAgZ4k0KsDfz0Jm0RDAAIQgEAbAfqhNiRcgAAEIAABCHQNAQxQXVMUJKTHCGCAarjAMEA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJfPHFF9GkSZPiv48//rhgSLxBAAIQgAAEqhOgH6rOEAkQgAAEIACBughggKqLLHIHOwEMUA2XMAaohoETHQQgAAEIQAACEIAA/xnRBAAAIABJREFUBCAAAQhAAAIQgAAEIACBCgQwQFWAR9C+JoABquHixwDVMHCigwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgQAGqArwCNrXBDBANVz8GKAaBk50EIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQqEMAAVQEeQfuaAAaohosfA1TDwIkOAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEAFAhigKsAjaF8TwADVcPGvuuqq0aabbhotv/zy/MEAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9CBLteBFVdcsVVG//u//9vwaDLRQaC3CWCAarj8ttxyy2iXXXaJttlmG/5ggA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOoAPoQJfrwPbbbx8NGTIkOuSQQxoeTSY6CPQ2AQxQDZcfS/A1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIMASfBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPwYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPwYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGi38gDFAff/xx9NZbb0Uvvvhi9Pzzz7eOr732WvT+++9HX375ZRuBzz//PFKYvL9PP/00+uKLL9rC+y4Ulblw4UJvmnwyq15T3ufOndvi8cILL0RvvvlmpM6kF10aX5VRllN+08q5V1lk5bef75VtB7JYqd5/+OGH0YwZM1pty7Rp06LZs2dHqr9FXZrOpumje/2TTz4pGlWrTVHb99xzz7X+VO9DuE6YipGbl05/+9pvN19Vy6poOTXZdrt55DcEIAABCEAAAhCAAAQgAAEIQKBuAhig6iaM/MFKAANUwyXblAFKg4bPPPNMNGLEiOgf//hH6t+pp54aXXXVVZFtqJCxKiuMfe+4446Lzj///Oif//xnNHXq1FSaZWWee+650S233BJNnjw5qEFKA7aPPvpodOWVV0annHJKWz6PP/74aOTIkdG9994blRngTs14QzdeffXVtryonJTH+fPnp6Zi0tOTvOEU9umnn04Nx43eIFClHfDlUHXiwQcfjIYOHRrrzQknnBCfqz24/vrrIxm481yZNsFuc8y54v3ss8/yomndV3pMOB3VXnXqqjD94IMPIrUxdlqqnNvttpufUGVVppxU/nW13W7++A0BCEAAAhCAAAQgAAEIQAACEGiSAAaoJmkT12AigAGq4dJswgA1Z86caNiwYYlBTg3WylB02mmnJa6bwc833ngjJqGv8f/1r391NFB62WWXeQ03VWTKiDZv3rw4fZ2eaED22muvTeRfPC6++OLorLPOSlwXl9NPPz169913O42u0XAyMt1+++2RBoBNmZqjDHlpTrpy4403toW5+eabI93D9S6Bqu2Am/PXX389ksHa6JWO0je1NT5jrow8WTMkq7QJJg2SUcTdeuutiXSrbmelLU1mVaaaeWXSHuKYZoAKWVZVyilU251WHlyHAAQgAAEIQAACEIAABCAAAQg0RQADVFOkiWewEcAA1XCJ1m2AevvttxOzE0488cRowoQJiaXlZGw644wzEgOhtgHKINFMB3uQ9LbbbmvJ0QwAGYS0/JYMFa7RQ0aetKWhXJmjR4+OFi1a1JrJIGOPlsi68847E/EqDTISVVkSTuk955xzYrknnXRS9NhjjyUGobUk4YUXXhj7Ubxnn312pXgNy6aOmrlll5k5V1llOdswpzLF9TaBkO2ASGh2o9oSo08y3GpJO2MAUn3XjEt7ZpT8ajZUWltgCLttwh133NEKo3DmT8u7yaii2YkmDTqa+I0s31FhVd/tcDr3tXm+8OZaCKauAUpt0vTp01tLFyqvEydOTKRTS4PquvL50ksvtYzidj58Bqi6ysotp6babsOfIwQgAAEIQAACEIAA
BCAAAQhAYCAJYIAaSPrE3csEMEA1XHp1GqC0HJU9k0eGIQ1a+pyWgrIHlH2Dse6yVRoY9jkNPNuDojp/9tlnfV5bS3PZftOWwnr88cfbZGpwtlM3ZsyYhDwZn3xOA73nnXdewm8vLUWn8rb5mnMN3MtwmOYeeOCBOJyWKMT1LoHQ7cCCBQsSM5/UbsycOdMLSMu12e2K9E/LPGa5ou2MZMiAY8svYoBKW2Zy7NixWclK3AvF1DVAyahmu0ceeSSuh2KnJUhtN2XKlMR91wBVZ1m55dRU223nn3MIQAACEIAABCAAAQhAAAIQgMBAEcAANVDkibfXCWCAargE6zRAjRs3LjE4qdkwWc6eaeQzQLl7f6QZoBTHddddl4hbBh+fc2WmDWIqrLtslr6479TZS4RpGbGs5bfcL/2z8t1peuoKl7YXlAazNbid5h566KG4/PIMBmkyuN4dBEK3A1ra0RgyddQsxSznGlE021KzkNKc2ybk1TctKWnSU8QApWVB5d+dqaklSYu6UExtA9SoUaPaonfZuQYoBVC6Tf5dA1SdZeWWU1NtdxskLkAAAhCAAAQgAAEIQAACEIAABAaAAAaoAYBOlIOCAAaohouxLgPURx99FGmfJzMwqVkC+ho+y7333nvR5Zdf3vrz7XWkJaeMPB1lsEpzTz75ZMLvpZde6vXqyswaxHzqqacSMssMGNuRi4Odj+HDh9u32861JJbt/5prrmnz060X7FkK9kC18iMjnPTE5+ylv9Jmr/nCca27CIRuB7QspW24kfE2ayadaMjYZBt8pXtZOuW2CVntjOTPmjUruuGGG1p/WYYt+bUNPjJoa0lAu27rfp4LydROj2Z5uq6IAer++++P82AboOouK7ecmmi7XT78hgAEIAABCEAAAhCAAAQgAAEIDBQBDFADRZ54e50ABqiGS7AuA5SWibMHVjXYWtW5A45ZA8PuzBvtbeJzrsysQUwtT2XnSTMpOnHa/8mWo32espwGuG3/N954Y5b3rrpnG6A0k8vdM0ezR3wOA5SPSu9dC90OuEthah+4Is6diZOmd5LltglZ7UyRuG0/9913X1yXX3nllcg1amfNCjRyQjL95JNPIs021J/PGFzEACVDk5FhGwPrLiu3nJpou00ZcIQABCAAAQhAAAIQgAAEIAABCAw0AQxQA10CxN+rBDBANVxydRmgbrrppnigVcaTEMuouQOOWQPDL7zwQiJ+3/JSQu3KzBrEdPczSptVpeX0tKeV+XNnRej+8ccfH6fvtNNOy5zF4e4Zk7eUYcMqlBmdbYCaMGFC255b0o0ZM2a0ycAA1YakJy+Ebgc0+882xr788suFuLhGm6FDh0ZffvmlN6zbJqS1MzLY6M82ungF/uei6v3ZZ5/dSr/i12/XGK0ZoHkuNNOs+IoYoNLC111WbjmFaLvT8sJ1CEAAAhCAAAQgAAEIQAACEIBAtxHAANVtJUJ6eoUABqiGS6ouA5SWxrIHirW8XlXnDjimDQwrnnvuuScRf5rRxpWZNYgpA4qdp7T4NShu+/PtIXPllVcm/EyaNMmLR4PUV111VcKv9j7pFWcboLRUl9zNN9+cyI9mRSmftsMAZdPo3fPQ7cBJJ52U0J2i7co777yTCKf6qdk/Pue2Cb56PmfOnFieZigWcfZSmnfffXcc5IILLohlaXnBvH2kQjONE+I5qWKAqrus3HIK0XZ7EHAJAhCAAAQgAAEIQAACEIAABCDQlQQwQHVlsZCoHiCAAarhQqrDAKUG0DbA6Nw1MHSSTXfA0TcwLLma2XDxxRcn0vD88897o3Rlpg1iarBay/iZfGmgePbs2V6Zisv403H06NFt/twlAjWo7M7mUD7GjBmTK6tNeBddsA1Q48aNa6VMsz5OPvnkRL4effTRRKr12zDM2q8nEYgfXUUgdDvw2WefxTphdCPPWGOAaJaSCWOOWjrO59w2wdfOaMk5I6eoAUrtgC+MvSyf7mfpe2imvvzb1zo1QDVRVm45hWi77bxzDgEIQAACEIAABCAAAQhAAAIQ6GYCGKC6uXRIWzcTwADVcOnUYYD68MMP44FWDaiecsopQXLlDjj6BobV+GqPJDPQq6NmGOi6z7kyfYOYn376aeQuJ3XXXXf5xLWuFTFAyaNtZDHpveSSSyKl4YYbbohOP/30OB8nnHBC9PDDD6cuG5aamAG+YRugxo4dG6dGeTF51lE6Yu9B89hjj8X3swbkY4GcdB2B0O2AlrS0dUZG4DKG7RNPPDERPs1w5LYJqouadag/hdHeRrasNDl2gagNMWHOPfdc+1Y0c+bMRLq0xF6aC800LR5zvVMDVBNl5ZZTiLbb5JsjBCAAAQhAAAIQgAAEIAABCECg2wlggOr2EiJ93UoAA1TDJVOHAUoDtfZAsfY9CeHcAUcNDGsGhJbh0v5MmpXgznzSTBstl5XmXJnDhw+PNDtJ8p566qnWUn72klcyBMmQkjXwXdQApTRp/yObVdq58qpZBb3mbAOUvQyiZqScd955ibzfcsstcfaeeOKJ+B4GqBhLT52EbgdceaorZdwZZ5wR65TCakk8n3PbhLQ6aa4XMUCpLTH+zVKUJm61JbaxWW1W2r5SLoNQbatJi3vs1AD1/9u7E5g5ijvv4+TlCkKAQIDYiCAIIkGQIAIogSRKYFlxLAli2WjJEhBHljMQ2GxCLkKCOQw+sTE2AQzG4ANjG4NjzGGDjY8YsA0Ym9tgsHEwl8H4RJh+9Zvd6lTXU93TM3083c98W3o08/R0V1d9qrr6mfo/1e3mU2VvZUlTV2495dF3t5JHtkUAAQQQQAABBBBAAAEEEECgOwUIQHWnPseuswABqJJrr4gAlAZkzWCrXvMaJHUHHO1j+N4rGKUASNLSSpqaWRV32y77GLo1n4Jh5kfBLN+iY99yyy2hlWZ0/OUvf2k8I0l51+92uQYNGhRoBkSdlrgAlMpgPxPHlFMBOS32gD0BqDrV+D/ymnc/4M4UUjC4lcUNarz55pve3d0+Qc8yUh+mHzcNtds0Aajhw4eH57LvuVX33Xdf+LnSfO2117x5y9vUexBrZbsBqDLqyq0n04f4XtP23VbReYsAAggggAACCCCAAAIIIIBApQUIQFW6eshchQUIQJVcOUUEoNzbL2kGUR6Lb8BRg8O9evVq/OgZTSNGjAgmTZrUmMWU5phumn379g00WDxs2LBAaduDmQoKJc2mSnM8s82aNWsisx769+/fmA1lPterZnfZA9fKi27fFTc7wt63Ku/tANQjjzzSJVvu7RLlrhkhCxcuDO0JQHVhq8WKvPuBVatWhW3CnJe6tV2aReeMG9CNO5fdPsG91afOXc22MXloFoBSwMlsqwCzb1myZEm4jbZ98MEHfZsFeZt6D2KtbDcAVUZdufVUVt9t8fAWAQQQQAABBBBAAAEEEEAAgW4TIADVbfQcuOYCBKBKrsAiAlDuYK8CRJ9//nnmkrkDju7AcDsHcNO0nyOycePGLs9+UqDIflZRO8fUPnfddVdkwNnM/HHTUxBKQSczgK1XzQ6qy2I
HoB5++OEu2daMLjfQp+c/Pfvss2GZCUB1YavFirz7Aff5RzoXPvjgg1QWmrlon0N6r3PLt7h9gq+fWbRoUZheswCUbtlpH1uBcvdHt92ztxkwYIC3z8zb1Fd+e127Aagy6sqtp7L6btuH9wgggAACCCCAAAIIIIAAAgh0lwABqO6S57h1FyAAVXINFhGAUhH03+j2gGqaW9c1K7o74OgbGG6Whvu5m6Y9iKltFYRS0Mkuy/jx491kWvpdz3KyZ2Pcdtttifvbz0NSPkaNGpW4fZU+tANQDz30kDdrs2bNivhef/31wVNPPRWuIwDlZavFyjz7Afe80bmgW72lWex2qP10+764oLjbJ/j6mfXr1wd33nln48d3Sz2TJ83mc/sPuy9Jeq98+JY8TX3p2+vaDUCVUVduPZXRd9s2vEcAAQQQQAABBBBAAAEEEECgOwUIQHWnPseuswABqJJrr6gAlG5XZw+uvvjii5lL5g44+gaGWz2Im6Y7iKn07NkOpkwa0G53eeuttyI2DzzwQGJS7nNfdPuvuiz2wH9cAEqzOtxZXnrelbEmAFWX2u6az7z7gaFDh4btQu1Dgco0i9qQaU961QzEuMXtE7L0M6+++mp43JtvvjlQ2nE/kydPDrdVHmfOnOnNYt6m3oP838p2A1Davei6cuupjL47yYrPEEAAAQQQQAABBBBAAAEEEChTgABUmdocqycJEIAquTaLCkBplpA94Pvoo482LZlmJKxduzaIe66LO+CYZWDYZMZN0zeIqXzdcsstkfJoEDhuBoVJO+7VDWhNmDAhbtPGes22sC11e666LGkCUCrLK6+8EimjXV4CUHWp7a75zLsf0PPd7LahZ6SlWdzb4M2bNy92N7dPyNLP2OWfM2dO7DH1gX2uqIxxz4uy09R2efStcRnLEoAquq7ceiqj745zYj0CCCCAAAIIIIAAAggggAACZQsQgCpbnOP1FAECUCXXZFEBqKVLl0YGivv16xdopkvS8vrrrzf2ufrqq73PdnEHHLMMDJt8uGn6BjG17WuvvRYpjwZ+Fy9ebJJp6dVNa9y4cYn7r1y5MnLstIPuiYmW9KE9qB43A8pkZezYsZFymkADASgjVL/XvPsBX6BSzxtKWnQbPAVtTXvSa9Kzo9w+od1+RoFj3erPHDfpmMq/+kf3WVC+suVtmmSXJQBVdF259VRG351kxWcIIIAAAggggAACCCCAAAIIlClAAKpMbY7VkwQIQJVcm0UFoFQM3XLKDL7qVTN/khYFYsz2uk2du7z99tvh59ruvvvuczdp+Xc3zYkTJ8amMWLEiMjxb7jhhkDPOnEXDRor2GJ+NBBrL3oelimnXvXMIz1rKm5ZuHBhZPt2B8Tj0i9yvR1smzJlSuKh5KLgo22j9wSgEtkq/2Ge/YCCSQMHDoy0kWYzgJ5//vnI9nGBCgPp9gnt9jOa8WTa8pAhQ0zyia+jR48O99G+CgD5ljxNfembdbNnz47kp5Vzsei6cuspj77blJtXBBBAAAEEEEAAAQQQQAABBKouQACq6jVE/qoqQACq5JopMgDl3mruuuuuCxRk8C3uf8v7AlBmhpQZ1L377rt9SbW0zk1zzJgxsfu7z2JSPqZPn95le3fA23eLvdtvvz0ysDtjxowu6WjFmjVrAgW6TJn1qv/8r8uyZMmSMO9JA8SmPHKwy6r3rQx6m3R4rY5A3v2A3aZMW5k/f763wJp1pH7HbKf369at825rVrp9Qjv9jALT9qyrtGkomGbyqte42/DlbWrK7r66+Zk7d667SeLvRdaVW0959N2JheFDBBBAAAEEEEAAAQQQQAABBCokQACqQpVBVmolQACq5OoqMgClW0ppUNAeUO3bt2/j1nXmdnwaDH7yySeDXr16RbbzBaDcwVDNllGAJsviptmnT59g06ZNsUnas7RULuX7pZdeimyfJgClIJIsbJupU6eGg+PyWb58eZdZZNqmTssDDzwQllGBNF0ckxZ9PmjQoHAf+RCAShKr/md59wMqsc4D+9y58sorAwUvdV7p2WxqR2o3Op/Ndtdcc03jVprNxNw+Qf2M71Z4Selotp85rl579+7tnS3ppuH2l9r32WefdTdr3K7P3TZL39rlAEHQMNTMLbscuv2n6bt9+/jWFVVXbj3l0Xf78s86BBBAAAEEEEAAAQQQQAABBKooQACqirVCnuogQACq5FoqMgClomgw2DerRYEbDcrag5v2ezsA9cknnwSaQWB/bt7r2VLTpk1rWS0pzcGDB8c+30kzuDSQbY6vV5VFgam1a9c28pEmAKUNNajt3kpL6Wkg1b0VnQbYNZArzzosq1evDu64446Ik8p20003dQnYueVRQM/2JQDlCtXv9zz6AbfUegabPbvJtBmdOzpfzO96VT+hZ6klLUl9gm6Tmeb8U0B91KhRkWObfPTv3z946qmnvFl49dVXA3dWpNlP/YsCue5ShKk5hmaU3Xjjjd5yqM9SH9fKkmddJdVT1r67lTKxLQIIIIAAAggggAACCCCAAALdKUAAqjv1OXadBQhAlVx7RQegTHF0i71777030AChGVh1XzWYfOeddwb6z3Z7ZpOCGe6Asr1v2uermLzotVmaykPcYj/bxeRD+TOzJNIGoJS+ZlvpNn4jR45sPAvKpGdeZaJn0Oj2f3VaNBPFlMF9nTlzZtOimEF8ub7xxhtNt2eDeghk6Qd8JdR5PGnSpGDYsGGNQLDaix0g1voFCxakmn3UrE9QO242++edd96JbffaX/2bb3niiScS91M/ELfkbarjaKaTe97av48fPz4uO7Hr86qrZvWUpe+OzTwfIIAAAggggAACCCCAAAIIIFAxAQJQFasQslMbAQJQJVdVWQEou1gbNmwINMNp2bJljVtmaVaRZg7UZXaPXZa83yuIpYDL22+/3Qhobd68Oe9D1CI9XUQ106HZ83pqURgy6RXIux9QcGjVqlWN58y9//77TW/36M1UzVfmbVoUB3VVlCzpIoAAAggggAACCCCAAAIIdIoAAahOqWnKmbcAAai8RZuk1x0BqCZZ4mMEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBGAECUDEwrEagiQABqCZAeX9MACpvUdJDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSKEyAAVZwtKfdsAQJQJdcvAaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggA
ACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVf/EUccEZx66qnBs88+yw8GtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2g4m1g4cKFwcEHHxxcdNFFJY8mczgE6i1AAKrk+tMMqMMOOyzYYost+MGANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA1UvA1stdVWjTo65ZRTSh5N5nAI1FuAAFTJ9XfSSScFZ555ZtC7d29+MKAN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoAxVvA9ddd11w/PHHB5dddlnJo8kcDoF6CxCAKrn+eAZUyeAcDgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyCDAM6Ay4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEerzA2rVrA/2sX7++x5eVAiJQBQHOuSrUAnlAAAEEEEAAAQQQKFOAAFSZ2hyrJwkQgCq5NrsrALVp06bg9ddfD1544YXG64cffhh8/vnnJZdh2H03AAAgAElEQVS+eof77LPPgnfffTf4+9//Xr3MkSMEchL49NNPg5dffjl49dVXG219w4YNOaVc/WQ6ueyqHdX74sWLGz9r1qypfoW1mUNdW7fYYovg//2//9fo09tMpja7ffTRR8Hbb7/dOJ83b95cm3wXndGe/LdO1eo86znXqX1zp5bbnPudck0y5e2016r1U1Xx59pUlZogHwgggEB2AQJQ2Q1JoTMFCECVXO9lBqBeeuml4MILLwy+/OUvB1/4whcag3MaoDM/ystxxx0X/OlPfwr0hdgsCsbstttuTX/22Wef4JBDDgl+9rOfBbfeemuwcuVKk0SX11bTPP3004OhQ4cGb7zxRpe0sq5YtGhRcM455wSHHXZYsPXWWwdbbbVVw2T33XcPTj755GDixIlZD1Hq/ipHXH0deeSR3ryovuL2GT58uHcfVtZL4L777gt+8YtfBIcffnijnZvzXq/qDw488MDgv/7rv4Lp06dHCvbf//3fsW1jjz32iGxrflEfEteerrjiCrNZY7A8bjt3/b777hsceuihwU9/+tNgyJAhLfUF7ZR93bp1wV577RVbDjd/Sb/vueeeLZfZ9Kd59n0rVqyI9P39+/cP8xX3prsdklynTp0al+3ADIarffv+oaDVa1Cz61qr6eVRrzNnzgz+7d/+Ldhxxx0b1yxz7dpyyy2DAw44IBgwYEDwwQcfxBrpA/3zidpakvPee+8dHHTQQcEJJ5wQ/PnPfw4mT54cfPzxx4npmg+L7D/MMdzXdv7WKaqdu3nL+nsedZ62rca1CfXFvqXZOefbp52+ueg21YpPu9eldspdVBtNW16uSc2/ByVdk3zt37eu1fpodm3SMVpNM+v1KY9+imvT/34PL+q897W9LOvyqPO07bTVa1OWcrEvAgggkEaAAFQaJbZBoKsAAaiuJoWuKSMApT9ezzjjjMjA47bbbtsIFu2yyy5hAMoekF66dGlYbu3/hz/8IdA+9jbN3m+zzTaNwSrff2O3m6b+m/2iiy6KBMjCjLb4ZuPGjUGvXr0a/yFvyrLddtsFP/jBD4L9998/Utbf/e53gWZH1WG56qqrAg1AmjKZVw0i3n777d4iPPfcc8HRRx/dZZ+vfe1rwcJnFnr3YWU9BDS76bzzzovU7c4779wIOJkBa9NG9PqrX/0qUrBHH300OOKIIyL7azulobbmW6ZMmRJ885vf7LLPd77znUBfUs3Sbj+g46svUF6TzsssZdd/7douWd7L2SztljmPvk8BJ7sc3/3ud022Yl+r5mDnf9SoUbH5bjYY3m49xF3X2k2vnXrVLF1dp2wLpfONb3wjDEaZz5TfsWPHxjop37///e9bvr7vuuuuwejRo5vOnC6y/3ALpbK0+7dOUe3czWO7v+dd5+38TWfalF59S7Nzzt4nS99cdJtq91yWS7PrUpZyF9VG2y1vO32X3Qb0vpOuSW7Z435vtz7irk06TrtptlrHefdTXJuCoKjzPq79tbo+7zov4trUapnYHgEEEGhVgABUq2Jsj8D/Cvi/VaJTmEDRAajVq1cHmvViBg50vJtvvjlQJ2kW3Y5J/8VpttGrHYAy2911112RbS6//PJAQRylpf9aevrppxszrPSFxU6rT58+Jokur26av/nNbxrP7dCX9FdeeaUx+0jBHzfNSy65pEtara74j//4jzCfCjxp0NwOlj300EPBF7/4xXCbc889t9VDdNv2Y8aMCfNt6uLuu+9OzI9ux2a21asGOHl+SiJZ5T/UF9dvfetbYb1qlpNmB5hFwRvNLLTPLzcApW11nmvA2W4fGgRMWvQ8EHsf9TH2zEp7X7cf+O1vf9v40q3j6jYl6l/mzJkTnH/++V2Cq7/+9a/tpML3WcvufunX4P68efMa/ZP6iXHjxkU8Zs2a1eg/tN9jjz3WmD1lvOwAlMmgW+ai+z7dYvWrX/1qJM+a+fbOO++YLHlfy3ZQUFO+9o/a6apVqwJdS4ypXrMEoExh3XrIel1z08u7XpctWxapR81o1owkXevNot+13ra66aabzMfeV10f7O01U1Hnnq7vurXfk08+Gfzxj39szJSyt9PfF0uWLPGmaVYW3X/oOFn/1im6nRuLdl6LqnO3rfrOPQ1cP/PMM8H3v//9SPvwlSNtAErWWa9LZbQp1yfrdSlruYtuo2558+673DbTadckt/zNfnfrI+u1Scdz08yzjovqp7g2Rf8ZKu+/RZu1w6TPi6pzt51mvTYllYHPEEAAgTwECEDloUganShAAKrkWi86AKVb6pnBIg2Czp0711vC999/P7BnQ/kCULpPu0lLr1dffbU3LQ1M29tpcDtuFo2bpm7v4
1t0Gzx7kFzpNxsA96Vj1mmAzs7j/PnzzUeRV/3nuNlOx9dzs+qy6D/BTd71es011yRmXQP8Znvd3kBtgqXeAgMHDgzrVDOWdEsT36IglKl7XwBK+/zoRz8Kt9G2Gphstpx00knhPpqFFbe4/UBc36L93cEI9WsKTrhL1rLbg32aUWgH7nSse++9NyybPGbPnh3Jgn0++QJQbpmL7vsUPDN1bL+OHDkykm/3l7Idkupeefv5z38eliOPAJRbD3HHT3tdc9PLs14ViLNnFu6www5d2qWpP12r9LmpawUbFUiIW9x8xznonxLOOuusMF2lv9NOOzW91V+R/YfKlPVvnaLbeZx7s/VVqHPlUYFquz358p02AJW1bzbHLrpNpT0nlJ8016Ws5S66jbrlzbPvMnVmv3baNckue5r3bn3E9clpr006pptmXnVchX6Ka9P/3lK/1b9F07RF3zZVqHPlK821yZd/1iGAAAJ5ChCAylOTtDpJgABUybVdZADq/vvvjwwS6bkpScu1114bbu8LQOn5IWYwS69xX4Z0DHuQUNvqi7dvcdOM+zKkffXff/bx9ZyZdhb9Z/2XvvSlMK0f/vCHscnoD2w9B8YcV8/QqcuyfPnyQLfkMHlXgDEpaHD22WeH29btuVd1qZOy86kZT6b+dU//uOWTTz4Jn5kTF4A65ZRTwrSUZprltNNOC/f5n//5n9hd3H4gqW9RIrp1nCmXXjXjyF2ylt0e7NO54S7NAlDa/thjj23k0xeAcstcdN9n+uR/+qd/isxi+fGPf+wWLfJ72Q7N6l7PATR1n0cAyq2HpOMbQ3N833XNTS/Per3tttvCsse1e7vy3DZ61FFH2R9H3rv5TnLQjnoOj3HQ6wUXXBBJz/2lyP4jj791im7nrkfa36tU57o1kqlzX/7TBqCy9s3m2EW2KR2j1XOi2XUpa7mLbqNuefPsu0yd2a+mP+2Ua5Jd9jTv3fpI6pONpTk/fdcmHdNNM686rlI/xbUp+s9Qqvekv0XTtEXfNlWq82bXJl/+WYcAAgjkKUAAKk9N0uokgXSjip0kUnBZiwpAKXCiZ/6YLyP6D+Wk4IOKqVkExxxzTPAv//IvwcqVK7uUXP9lZNLTa9KXIXeGkW8AVwdw00z6MvTXv/41cvwTTzyxSx7TrLAHMFUO/edq0nLhhReGx916660btyVK2r5Kn+k2SnadxQUhFXA0M8z0n+Qs9RdQoFWBD1P/upVl0jJixIhG4Hj8+PHezf7zP/8zTEttJc1iz8K77LLLYndx+4GkvkWJXHrppWFeVL577rknknYeZbcH+/TAeHdxB/fd/zrV9rfeemsjn74AlFvmIvs+9f1mcFi3UdNz9Ey7UJA66dpQtkOzuperbhv0k5/8JHjiiSfcagl/N+VVOXULx7jFrYek46e5rrnp5VWvui3tjjvuGNbbV77ylbgihev1Zcj+ZwtZPPXUU+Hn9hs330kO2k+32LT/OUNpx82w1vZF9R95/a1TdDu3rdO+r1qd6za9Ou/041vSnHN59M3m2EW1KZN+q+dE0nUpj3IX3Ubd8ubVdxlP+7UTr0l2+dO8d+sjqU9Oc23SMd0086jjqvVTXJu6BqCS/hZN0xbdbapW582uTW7++R0BBBDIW4AAVN6ipNcpAgSgSq7pogJQGggyA4x61eyhrIv7xSXpy5BuV2Uf/3vf+5738G6aSV+G3MDRoYce6k2z2cqpU6dG8jZjxozEXa6//vrI9rrndV0WPRfDHrTUbdg028VdTj311EYZFVhwbzXmbsvv9RDQAI99Diq4nGUpcrDP7QeS+haV4eKLL46Uzb2FZh5l1zNGBgwY0PhZs2ZNF7o0ASg9nFlpDBo0qMv+bpmL7Pvs51UtWrSo8bw7u22oT4xbynZoVvdx+XTXpxkM1z5uPSQdP811zU0vr3qdPn16pM3rnwvSLL169Yrs17t3b+9ubr6THEwC7m3H9JyguKWo/iOvv3WKbudxLknrq1jnSflNc87l0TebPBTVpkz6rZ4TSdelPMpddBt1y5tX32U87ddOvCbZ5U/z3q2PpD45zbVJx3TTzKOOq9hPcW2KtrCkv0WjW6b7rYp1ni7nbIUAAggUI0AAqhhXUu35AgSgSq7jogJQ+lJhDzA+8sgjmUvmfnFJ+jK0YMGCyPHzmAGlgVO7THH/hdusoJoFZKfjDl67+48ePTqyfdJ/ebv7VuH3wYMHR/J/3XXXRbJlD+BpZgFLzxHQw4pNW1dw8fHHH2+7cEUO9rXSt6gAGug25dKsRA3MuUueZXfT1u9pAlC+/cw6t8xJA0FZ+z7zfJyvf/3rjcPrP4TlZgx1+552l7wdkq4rreQxzWC40nPrIen4aa5rbnp51atmEJr60muz65ax0rXf3i8uSOTmO8nBpP3ee+8FeraUSV99jO8fHLR9Uf1HEX/rmPLZr1nbuZ1W2vdVrPOkvKc95/Lqm4tqU6aMrZ4Tza5LeZXb5M99zdpG3fLm1Xe5+dTvnXhN8jkkrXPrI6lPTnNt0rHcNPOo4yr2U1ybklpW9s+qWOfZS0UKCCCAQPsCBKDat2PPzhYgAFVy/RcVgDrssMPCQSENDn3wwQeZS+Z+cUn6MnTLLbdEjn/jjTd6j++mmfRlaOzYsZE0446vL2Jf/OIXwx/3P74nTJgQSWfKlCnevJmVM2fOjGyvL/l1WnSrhD333DMsg9qcaQ+6Lcy3v/3txme77rproFu8sPQcgUsuuSSsd/UDqvu4W3A1K3WRg31uPxB3biuP7n9ennXWWd6s51l23wHKHOxL2/f58vnmm2+GQYJ+/fqFm+jZdyZwsNtuuwXqC9pZ8nbw1f3nn38eHHDAAcH2228f+G516Mt32sHwVtpemuuam14e1zSVzx68Vr19+OGHvmJ3Wadb65p61qtuuehb3Hz76sG3n2Y32+nH3RaxqP6jiL91fOXM2s59aTZbV4U618wdPaNHs6f1XMmkJe05l1ffXFSbMmVs5ZxIc13Kq9wmf+5r1jbqljevvsvNZ6dek1yHZr+79ZHUJ6e5Nul4bpp51HEV+imfJdcmn0o+66pQ561cm/IpNakggAAC8QIEoOJt+ASBJAECUEk6BXymL+x65lLeyw477BAOCu2yyy65JO9+cUn6MqQZT2kGpdw0474MaXD0hBNOCNPUIJrvOVUqqBswcmf1vPLKK2E6yqMboHKx5syZE9k+Lpjm7lel393bUeiBrVrGjBkTlm3kyJFVyjJ5yUFAAz0KLNrnomYp6HlQaQewTTaKHOxz+4G4vuWxxx5rDIaa8hxyyCGxzy/Ks+zGwH4ta7Cvlb7Pzp95b99CVLcxNYs7YPX000+bj1p6zdvBV/cKapg6V/+eZkk7GJ627emYaa5rbnp5XNN0bLs8OodbCRjafw/IUQM37uLm21cP7j76/YILLgjrRmnr1lq+paj+wy5bXn/r+PKftZ370my2rgp1/uCDD4b12+z2w3Z+k567llffXFSbMvWS9pxIe13Kq9wmf+5r1jbqljevvsvNZ6dek1yHZr+79ZHUJ6e5Nul4
bpp51LF93nNt+ketcm36h4X9znbJ8vdIK9cm+/i8RwABBIoQIABVhCppdoIAAaiSa1l/uOcdgPr000/DAQP9cbf//vvnUir3i0vcl6G//OUvgb6E6Nj6OfbYYwM9KNy3uGnGfRmyv7Aqzb59+/qSa6xrFoBSXrbddtswf/rvf80SilsUwDJl0evw4cPjNq3sepXZ/o81BfD00FbzgPrDDz+8pQHNyhaUjHURePLJJyO3WzNtWX1P//79vYPRXRIp8BZaOpbbD/zrv/5rcM899zR+dL79/ve/D44++ujwPNRtv84999xAszuSlrzK7jtGWYN9rfR9bj41c2jfffdtuLm3XnOfqXfllVe6u6f6PW8H5dPUvWZ+9enTJ1BgwbTb7gpApb2uuW05j2vapk2bwvLLQUHlVpavfOUrkf1XrFjRZXc333HXd3fHP/7xj5G0Fdj0LUUEC4r6W8eX/6zt3Jdm0rruqHO1Vd3OVD8KUi5cuDDsP9Tu8gpAqdx59M1FtCm7TtxzIo/rUh7ltvNov8/aRt3y5tF32fnT+06+JrkWzX536yOuT057bdLx3DSz1nF39FNxDq4n1yZXJJ/fu6POs16b8ik5qSCAAALxAgSg4m34BIEkAQJQSToFfFZEAEr/eWoG6/T63e9+N5ecu19czJcA/Sf2W2+91bg11i9+8YvIsRXc0X5xi5vmhRde2NheAx16xsX48eMjz3vRrfUGDhwYl1xjfbMAlDY69dRTI/nU7+5/heu/Vc8444zIdvK8//77E49f1Q+nTp0aKYv97I52b8tW1bKSr6iA6tcEIuy+Qe8VhJw8eXJ0B89vRQ72uf2Am0f399NPPz1YvXq1J5ddV+VR9q6p5v8MqDz6Pjeful2dsbvtttvcjxu3tTOfK0DdzpL3oKfJT9xrWQGodq9rblvOo17da/pee+3VUlUdeuihYTuQ65IlS7rs7+bbXN+7bOisuOGGGyJpDxgwwNnif38tov9wXfL6W8dXgKzt3Jdm0jq3bGXUedw5Z9bnGYBS2bP2zUW0KbtO3HPCOMS9pr0uZS23nUf7fdY26pY3j77Lzp/ed/I1ybVo9rtbH6ZPbvfapOO5aWat4+7op4xDMz+uTc2E2vu8O+o8rs8165tdm9orKXshgAAC6QUIQKW3YksEbAECULZGCe+LCEDpDzHzR5ledR/sPBb3i4vS3n333RvPlLCPZ95rZtczzzyTeGhfmmZ/9/Wiiy5KDGaZAykYpi8e5sc3YLlmzZrg4IMPjjjpOUkKRF166aWNWWn2LC47L3PnzjWHqt3rkUceGSmzynXOOefUrhxkuHUBBVg1c1B9jt2ezXs9VDhpKXKwz+0HNAvmiiuuaPz88pe/DM4888xgv/32i+Rbwei423255chadjc9/Z73YJ+pB99r2r7Pzef555/fMFNfpodyu8vll18eMbVv0eduG/d73g4/+MEPwrrXfzCfdtppkWuMrz/35c1u5xowiVvctif/LNc1X3q+OtW6tPXqzlbbZ5994orjXX/EEUdE6nnp0qVdtnPznXaQTwEnu3y63atvKaL/KOpvHV/+s7ZzX5pJ67qjznXOfPnLX2786LlPdr3qfbNBvrTnnF3uLH1zEW3Kzpt7TuR5XcpSbjuP9vusbdQtr1v/9u9p+y47f3rfydck16LZ7776yHJt0vF8adr1ar9PU8fd0U9xbYq2nKznfTS15r91R51nvTY1LxVbIIAAAtkECEBl82PvzhUgAFVy3euPqrxvwacvtvaXiHb/s92l8H1x0a3sttxyy2CrrbZqzLA4/vjjg4svvrjxn63u/r7f3TQ16KEv+fqPbXswQ+X59a9/Haxfv96XTFvr9EBtDbbYVvZ7zd669dZbG8e11+sZUnVd5s2bFynvdtttlyqoV9fyku+uAgpE6LaSOmftdq33SbMLixzsc/uBuAGGGTNmBNtvv30k363cErPdsndVzD8AlXfft3bt2kBBOtXrSSed5CtC5L/Rtd3QoUO92yWtzDr4kabuFWg0bbWMAFSW65pbnjzq9d133w3LLwflr5Vljz32iOzve/6bm++4c9A9rnubo1mzZrmbNH4vov8o6m8dXwGytnNfmknrqlDnaid28LKIAJQxaKdvLqJNmfzoNe05keW61E657Tza77O2Ube8efRddv46/ZpkW6R579aH6fur9J2rCv1UnCXXpjiZbOurUOetXpuylZi9EUAAgeYCBKCaG7EFAj4BAlA+lQLXFRGAUnZ33HHHcMBJQZY8FvfLUNoBqqRju2na9yNXsOlnP/tZWA59+VKwLu55UknHiftM97KeMGFC8Nvf/jb46U9/2viP9CFDhgSPPvpooC/LWn70ox+Fedhpp51yPX5cvopcbw9GnnjiiUUeirQrLKBAqmZHmoF981rmALLhcfuBpL7l5ZdfbjwDx+RXfai+ELeytFp2X9p5D/bl3feNHj06rNu99947+PGPf9zlx+7b5HnMMcf4ipq4Lm8HX93rj/qdd965UZ6iA1C+4ycCOB+6bTmPetUzCk17N68KvqRZ9Dwf+3armg2n57C4i5vvtA4///nPI3nTrWt9S1HBgiL+1vHlP2s796WZtK4qda7gimlzRQagjEUrfXNRbcrkpZVzIut1qZVym/y5r1nbqFvePPouO4+dfk2yLdK8d+sjbZ+clLabZtY6rko/5Ssz1yafSvZ1VanzVq5N2UtNCggggECyAAGoZB8+RSBOgABUnExB64sKQB1yyCHhoIEGnPQHY9bF/eJS9Jch5VcPGf/6178elkUDIbq1XlmLgl277LJLePxzzz23rEMXdhz9V6sZUCIAVRhzLRLWIPbhhx8etge1i9/97nfevLcz2KegrmlrSbf4a7VvOe+888J0lb5mKra6tFJ2X9pFDvbpeFn7PgXrjX3aV10rfLNjfOU36/J2iLuu6Lapypv+wE+z2DNoW7kFX9zx0xxT27ht2R7g0+ft1qtdHtWnbjWbZnn99dcj7UDPfPMtbr7TOnzrW98K01f7iaufovqPIv7W8flkbee+NJutq0Kdq70qUH3cccc1nS1t5zfpnGtW7rR9c1FtyuSv1XMi63UpbblN/tzXrG3ULW9efZfJZ6dfk4xD2le3PtL2yUnpu2nmUcf2ec+16R/6XJv+YWHe5fn3SCvXJnN8XhFAAIGiBAhAFSVLuj1dgABUyTWsP9zzvgWfinDyySeHg0L6QtDsWUxpiu1+cSnjy5Dy9fjjj0fKottKZRncSFNWs820adMix37ggQfMR7V9JQBV26orJOOaPbTNNtuE7fzb3/629zh6VpgJZGhGhW8WhbujZt2YfXr37u1+HP7eat9yxx13hOkqfT0nqp0lbdl9aRc92Kdjttv32ffoP/bYY4Pnn38+9udPf/pTxFLlamXJ2yGP64rybw+KJV0vWm17zWzc9NwBPu3fTr2qHs25pFddm9IsTzzxRGQ/zSr2LW6+09SDAoL27Kp9993Xl3RjXVH9RxF/6/gKkbWd+9Jstq6KdZ6U57TnXFIa5rM0fXNRbcrkodVzIo/
rUppym/y5r1nbqFvevPou5ZNrkltbzX936yNNn9wsVTfNPOq4iv0U16ZmLSHb51Ws82wlYm8EEEAgmwABqGx+7N25AgSgSq77ogJQel6SPVh1++23Zy6Z+8WlrC9Dyrhm6tjl0a0VyljOOOOM8Lh6JpT+Q7XuCwGoutdg/vk/8sgjw3au5wv4lquuuircRudi0qC+2f+oo44K97nzzjvN6i6vrfYtU6dODdNVXrLMTExT9i4ZDvJ/BpRvIEjHbafvu/baa0OfMWPG+LIfrnvppZfCbWV5+umnh5+leZP3oGea64puvaLnVc2ZMyc2i2kHw1tte7EH/L8P3PTyqtcrrrgiUk8XXHBBs6w0Ph8xYkRkP91y1re4+U5TD+PHj4+k3bdvX1/SjXVF9R9F/K3jK0TWdu5Ls9m6KtZ5Up7TnnNJadifNeubi2pTJg+tnhN5XZealdvkz33N2kbd8ubVdymfXJPc2mr+u1sfafrkZqm6aeZRx1Xsp7g2NWsJ2T6vYp1nKxF7I4AAAtkECEBl82PvzhUgAFVy3RcVgNJ/vNsBG32hbbYsX7688WyVvfbaK3j//fe7bO5+cSnzy9Bzzz0XKY9u9fPiiy92yWOeK/72t79F/rt77NixeSbfbWkRgOo2+lIPrFmPml15wgknBHrWWdKiwWzTXxx44IHeTd2B7Kefftq7nVm5efPmYPfddw/T1XPV4pZW+xYNdJv86lUDkfaSd9nttM37sgb7Wu375K5nPskl7S311OcbT12TmrUXY6DXvB3SXFcOOuigRn6vvPJKOyuR92kHw1tte5GDeH5x04sb4Gu1Xhc+szCsI9XVDjvskKqe7P9U3mqrrWJvsejmu1k9qI189atfDfO06667hs9N9LAERfUfRfyt48t/1nbuS7PZuqrVebP8pjnn8uybi2pTppytnhNJ16U8y23y575mbaNuefPqu7gmuTWV7ne3Ppr1yWlSddPMo46r1k9xbZqdpilk2qZqdZ6pMOyMAAII5CBAACoHRJLoSAECUCVXe1EBKBVDA89mUFGvS5YsSSydbpFltl+6dGmXbd9+++3wc22nWzdlXdw0L7/88tgk9Z/5Jn961XMJfMtHH30UjBw5MvxZvHixb7PEdbp9w3777RceTzM50txyLDHRinxoP9Pq+OOPr0iuyEbeAjNnzgzb76RJkxKT/+EPfxhue9ZZZ3m3dW8bdsstt3i3Myvt42+99dbBe++9Zz7q8ur2A80GWuxb+6kv0H+72ot97DzKbqdt3tsPVFce5NPK4pY5j75Px3/kkUfCukx7e1c7AKmyPPzww6mLkrdDs7p/+eWXw/INHDgwNp9pBsO1s1sPWa9rbnp51avyeuihh4ZlVz3FzWYyKO7stqSyufluVg/XX399JC+akZa0FNl/5P23jq8cWdu5L80068qs86T2kSavac65PPvmItuUytvqOZF0Xcqz3HF1kbWNulxYpccAACAASURBVOXNq+/imhRXY8nr3frIen762nRedVxmP8W1Kdpusp730dTS/1ZmnefR9tOXjC0RQACB1gUIQLVuxh4ISIAAVMntoMgA1GOPPRYZINIDUTds2OAt4YoVKyLPzPAFoNzBrIsvvtibVisr3TQvueSS2N2VJ90eTANv5sf3TCb7i762+81vfhObpu+DV199Nfja174WHkOBqLQPfPelV6V1+k9UzYowfnqAPEvPFLDPg6OPPjrQH0a+RQ8Ftp8BddNNN/k2Cz777LPgG9/4Rth2NLvpk08+8W6rdvbv//7v4bbNbpHn9gNJAwz9+/cP01U7Vj42btwYyUfeZY8k/n+/DBs2LJKPiRMn+jaLXeeWOY++Twc75phjwnyl7aNV56ZP0OuZZ54Zm2/3g7wdkupex77ooovCvA4fPtzNTvh7msFwbezWQ1qz8EDOGze9vOpVh5k9e3ZkVu52220X6D+Rfcv69esD+yHoe+65Z+ItZF955ZXQVW0grh50bmswyG4v3/zmN2P/tjB5K7L/yPtvHZNn+zVrO7fTauV9kXXutlWdW1mWNOdcnn1zkW1KDq5P3DmhbZtdl/Isd1wdZW2jbnnz6ru4JsXVWPJ6tz6yXpt0NDfNvOq4yH6Ka1NyO8l63ienHv9pkXXuttOs16b4UvAJAgggkI8AAah8HEml8wQIQJVc50UGoDRj57zzzosMFOmLoAac7UW3BvnSl74U2c4XgNKzROxBJwVmNACQZXHT/M53vpOY3C9/+ctIHvbYY4/Gw43tnewv+spvmgCUZjzplnvnn39+ZDD+4IMPDnTLip6yuA+kVzAqzbN8ekr5O6kc7nmgWQJq5/bywgsvBApCmvNa7+OC1NpPt9Ez2+pVt/fS4IC9KCh1yimnhNttv/32gWatJC1uP6DbwtgzDpWn+fPnR4IPOr7arzv7Sccpouxu/t3/dm/1uXRumfPo+8aNGxe6y0cBwzSLG1DQrdrSzhzN2yFukFcBlSFDhkTKp/LGLWkGw7WvWw9Zr2tuennUq11GzTz6whe+EDrsvPPOwbRp0yLnrc43+/lrO+20U9DslpkKoNrntl0PmlWsNG+88cZA10R7u9NOOy3x1nt23ovqP/L+W8fOs3mftZ2bdNp5LarO3ba6zz77ZPqbLs05l3ffXFSbUj25PlmuS3mX29eOsrZRt7x59F1ck3w1lW6dWx9Zr02+Np1HHZvSFNVPcW0ywv7XrOe9P9V0a4uqc7ftZ702pSsNWyGAAALtCxCAat+OPTtbgABUyfVfZADKFOXmm2+OzHrRLCJNndft19zAkxlYsgNQml2gBwhrEMt8bl51i6cpU6aYQ6V+TUrz1FNPDfRcB9+iZ1NpwM0cX68qw6BBg8JZEO4X/WYBqO9973uR9JSmnq/Rq1evYPXq1b5s1G7dxx9/3DCyb79nDDXb67bbbss08FQ7kA7IsHseqL41cK36PvHEExuzI+zZcGoby5YtayrTr1+/QLfUM+1Haei5UbolpvoVO031bwsWLIhNM6kfUPrbbrttJCBsjqlXBcv0HB3fUlTZdax58+YFZ5xxRlh+O08XXnhhsGjRIl+WwnVJZW6371uzZk3Qp0+fLjNElbeTTz45mDNnTnh8+41me/7hD3/w7qd+ddSoUfbmkfdFOajO1RbtH/XHtrN5P3Xq1Eie7F+aDYYn1UM717Wk9NqtV6XpW6ZPnx7stttuERPNYvznf/7nRoDIPgf33XffQPUctyi4q+un7/quQKR+jLf9qnoaPHhwJFAcdwx7fZ79h52u3mf9W8dNT79nbee+NNtZl2edJ7XV73//+96gfpo8NzvnlEYRfXPebSrJR+dAO9elIspt6iRrG00qb7t9F9ckUzutvybVRzvXJuUgKc1261hpukue/RTXpq63wre9s573dlpZ3udZ50ntNMu1KUv52BcBBBBII0AAKo0S2yDQVYAAVFeTQteUEYBSATQAefbZZwcHHHBA5L+n7QElBXb0LBj9l6cCFmZR0Mf+j2t7H70//PDDzaapX5ulqYBX3DJ27NguA2IacDOzO9wv+s0CUO
ecc06Ynh6qrv+yj7u1WFyeqr5eM13cerN/1yCj78tk1ctF/uIFNLPtmmuuaQSb7MFlncv2+aw2r+e3aMAo7aIAtf7r0h5wtAe9dbsv/WekzvOkpVk/YNqojqN+Rn2YnlWn5zolzb4ssuw/+clPEs+lX/3qV0lFbpjY/qaM5rWdvs9+LpJJx36Ne66Xe+sWex+933///WPLUrSDmxff77NmzYrNn902fbM8m7W9Vq9rzdJrp17NNc1XyFWrVgW6fZICsSYgbM5BtS/9Z7tmH+iB7EmLgs4+W7NOgS3NYtx7770bs6ouvfTSYPLkyZG/EZLS932WV//hSzvL3zq+9LK2c1+a7a7Lq86btdUdd9yxrSw2O+eUaFF9c55tqpmPOTdauS4VVW6ZZm2jzcrbTt/FNamtU6ixU7P6aPXapESbpdlOHcddn/Lqp7g2/eN7uK81ZT3vfWm2uy6vOm/WTtu9NrVbLvZDAAEE0goQgEorxXYIRAUIQEU9Cv9NX2DTPig+r8ysXbu2cTuruXPnNmYa6Y/8uC8SeR2zyuko2KTbE9lBtyrnl7wh0I7ABx980JgJoVvZLVmypHHryqxBR936Ss+PU1+iNHU7vnXr1rWTvUL3KaLshWaYxDMLpBkMz3yQiiSgLz16ZsKLL77YOK+bBZ0qku3G7Kki+4+e/LdOFeu8nXMu776Za1JVzm7ygUDQePYo16auLYFrU1cT1iCAAAJ1FiAAVefaI+/dKUAAqmT97ghAlVxEDocAAggggECpAu0MhpeaQQ7WowX0RVTPldSMrFZ+6nzbX865Ht2kKVwLAp14/rfAw6bdLED77OYK4PAIINDjBAhA9bgqpUAlCRCAKgnaHIYAlJHgFQEEEEAAgXwEGAzPx5FU2hPQ8+nMrdpaeZ0xY0Z7B6zAXpxzFagEslAJgU48/ysBTyZSCdA+UzGxEQIIIJBagABUaio2RCAiQAAqwlH8LwSgijfmCAgggAACnSXAYHhn1XfVStuJA3ycc1VrheSnuwQ68fzvLmuO27oA7bN1M/ZAAAEEkgQIQCXp8BkC8QIEoOJtCvmEAFQhrCSKAAIIINDBAgyGd3DlV6DomzdvDl5//fWWf9avX1+B3LeXBc659tzYq+cJdOL53/NqseeWiPbZc+uWkiGAQPcIEIDqHneOWn8BAlAl1yEBqJLBORwCCCCAQI8XYDC8x1cxBayYAOdcxSqE7CCAAAIIIIAAAggULkAAqnBiDtBDBQhAlVyxBKBKBudwCCCAAAI9XoDB8B5fxRSwYgKccxWrELKDAAIIIIAAAgggULgAAajCiTlADxUgAFVyxRKAKhmcwyGAAAII9HiBMWPGBCNHjmz8rFu3rseXlwIi0N0CnHPdXQMcHwEEEEAAAQQQQKBsAQJQZYtzvJ4iQACq5JokAFUyOIdDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQyCBCAyoDHrh0tQACq5OonAFUyOIdDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQyCBCAyoDHrh0tQACq5Oo/8cQTg7PPPjsYNmwYPxjQBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtIGKt4GhQ4cGxx13XHDZZZeVPJrM4RCotwABqJLrTzOgDjvssGCLLbbgBwPaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANlDxNrDVVls16uiUU04peTSZwyFQbwECUCXX31FHHRWcfvrpwaxZs/jBgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6ANVLwNzJgxIzjiiCOCSy+9tOTRZA6HQL0FCECVXH88A6pkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkEOAZUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz93RGAWrduXbBy5crgxRdfDJ5//vnG6xtvvBF8+OGHweeff95F4LPPPgu0T7OfDRs2BJs3b+6yv29F2jQ3btzozZMvzTzXvfvuu8GSJUsyH/vjjz8O/v73vwevv/56sH79+jyzmJhWnK/qKGnRxTOunvUZS30EursNqC/46KOPgmXLljX6m1dffTXQeaVzOs2i7eLaYqvrff1amjywDQIIIIAAAggggAACCCCAAAIIIOATIADlU2EdAs0FCEA1N8p1i7ICUBqMfvbZZ4OhQ4cGf/7zn2N/rrvuuuCuu+4K7ECFglVJ+9ifXXnllcFNN90U3H///cErr7wSa9VqmoMHDw4mTpyYS1AoNlNB0Cj3ww8/HPTq1atR5pdeeilpc+9nslYaN9xwQyONq666KvSTzYwZMzIHtrwHtlYq4GXXi3nfu3fvYM2aNdaW0bcLn1no3U/7P/PMM9GN+a3SAt3VBhRonTVrVtC3b9+wLdnngPqIsWPHBgp6xy2rV68Oz0HTdrO82v1Z3DFZjwACCCCAAAIIIIAAAggggAACCKQVIACVVortEIgKEICKehT+WxkBqPfffz8YNGhQOBisgVwNCCsYcv3110fWm0Het956Kyy7Zhs8+OCDbQ0I33777d6ZP1nSVBBNM4vyXDRD4rnnnosMmsui1QDUp59+GowaNSpi2r9//6Bfv36RdZMmTUo9W6ydcirI9MADDwQa7Dd1al4VyItb1FbGjRvXZZ/x48cH+oylPgLd0QbefPPNQEFs09b0qjao/kfBT3u93itQ7Zs1+cEHH3TZ1t23ld8JQNWn3ZJTBBBAAAEEEEAAAQQQQAABBOogQACqDrVEHqsoQACq5FopOgCl27/ZMxGuvvrq4Iknngjs26kp2OQGSOwAlCHRrAZ70FdBFKWjGT8KCOlWWwpUu
EGP0aNHx874cdOcMGFCsGnTpkCBnFWrVgWLFi0K/vrXv0aOqzzceuutkTKYPLbzKqPhw4d3OYaO89prr6VOUg52OnJfvnx5o+z6zC3Hfffdlzrtdjd89NFHveVSXSUtqjNT16pTlvoKlNUGNONR/YtpNwpu6xafCjZrUZBXszDt/kjbajaUe4s8NwCl2YRLly5t3L5P286dOzc8jtJYvHhxIw0dS0HjPn36RD4nAFXf9kvOEUAAAQQQQAABBBBAAAEEEKiiAAGoKtYKeaqDAAGokmupyACUgjgDBgwIB2IVGIqb0aNbXtmDx74AlG6ZZQaX9Tp58mSvlgaZ7e30XrOLfIubpmZE+JannnqqS5oahM6y6FZhmtnlBszsvOs2ZmmX+fPnR/KowXd7URDKvgWijqtn5BS5qL7t8pj3w4YNawQO446t2wSabefNmxe3GetrIFBGG1i7dm1k5pP6EgVffYtuv2n3NWpnuvWjvbgBKM2sspe//e1vYfvU/npem7288MILkc8JQNk6vEcAAQQQQAABBBBAAAEEEEAAgawCBKCyCrJ/pwoQgCq55osMQE2fPj0yCKuZEEmLPUPHF4Byn9sUF4DSMcaMGRM59pQpU7yHdtOMC0BpZ80YMkERvWq2VLvLhx9+GJklceONNzZmhmlmlX2MpOfU2MfWrIyBAweG+9522232x+F7BeLs9DUbrcgl7jlAyoMG8eOW2bNnh/l0gwNx+7C+mgJltAHd7tFu15q5mLS4ASTNwNy4cWO4ix2AGjlyZLjevHH3dwNQ2k63GDV5IgBl5HhFAAEEEEAAAQQQQAABBBBAAIE8BAhA5aFIGp0oQACq5FovKgD1ySefNJ7zZAZgNeNAsxSSlvfeey+44447Gj+6/Z276FZ1Jj29KmAVt7izgXRrOt/ippkUgFqwYEHk+BpgbnfR7DA9m0kuCrZodpKWOXPmRI6RNgC1YsWKyH7Tpk3zZk23CLNnXOnZOO7tx7w7trnSnmFmD8ir/vRMHrUT32Lf4ixu9ppvP9ZVT6DoNqBgrt2m9Qwocz7FaSjY5D4Tym5ndgBKsx/dJU0A6vHHHw/PSQJQriC/I4AAAggggAACCCCAAAIIIIBAFgECUFn02LeTBQhAlVz7RQWgnnnmmXDwVcGGPJ435AaLkgJQ7qwLPcPFt7hpJgWgdBsuOwCmWRNZFh1bg+f28uSTT0aOkTYAZd+yTnmMu9WhjqXb39nlUOCvqMUOPuh5W+6xJ06c6D00ASgvSy1XFt0G3Ntj6tlwaRZ31pTdFnV7TAWG9eMLkqYJQOncNmk0C4ilyS/bIIAAAggggAACCCCAAAIIIIAAAkaAAJSR4BWB1gQIQLXmlXnrogJQ9957byTIkcdt1NxgUVIAavHixZHj+26jJTw3zaQAlPssm7hZVZs3bw70TCvzY9/aq1mFtRuAuueeeyLlTXq207hx4yLbpg1yNcu773M7+KDb/dm/myDYsmXLuuxKAKoLSW1X2HVeRBsYNWpUpD2//PLLqazcIHnfvn1TzwZME4BKlQk2QgABBBBAAAEEEEAAAQQQQAABBNoQIADVBhq7IBAEAQGokptBUQEo3QbLBBj0mscsGzdYlBSAeuSRRyLHj3v+lJtmUgBKg+d2meKOrwFwe7ukZ1W51d1uAErPkLKPqYtQ3CILe1vf82vi9m11vR180C3JtIwfPz5yfM2KUtDOXghA2Rr1fl90G7jmmmsi7SltX/POO+9E9tM5oZlPaRYCUGmU2AYBBBBAAAEEEEAAAQQQQAABBIoSIABVlCzp9nQBAlAl13ARASh1gHaAQ+/dAEM7xXSDRXEBID3T6NZbb43k4fnnn/ce0k0zLgClgWndxs+US8+ceffdd71p6lhmO71OmDDBu51vZbsBKHsQXnlLWtzBcx2zqMUOPkyfPr1xmI8//ji49tprI0bz5s2LZEG/G0P72TyRjfilFgJFtgE9S820E/Oq55ylWXRbPLOPeXVviRmXjnsOFRnEjcsD6xFAAAEEEEAAAQQQQAABBBBAoHMFCEB1bt1T8mwCBKCy+bW8dxEBKN3+zQzo6rV3794t58u3gxss8gWg1Pm6t5i7+eabA633LW6avgDUhg0bAvc2X1OnTvUl11hXdgBKATfbW7PPkpb58+dHtn/ssceSNs/0mR18mDZtWpjWnDlzInlQG7GftWMH4ghAhWy1fFNkG9BtLu22r+BrK8Huq6++OrL/ihUrUhkTgErFxEYIIIAAAggggAACCCCAAAIIIFCQAAGogmBJtscLEIAquYqLCECtXLkyMqg7cODAXErlBov03CPNdtAtt/R8ptmzZ3eZ+aSZNu+//37s8d00dSu7119/vZHeggULAt3Kz76d4FVXXRUokJI0yF12AGrTpk0R72YzoJ599tnI9lOmTIn1yfqBHXywb4Oo2SdDhgyJ5GPixInh4Z5++unwMwJQIUst3xTZBty+RsGoVpZ+/fqF7Uz7Ll26NNXuBKBSMbERAggggAACCCCAAAIIIIAAAggUJEAAqiBYku3xAgSgSq7iIgJQmkVgz0ooKgBlH8P3Xrfh0+B30uIGoHzpmHWaWZXmFl26NZ+CYeZHwbG0iz3zR8dtln+lu3Hjxoj39ddfn3i4RYsWRba3A0OJO7bxYVzwQUlpsN/Ymtdly5Y1jqLgn1lHAKoN+ArtUmQbWL58edhO1F4UIG5lcQNQb775ZqrdCUClYmIjBBBAAAEEEEAAAQQQQAABBBAoSIAAVEGwJNvjBQhAlVzFRQSg3NtiNbslXNoi+4JFevZRr169Gj96RtOIESOCSZMmNWYxpUnXTbNv377B8OHDg2HDhgX2c5U0uK2AVtJsqjTHa7ZNOwEopamBdxOwkUfS4t6CT4PpRS128EGzydzFvV2i3DW7bOHChWF5CEC5avX6vcg2sGrVqrCdmPavW2amWTQLT7MFzX56TXt+E4BKI8w2CCCAAAIIIIAAAggggAACCCBQlAABqKJkSbenCxCAKrmGiwhAuQO7CojoOUVZFzdY5HsGVKvHcNO0nwGlmUXus5/69+8feVZRq8drtn27ASjNMrMH0nURilvc5y/ploFFLXbw4eGHH+5yGD0vzA30ycC+TSABqC5stVpRZBtwnzenc+CDDz5I5aPZjPY5o/e6pWeahQBUGiW2QQABBBBAAAEEEEAAAQQQQACBogQIQBUlS7o9XYAAVMk1XEQASkXQTCJ7cDfNreuaFd0NFhUdgFJ+FIRS0Mkuy/jx45tlte3P2w1AaXaWnce1a9fG5mH69OmRbfXMq6IWO/jw0EMPeQ8za9asSH50C8GnnnoqXEcAystWm5VFtoFPP/20yywm3ZYvzWLnS+eOZhGmDZQTgEojzDYIIIAAAggggAACCCCAAAIIIFCUAAGoomRJt6cLEIAquYaLCkC5AZEXX3wxc8m6IwClTLvPTNJgtQavi1jaDUCNHj06DNgof7KKW6ZMmRLZVrcxK2qxB/njAlCaMTd48OBIngYNGhT+TgCqqNopJ92i28DQoUPDtqK2r+BlmkXtStub
n7vuuivNbo1tCEClpmJDBBBAAAEEEEAAAQQQQAABBBAoQIAAVAGoJNkRAgSgSq7mogJQmiVkBnb1+uijjzYtmWYfaOZO3DNcuisApXzdcsstkfIowJZ2tkTTglsbtBuAmjx5ciR/Tz/9tJVq9K0dHNTt7zSLpKglTfBBx37llVci+bfbDgGoomqnnHSLbgN65pvdXvQMtzTLtGnTIvvNmzcvzW6NbQhApaZiQwQQQAABBBBAAAEEEEAAAQQQKECAAFQBqCTZEQIEoEqu5qICUEuXLo0M7vbr1y/QTJekRbeC00Dy1Vdf7X2OS3cFoJTn1157LVIe5XPx4sVJxWnrs3YDUG4AZ8KECd7j65aCV155ZViW++67z7tdXivTBh90vLFjx4b5sgMKBKDyqo3uSafoNuC2fbUdPRsqadm8eXMwYMCASHtL++wopUsAKkmXzxBAAAEEEEAAAQQQQAABBBBAoGgBAlBFC5N+TxUgAFVyzRYVgFIxbr755sgAr25ll7SMGzcu3P6tt97qsunbb78dfq5B5jyCJ26aEydO7HJcs2LEiBGR499www3e2UMa/Nbt5syPBsjTLjNnzowcI+2+Cu7Zz93Sc6t8AT93sF7BgSIXO3CnW/8lLXpOmIKPdvBJ7wlAJalV/7Oi24CCSQMHDoy0m2YzLp9//vnI9vfff39LkLNnz47sTxttiY+NEUAAAQQQQAABBBBAAAEEEEAgowABqIyA7N6xAgSgSq76IgNQ7rOTrrvuukBBBt/iBkZ8ASgzQ8oEKO6++25fUi2tc9McM2ZM7P4rVqyIDDorH9OnT++yvTu4HTcbqcuOQRBoW1M+vbZyW7BHHnkksu/jjz8eOYQCUsOGDQu30XOWiriNoH3QJUuWhMdLCu6ZfWbMmBFubxwY3Dc69Xwtow3YxzDtZv78+V4wzXRSX2S20/t169Z5t41bqQCX2V+vc+fOjduU9QgggAACCCCAAAIIIIAAAggggEDuAgSgciclwQ4RIABVckUXGYBSwEMBHXugVrN0dOs6MztHA7+67VyvXr0i2/kCUO6gr2bLrFmzJpOYm2afPn2CTZs2xaZpz9JSuZTvl156KbJ9OwEolUOD2Pbt8ZS+bl34wgsvxD4Xyz6wgnva3ngrbwre6IL0ySefBA888ED4mew0M6XoxT6mZowpL0mLPldgzJRBrwSgksSq/1lZbWDq1KmRdqNzSQFN3bpTgVa1LbUlneOmfekZaK2eB0pnyJAhYRpKS8+dMn1a9WuEHCKAAAIIIIAAAggggAACCCCAQN0FND6xxRZbBKecckrdi0L+EShVgABUqdxBUGQASkXRwK9vVouCI717944M4ppBYb3aASgFTzTbyf7cvFfAZdq0aS2rJaU5ePDg2Oc7KcijQWtzfL2qLApMrV27tpGPtAGolStXNm4d5qZnp22/10wNBWf0HKe45f333+9yOzINxNuBLR2v6FvvrV69OrjjjjsiTirLTTfd1CVg55ZFAT273ASgXKF6/N4dbUDBbXt2k2lHCrja54DWq+/QOdjKollVN954Y6R9mmPolqM691kQQAABBBBAAAEEEEAAAQQQQACBogUIQBUtTPo9VYAAVMk1W3QAyhRHt9i79957AwV3zICt+6qB4zvvvDPQrCR7ZpMGst3BY3tfzUZodWmWpvIQt8yZM6dLGZQ/PftJS9oA1JtvvtklHbtcce83bNgQl7XGepVNz8dyZxLpOTm6xd/y5csTUea3hwAADT9JREFU98/jQ806icu/nnPVbBk1alRjf7kWHSxrlhc+b0+gu9qA2v+kSZMat5tUcFhtyA7y6jaUCxYs8D6/rVlJNdMprl1r/fjx45slwecIIIAAAggggAACCCCAAAIIIIBAZgECUJkJSaBDBQhAlVzxZQWg7GIpgKIZTsuWLWvcHkuzinQrvqKfR2TnoVPea6bXu+++GwbH6lJuXUSV91afzVOX8pHP5gJ5tAHdFm/VqlWNZ89pdqDSZEEAAQQQQAABBBBAAAEEEEAAAQTqLkAAqu41SP67S4AAVMny3RGAKrmIHA4BBBBAAAEEEEAAAQQQQAABBBBAAAEEEOgxAgSgekxVUpCSBQhAlQxOAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydW/zTbbBHvssUdw0EEH8YMBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuoeBs45JBDgq233jo48MADSx5N5nAI1FuAAFTJ9aeOarfddgv22WcffjCgDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAMVbwP77bdfsOWWWxKAKnksncPVX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIB
A/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRf4P8DkWz/LdtuXjMAAAAASUVORK5CYII=) 
![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAyAAAALtCAYAAADQcQC2AAAgAElEQVR4AexdBVgVSxvG7u7uwri/3d3YjaJYtN2tmNiBrdgtdnd3N3Z3d3vf/3m/c2Y5IGIrcOd7Hji7szOzs+/uzny9VtCkEdAIBFkEateyRb169YPs+PTAgj4Ci5csQ7q06XHw4KGgP1g9Qo2ARkAjoBH4TyBg9Z+4Sn2R/ykE3rx9i1evX+P9hw/Gdf+Lf/Hq1Wtj/9Onf/H27Vtjnxvv37/Hhw8fjTLWsWxjHPhDG1u2bEOWLP9g774DcsYPHz7g7dt3xtlN4zVdI499+vdfueaPHz8ZdYLixsePvhgHxfGFtDG9ePkKGdJnRMeOnUPapenr0QhoBDQCGoFgioAWQILpjdPD/hyBp8+eo1bNOqhSpRpq1KyDseMn4vqNm3j58hXu3ruP9h06o2aN2mjSqCmaNHZA4yaOyJc3P+bNmy+dXbx0GXZ29qhdszaaNG6Khg2bwNmlGYoULopOnbp8fsIASlauWoOC+Qsif9782L5jVwA1vr2oefMWiBMrDl6aBactW7ehbDkbNLRvBNvatqhfvyGOHD0uHY4dNwG5c+VFUwdnnLtw6dtP8hdqrl67HmVKl0WXLt2++ey8f0eOma71mxsFUPHff//FSM8xOHv2nJ+j16/fgHvvfnj16pWf8m/Z8Tl3HufOX5CqJ06cRPr0GTF+/IRvafrVOsdPnsLDh4++Wu9rFfr06YeKFavg46egLZx+7Tr0cY2ARkAjoBEIGQhoASRk3Mf//FVs2rwV8ePGR+GCRbBjx04cOHAQ5y9chIOjM3LmyIm7d+8Jk1jXti6srKwwefIUHDh4CNn/+R+srEKBbiq0IqxevRZhrKykzrQZM3Hq9Bk0b95S9h0dXQxhwD/gZGw3b92B0qXLI1uWbEicICGSJE6KEydP+a/6zfu1atVGfTt7o/7Dhw+xectWpEqeAmGsQmHDxs14/vyFHPfxOQuPgYOxbv2Gv2q1MQYbyMbNW7cRLUpUZEiXIZBavodo0Rk9diJu3LjlW/iDWx8+fsSo0eNw+cpVPz20bNEKkSNFwSV/5X4qBbDDaxk30Qvv3pssUXzO8uUrgIkTJwdQ+/uKKBS1bN0eb9+9/76GAdResngpEiVMjD1ma1oAVXSRRkAjoBHQCGgE/hgCWgD5Y1DrE/0uBHzOXUDixEmQOmVqfPDnfnT8+EmUL18R12/ckNOPHDFKhImjx07IPrXXFEgyW2cxhpcmZWqkSJYCl69dN8ooUESLHBWnfc5LGd27Nm3agiNHjsk+hRdaPJ4+fSr73Xv0EuvFyZOnjT6+Z4PuVGXKlMOQIcM+a1a8WAmkS5Pus/InT58ZZTt37cb2HTvx9PkLLF6yFOPHT8TzFy+N45YbZPC3bNmKtWvXS/HLly8xc+Zs3L5926g2Y8YszJ49x9hXGwsXemPGjJnYt28/uH3o8FGxOnl7LxLhje4/a9asxarVa3Dl6jXVDHlz50HRwkWNfW7QwjN82Ajwnii6dfsOOnbsgqxZ/sGwYSNw7pzJ0sDjK1euxpAhQ8W6perfvHkLCxYsFOGSZXv27sPGjZvUYUyc5IUtW7cb+9x48+4dypW1keegRw93P8csd7ymTBXBlUIH6cLFS6hevSYKFiiEZctX4v6Dh9i1cxcOHDiMd+/eiwC8dNlyERqPHDU9J7t278GYMWMNq8brN2/g6TkGc+bO84PPsWPHUSBfAaRKkQrz5i/E69dv5JyPnzzByJGemDV7jrjcqfGtX78R8xcsxI2bJiFt587dOH3mjDqMFStWyvV5ey82yvSGRkAjoBHQCGgE/hYCWgD5W8jr8/4yBOgeRSFi7fqNAfZ56/ZdvHtv0iL37t1X6irXpfETJsp+4yYO0vbR4ydImSw5UqVMjSvXTULLiVNnpA7du16/McWN0CWI58yUMbMft5aHjx+jT9/+SBg/IX6G2Xv/4SPKlbMRZtXyol6/eSeMO4UkS4GDcRVnfM5KHAvrd+3aU8Zna2uH/h6DRWApVqwEbpiZZ8s+P336hIYNGyNWjNhYuXodZsyai7BhImDc2PFSbfCQ4bCxqYQSJUphgMdgo2mbNu2QPXsOcQWjq1jp0uWwddsOXLh4GUkSJYF1Rms8fPwUCvP27TsabbP/kx1FChcx9hcsXISGjZqgatXqSJc2A7Zs2yHHjh0/idQp0yBCmLAoUaI0Dh46LOVjxoxD1izZxN2unp097ty7L+WXLl9BlszZxBq2bsMmdO3WAwkTJsHzFyZLUfeevbFp8xbjvNw4edoHQ0eMRpyYsT4TilTFTp27wsamoowhY6YsePz4CbZt34XIESIiVrQYaNeuI27fuYcmTRwEdwpjp874iNWBz4kSbGbMnIVKlari3r37uH79Ojp27gpXt+bImycf8uYtgDt37sopZ86eiwhhwiFNylTo6d5XrFoUrugOWKpkadSuUw9t2nUwLHKjPMfIecuWKYeXr96gaNESKFK0uBo+tm3fiRjRYohwZhTqDY2ARkAjoBHQCPwlBLQA8peA/x2nvXz1Go6dOCUxAGfPXwT/aB1Q2z/9G1BfAZWZzy3n+9pxc10y+efOX8T3BiiTUa9ZoxbChwln+OEHhu2gQUMQNlRoYRQ7dOyCsKHCoEKFSgYzf+PWLWTOaI14sWKjTm1btGzREvXr24sWmgygotevX2PipMmYP38h3lsEVV+5el2yVpHprF2rDu7df6CafNcvr8umQiXMnTvPT7s3b99/UQA5feas4ZK1ecs2YUiLm5nQo8eOyz6vPyBatWYdEiVIiDJlyktcCTXwFy5cxNAhwxAzRmxpsn37DumDWnZaV3iNtGyQUqZIhfp2DYyuXV1cETN6THz8F3j1+o3UbdWylXE8x/+yo1iRYsZ+xgzW6GQOko4cPiIaNWpsHKterQYihY+Ig4ePStn8Bd7S3ySzmxMFtTy580qsDysMHz5Sjo8eMw7Pnr8QtzTeL+KzfsNmP88YLQtXr5kEzXq29RAmTBh4L1pqnJsbgwYPQZrUvhanESM9RQB58uw5UqdMhSyZs4qliXUXei+Rc/fvN0D6WLd+o+y3a9te9okFkxuQKHSFDhVWhAi61hFPxvKQXr5+g1w5c6N8ORvZ5z9agliHmFKgjhQhMgZbWMhy/JMdSRMnxb0Hj3D48BEsW7bCaHvj1h3ky1cQ06ZNN8p+ZoPJEE77nMOpM2dF4DTNNef9zD2BzjdfmBd8LOcOy+0v1P+eOcbPeNhfQH2eu4AvjsFyPBbbp3zO4ez5Czh/8RKOHDv5w+/8z9wP3VYjoBHQCAQ3BLQAEtzuWCDjpRaWbkjUpmbKkAnWmTLDOqPpjy5GmTNl/vyP5fIXwDF/9dmX/z54Dinz37+538/aWH/eB9snip8QuXPnEzeWQC7xs0P0j69cqYpoi0+f8fnsuP+C/v09pC6ZygULveG9aLGfmIkbN28jQ9r0yJA2HTp36YYE8eIL03fY7Grlv78v7ffq
1UfaTfaa8qUqgZYzVqFM6XKYPMnLTz1aQAoVKCSacUsLCLN30T1JBVGvXLUakSNEQjuz1YEB9hHCRvhiJqQlS1cgdszY8PAY5Od8RQoVQYwoUdGiRStUrlQViRIkwsJFS2Fn10Cu79yFi1I/beq0EtdBwYnk5OiMZImT4f3HT7h//wEihotgCBg8ztibokV8XbAYO7Fz916xpoQLEwatW7eVfvjPpnwFRI0UBbv37JMytgsXOiwOme9JC3OMDhlAUr/+AxApQkQcOnxE9tU/WiGGDR+pduX38OGj2LBxE968eQOPgUPkmpqarWGsQIsXmfrKFSv7accdni9NqtSwzmQNPjckuktRSBg8yNdSlDF9BmRMnxGPnjzFmzdvQbcrEl3dTpw4hTXrNooQHC1yFHER47F7Dx7in2z/M4S0t+/eIX++AsiaOSvM8gvix4mHKpWrSl/8l9U6C0oUKwHGI/knun7FihU7QDc6/3W/ZZ9B93y3iU0W66zmecbaNJeoOSGQX2Ne8DdvGPOJv7ZGfYvyLOa2xjHzvir3M+f5P4+aF9mfOmadRa7D/xxnuR/Q+BjLxDoF8hdAmlRpQVc9TRoBjYBGQCMQOAJaAAkcn2B1lBpoMj+J4sWXBZFMdLo0aeUvbao04F+61GmRzrwtZalZZionE/lZHXMZj0lb7qc29WXZp29fpj6kjupXnU/ObWpv9MUxpUqDODFjI/v/cvyQ9pDpRXndU6YGrN0lQ0Y3I1Kvnu4IbRUajC0IiMhIJk+SDOnSpsP7T//ixMnT0jcDyh88MsV3BNSOqX8tBQLWoTtMBwu3o4DafamMWvLy5SuI9t2yzrsPH1C4YGGQqfVPZGafPjPFgaxYuUqYcCWAnD13HrQsfCkV65w585AieUps3OTXPalUiVKIFT2GaOv37zsgrkOMJZkzdwEypM8AWjUYwM/MT2PNLlscV+NGTSR+gazwo8ePxYLRsUMnY8gUQCjcKPI5ew4DBw2B19RpIvQxKFwRBUzG3+zZu1+K8uXJi+hRoolljwVubs3kHqlMVD179kL8uAlw4pRvAgAKaIsWL8XBgyYXLrbjM+E5eixKlCwNB0cXNGnqhNQpUgpTTbcsEi0RSRImRuFCheGfrb989boh6N8yu07xGeSzOHTIUGnPf3QXowDVo2dvo0xtLF+xEmPGjkfvPv1EoJg02SSwPnz0GP9k9RXSaDnJlvUfFMhXUJoy6D1B3PioWKGS6kqeifLlyhv7lhtLly6XcS1Zssyy+Ie3eb/SpkmHhHHj+84fX5pjWM45w+KP77/a5/zDbTUnyDF/bSzLVD3fOYftOT/5n5d8+7Xs37Ttez41Dt86fsdqeR4Zh8U8yWPWGTOJ0iJWtOiC8ZDBvvf+hwHWDTUCGgGNQAhHQAsgIegGuzi7yQLYqFETPH/5CmRi6JdO/3jGQfCP+7fN21J2567ss+z2Hb91uO9bZq7H9uZ6Rp9G36Y6ql8yZZbnkv7M5zGd755pTLfvSmwCjytB4Xtuy+GjxxArZmy5dhXbodozsJnuSIpGjDC55yiGUZWrXzKZzDLFjFp0sSG1b9dB+q5RvZaqhnfv3olG/uw5UzpXxp9kts6KTZs2G3XsGzbBggXexv73btjVs0NlCw23al+4UBGEsrLCdQuXMMau0I3qhTnQnNmwIoQNj8qVTJp7piMmY9yr1+dMMPtdsXI1kiVN7gcrljM2hO3WrdugTo8P5oxPGzdvlQxhbs2aQ1lCVCUKw3FjxZHdW7duSx8DLawrFEDKlSknxxnQnymjNSqWr4BXb94iSoSI6Natu+pK3OVonVHCkceAQdIfA8xJDMpn0gAKOqSRI0dJvMO1Gzdln//oKuY1xa+Ayuf08NHjePz4MS5evIRnz55j3QaTy1SzZi2Mtr3cTdYsZh0j8Rl98fIlHj1+ity58ohVgu6DJMZaEK8J4yfKPv/dvHVH7hdd8iyJgeKsu379BqxatUa25803PS+0chTMX0jiPVQbh6ZOiBsnnuzS0sS2lumhs1hnRsUKFVV1P79MNZ0jey5c/wWZxNjx+/cfRIi/efsO7ty9J4kAjPngjum9NuYB9c5z3jDPKZyHuK3mCJkb5Jh5fjEfU3Xk16IfmYP8z0/+5yV/55K5z2L+Mp3TNA7Vn//z+Tmvxfl5beyP18954sr1m8ibO6/ckzFjTLFTfm6A3tEIaAQ0AhoBPwhoAcQPHMF7x8XFJIA0c2sevC/kB0a/afM2lChREokTJkafvv0wYuQouPfui2YtWuOMj498mNBr6nRxXyLj5uzsCgoulkQmvWu3nghtZYUwVqElKPq22VLSqFFTYS4KFiwCujMxdoD9ZMmSTbpgRqRIESIJo9mzV2+MGTcRm7b4zbZkea5v2abgkzhREiPQWLVh7ABjEhhsPWToMLne6TNnGxmQWG/16nWiIaf1YMq0GeIeQgvHseOff0uDFqJWrdvK9TRu7IA7d00B3ezn1p17KFSwCKJEjIwuXbth9tz5kk2LMQZVq1THmrXrwdiQtWvXYcMG32xTY8eZgvsZgD5p8lQTVtZZRHhlIHz0qNHFjWrXrj2SZpauK3RBGuE5VlyPqF1XLnWM5SDWrm6+QoEbA7fz5kez5i3Be6IET1q26KrE+j179QFdl0jEYPYc33iates2yDdfWrVqg2vXfbOdMWidbfnHgHKmOaabH4U+Cjn9BwzE8hWrJYsVcaOwz7rzzIJmN3P70qXLCmPOc9NaUbhwMfBclrR48TJpS/euXu59xU2tWrWa4qrFemVKlUX4sOGxcctWaXb06DGxUtG65OjkgkqVq+GqObPY3HkLpK/oUaKDcSf+qXOXrmjQwDels//jev/nEeD3f/gs0OKlSSOgEdAIaAQCR0ALIIHjE6yOurqaXFEcmjoGq3H/ysEyxSmZaWYtatO2PR48eCjdUzver/9AiXFgIG6Xrt2xycIywkpnfM6hZ093cSWaPNkL7Tt0Ar+vQSITyXSpdA06eMgUW8CUvpYxHsyu5O7eGxQE+UHCnyWmbs2Y0Vq+TeK/L37HonfvPujQsbMw4YePmAK0Vb2lS5dJJq5Zs+dhwsTJcHF2letTxy1/mYaXLliMnWAA9+UrvulyWY/xCIwNadrUEYwlIF27fgNJEyURhitq5KjyGztWbOzavVeO02Vo6tTpcHfvI+mJ586dj65du0tq2ivXrmP8+Emgq5QKYmf2rB49eknAOIODWVd9AZ4BzxS0GK9jSQMGDBQBgAH2iigI9uvXX6wgI0eNxosXpg8L3rx52wj+Zl3eH7qj0dKhLCmPHvEZ8cCkSV6SfYzCBDXjpGfPX6J7956C9f4Dh9TpJHVuL/feOHXalG556NDhcm7GmjAjlyK6wKlUvKqMwi3d1ugWyMxhtIT06zcAt26Z3AO3bt2Gnr3c/VilKGhRMGI5rV6KpkydJimbGeO00F+qXV47Ewnw2db0exBgsgN+fJQCyOjRY3/PSXSvGgGNgEYgBCGgBZAQdDOp1ecC6OjoHIKu6r99KdWq1kCpUmUDDCwODJm169bLh/X
Utz0Cq/sjxygcDBw0VL4fQgZ+7979qFS5qnxt/kf6021+HwIUTtKmzQAKQZp+DwKWAsgYLYD8HpB1rxoBjUCIQkALICHodlLLTQHEwcEpBF3Vf/tSrl27Lh/544cJv4fGjZ8gzwItCSo24nvaf61u+/adULNWHTx7/lwyb/G7KD3d+0lGqa+11cf/LAL8ojrjFTT9PgT8CCBjtAXk9yGte9YIaARCCgJaAAkpdxKQuAYRQP7DLlgh6Hb+8KXQpWrg4KGS5pYfz1PxCT/cYQANr169DsbF1KhRG7a29cDYBf8xNQE000UagRCJADPDFchncsHSMSAh8hbri9IIaAR+MQJaAPnFgP7N7pQL1n85BuRv4h9Uzs3gaPVBRwZhvzRnxvod42PcxMOHj/DkyZdTFP+O8+o+NQJBCQFtAQlKd0OPRSOgEQgOCGgBJDjcpW8co+GCpS0g34iYrqYR0AhoBH4eAZMAkk/cHpkIQ5NGQCOgEdAIBI6AFkACxydYHdUCSLC6XXqwGgGNQAhBQAsgIeRG6svQCGgE/hgCWgD5Y1D//hNpAeT3Y6zPoBHQCGgE/COgXbD8I6L3NQIaAY1A4AhoASRwfILVURcXcxYs7YIVrO6bHqxGQCMQvBHQAkjwvn969BoBjcCfR0ALIH8e8992Rm0B+W3Q6o41AhoBjcAXEfDrgqW/hP5FoPQBjYBGQCNgRkALICHoUdACSAi6mfpSNAIagWCDgBZAgs2t0gPVCGgEgggCWgAJIjfiVwxDCyC/AkXdh0ZAI6AR+D4EtADyfXjp2hoBjYBGQAsgIegZMAQQ/SX0EHRX9aVoBDQCQR0BLYAE9Tukx6cR0AgENQS0ABLU7shPjMfVtZnkoXdydP6JXnRTjYBGQCOgEfgeBLQA8j1o6boaAY2ARgDQAkgIegqUBcRRW0BC0F3Vl6IR0AgEdQS0ABLU75Aen0ZAIxDUENACSFC7Iz8xntq16ogFpHr1Gj/Ri26qEdAIaAQ0At+DgBZAvgctXVcjoBHQCGgLSIh6BqZMnY7qNWph8uSpIeq69MVoBDQCGoGgjMDTZ8+RKUNGUQANHjIsKA9Vj00joBHQCAQJBLQFJEjcBj0IjYBGQCOgEQiuCDx7/hw2NpUQK3YcTPbSCqDgeh/1uDUCGoE/h4AWQP4c1vpMGgGNgEZAIxACEfj48SPu3LuPq9eug+5YmjQCGgGNgEYgcAS0ABI4PvqoRkAjoBHQCGgENAIaAY2ARkAj8AsR0ALILwRTd6UR0AhoBDQCGgGNgEZAI6AR0AgEjoAWQALHRx/VCGgENAIaAY2ARkAjoBHQCGgEfiECWgD5hWDqrjQCGgGNgEZAI6AR0AhoBDQCGoHAEdACSOD46KMaAY2ARkAjoBHQCGgENAIaAY3AL0RACyC/EEzdlUZAI6AR0AhoBDQCGgGNgEZAIxA4AloACRwffVQjoBHQCGgENAIaAY2ARkAjoBH4hQhoAeQXgqm70ghoBDQCGgGNgEZAI6AR0AhoBAJHIMQLILdu38HsOfPw8OGjwJHQR4MMApO8pmLJshVBZjy/cyCr1qzDlq3bfucpglTfo8eMw6rVa//4mK5dv4FatWzRuHFT3Lx1+4vnv3v3HubN98a9e/e/WEcf+HYE7ty9hyHDRuL0mbPf3ug/UPPff//97Cr37T/4V+e9+QsXYcasuZ+N628UDB/hifUbNv2NU3/xnMuWrcTuPXu/ePxHDizwJuZzfqTpT7WZPGU6Fnov+ak+gnPjT//+iwXei3H06DG5jKdPn2Hm7Lm4cvV6cL6sYDf2HxZA3r17j9Zt2qNOnbpo3rwlnJxd4eDgjDp16sHZpRkuXLz818F4+eo1atW2RYYMmXD/wcPvGs+nT/+CzEiPnu7Yv//AN7ft2q0nqlWriU4dO8PevhF69+mPC5euoHPnrqhn1wB79u775r5+pKL34qWoUaMWOnTohKvXb/xIF7+sza3bd3H9xs3v7m/d+o2/HafvHtQvavDk6VPwuSTNW+CNOHHiY+y4Cb+o96DfzcpVq3Hg4KE/OtBTp06hfYfOGDVqNGrVqoMmDs54+uy5MYZLl6+ACxCpSpXq+N//cuHGzVvG8f/6xvMXL3H2/IVvguHN27d48uSpUZfbi5cux/Ubf3cuMgYUyMabt+8wfORo2Nk1QPt27eHq2gxOzm5wdWuBqlWro5d7HxAL/zR27AScPnPGf7Hss89GjZuiRInSmDN3gZTduXMPb9689VP/6LETSJ06HRydnP2U/8mdHTt3Y8uWv6MMOe1z1s87uWz5ChwxM4d/EoMvnWvZ8pWwsrLC7Dm/VkDbuWs3Nv8FzCnc7dq150uXG2j5iZNnPnt+A20QBA8OGz4K0aLFxMZNW2R0TR2ckCJFGpw64xMERxtyh/TDAgglyE2bt6BMmfKwta2HjRs3Y82adVi9eg0mTPTC/IWL/zpq799/wISJk1Crdl3c/U6N5vkLl1C9Rm2ZdNauXffN17J333507dZD2lWsUAkHDx2RRcvNrbmUTZ485Zv7+pGKDx89RoliJeRc+w/8WUbP/3i79eiNI0eP+y/+T+/PnDMfe/eZBNpTp8+geo06mDJ1+n8ak9998QsWeqN+g4bGafbvP4iXr14Z+8NGeOLkqdOyTwtN7Tr1cPHS31egGAP8yxu79+zDmLETv2kUe/buh9fUGd9UN6hV+vjpE46fOIV6de1k/mzQoJFo4bdt246ZM2fBtp49Dh0+6mfYS5cuh5VVaKxcvcZPOXfu3LmLuvXqo3fvvli+fAW8pkzDP//kwMxZc+DfAEJll6tbc3Tp2v2zfkJ6AXmJvv0H4e3bd0H2Us9fuIgC+Qti6rT/9lx9/eYtbNm2M8jep28dGPk0mwqVscFsZZs3f4HweydPB6xI+NZ+db3vQ+CHBRB1mlGjx2LS5Klq1/h98fIlTp0+jSVLl+HUqTP48OEDnj9/gZUrV/nRgF69eh2zZ8/Bjh1+H+rHZi3akSNHsWqV7+TOB2b9+o3GebjBhYN09uxZWSjUPsso0To6ueHy5StSh/8uXbqMCeMn4tgxk/nNOGCx8erVK5w7dx7VqtaAt/ciiyPA8eMnMW/eArwIQBvGij7nzssCNnz4SKPdjJmzET1qdCxctESuf+/e/cYxtXHx4iVMnz4D+/YfwPsPH1Uxdu3aLabfj5/+xb1790TwMw6aNz58/ATvRYvlOulyljpVWsPtYfv2HaCm5dVrk+ad2jZLDS8xnTNnLh5YuKmdP38B6zdsNOpxEb53/4H/02LVqlVYsmQpLl+5ahyju1u7dh0QJ1YcjPQcjQcW1qd16zaA2q3AiC4yjx8/MaqohWnevPnwOXvOKOfGx4+fMG36DLBfS3pmvjcnTpzEsWMmIejDx4+Cr//nh/dx9uy5WLFipTANqp+Hjx6BDMYrs8VClfN31arVWLp0mWURiNmKlav8lFnuHDx0GHnz5IeTkzOOHDE9e+59+mPtug24eesW5s9fiFev31g2wYmTJzF+/ARcvXrNT7n/nTNnfDBhwkT4+JhcXS5cuIjFS5
bixIlTePv2rfzt2r0Hhw4dMZqqZ2zRoiXYtm2HUc4Nda9PnjyN27fvyLFHjx7Dy2sqtvt7V2nR2LFjl5/2V65cxYwZM42yT+Z39MmTZ2JZVAeoMSdzx2v3r11+/MRklVi/YQO2bfc7PtVe/XJuWbx4KWbMmIXHT3yfne3bd8LFxU0sH/MXLMR9i2f42fPnGDN2PHLlzosBAwZKO1pDWrftIN0eP34ca9Z87i52+MhRTJwwCbzGL9EH8/Xu379f7oNlPVpf3r9/j6vXrvsRdKjQmeI19ZvcRfft2y+47T9w0M9coTDknLEmAMXJseMnMGnSZPB5sSRlEeJzyPlD0e7de1GmbDv8PO0AACAASURBVHnUqF4TXLQV8drnzp0HPjuKrl2/KcqosmXKgVbM1+b5hlZQS40/3bL4zh74ioKE8++4cRM+GyvPx/lh5szZ4DtlSc+ev5Bd4kM8/dPWrdswd958cC74EvG6qO2eMs2vIOVz9jz4XllSh/adpC6tJv6J1m/2c+War1tH587dMH3mbKj3wbLNtBmzMWXaTLHG8drUO0hBZsuWrdi0aQtumi1zZ8+dx7r1G/y8S5Z9cZwTJkzC8eMnLIuN7YULvWV9MwoA3H/wCI8eP7YswrJlKwJ8B1iJ7+zixb73n2Vq3b569SrmzJ2H1/7ms1OnTpvnMxMmdI1s0bI1ihUrgUWLl+LFS5Ni4NHjJ7j/wHe94bvq5TUF69at9zM+yx06tak5jXzHocOmue7Zs+eYOWv2Z14QL1++lPeNa5t/2r17D6ZPn+kHD2cXN6w2zwcLFy7Cw4d+vSo2b94ivMzRY8dBoUrRs+cmqxl5HK7FlsTrVfeU5Zy/16z1e40fPnyE6m3BgoWg4op06/ZteQdemjFj2cePH+WeBTY3sd7tO/cCXGPnz19g9C8nsfh38OAh5MyZG1SmHj58FJwH+T5R0Us6cfKUrMNcN0l8x8iXcPwzps80vCGI25w588D1xD+Rr+I6r65XHeecQeJ6/sAf7qoO12Ou4/ResSTye3x2rl33fQ95nPP8IvPzy3fNwcnNz3zCtXz/AZOykPyspl+PwE8LIN179sYUfxovTo4kvkixY8dFkkRJZf/w4SOIESMOPMeMk32fs2fRsLED6tatjzSp0mLBAm8pHzRoKIoWLYGp02egX/+ByJ0rryxYtEoM8BiMRAmTiADAykePn0DFipWFyRg5agxy5cqH1m3bSz/8t3PXHtg3bIrLV0wCCBm5Tp27oXSpMqhYqSqOHT9p1A1oo379hn4YKdbhAl2tWi3cvns3oCbYvGWrLDzu7r2N4+PGT0LcWHHQqXNX0L81YfyEMnmoClxIW7Vph5o1aiNF8lTo1Kmr8RI2a97StMjVq49xE72QKEFi9OzpLpMN23MxrmBTEdaZMmPYyDHiQpD9fzlxzrxYtu9gWiRbtGiJt+8/IlWqtGjWrKWcun9/D2lXu05d1K1nj4uXTDht2rIN8eMnRPKkyTFuwmSUK1cBObLnxIWLl6QdmQv6rtK9oGTJ0siYwdrQ7B87cQpJEiVB9MhRwH7ZJyfkXr16o0iR4nJvv+R2NGjQEOTLVxBcmN69fy+uM+xjzNgJqFmzDgoXKY6Tp0wT8IsXLzB8xChUrlINWbL8g1GeY2Rs1NjyPGzDSaZFyzZ48OABxk+YhBo1aiNnrjwYN96k0eXi1NO9D2xsKkkbWsvon00GgS4Jbdt1RI+evUEBUtHR4yfRslUbqU//dhKZfO9FS9GmXQfQDc//wsw6U2fMQopkyZEzey5MmzFLMOFz0a17LwwbPhI5cuRGx05d1WlkMmzdph2KFimO8jaVAmTEWJn3hC5GxYuXFIyuX78BaqKTJEmOGNFjSX+cfMkMde/RS/aHDxuBggUKYaTnGLi4NkfaNOlFeKJA17p1W7EY7Nl3EBUqVRUBiYzE4CHDUKliFRQrXtIwXZNx7e8xGM2at8KIkSZhk8zm1KkzYGtrhz59+st7TEwp/KRPlxFLlpgENy4kfft5iNtT7dp1Ua16LZw8eUrGx3vF+0qXmLZt2ws2S5YGLLjSmjF+0hQ0bNQUxYqWQPUateR+syPOF7lz50WRwkVRs5Yt+Gwoooti/QaNkNk6CypUqIxLl6+CSoAG9o0wbfpM9OvvgcyZs2Kyl6+C5YzPObRp2wFFChdD3XoNRIhQ/anfLVu3g0x4u/Yd4TFwCLLnyI2hZmUEhbdMmayFQec8VKlSVWnG+2XfsAkKFSqCcuUr4PYdk9Cn+rT83bl7j7h5VqtWA9bWWbHWzJR5eAxC9uw54DVlOjw8hohLDwUsRRy7s0tzlCldFrlz5wOFFxKFT84d3br1wJx53siRMw8cHJzw+s0bTJ02E9mz55R3n8omEhdrCs716jVAntz5QKaa78vho8eRL29+pEiSDG3bd8Ljp88wcuQoeU/UAs65YNgIusLZCuarAxCSeI4TJ0+L+xPxaNioicz/nHffvjNpyRctWYY6tnYoWaqsMe906tQZeXLnEZcvPlcZM1obzyn75P3t3KU7ihcvhR69+nxmhZCLA+DpOVrelZnmeIjzFy+B99Q/EYcJk6fBOkNGJIgTD/csFDisu27DJoSxCo2s1plB4fftu/fSBZl0S4FM9UvPAc6nnBNKlSqD0mXKg/MTlTuJEyVFxgyZjHVsstc0lChV1rDcqT74e+3adXTp2gOlSpaRd4vKJp7TrVkLcQueNWce7OzsUaRoCWM+nzV7LnLnyY/NFtc5d95Cad/AvrFYbyZOnGQoI/iM0aqYK2ce9OnbX05/+rSP3BNacYYOG4FcufKifcfOxtAoELRq3Q5FixZHufIV5V0jE8v7kSZVapDBp+DBtTBrlmziTcHGd+7ckTm1YoXKMg9y3SGjbUkUvOvVq49iRYtj8bIVgiOVPYzZWL12PQoXLgbbuvUNge3Wrdvo22+AKBjz5i0gVinVH+f3Vm3aI2/e/KIlV8K5k5OLuJvTAsD3p0kTB9UEa9ZtQOUq1c0unDmNeYzMev68+bFi5Wq5Br6ve80u3VQGZMqURZ5tdkQlyohRY+DarKXwCFyz6OpevXpNVLCpgJWr16F69VpyjRQAGLPD98/RyRVKCDl/4bKcp3PX7jJfB8Tk894UKFgEl69ew/sPH4Qf4lw9dtxEcaEvULAwKET5pwkTJws/QEvQ+AmTMXf+QiRMkAgUxkiM24wVOy6mTpkm4+b95LvrOXosbGwqomq1mqJ8PXHqDPLkyY96dvaGuybnj6FDR6BK1RriVdOydVtZH7luNGrUROb2A4ePomz5ilhsXj8sx0flE5/pHDlzw8HRxRCunjx9Bs/R4+T8terUMxSlXHuomKZLMImCsV2DRgZvQYVtw4ZNRNgaNXrcN7ugWo5Jb38dgZ8WQOhLV66sDXr06IU2bdqhbbsOOHbCl6k/e/Y8smTOKtqYO/fu+7F+dO7aQ2IkOEyPgYOEYeH2kKHDETliZFSsVAXv3n8QTU/YMOENH9q+/frLREc5d9eefUiRPCWyZs4KpfVOkzodhgwZKldPrV2jJo64f
/++MIkDBg2TxY0HyWBw8guM6F5G68D30NZt22UB69Onr9GMAU5kAJu5NZcyx6aOiBQhMogJiUJUtqz/yHajho2QMH4iXLxs0rBevHwFUSJGQs7/5cCFy1dhW9sWoa1CGRodam/ZN608FP7J5GZMnxFnzNYCTqCxosVAjv/lwJt3H8RiwYmZTD7bjRtnYlI4sZQo4YtH0yYOcpxxJes3bpbtvn36yRg5gbMtrRnv3n9ElEhRJPZHDgJo0tgBiRMmxm2z5mL1mnVS//TpM6LlihYl+meWLLbt3KUbChYsAiYPILPh7OyGOLHjGkwgffgppJBGjBglwiu3ubiFDxdRrB30qeXYWjRvJfjSojJv/kIMGjxM2hFPCnHUEvFe1ahVV8qfv3gh5z579pxY7CZ5TZNyCpSlS5eT7QULF0GVP3/5CsmTpxJLCBfOeQtME/Eoz9Ho3t3E6Esji3/0K58yxdQvJ1cKpP/7J4cERlNTXLhIMUPD0659J4NJbNmqNQrkL+RHs8ZuabXq7zEE1NyT2rRtL8wpt2/cuIX8+Qpi5KjRWLZilYzpzRuThaV6tZqC0cRJXtJu9OixKFioCB49eYpOnbogU8bMmDl7nuB68+Zt2aZQSuICkCdvAdmm+xjvLYlMD5kFupgNHe4pZTRtN27qJEIdg+3jxoknliYepDWFPvaKSpYsg7JlTThz0UqSOKks+Dw+ePAQ1LW1U1X9/Pb3GAT33qbnkgcoENe1rWfU4RidnFyNfcsNWllcm7UytKV0vbLOlAVlypSTuWf+Am/kzJlHtGpczAYMGmpoecmAcYH1T2TcYsWIKUoOHiMDmCxpCmzZuhUbN25CxPARhTkgk00tPYPya9auazAR9g0aIkf2XKA7pX/iM1OpSg0s9Da5uFIRUb1aDanWpUs3uafNm7eS/a1btyNB/ERiUaEVq0LFquISyoMUTBInSiJa8A0bNyNGtOjIn7+gMIA8njJFKtFUc9tzzHgRkKVTQGIivM2WD2pwU6ZIjZEjRslhcVWywJ7uTP9k+5/x3JLpoAsSafz4iShT1uYzZpLHKKAMGWJ6X6dNm4F4cROAaw3vAQXdE2aXuRkzZ4mgQQ1pV3X9LVpL/yNGesK+UVPZpjVooPn9ZwEFS7pGBUTjx01A2FChxZpDBQAFzeNmwVjVJ8N07vxFfPwX6N9/gODO9cQ/UXjlXMS/4kWLY6WFNd9/3UmTvZAyZWocPmLS3NeubStMFevR2tCmXUejCRlOpQwyCs3KKF7nqdMmC1fPXn2EMb/34CGqVa+J6NFiGHMpBeTyNhVFEOO7SEFz+87d0h0FLgo4VAKR0qRJJ+sTXZmpge/U2ddVLHXqNGJ9JdOaNk06EUI55zKRQ4EChXHHrKhr16GzKEbYX9u27UQpwG1a5MmcPn9uissiZmFChcHOnSarqsegIejRw13GQatY2rQZcOGiX0sUn4tqVaojQrjwRnC1nV19FCxU1NC8U8Gj1vIZs2ZjlKdJoObakTRJcmFayYhyPqBlllSpcjXDEubm6oYY0WKCAgrn3VJlyhvzRrESpcEkAiRnZ1e4uTaT7UaNGsu9p1KKRGFNzXm0LsaLG1+sJjzGd2qemZlv0tRRzst4QeuMmRAlUmRsM1uey5WzEeUp15zXb94iQ0ZrwwI9cdIUcF2iJZDKNEvPBBmAeY2lcMh7xPvr4Ogs8/IkL5N7OHmevn1MQqVqo34puDNJhyIqaPv1G6B2QWXp0CHDwNinOrb1ZI5h/AyJSplChYqKYtU07szg+0vinFCjZh3Z5j/yAIyhJY/HuCgK33Pne4t3iP+4UsayUFhXFCtmHLEOv3v3Dl269TS8CigMUiil8ErrDGMBlTLsjM9Z4RPpakeigKnccJu3aPNVRbU6t/79PgR+WgDp5d4XJUuUwugxY9G3b3/RiviPPaCmzcSsmh5EDpELKRcNmui2btshQop9w8Yyek6unDgYV0Li4mFtnQWHzOZ2z9FjxF+PfZAY9D5x4mTZ5r/GjR3EIsBtujM1dXCRBZDmaEreXFSoaebCSE1mYFS3rp24GgRWx/+xbdt3yvX2NGubeXzi5ClSNs3sQ0ppP2yYsKJp5XG673DSX7hoKdKlToOM6TMZiwg1lxHDR0D3bqZJn9aOmNGiGwxK/fr2iB83vsEYVapYGalSpDYEEE6WcWLGhl29+n6GSiY5QrgIhsm3fr36iBopqsGENLRvhNgxYkobuu/wHnoMGCj71JTTzWPXnv3o2KkLIoQNBze3Fkb/HFOSRIlFkGAhrzdZkqRwcW0GMkixY8XBgIEmQcJoZGaouQCoQGUKE7ROKOIk0rt3H9lt1qK1MCXqGIUhuiuQccuZK6+4T6ljAwcPRYUKldC2bQe4ujYXK9PyFaskFoDPIZ+kzp27iCVHTUK0clAzPHfufLEcsS8y59n/l1208syoFDZ0WPTra2J+ixUrifLlyuPpM5MbiDq35S/bjDcLfCwnU+DhYcKUJnVqPhmwS8bOrn5DkAlhTBE1+Ozfv4sWJ3dq8Nq374R27ToiQ/qM4P1XxMma942afkuikEQNqSJaBvMXKIQbN27Ie1mlak15JnmcDDSFA9s69eDm2lwYKQoHT549F+ErRtQYcHc33RPWp0WuqYMzChcqYmSTUu8qF7t580wuPm7NWoJWS0V79uxDgQKF8PSpKYi5VOlyOGDW0k+e5CXaP1VX/dLdknFoU6f4WinmzZ0vVgflZkhXlKZNnFQTP7+s4+DkapjaGfhKTfCOXSZGjMJBqVJlpc3KlauRJ08+dOjYWawbqVKmBp/zgIjaNEt3vPLlKsCunkmAohWObgaKuOA3a+b77ly+fBVcRGeZGXW6AHJOpPBIxvfixcvCNJJxy/5PduTJnVe6opUrZ448oGZVUYWKlbF9xw45H7ctiZafNm3aShHnBg+PwcZhJs1wcTExUaPHjgOZQBK162QOyKgp4jvBeDnSrFmzUbOmaZv7Bw4eRNlyNsLQ0e2sUWMHmZN5jBaEAwcPyzVJY4t/tWvZGoyNzxkfib9Qh0uVLI1yZcuhVas2sClfQd5BusVSofK//+UwLL+0KNB9jDRlylRZY6idp1UzdszYqFPHVnXp55cKmTChQoMKGCp3+JweNrtMqoq0UG3ZanKn2bTZZPHOmzuvCCSqjvrds28/WrVsLe8hGWsVjK6Oq18yp61am+4Hy5YsXS7zETXjpHp2DbHKHGviPxZF9UFlCdc49sN5mcq/4sVKyuEVq1ajSvVahiWGLnDW1tnElYYV2nfsIpY5btMCSR95RYxl8Rw1WnaptChbtjwaN2oq94Brj7IGFCpYCP3NDCndhXmvLl26YsxnTFJDi2/xYiVAqwSJLrL0RlBuNtdv3BIFx/Zt2+U57+HeT2JzWJfzCBUZSliRDsz/6BZLC4Ei27p2qFKlmtoVZeMSs8sN3eO4HlAh1LBhY2GU53svlnmdz74ijumcOQFDfbsGcHR0kUNkoGmNp1WIRAaWRFfRVClSypzEfSawyZI5m5F5j8pVehMoatmyDUaYraNr165HmjTp
QQUEkz68MVvM6EJKRl8R1whaKElc6zhnbtpk4pU8PceIULNqtUmzr9pY/tJF2tHZDfvMsYi0ynJMao5u3bqNKJMt26htCpFTp/u61nI9s3Q159quBBK6Q9F6o6hunbqobLb4UkCh4m3aNJMyjpbMcRNMWLI+504qNjhnULHK9Yjvd0A0eOgItGjVxjhEt1t6AXBuJ49HgZDW5nx5C8h9VkJ14yZO8o6xIV3fqKg+bXZNHeAxSCzjE8abEsTQw0HTr0fgpwUQuvL0MWvFOTwyPBcumNx0+PCQnjx9LowR3WIsgz9pXqSGm+bkjh07oYn5paJvdPGSZQzN6tJly0X7cvCgyd+XLzGle0XUqo21cDWgm4l68Cn8UANLom9v+XI2olmhiwzjBm7cuBngAqj65mLKGJXvIWUBoXZS0SSzAEI/fdLgwcMQKUIkCdhXdahdHzNuoowxfboMxqRLN7HIESKhfdt2UpWLbvw4cUUbyALGqSSKn9DwGa5gU0lenus3TelGaU2IETU6nP1lWOGiETN6TGzfYWK2GtS3R9RIUXDrjsm1jH7NFHRevnojmloysgMHDlLDlQWDWiS6lyRNnETc4NRBTupcmBjjQ6KwQ3cuavlWrFglJl7llqHa8JcTQIOGTSR2gfv0C2ZmIrUIOzo6wb2XybWNpla6ylgSZVIKwEWKlhTXIR6j33/3Xn3EVMz7vmjRYll0VWY0+jkPHT5KfGe5MCrhh25jZFJ43X1698Xrt+8kQNXV2VWCSukvS79Y5XdPJp4awESJkggjZjkutU2tzqhRJusAy3gNA8wCCCda+kJTADl4+Cgo3FNjThM3hXAypiqOR/U3oL8HalSvJc80Y5W4EN2+c9dYTLhQ5sqRG2lSpzXMy2w7YMAgcdlS8VLEt2DBwjh//ry4Srk1a2VkpaGgSWaVftTz5y3A8mXLJQ5K+S7TEhXKKrRYQnluErVMRYsUEzc+tlNEgUrFzpQtV9Fwm+Nx+q3T/e7e/fsiBNNNY4dZC8qFnlZC/0QFBhkZ3lNFZAJsKlYVDTXLGFjOhTIgunTlqmjC6PNNogBStFhJIwWot/diQ0lB4ZtMCN9vno/+3NTyUhi3JPqr16hVx/Av5rGmTR1Rz7Ye3r77INdh6XfO+IqOHToZXTx68gz58xeS2DO6M9ItgVYduhUp6t7DXbTVLVu0NMZ35MgR0WKTqVdE17ZevfuB1hFmx7MkCkJkNkg8RhcuRWQiODeT6HrFBZnEhZpaRC7wisZPnIz6DUzKI/p6s19FZJhpPaQfOGO52rbvjIuXvx7gT9cICjUHDhzAmLHjUKlydWnP2LzSpcti9OjR4PvH4G4qDKhAWL58pViOlI88lR606pLoVtWoYWOJZ2DMGtcFas0DolEjPeWdp1sSiVZcKsksiTEuZJrorsd3OGO6DIgZNToWLfaNC1u6bKWfWBMKMRXKV0Dc2HHx6LFvpjDVLy0oSuhjGa0lZHL5PpD6DxgozzK3p02fJRZ9OWDxj+tgpUpVjDVuw4aNhtsU3U6rVq9lZHyjS3OBgkUlPpBdkClV8QdPnjwBBVb66nM+ZEwh4xNJ9FyggEMcKezTUsE1hjjly1cAdOslXec6XqwkLl++LFaO3n0GyHzGeYqxFLTa0l+fFrhyNpVw957Jz5/Wm2zZsmP3rt2iOOzctedn1h6LEAs5F/9NnTrNcGnkPhUmtWuZtOp0IaXrKJPkkMgT0ALmvdBb3jPGmjKGiYx4L4t1Wyqb/9WrZydubNylSxstKnQxJDHuoUPHLuKRUc+2LipXNgk+XFsoFCtLBC1SdEFTxHWSQoMieiZw7U+RLAXWrjNZlqnkaGCRRIPvE4USEplsKo4YI6SoVy93RI0SHVQgKvcxdYy/VHbSMsgEOSQqsWjpUZkAmzVrjhYtTFZUy3bcpmsa3e8UUaGorOgso2JXJdqhcom8mSLbOnWNOYjWG1qWGYtEops+XbwUHT9xUlzgqHzhGkBlI5VyARHXSVo6/NPUaTNEAF63dp3MxVxLqeQjERcqpinkk6hQqVe/oR9FA68jTOiw4pURWMyYdKD//RACPy2AcFFkqlr/RM01BQy6BFF7dvnKNZDB5sOiiIwD3UxI1JzxJSDxZpcpVwG0JJD4S5Ph2XOmVJDMJuJioW2n1qN9e1/zNF23PAaatHlXrl5DqzamoFL6liphRDoG5KEO7BshTM+2fr3fAGfV9ku/NM2TaWXaT0V8GVk22tNU1q1rd0SMEAnXzGlqe/ToiYjhIuDVm3doYFcfKZOnxE0zM8d0thRAOnYwXUfNGrUQP2481TWcHF2kb5M9CGICp0vUCgtzf6zoMdHGzGyohkuWMYOLFWjdIBUuWFgmAjW5N2/WApHCR5RjdBdhXWXBefrsGWLHjIMO7drj8bPnCBs6jGjcVN9MzZw0cVIjsN3La5q0V/eU9ejj6v/F5sTUsk0HYaJZZ/uOXRKbovqlVm/o0OGyS6aKrjKKyPjt3X9QNCWMXThsTuNIzbHHoKHy/Km6/KXgwEDDIkVKCAPDMjJ6apKiXz6JjDZNsucvXsagwUPEgiIHzP9oyuZCoOILOKnSnSmgzGt29RoI46TaUxtLBpnERb9q1Rp4+eKlbPO5Vb69PM7+GOhnSXR/cmtucjlR5U+fmlwZ+Dz06echMTh8hqtUq2kEPFIA4UJHxoFEpjp/gcLC1FAB0N4iFuXatWuoULGKH396aqM2bt4qvuF83kmMHWHMDbHYtduUL5/aNbpvMMsPidqoTRtN+f2pLbO0QFLIsjSl0y3p+AmTNp8WgyZNHKUP//9o8SxfvqJR7DVlKui3TmsBaeHCxeJ7blSw2KC21c6+sYEr72X5CpWNd2L9xk0GM0Gmk1piS2LQrv9kFHR/4HOpmBPWJ7OiXO+o1FD553mMjFCqlGkMZoHBxcVL0BJ2U5gCBlRTCKTLDRk2xo3Qj5s01WuqBIZymwkL6JOtAmVZRusQGV+T+1t8g5FmYogixUoa11mubHm4OPu6qVHDqlwdOWermCkyXoUKF/Uz59PtT823oz3HoHGTpuIawiBfWrAoBCnifbJMZUrlQEABs1w3hgwbIfFsffoNEJcO9sH3oUzZCqD1UhGvjckWGKBK7T9jPUiTJk0x7h2xV3Fvqh2DygMiBsJyrqPbrCKuZYr4Xu07cFisQbw+Cs60UseKHkO8AVQ9Wh2ZuteS6HoUL46vtdryGF2iKlc2xQSxnLFpZPaZwIXE+Y3WOFpMOXf5j4NgHTKitCBbkkqiQUaRsXJKmUMGiy5YKq6mU9ceIONH4jkpdLXr0Al29e39CNOMFWjs713kPP3w0ROUKl1WBEb2wTWiSuXqYhHl2Nu06+TnXWGyCK69GzZuEvcbPluPHj3Cnbv3UbBAYZw8aRqLq1tLPy6t1GArodPyOmkBoaVGkaODMxqZXfBYRkXWrl0mty4y0owBsSS6O42fYIqDsCxXGfJaNG8p3hM8xoQwdN8mU0sif0JlGalzpy4iJHN7z569okxggD9p6LCRhoss9ynIKQWnj4+PoUQcOnSYxIe
wDi0ylhZSrhG0wpL4HJQtawO6DZOUoEzFDAUdS6uCVBCF8FO0bN1e5mmW7d67T9wMFf9Ai6alUlm142+Tpk5YsdIkxFGpQFdJS+VvlcrVxILDumvXb4SDo++c4uLkIjEmPMZzFShQBOvMQhaVS7lz5jFORTe8woVNgtoZHx+06+B33jUqcn73XoRsWf+H5+Zgfx6jO/XmzVtFeWOpIKJbskpq06FTNyN+jIokrrfK7ZWutwoPZq5r5Y93sjy/3v5xBH5YAOFNZWBdzuw5UahgYVBbNHXqdHEbYBAoffm5uOTJm18CMTlEarzjxI6HihUqioaTky21NdSs8BsZXDxo+uViFjN6LHTr1lMWHsZScEGYNMlLtFYMRkuaJJnEhrBfBqnypaQ0y+xLFGqYDYMaLvpeUiNNf2MSg4kZ7DlixEjRvm7bscuY4KWC+R/HytgHxjHQDcvymxr0ORw+YiT4TQf/tHzFStF2crxFixTF0mUrZNGcMXOOLC70ZaUGnBoOB7Nlhn3Qj5ttBg0ZLvW4PX2GyT+S/trcr1a1ukwwdGXiPlMMk6jNixolGmxt64ofKd1wKIDMN8ck0ErA+mlSpvbjnsHJi0GEpUqWwsiRnpIiU0Wc6wAAIABJREFUkloDEk3KyRKbzrNu4yb5rgj7KFGspCwo9J3nPgMwR40ej5jRYyBr5myG3yQnMdMYvUzudvfuIXPmLIIn3X94bdQQMghOES0OdANJlDAxRo8ZL64UdJmKFCmKZIDh5JAubXrkzZNPtGeMQ/knW3bUqllbtCdcBOm7y+sNFza8+JCqzF4089esaSv+2ox32LBxiwg/tC6kS5dRAufoP0sfbFp5yOz17jsAAwcOlmd64JARYr3jc09NCZkF3kcKNhTO6Ofs1ryVxDXwGSYmXHT9EzO+MOCZwbiMlaBlgVpaPkvMYhY2TDgMHmyKXxo3foIwb4xn4jUxtoqB9/5p7PgJaOrojBEjR4lvPzWIdBGha5ZigMjQJUuSHAniJRBtHZlp+sH3HzAIY8eOE8Gc2biY0YTaNcZRzZ4z37g/FBwpHJBZ5vPIgEgG1ZKJIbNPoYsB18wvz4/OkQmiX+9krymiGXz27JloUpl8gZpEBuFevXZNhCBaAKZMnQYG+23dvlMWCDK8fKYZc0TNF2M6yLipBdYSAzJ/jEmhNm7yZC/5PtGyZSbNFjPokbnOlNFa4iZUliTVntfLAMn6DexBYaR/Pw9EjhQFvfv0k+vr2rW73BMuciTGFdAlh/MHkxwcOXbcsDapPl+8fC1zBpk0Pl+MPyETTSKzFS5cBHFxUMIln3smArCxqYzhw0dIXMnceaZvRqg+1S/dJFRWKjLhZACiRokqgjoZb8Z5UfinpZUBwEwQoIjJLzjHUogm4zN4iEmQ53Fqiukuw/mRzzwTN1BTSuLzTPzojka8TC6xWdGv/wBwvmDyDAbFkhgMmipVGglSv0Y8+w9ApIiRZV7icbplUCDt27efxCKQSbHMTCadSDxRd1D5M3P2HHn/GAeiMrwxaJ7uQZyDqT3mHE+G1L2Xu8w5FCBo+aPW2MoqlMQcETcynXQT5fzDeLB16/1+6I4MFYOwbWwqSD+0tKhYGzUuxp4wCQExtswktnL1WkQIG17a0e2HAnrXrj1EqbZt+y5xER42bAScXZvLvVL9Wf7S6kNBn88ex0grpIrtUvWcnVxFiOAz8yWaNm2mMIqcD+iuvGu3KfEC/e3p9jJ67HhR4jAInRYkEufj5MlSihKJ7witI3QbpvsiBZdZs+cYbnd0saRbLL+xxf45l7CMColwYcKhUIFCss8MgmHChBM3Np6DzxHdooYMHSbzGV0IiTmxypYlG+iOdPzkabHCcO2guy2fN2YiouBFl2b2QeWGf0sw1wbGG0WNHFWehctXr0psKPkEJmNhJk7GXjWo30Ayc1HjTaGB56B3Au+78s6gZpwpmEePHiMJAZh5kfc6UYKEyJkjlygKqJyi+y2tmuQzaO11cnQSrw1+8yVt6rRiMXZ1cUMoPoMrV4n7LL0VwoUNJ8HxDAJPliSZuCUxWJxZJ23rNhCrE+cWWtMYu8PYUM7btC5zbqXXAl3rbty8LRkJiVWXLl3l+1JcN6mko1DTb8Agw8qhnhW+B1xfkiZOJoIDlWodO3aWd5RKqkdPniBDuvSSdEK5lam2/GV8G5UajKvjHE43Ue7zPeSnFxhXli5tOjDQnFaU6NFjilBLpVTcWLElluPmrTvYuWsvwliFERdKrn18fmhZJU/IzHe0aHBdpbBM63WG9JlEgWgpTKhxsYxWV1rfqPQlz8XkDSQqq+nex3tJHocf2uR95tzJZDxUiEkSk/ETkSRJMomXpEDO+ZjKpsWLlkhSE+JJ7LgO+8/4qMahf78fgR8WQKiFoFtNyxatRFpm8DljK8gEtGnTXrIb8MXli6wmcWoS6HNJ1x9qWUn0XWcwJ/1oqUFgFhD21aVzF9G6UavC+A4GIjGglYHZDHhnkBcfLmoE+w0YKBMFg20dnUw+muybExv9TZs0birxKQoe9kPzMs2FKthMHVO/ZFzJgDJzFK/H0q2B10PTe0BfS6YfL90tyJxSMKL2kOkEVTYKMsnM7uNfw8APQLk2ay6Bl5evXgctJEoYoIa8Tdt2IuRxUejevYcEXCpTN8dMoYgLH7P+0IzOAFRFNN2zPy6a/oMgqXVidg+aeZX5ne2oVWQ5X2imjSQjS3cfxk+cN2u8ySTSpYgmdGr/WZ8xNyRqI3k+ajLUpEHtMl94PhNDhpo0uGqM/GXCAVrTHB2c4Ok5Fpu37hC3kKYOjpJVipp2N7dm4ndMRpV0/fpN1KltK5ou9ZFH4ta6VRs5l2WgJp9HamPt7RvLmNW5OUaawhlcR5cDPn87zRp8XiMnN2ZrUUTGkQJ2pcpVDSGRghQZcmqmmP2DvsMBEd0Im7doJRmDli1fhbZt2sk7xIA3fq+iSVMHCVRUwguDQ/msduve009KSP99U8NboVIVyTrFY3RDIQNGJQGJi0nbNu1BFzYuBlyAqeVes3aDvH8UnEl8prlw8P1isK/SnvIYLWX8IBuDKC2tO1yMmUWFWc5IbEPhlswL41jUO0YLKP3quUAqwZDaY7plUbOmnh1awaidpEae/sV8bphNy8XF1cBbTmTxj4waGX2eb7dFimsqM9gPXTx79e6LuxZpeFVzuu7QesM+GMfWzM3kVsCAfCouGjdxEMFD1We2NZsKlSSuSZVZ/tK6QCUImbp+/Twk0YA6TqGM95huopYY8ri7e1+512TiAyMml6CQ6N67r2Racu/dR+ZOugPyuV+7bqPMo0r4tOyL74ZNxcoSBK/KP338JHMWBU7OS8wmowIwWYfXQyaAiRwU8Z3nnMGFmvdHERkJvj8quJP3jxYRFfDLenwuyLzSdelLX5amqyQVP5xPGQPFYH8KRyrFMudjZqnhc6fSa/bs2Rsurm6CxYOHj4XhcHBw9OPm16fvAHlvKUj6J1o5qN1v2bK1xDvQAkyXGTV/sf7W7TvQvEVrGRtT5SoaOGgIunbthkGDBs
szzoBkWq5Jx46dkDmHQbnMFBUYMW08Y4I4ryrrpGV9atJp5VV+7JbHLLepwaUnAIUgMvkkrh3M2sd0v5zTlMaex4YMGY7GjZrIPMMUrSRixeeMAhGFQVrxFf501+GHh5mhkBmeSEx9S2aW8wcFQrovynzWp78Rn0glJYUsJqxQLrBsS6ZYJQWg4OLsQr/9roZVkEq2qtVqyPquLNNyUvM/Pq/M9ujk6CyxDazPdZuujQyspxsP5wF+oFe9d8y4R1dtXoey2LI7PsOMGWCWuWHmLIcbN20WvoWuifRaIL9CaxbXON4n8hqcbwcNHiqCGK2HnOdpvejQoSM4v/J6ab13cHQS7Bkb5OzkIpgpaxyVR1RcKBcrWkHJR7i6uokLLeNE2zC+p2NnsWxSodHU0eROptYcruO89/7Xel4bnwXOCw5NHYXZ3rp9F/js8j3h/eJ6yee+dau22BhAGmv2QWsMlRiKKBTy48tUUFBBxEQzFK44DzRu3ERieenaynMypuro8VNYu36DfKzZ1a2ZkXmTa4J9g0ZixaKgRWLMGXlKrvucQ5T1Qp1b/ZIP5Htbo2Zt4QtVOX/Jr9WsVUeeHaX04drGPqko5XtKQZp8Yu++/cXqTNfk9Rs2S5IbtTbS4kg3O8t50PI8evv7EfhhAeT7T/X7WtBdJKCX7fedUfesEQjeCOzYsUN8pIP3VQTN0b969Qat23aU+KA/OcKjR48aPtbfe14y8rt3m2LBvrftr65PQZBuOP6J6bdVinf/x/4r+zt27TFcZ773mqmwG2B2Tf5aW34hmkoESyJTSCWOJo2ARkAj8CsQCPYCyP37DyW/P1PzKp/vXwGM7kMjEJIR4EcC6U5g+bGvkHy9f/LaqP2llpcuo3+S6E7DgNeLl0xJQL713Ixj4bdS6L4WFIiaeeb7Z8we3bOYTYlaTGYIpBvEf42YgYcJLqhpnz13Aa5c/fIHML+EDd1KGCdJS5Kyrn6pLsv5TR1qlG/fvi2pten+QwGQFnNNGgGNgEbgVyAQ7AUQfriHMQHMnqOyOvwKYHQfGoGQigCz1TBANnWqNGKGD6nX+Teui+5m/CZJ9v/lEN9o/z78v2tMjHmjq0KaNGnl/HRJ+FZi9hnGZdAKojLgfWvb31GPwevTZ86SAFJ+fJQ+5j169TY+Lvk7zvkr+2SKUUu3rZ/tm/EL/DAv43IYE/AjRJfaHDlySXC3l78PBwfUH11f6CrLAGcmPejQsasfl6mA2ugyjYBGQCPwPQgEewGE/skMVDL9fh70+z1gBPe6tAAF5Dcc3K9Lj//XIkAmmUGbDNQLLAPcrz3rf6M3Mp4yF714KdYl/8GyvwsFvvu8lwwg5vm/x1LAzEPyLDx6HOAXun/XmL/WL+ORLl26LMHw/KhYcCEms+DXun8VMYvfxYuXwKQbP0qM2aCPPdOPMmXstxAzFBJ//qksdt/STtfRCGgENALfgkCwF0C+5SL/K3VoHrdv2FQC4v4r16yvUyOgEdAIBBUEGCTL9Kf8ONzXAsWDypj1ODQCGgGNwN9AQAsgfwP133TOOXPnI3z4iBhm/rLqbzqN7lYjoBHQCGgEAkBgy7YdCGsVGuFChZbvIARQRRdpBDQCGgGNAAAtgISgx6BWrTqSh75w4aJ4Y/HhrBB0ifpSNAIaAY1AkESALkv8KG70SJERI3IUiV1hbI4mjYBGQCOgEfgcAS2AfI5JsCxZtmIVYseIhQRx4iBi2HCYMDFoZLQJlmDqQWsENAIage9EgN8tih0jJhLFiyd/MaJEw3LzNzK+sytdXSOgEdAIhHgEtAASAm4xP4LHj7mFsbJC+tRpEDlsOBQpXAyPA/hSewi4XH0JGgGNgEYgyCFQtVp1RIsYCWlSpES6VKkRNWJElCpVJsiNUw9II6AR0AgEBQS0ABIU7sJPjoFfPo4cIRKSJUqM1MlTyF8Yq1CYNHnKT/asm2sENAIaAY3A1xBYt2ETEsVPgASx4yBtylSiCEqaMBHixIyNBQsXf625Pq4R0AhoBP5zCGgBJJjf8nfvP6BGzdqIEi68CB60gKRPk1Z8kPPnL4gnT58F8yvUw9cIaAQ0AkEbgbJlykn8XfJEicX6QSEkVbLkUla8WEnoWJCgff/06DQCGoE/j4AWQP485r/0jIuXLJNFLmr4CEiZNBkypUsvgkjsqNGkfOiwEb/0fLozjYBGQCOgEfBFYNLkqYgYPiJiRo2OZImSiACSIW1a0ALCmJBQVlYYNmKUbwO9pRHQCGgENAI6C1ZwfgYePX6C4sVLIWb0GMiQLoNYPuiClS51amRMlwFxY8WGtXVW3Lh5Kzhfph67RkAjoBEIsgisWr0OnqPHYsfO3Rg0eChiR4+B5EmSYuy4Cdi2fSdGjhqNNWvWBdnx64FpBDQCGoG/gYC2gPwN1H/ROZ88eYrlK1Zh34FDOHfhEura1kPoUKFgW6cufM5fxP4Dh7Bq9Vr5MvIvOqXuRiOgEdAIaAS+gMDho8cQIWw4xI8dF1euXvtCLV2sEdAIaAQ0AloACUHPQOtWbcTtys2teQi6Kn0pGgGNgEYgeCCwbdsORA4fAQnixsPRY8eDx6D1KDUCGgGNwF9AQAsgfwH033VKV9dmIoA4Ozr/rlPofjUCGgGNgEbgCwhs2bINkcNHFAHk8JGjX6ilizUCGgGNgEZACyAh6BlwcXEVAcRJCyAh6K7qS9EIaASCCwJbtmoBJLjcKz1OjYBG4O8ioAWQv4v/Lz27q4ubCCAOTR1/ab+6M42ARkAjoBH4OgImC0gEJIwbH4ePHPt6A11DI6AR0Aj8RxHQAkgIuvEuziYLiBZAQtBN1ZeiEdAIBBsElAVECyDB5pbpgWoENAJ/CQEtgPwl4H/HabUA8jtQ1X1qBDQCGoFvQ0BZQBiErmNAvg0zXUsjoBH4byKgBZAQdN+1C1YIupn6UjQCGoFgh8DmzVsRPnQY+QbTIR2EHuzunx6wRkAj8OcQ0ALIn8P6t5/JoamDxIA0tG/028+lT6AR0AhoBDQCfhFYu3a9zMFhQ4XB3n0H/B7UexoBjYBGQCNgIKAFEAOK4L9Rv769LH716tUP/hejr0AjoBHQCAQzBE6f8UGLlm3QsVNX/SHCYHbv9HA1AhqBP4uAFkD+LN6/9WwrV61B9x69sXrt+t96Ht25RkAjoBHQCGgENAIaAY2ARuBHEdACyDcid/q0D+bMnotZs2aDmU4siVqvTZu34PyFS3j37p3loV++fePGTVy6dPmH+331+jX2HzgoX+l9/fr1D/cTWENLDHzOnsMZn7N+qr979/634+TnhHpHI6AR+CMIvH7zBuvWb8DmzVtw6fIVrFu3Ht7ei8AvhP9p4rxz/sLF7zrty5ev8Pjxk8/aLFy4CLNnzcG8eQvw7Pnzz44Hp4KHDx/h7LkL+Pfff4PTsPVYNQIagRCGgBZAvuGGzl/gjdgx46B2LVs0bNgYAwcPxfgJkzFnzlxpvXrNOkSKGBmZrbPg3v0H39Djj1epVq0m0qbNiHfv3/9QJy9evkKlSlUQIXxE7N3/632UL1y6glOnf
Yyx/S9bdkSNHA0fPn40ytas24gL38kYGI31hkZAIxBkEXj6/DkGDh6GbFmyIV7suKhdpy5cXJsjcsQooIvox08/z/ROmTYTVMR8jTgfx4+bwM/c87U2K1auRn+PwUa1bdt3wK5+Q5QvXxH2DRrB0ckF6zdugYfHIMyZM8+oF5Q3zl24iJmzTWsVx9mufUfEi5sQd+7eM4a9dftO3Lj5dUyNBnpDI6AR0Aj8JAJaAPkKgBMmTJa4ikGDhvip2blLN8SIHgsvX76U8qKFiyJZ4qR4/fb3WkAOHz6KFStX4e1PWFo8BgyUa9qxc7efa/rZHWo8GzRsiqvXbxhd7dmzDwsWeuOjWQChm5ijsxtevX5j1NEbGgGNQMhCoGePXjLHbN+xSy5s2LARst+0iSNoZfhRmj9/IbJm/QcPHj78ahe7du+Bt/divP/gq/z4WqMNGzdjxao1Um3J0uUIbRUaDRr4TepBxj2zdVZUrFA58O6CiIWhWtUacHJyMcZ6+sxZLFu+Ek+fPZOy7dt3wto6q1isjEp6QyOgEdAI/GYEtAASCMAbN22RRbNKlaoB1lq4aImxmJYpXRYF8xfE3gOH4OjojClTpvlp8/zFS1nIihcviSNHTV/IvXv3ntRt2aIVLly8BE/PMWhg3wiv37zFzVt3YG/fGC2at8Kjx4+lry3btqN1m7bYuGkzPpgXVSoU+/bzQNWq1WFv3xC3bt02zkttXokSpdC2bQc8e/7CKKf2LnrU6Ni3/6BR5n+j/4CBKF68FA4cPCyHrl67Ltaf5s1a4MTJ0xgwYCCaubXAg4emsZ087YNy5Sogfpy4KFumHHbu2oMtW7ejQ8dOWLXatKBv275TtKIJ48VHj569sWzFari6usHBwVEWRJ7Iw2Mw6tSphxOnTvsfkt7XCGgEggkC7r16y9zJOYt09doN2Y8fNz5u372P6dNnwrauHXbv2SPHXVyaYey4CbJ9+85dNG3qhBo1amH8hElSxn/Llq9C5AgRkSBOPLRu2wE3bt6SY1euXgctw87ObkbdmbNmo0MH37ln67YdqFWrDkaPHouDh47IvDt16nSjPjdOnTqNUZ7jpOzl6zdImjgpUiZPiYBsNkeOHsfgocP9tO/QoTMKFy5qzHe0Unfp2h2NmziAyp558xfCzs4evD5LontXnjz50Mu9j2XxZ9vLV6xCgfyF0KVLN+PYkKHD4ejohD37DoDrEa30q9esleMct7P547S5cuSC9+Kl8Dl3AcOHjQDPSdq9Zx+SJUmOeLFiw82tBfoNGAQKifymVI+e7lLHx+csipcojWXLV8i+/qcR0AhoBH4FAloACQTFtm3ayaK5as26QGoBXKyqVqmGRPHiY9wEL9jZNZB2i5csk3bU+BUtWhzt2rUHF8b/Zc+Js2fP4c2bNyhYoLDUbWDfGN5LliNlshRImzY95i3whnvvPnJsktdU6efKlavgAp4ubXp8/PRJykqUKC3no6YvU6bM6NOnv5T36dsfefPkx8qVq5A+TTpUqVJNyvmvfz8PxIgaPcA0kS9fvUa58hXg4OCE2bPnorxNJVmYaekpWNA01tq162KS1zSEsQqFli1aS790PaMAFsbKCi1atMLNW7dB7aeVlZVYhliJefHjxomLksVLYtOmzeKHXKJ4SamzYIG39ENriaOTK+7duy/7+p9GQCMQ/BDo1bMXQllZYc7ceTh77hxsytvIez5j5my5GL7vnBtq166Dm7fvgfMYlQ8vXrwUZpzCwoIFC5ExgzUG9PeQNidOnkKmDBllPiMzzrqbNm3BP//kAIWJnj3d0cI8H9GSwf5TJU8pbalASZEshZQNHzVGlCk8znlT0cGDhzHKc6zsenqOlbqVKn7FygHIXGxXrwHq29lj4UJv5M6dD5u3bJV+qlWrIf1UsKmEaTPnIk6s2KC1nEom0oyZs5AlczbMnzcftWvbolWrNlLu/x/dgPPmzY85s+dIva5du0uVkaNGS/+5c+XBjDnzkTxJMmTL+g8emeNYRo70RIQwYVGjek2c8TmHh4+eIEXS5IgWJZpYoW/euoWCBQrJ2sVz0C3XyclZ+ixVsoyc48GDh/AYOBjHj5/wPyy9rxHQCGgEfhgBLYAEAp2To5MsosdPnAqklkkAsSlfAfFix8GDR4+xdv1GmcAHm922lixdgbChw8LLawqOHT8hx2gxIfXtN0D2lT9xofwF5Zxk4KnhixYlKtyatTDOX6eWLYoWKSb7+/btR/RoMbB7z17Zf/v+veHHO2zoMPTp00+Eo+z//A+pU6Y2+ujf30MsIHv27TfK1MbhI8dkPJ07dsb58xcQLlQYZEyfUQ6PGOkpx2ZMnyn7ieInRPmy5VRT2DewR2irUNi1Z5+U3bx9F5kzZUaO/2WX/fcfPiFH9pywTBNMTWKMGDExwGOQ0Y/e0AhoBII3Av37DUCEMOFQs2ZtOLs2Q9Ys2TBjxizjoq7fuImY0WKADLqldXb+goUyx+zYaXLdKlu6LNKnS49PZncmzrMZ0qU3+mnVsjX4zY2jx45j6pSp0nb27Dly3DqjNfLmzmvUrVvXDgnixse9Bw8lUyAFkH79TAobKolmzZmPK9euS/0WzVtKX1UrV/VjAdmzZ68kIbn/4CEOHz4iqXZPnj6DmNFionOnLvA5e1aEDAo7HPJkL9OYerv3ln6LFikqHylUMXzFixZHhrTpcPnqNTSwa4CwYcLh3n2/7mUUnmLHjI3ixYrj2o2bKF2ytOyzQ65NEcOGR51adaT/BvXtkThREpw4dUb2z52/KPi4m60ZLKxcsTKiR42G/QcOSZ3q1WogccJEsq3+cX1KliQpnjwL3gH36nr0r0ZAIxD0ENACSCD3pFmz5rIILVkWuOn55as34ISdKkUq6W3W7LnSborZcjHScwwSJ0iEUiVKwd6+EerbNYDn6LGysLVv30nq7jcHhKdPm14Y/jfv3ku2qrix46J5i1bGKCvYVIRN+YqyP27cBCRKkFgsDUYFi43zFy/DyaUZokaOgtKlShtHBg4cLIJLQALIxs1b/8/eVUBHkWxRdoEPLO6yLIu7u7u7u2twggR3d3d3X9xdgru7u7sEuf/cN1PNTAgSSFjYvHdOMi1V1dW3q6ueNyKGC4+M6TOgbt36qFi+Irp2tbkGdOjQSfrKLGCvPN4gXOiwKFG8pNVuieIlRGChqZ909vwFxIoRC2lSpZZ9Lqy03tBa5PHWZsHhCWr9yAwwgJ3xI+/s1h2ppP8UAUXgl0OgXdv2CBokKPbtPyhuqp7dmKiICfh7QHSxM+bmBmnBCBwwEBg7RipbphwiRYiEK9dsrqW0HsSNHQce9piyRo2aIEyIUGIVqF2rjgg8Gzduwv2HjxE7VmxkSJfBNA3OnbFjxpJ5d9bsuTLnDBkyVM7fuHETLVq1tbLz9enbX85zTnOk/gMGI2GCRIgSKQoSJUgsyh8qnOiqlTFDRnGbrVqlGrp27Q66x9JFinPb+PETpJmUyVMifpx4sv36zVuxPuTIlh2uzVuhWtUa4oblGBzOgtu2u8u8WrhgIbRya4vy5SpI
EDzPreeX1/0HRIvmLaXNMqXLItgfwcD4FxLdYAP87h8tmreQff6jNSpo4D/gvtOGMTGNGjmKk2vYhImTpd8NGzUVN1tmN1NSBBQBRcAnEVAB5DNoLvhnsUzCxhTtuagJhORCU7hQEUSLEhVkq+fNXyj1xo4ZK1VGjh5r2x9nW4RMO1yUad3gAsVFk5Q4YWIRZOjWdejwUYQKEQpurduaKqIxzJvXZnXYtHmL1B012uY7bRUSf+llSJgwMRibUbpESXDhM8T4DcaAHDhgi0Uxx/m7fcdOabNHj16Oh2W7SdPmcm7lqtWyuEYIG97SvLEAtW/URq5cvVbKn79wSbSVaVOlkX0uYqlTpZV7kAP2f/RDDhk8hMSxUDuppAgoAr82Ah07dETg/wX+ZFZApucm49zHk+WzT5/+Evi9b58t9ixf3nyI/nd0y0pCJU682B8sILRUBArwP9Da6khHT5wElTmZMmS0DnOOjvF3dBFeGGDOeXf4sOFyninUqXx5b8/Sde3GTYkBiRr5T1AZ5EgjRo2Rui1cbUz96jXr4P+33yXmxLEct9u26yhlR48aLafSpkmLWDFs1mhmBkyYIDFogfgc7T9os0rTou2Zlq9YJQJb0yY2JVWF8hVF4cQ5lbRj5y749/c72rRuY1UtVLAQQgULIcIhD9IKEyVCJDxysHa8fQ+kSZVG+r52g21tshrQDUVAEVAEfAABFUA+AyLXovr1bVaQ1q3bWtlU3rx5g1FjxmHR4mVSm65PGdJlRNBAQfD85WvQz5mLW+9eNt9lmvX/CPwH4seNZy3IFy9dFj/g6tVrStnV9o8H/hkpMkIEDYbHT55hz559cs7F5UNwZZZMWcTHlxfmgkGXp4zpM+Lho8eSGYvWB6baZSA42zmkQN0jAAAgAElEQVRy/CTSpU6DWNFjWm4M7dvbFkWzSDlCcPnKNfEPpobv3LnzcmqBtPkCzZrZBJAlS5fh5SsP6Vu+PDY/YRasWbO2uF24tWkvQfV0R6OVJLZ9wWWZ9OkyIn26DHjwyJaBxVybPt900+A3QpQUAUXg10aArlGcA2np8IoOHjws51u2sGnuTZm9+/bD/+8BMGnSZLx69RrRov6F4sU+WFnJLNNCe+fefalCKzCv06B+Q9nnN5mYXOPKtesIFSIkEsVPKMep7OE8GYKxDy9fYfqMmVJv8KAhcr5RY1dxOZUd+z8GjlNRU7Rocdy7b7seTw0dbosPaWaP12BGv/jxEiBk0ODg9UkMjGdGRK4b7F//fv0lBTGFj9DBQ1oClZub7TwtFSS63TJWw5FevnqN8uUryRpi4jAu2zMNMhaG7VerWg1v3r4D52Pum/bYFvcb2vFhRrCc2XPIsQ0bbd+zKlWqtFhETpw6I7F75tr9+g1A1D//8va3VEx9/VUEFAFF4HMIqADyOXQAmdQ5yceJFRdFi5ZAteo1UbRoSYyfOBn8oBOJweZ0hUqVIhWGjhgt7gCRI0VGnjz5xUeYZaipYk76VKnSomv33iKI0I84U6asEgMxctRYzJw9Tywg9MflIjdi5BgEDxYC2bPlFKGC7gz0LY4ZIza2breZ2KdPn4lwocMha9YccG3hJtmnPDzegGmD6efbvmNXdO7SHX9G/hPjxk8Qv2RmZQkS5A8w84xXH6NiMGLI4CGRMmVqVKhUVdwMrl67gYwZMiFK5Cio36ARunTrieTJUuDPyFGtjCq02MSLGx8FChTCnbt3MXfeAnH1YsrKtes2ClYdO3RCqJBhMMeehUUOApgwaSq2u9tiWcwx/VUEFIFfCwEqRZiVL3XqtOJKmjNXHpgkGuZOPN68QfMWbogYPiISJUr6kQsp4yYyZMyCXLnyoVz5SjDMNuszU1TYMOHQr78tAxWZ/2rVqotgkTdvAbRo2Vq+ETJt+kyEChkaKZKnwv6Dh7F1u7tYllOlTI3hI8egfPmKiBQxEgoXLobde/eDc941e1Yt00/+UqCoXr0WsmXPhZq16qJY8VIoU7a8WEsYm2HowKHDskbEiR0PVMAw9oL3ycByxuk1buIq30dhPB1ddQcOHiZVKVyUK1sBsWPGQY2adTB33j8yd5p2ze+r1x4oXLgoov31N2rVdrGEFGaqChM6DGgVHz5qLLJkzoLwYcOjZy9bTN1TJg/JmFm+UUUrz+279yUhCud/E3e3fccOuT7TDd+zZzXkdRnIPnjoCPjWB2vNvemvIqAI+E0EVAD5yudOv9xNm7Zgy5at8utYjQvXyVNncOfufdGiMW0hhQt+iff+fVuaWpZnG/TNpfaPREsKv9R77doN3Lp9WywONP0z0JAf6mOGEi6+tERQG0iXr9Nnzkq+dsdUjiyzdes2K70v26b1ht/luHHzplyL8RhnzpwRAeTKlWs4f/EiTpw46aUAwgo3b9/Brt17pF3ue3h4yGLMOA72mSkruUDxHs+dt1lKWI79v37D5q996fJlsYQw4NQs7u/evQe/Kk+fa0fauHkbWE5JEVAEfl0EaMFkcDYZ93sPHorygvOMIzGFOOdIzmGHjxyVec7xPLePHT8hmaQ8fy+Ibktnz12wrLOmHuekdes2gHMO6fLlKzL/0aLAeZPz65kz53D33n2Zv3h9Zu7j/HX7zpcz7rm77wC/l8HsVkyZ7hXRCr1z126J3TPnL1y8hKtXr+Pq9esyZ7I//PP8hfadO3dh06bNeGBPuW7qe/5lPAjnekO8X6ZspzB0+vQZmUOZwIT3b4hB/kyd/ujRY5nHOc/a+vVhvuW8bawrjvV4T28d4vXMOf1VBBQBReB7EVAB5HsR1PrfjAAtSHv27sODh48km8sdX/6K/Dd3VCsqAoqAIuAHEKBSjHEjjNejZYgpeJUUAUVAEfANBFQA8Q1Utc2vQoAWJfonu9T/kGb4qypqIUVAEVAEFAEfR4ACCN3f4sdPhJ279+Lp0w8fsPXxi2mDioAi4KcRUAHETz/+f/fmqV3r2bMXFi1e8u92RK+uCCgCioAiIAhs2+YuGb2uqkusjghFQBHwRQRUAPFFcLVpRUARUAQUAUVAEVAEFAFFQBFwRkAFEGc8dE8RUAQUAUVAEVAEFAFFQBFQBHwRARVAfBFcbVoRUAQUAUVAEVAEFAFFQBFQBJwRUAHEGQ/dUwQUAUVAEVAEFAFFQBFQBBQBX0RABRBfBFebVgQUAUVAEVAEFAFFQBFQBBQBZwRUAHHGQ/cUAUVAEVAEFAFFQBFQBBQBRcAXEVABxBfB1aYVAUVAEVAEFAFFQBFQBBQBRcAZARVAnPHQPUVAEVAEFAFFQBFQBBQBRUAR8EUEVADxRXC1aUVAEVAEFAFFQBFQBBQBRUARcEZABRBnPHRPEVAEFAFFQBFQBBQBRUARUAR8EQEVQHwRXG1aEVAEFAFFQBFQBBQBRUARUAScEVABxBkP3VMEFAFFQBFQBBQBRUARUAQUAV9EQAUQXwRXm1YEFAFFQBFQBBQBRUARUAQUAWcEVABxxkP3FAFFQBFQBBQBRUARUAQUAUXAFxFQAcQXwdWmFQFFQBFQBBQBRUARUAQUAUXAGQEVQJzx0D1
FQBFQBBQBRUARUAQUAUVAEfBFBFQA8UVwtWlFQBFQBBQBRUARUAQUAUVAEXBGQAUQZzx0TxFQBBQBRUARUAQUAUVAEVAEfBEBFUB8EVxtWhFQBBQBRUARUAQUAUVAEVAEnBFQAcQZD91TBBQBRUARUAQUAUVAEVAEFAFfREAFEF8EV5tWBBQBRUARUAQUAUVAEVAEFAFnBFQAccZD9xQBRUARUAQUAUVAEVAEFAFFwBcRUAHEF8HVphUBRUARUAQUAUVAEVAEFAFFwBkBFUCc8dA9RUARUAQUAUVAEVAEFAFFQBHwRQRUAPFFcLVpRUARUAQUAUVAEVAEFAFFQBFwRkAFEGc8dE8RUAQUAUVAEVAEFAFFQBFQBHwRARVAfBFcbVoR+K8g8Pz5Czx99szpdjw83uDFy5fWMW6/9vCw9rnx7PlzeLx5Yx17/uKFUx3rhA9vPH/+HC9efOjbmzdv8ezZcx++yq/R3Nu3b7+5o+/fv//mup+ryHHg2DTH15eu5d37+FJ7n+vf95x7/fr191TXuoqAIqAI+AkEVADxE49Zb1IR+DYE9u47gOIlSqFY8ZIoVKgoFv6zGNdv3MCly1dw6PARlCxVFqVLl0GVylVRo1YdFCxYBOXKlseNm7flgj169Ebu3HlRrWp1VKlSDdVr1ELVajWRKWNm9Ojey1udGjFiNEaPGffFOu47dsGtTXsUK1ZS+lW1clVUq14TFSpURrYs2XDg4KEvtvFfKnD33n3UruuC7DlyYuOmzV99axcuXsbjx0++uvzXFnzt8Qadu/ZA0aLFZVwULlwUtWrXw/0HDz/bxN17D+Di0hCFCxXBrt17vSy7Zu06xIufEHv27AUFliqVq4Ht79y1x8vy06fPRLx4CbBv/0Hr/NVr13Hs+Elr37sbY8aOx8BBQ71bTcsrAoqAIuCnEFABxE89br1ZReDrEWjRohXChYuADh27CEO3d98+rFq9FvnyF0KXbj1BLfbOnbsQL3ZcBA7wP+zbfwCrVq7CH4ECI17cBDh4+AguXb6MceMnwp8/f8iUIZMILSyXMkVKOdZ/wKCv6tD0GbOkfNcu3T5ZnppnCkRt23fEmTNn4b5jJ6JGjiL15s5bgEOHDiNvnrwIGiQoxk+Y/Ml2/msn3r17hz59+gkOY8aM/arbW7p8JZYuX/1VZb1b6N379zh3/gK6du0mfapRoxaOHDnqZBHxqs23b9+hSuUqUoeCsFd0+vQZpE2bDpvsghaFYY69FSu9vpelS5chTZq0OOgglNZzaYBp02d61fxXHbt1+zZq1q6HAYOGfFV5LaQIKAKKgF9EQAUQv/jU9Z4VgS8gMHLUGGHcBgz8WEDYvGUb2nXsYrVQrGgJxIsb39rv0bOP1B01aowcu379JgIHDITq1WpaZdZv3Cxl8ubJZx178eIFVq5cjcOHj1rHuHH4yHEUK1ZKyk+aPMXpnONOl249UKp0OTx9+sFVLGvmLIgcMTIuXLoiRd8BiBk9Bvz/5h8rVq1xrI75CxaiV68+uHPnrhyn29aCBf9g8ZKlOH7iFB4/eYqdu3ZjxYpV2LdvPx49foKZM2fJsTt372H5ipU4euw4Xr32wLp167F27Tqn9h13Tp85K+2eOn1GDru77wCZYUMUoPr3H4hLl6+aQ/J79ux5DB06HKtWrZH22eeHj55g69ZtWLFyFegwdfXqNcybNx9bt223sNi4cZPgN336DKs93t+QIcMwa9Yc6xiFODLrKZKnQuFChbFg4SK8sbtw3b//QASZhf8ssspzY8uWrZg7bz7okufh4YEJEyeL4OlUyIudLVu2SZ9oMSDRRW7W7DlYvcaG284dO7Fk6XKnmj169ESUSFHgvnMPDhw4iMWLl+Llq1dS5u7du1i0eAn27T9kWW46d+6KBPESgONt2fIVWLZ8pdXelavXsHrNWpw4eVqO8T7bt++AAP5+R+3adZ3G4br1G9CvX3/s2LHLqn/p0hXMmTMP27bvkGP37t2T9un6RZey1GnSYcrU6VZ53VAEFAFFQBH4gIAKIB+w0C1FQBEAQJedzJmy4H/+A+Dc+YteYkIXmDdv3uDN23fIl68A4saOZ5XLkT2nMJZkXkn79h1AoAD/Q40ata0yHTt2ljLz5i+0jtFNhtrqAvkLWcdOnjqNfQcOYdHipfjNnz9MmDDJOue4cfbsOWTImAXnzp23Dr987YH06TLgz0hRsNPBZadLF5vmvUKFilbZPv0GIHeefMiVKw+yZ8uJGzdu4t2793Cp30j6RBezFy9foW69+rJfrVp1EUDoUjZp8lRcv3kb6dJlQOhQodGjV1/UrlsfAf0HxOQp06xrOG5s3LwF/v35R4UKlXDh8lUULlwMoUKGxtt373Hx0mWUKl0WefLkF0365Ss2IeTwkWNImiQ5SpQohdSpUuPPyH9iyLARePj4Ccw9UXN/+sw5aYvXP3josFx20aLF0m8y+CQKPm5ubVG7Tj0kS5IM1arVwKvXr/H6tQfquTTE7/78IWvmrOjew+Ym9/DRIxQpUhw5cuRGtuw5MXHyVOt2+vTtj/8F+B+6dO2ORUuWI1zYCHB1bWGd/9TGP4uWSJ/69O0nRejuRRw4BurXbyhuWqFChELffgOtJmjJiRg+Atp36IR2HboIxhQy+Kw4bjl2WH/BAtu46tGztwicbdp2QNfuvRA+bAQMsFvdKIBkzZIdQQP/gTNnz0msUras2RHYfwAUK1YCGzdvletSGE+ZMg1oqcmSJRvmzFsgx/lupEyRWq5HFy4KS7//FhC0HpF69+6L9Bkyg/FISoqAIqAIKALOCKgA4oyH7ikCPx0C1EqfOn0Wx06cwvGTZ3Dy9FnZ56/j36kz53D67DmcOHUGZ89flLLU3FMr7R1as3a9MKBJEyfFxcs2y8Gn6j9/+QplypRFhLDh4NamnTBukcJHwoRJUyztO12uokSMjITxEqBWrTpwbdYcbq3bOWmT2T4tJUOGDse6dRvkctQiHzl+Srbp20/G0jDQnvtDZi9B/IR48vSpderl6zfIkC69CCC79+63jo8aPU7aKl+2vBybM3ee7DOmhUSLCZlNQ+FCh0H6tOll15StUd1mzXEMdG/WrLm0M2DAQBw+dgLhQodF1ao1TDNOvxTcMmfMjEjhI2LG7PmihXd334kzZ88jVozYaNOmnZRPlSIVKpavKNYXPo909n706dMX4cKGw5Gjx6Xc0RMnETJYCLjUc5H90qXLIoC/33D4iM2aRCsO8Zs5c7acnz5jJv6MEk22x44ZK+dovSEtXbpc9ufP/0f2mViAjH2cWHFkf9euPQgcKIhYh3jg9t17SJk8pbjdMb7m4sXL2O5uswpIhU/8o1DJPvXt198qsXWbuxwrX64CHj55iuRJkiJ1ilTW+SFDhsv5Rg0b4+KVa0icMDEyZsiMp89fSJm1a9fL+Tlz5sp+/4GDZb9dWxueLvXqI8Bv/nH1+g05P3DgIDlPyxaJFg32iRYs0rXrN2S/tVtr2a9TqzYiRYgMxrGQhg4ZJucHDx2OGzdviQWE8VGkI0ePIWXK1Fi/Ya
Pse/cf2zlw6CjOnL2Ak6fO4MSp0+A7zj++96fPnsfxk6et9/3QkWMSm+Xd62h5RUARUAT+DQRUAPk3UNdrKgLeQICMTa5ceZE4YRIkTpgIsWLElLiLuLHjwvEvftz41n7C+AkRPmx4JE+WAjdv3fLG1YDFS5YJU5UgXnycv3jps3WfPnuOIoWLIVu27JgxYxamTJkqwpFjpX0HDiJUsBCoXas2Ro22MbsF8hdwLOLldvv2HRE3dhwUK1IUmTJkRKRw4UEmfO/+Ax+VL1qkKKJH+1usFOYkXaEypssgAsWuPfvMYQwdPlLur1KFSnKsTu06CPibf0tgKl68pGBHSwSpa9fuCB0yNNas24CXL1+haJHi0o8jx07i/IUP+NSv3wABfw+A/QcP4dKVqwgTKgyKFinmlAXMdOLZ85diMaGA8eDhh+DrLVu3S9/y5cmLho2aSD9q16yN46fOyPGyZcpKE1OmTJP9gQMHy/6evfulbBu3NrJfqlQZhAwaHMeOn5B9I4DMmGGLbaBQev3GTUybORvR/oqGKBEjWcKMYcLHjZsodY8eOyHXqmkXymg5+J//gCherIScv3jpCmLHjINaNT9YuOTEF/7RfcqzALLEPvbm2y1jcWLGlsQBpqkePXpJHQqCrzzeIGH8BEiZIhVu370vRRYuXCTnFy60CU+0zlDgmGx33eOzDBQwEJYus1kp+vcfgN/9/Yb99iD00aPHiqVtid0djrFHPD92rC35Qft2HfFHoD/EpYsXpCDo/7ffsWnTFtNF6/f8hYuIGyceunXvaR3zzkbdui4IFCAQ+F7zjwJgvDjxEDdOXGmX2zwWO2ZspEmVBtH/io42bdp75xJaVhFQBBSBfw0BFUD+Nej1worA1yFw7fpN/B01mjBWMaNFF5cZMh1kPsigyV+sOIgrDEpcEU5ix4iFCGHCiS8/GU3v0M3bd5A8aXK5Hi0qXtHbd4ymACiAZM+WA2XKlPOqmBwjc0xGc9jQYbJvtNLV7VaET1Vs1ao1QgUPIfdLYShK+AiIEDY86L7kmQrkK4C//4qGF/Z4AJ6nlpoCCGMGdjhkQWrYsLH0p7VbW2mGWbJCBA0GxkSQihYtgeB/BMOBgzb3pVNnzooLWVm7xeTsufOIHSsOWrm1xZ17NsaX9Ro0aCTM7oYNm8SSQfchuvS8fPVxWtYnT58hcaLEqGgXguTCEk9hi4uoU7uuxJYcO2YTIO7cvY8qVaojSaLEmDt3HkqXKoO8efNbmaM2b90uQlLbNrZ7IrbBgvwhWni2bQSQaQ4xIBReBgwcLIIDhSWT+YlM/O+//Y4x9oxjtGDx+bVs4SbdJB5/BAqCksVLyj7HSPx4CSx3LXMvX/o1FhDjgsXyRgAZN84WFxI/bjxxBTNt9eptiy8aO3Y8Hjx6gmRJkiLG39FBIYg0d+586euChTYXLBFA/H24ly6duyJIoCCglY/Ut08/sfbRTZA0ZqzNOrZ48RLZHztuogiVU+2xHG1at0Xg/wXGytVr5Xyvnr3kvLEeyUH7PwqwxKVjpw/xUo7nv7RNASRo4CDyXvPdlvc9Vmzbex47rggefC8olEcMG07umxZGJUVAEVAEfgUEVAD5FZ6S9tFPI3Dz5m0kTpBIGIxJk6fh1es34vZC15fbd+x/3L57D/cePATTlVKI4D4ZZO9+P4FgM9sVmc6yXggWS5etAF1lDOXNmw8lS5Y2ux/9so9sq3vX7tY5luex0WNsjCZP8JshDJw2gdl0U+IfiYHRtvJeZ3Fq0dJNAuFpoXAkZt6KGC4CbtkDy9k224kbNz4uX70mRQ1Ta4LXKTiUKF4SRshigDmPlSpRymqacSUZ02ew9rlBtyC2TTckBoaHDhESBQt8iGdxLPzuPZAkcVJUqljZ8TBobWDsBuNoDF27ZnMX4v7I0WNRuVIVMC7Bkdx37kbg/wXCgP4D5DA14qGDh8S1G7a6G+1B/yYNr7FE0To1Ythw0fI/ePhI6jKWgfcxc9YcweDe/QdImCARcubIJeeNOxxjX0gUjpImTobuPZw1/RcuXvpkulzW27lzt1xnwsQPcT3u7rvk2OxZNlexODFjgcKlIQbgs2+0tpGSJKJVMPEHQWzzVjm/epUt61WXrj1kf/jwEVKezyjYH8Fw/4HtXkeMsFnDrtjjbCZPnirlN9vjP44et1l/hg6xpdWtUqkKggcNjidPbXEdQwYPEYsJXbU8ExMNpEufUVzaPJ/7mn0+j1u378r7LO/53Xsyjvl+M97l5q07Ms6ev3wtgixxoRugkiKgCCgCvwICKoD8Ck9J++inEaALViK7AOI5A5FvAkNNeOxYcVGiZBnJftSta3fMnjMP3Xr0xtlz58S1i8HkEcJFQKQIkcCUutTsOxJjNipXripMHX35R44aKx+ce/r8Jf6MElWOuzZvCTJb5y5clP1ChQo7NiHbzGBEBmvosOEfneOB/QcOIXmyVJbwsm2bO5q3aCXCBwOqm7m2wOAhQxH97xjiLma0/az7+MkT1K3XAKXLlJNvlbDM/gMfvgvBMjVr1UWDho2ta5ctVwGtHdxdaKVKkyad9LFTl25yn+wv7/HgIeesXmyEQfF/BA6KUCFCWtp4HmeK2mHDR0k7+fPmx8BBQ7BshY2Z7tdvgGjTGa+wefMWLF22HPxmBYkZuqhtpzvV2AmTRWDg9fndFLqiNW3qKm2WKV1WPgRJVyOer13HBa7NW8l2K7c2cn0KDrTwpE6VFjvswfsnTp6SgGvGxjBwmwKfIZNmOU3qtJbViOfoBsZrDB5is3yZ8rQ0zVuwUIRWnmfCAzL+/A6Ii0sDqVOtWk3Mmj0PoYKFFAsD75dUtmwFOV+rZh3MX7hEXMH8+/tdLFzMwlWxYiU5zxgafhhz0OBhqFGrNrp164nWrduK4MnAdNLNW7dFqGIfmjWzBc1v3LRF6lcoXxH37j8UAZjnUqVMg7ZtOyBRoiTo3ccWNM/6OXLkkvLNm7f8yNJFjKNHj4nbduFXLupL/+gex/vgc1ZSBBQBReBXQEAFkF/hKWkf/TQCFECMBYTBwz+Sbty6jQ4dOknGpCZNmmHWbFtwL/vAbzm4urbEiBGjMHjwUHTu0s3SRJs+jhg5WoLOmeq1b9/+4g/PjEokZg7q2bMXGjVqKvEIZBh79uyNZctWmOrW77nz58VdyGR1sk44bNSoWRt16tqCsKnxpwsLs2bRfYaxA26t26KXnfl0qGZtkrGsXq0GmG3KM5HRd/yA4d37D8T6ZMqRaadbELGYMXMWZsyYjZEjR4GpiE18gSnL39179mLU6DHo1q2HxNw4nuM2rUyNGjWWzFLmnMl0xTgTMpv8a9y4mRUQTcHEza0N5sydj8NHj6P/gIGYv+AfEU4mTZqCUaNGSzyLsXSwb4yNYDAzP8hHSxAzm5Eo0BEvE1DNY3Qz4gclB3n6yN6Chf+gV6/eGDhgMLa7f0hTu2nzFtSr54KsWbM7xcF4vHmD0WPHg8Irx0W3bt0xaNAQUIijRWLM6LGYMnUaxk+cLP3q3r0nVq1eIymGmeVr2LAR8kznzlsIM
vlDhw7D0aPHxYLGLFnDho8Azz189BiMV7Hdz0FJLDB9us1ywmMcvwMHDgHHKMcmv5dC4YjCENsxH9NkWQpy9V0aigAuDdrrM43x0GEjpLxn4Ttjxsygu9+PoMr276OoAPIj0NZrKAKKgE8goAKIT6CobSgCvogAGaF/SwDxxdvy8aavXruGvPkKYeKkT38rxMcv+oMa5Dc4unbvieHDR2L37j3yUUW6W2XKnE0C3n9QN7x9mdVrN2DEqDHf5Abo7Yv9JBVevnyJDh07o0nTH+cOxTTRagH5SQaAdkMRUAS+CgEVQL4KJi2kCPx7CDgJIN/xheZ/7w5+3JWZ8WvYiNFwd/hg3I+7uu9dae++/UiQMIlYoPg9ElqRGKNB9ymmaf4ZaeE/SzDDwWL2M/bRN/rEwHXXFq1+qNBVpYpdAGnSzDduSdtUBBQBRcDHEVABxMch1QYVAZ9FwEkA+cEuWD57Jz+utfsPHvy4i/2AK9E9iG5J2bLlAF3NChQsjA4du/xQJte7t3nPIUOYd+v+yuWvXbsucU4/8h6YmEAsICqA/EjY9VqKgCLwHQioAPId4GlVReBHIKACyI9A+de4BgPKHz1+jIcPH0lMxK/Ra+2lbyNgxYCoAOLbUGv7ioAi4EMIqADiQ0BqM4qAbyGgAohvIavtKgL/DQQsFyzNgvXfeKB6F4qAH0BABRA/8JD1Fn9tBP7NLFi/NnLae0XAbyBgCSBqAfEbD1zvUhH4DyCgAsh/4CHqLfy3EVALyH/7+erdKQLfi4AKIN+LoNZXBBSBH42ACiA/GnG9niLgTQTUAuJNwLS4IuDHELAEEHXB8mNPXm9XEfh1EVAB5Nd9dtpzP4KAkwVE0/D6kaeut6kIfD0CKoB8PVZaUhFQBH4OBFQA+Tmeg/ZCEfgkAiqAfBIaPaEIKAIAVADRYaAIKAK/GgIqgPxqT0z76+cQUBcsP/fI9YYVAW8hoGl4vQWXFlYEFIGfAAEVQH6Ch6BdUAQ+h4AKIJ9DR88pAoqAfohQx4AioAj8agioAPKrPTHtr59DQASQhInlS8fTp8/wc/evN6wIKAKfR8AIIM2auX6+oJ5VBBQBReAnQUAFkJ/kQWg3FIFPIUABJGG8BDYBZMasTxXT44qAIuBHEbBcsDQLlh8dAXrbisCvh4AKILwhfPUAACAASURBVL/eM9Me+zEErly9hnBhwokAMnr0WD9293q7ioAi8CUEqlWpJvNDUxVAvgSVnlcEFIGfBAEVQH6SB6HdUAQ+hcD9+w/QvkNnVKteC1u3uX+qmB5XBBQBP4pAieIlRQCpU7uuH0VAb1sRUAR+NQRUAPnVnpj2VxFQBBQBRUARcECgUeOmCBkqNNq0ae9wVDcVAUVAEfh5EVAB5Od9NtozRUARUAQUAUXgiwg8ePAQFy9dwb37979YVgsoAoqAIvAzIKACyM/wFLQPioAioAgoAoqAIqAIKAKKgB9BQAUQP/Kg9TYVAUVAEVAEFAFFQBFQBBSBnwEBFUB+hqegfVAEFAFFQBFQBBQBRUARUAT8CAIqgPiRB623qQgoAoqAIqAIKAKKgCKgCPwMCKgA8jM8Be2DIqAIKAKKgCKgCCgCioAi4EcQ+M8KIC9fv8at27f9yGP8+DbfvX+P6zdufHzCG0cePHwEZlf5Gej6jZt4//79V3Xl2vXvu++vuogWUgS8iYDHm7e4eeuWN2t9XPze/Yd4/OTJxycAnDt/AXv37ffy3L918MbNW3j79u13Xf7mrdt44nDPt27fgccbj+9q07uVfeI+vvaaP/JaX9unH1WO8/fr169/1OV8/TovX73G7Tt3fP065gK7d+/F6TNnzK7+KgI/LQLeFkD27T+AoUOHYdmy5Zg0eQpGjxmLUaPGYMDAQTh67PhPcaNcgDNnyY5Ro7z/1eiNmzajZ8/emDhpio/cy6lTZzB23ASsWrUas2bNxoiRozByxChcvnLVR9r3qpGr164jQ4ZM6N69p9PpV69eOe1/bmfuvPlIly4Ttm3/dz98R8alYaMmKFiw8BcFkNevPVCtag2UKVPuc7fm4+f27T/o5dh/6Q28v7ZTd+/dx9p1G3Dv3r2vrfLJchMmTMKgQUPQr98APH5sY2jZ/ps3b7ys49P38+rVa/B6P5LuP3jgxMj+qGtTMEiTJj2GDBnmdEnvvJOsOGfuPKRIngrHj59wasfsLF2+Av0HDjG7/+rvu3fv0apVaxQoUOibBZA3b9+iU+cuyJevIG7ctCmU2rXvKPseHj9GAHn46BFq1qyNqtVqwqffAc8PiAqf6tVronqNOiDj+q109+59TJ4yDUuWLMPOnbsxa9YcjBo9BuPHT8Su3Xu/qtm7d+9h4T+LsHnzFjx79vyr6nxPoafPnqFBg0YoWbocqPzyLeKYunPnrm8179Tuzl27kSVLdowbP9HpuE/u3Lt33+n59O7bH/MX/OOTl9C2FAFfQcDbAsjGTVuQN19BJE2aHLVq10Wbtu3h6toCtWvXhVubDqCm6t+mVavXIk6c+ML4e6cvO3fvRbcevVG/QSOkTp0OxYqVxNPvnHgPHjqC6jVqI2zosChVsgy4eBYqXAxp0mbA6jXrvNO9ry576vRZRIwYCZ07drbqXL1+A48eP7b2v7QxaPBQxI+fCDt27v5SUV8/T+Ejc6bMXl6Hi8nFy1fk3IuXr5A5YxYUyF/Qy7KfO7hh0xacOv1tWqO0adMjb94CVvMPHz3Ghk1bvygwWRW8sTFm7Hj54vGateu9Ucu5KIXK3HnyoWChomjSpBlcXBqiV6++aN++I5YtX+Uls3jl2g28ePHCuaHv3Lt24xa2u+/6zla8V33Hrr24cPGS9yr5QOn9Bw4hfPgIGDhgoNXalavXvS0MDR48FJEjRsGpU6etdn7mjTKlyyJLpix49+7dN3ezVq06yJUrj8WUVqxYGWlSp8WbN99nVfnaDlEoKFCwMCpWrIJnPvwOeO7DvfsP5F4rVqqCV6++XcCihahd+04IFyYsggQMhBIlS6NT565Inz4jggQKgmnTZnq+9Ef7N27dQfJkKRAxQiQcP+n7441CB9fcUqXL4tHjpx/1x6cOPH76DBs3b/Op5j7bztJlK5AgQSJM8iGFplcX27zN/bu9HbxqV48pAr6NgLcFEHbo0uWrqFXHBfcePHDq35mz53Dt2vVPMl6OZng603zOoYbaM0NfWryo+fZM3Xv2wfARo50OO17f6YR9Z9v2HXj23KbpefToMWLFiivWHVOWzO6X+mLKOv4+f/5CJvJNm7dahwsVLIxof/2N1x4fa5t5Hc/kuNh6vg/P+6xL60eL5i2lGT4X1+atQI3Wp8iz1vvZsxdwadgEO3Z+nkH0rBH0rJX07DZF1zA+W0ccX39Bk3n02DGULFkGb71gYqZNn4leffpbt7V27TqQQSHxWl6RZ3zZbvWadTB79tyPinuFrek77433QuaSlkES9xs3cUW16rVk3/H+PV/3o4t94QCf0cWLl0VwffDw4SctFZ9rZsvW7QgUIBDKl6/oVIyWv5w582LGrDlOx7lDk377jp1B
i8WX6JUX7yLreH7Gly5dRstWbXHg0JEvNSmYfqqQ43vhuczbt85M77z5C9C+Q2dQUPUOcRQ5zkemruPz9DxOPO+zDgW8zp27SvWDBw+jddv2eO6JoeV1PjFsrXemUqUq2LJli+nGV/2aMfs1hT1j6jAVO1X3/EydTtp3Dh48hFKlyuLFy5fWab6Xjvh8qZ1r16+jYRNXHD9xUtqg9adE8VKW1c407PkeHd89U+ZTv16tIY5lt7vvQJNmLUDlgmf61PN69Qk3ok8dN+1u3eYO1xZuePr0+5nwbJmzisJi/YbN0vzzl68QL1YcObZ563ZzSevX84zp5tYGMaLHxIkvCLyeMXDE3vFZWxeyb3h4Wv+o8KrfsMlHFopPjRGv1k/P13Dcp+silaar1nxZgePdtnkdDy+sx1269QQVR45EnB3nD8dzXI8+dc7z8cmTp6J3n/548/bdJ/kuts132vMzcrzmp9ZKxzK6rQj4NALfJIDs3LVHBBBHs+zTp8+kb1z0aapOmjQZli1bIccaNGyEXLny4ey5C7K//8BBVK5SXbQxffr0k2MvXrxE/wGDQEbo6LETyJY9p2W2XL1mPdKnd3Yp2rNnHyZMnAz3HTtlgWvQsAk8PD4w7q1at8OoMR9csE6eOo0yZSsgXboM2LjRNhnLhT/zr3uPXujYqYuUoHameMkyGDlqzGdqeH3q4sVLSJkyDXbv2WcVKFe2PCJFjOwkgFBQadCoKVKkTO1kvTl95izKV6yCmbPnisaUbis1atZBvXr1pb0TJ0+JBr5v/w/a1ZYt3dC9Ww85X6VKNQT/IyiyZ8+NJUuXW30wG+cvXESNWnUwafJUXLx0WQ7fuXsPzVu2BoWmHj17I3ee/Nh/8JCpgidPn6F2HRfkyZMfjRo3sxZm9jFNmnSYPNnmwsbnmy59RgwZOlzqUnNP9zhjWVm3fgPy5C2AgoWKgDgZovtB3XoNUL9BY0ybPgN16rrAM0N5/uIlxI0dD5EjRESVqjVB5nfduvWgxvTqtRuoU7c+SpcqC7pQGJo9Zx4yZc6K4iVKW1rk/gMHI2yoMEidKi3GjHM2lR8/cQoVKlRGrtx5MHTYCGmGbeTOnQ+Hjx6X8VqnTl1s2mxjCAcMGoLgQYIiQdx4qFi5mmVVcd+5G4WLFEee3Hnh7r7TdEcWpvz5CmD5ilVYu34j8uTJh8GDh8i4oBUqS5ZsWLBwkZS/c+ce2rfrgC5duoJMDJmh4sVLolTJUtjuvhPduvcSdxdue0W379xFkkRJ8Lu/3+T5eS7z4sUrrFq9zsm3nsx60aLFETpESBQoWATuO3bh1q1boCb+8JFjKFiwCKZMmSpNzZv/j4yTVm5trKbJFNKdsWSpssiWLQf27rW9A9TwBgsSDLly5bVcyrp264FLl6/gn0VLZW6gSyfnFY71nDnzOLkDcu7pP2AIMmbKgvETJ1vXo5AxceIkmWvKlauIqlWqyTlaPSJFiIxIESKhVu16oI+9I5HJ7dO3P86du4AJEydZ7z1d0ziOUqZKg5kzZ1tVDh46jAoVq2LRoqV49uyZtFexUlW0bNFKyuzesxd58ubH8OEjrTqNGzfFsGG294BMOTHl816/cZOUYZxTlarVxVVr8ZKlVj1uDB85BoULF8OChYvRoH5D7Nr1sWWSmAwfOQoz7ULkmTNn0b17D3B+GDxkODJmzAJ39x1O7XKHgi3fS7rs7NixS7TQrdu0l3L8unaOHHnQq1cfqx6ZS84VfIeyZ8+JrVtt2uTDh4+A7xK1771698OixUtEMC9VqozUvf/gIcpXqITixUvhwkXbPHPsxCl5vvnyF8TJU84WSNfmLcX9cuasuTLHcGyQtm/bjpIlS8v2shUrkTx5Sri4NJB3ggfbtusoc5NnYYHjn3MZXSZZvl072z1yLipeohQqV6mGZ88/WPmoXClbriJ69u6LiZOmgusBGTm6lZUtW8Gy9rdv3+mjeab/gMHIkDEz6tatbylC+C5wPi1arKQ85xcOLppc/6j579tvAMZPmAy3Nu3w/MUHoU1u1v7v3oOHqFK1BlKkSIUOHTqBdb0iMrlZM2cVhQO15IYSJ0goAsiuvR9ihRifVK1aTSRLnhKzZn9QQjR3bYlECRLj1Jmzprr88l4WLVoic+Ldew/QtFlzlC9fyVrP3Fq3RYkSpeXeHz1+glKly6FVqzbgNonzCp9Tjhy5MXXaDKvtFStWoXbdBk4xE3PnLZR5hZg4rgHzFvwj9Wk1MWODrs3Enm7Ybdt1gOOayIu0btMOgQL8Tyxos2bPFSG2d+++4kI7Zsx4tLWPiREjR8uckz9/IaxYuUr6R1enjh274NjxE/Ic06bNAPbXEOerHDnzYPqMWfJVes4LpFat2zqt52fPnUfJUmWQI2dueY9Mff5euXYdteu6SDvLlq+0TnH+qufSUNy5mjVrLsc5BwUPGgJRo/wpSi8+QwpY3Xr0kvXAVN68ZavwB1yr12+wzTU8x3Vs9JhxoNWtdp16qFa95lcpmUy7+qsIfC8C3ySA7DtwSDTGc+bMEwFg06bNoIbFBEZu2LgJEcJHRLVqNaR/HTp2xtDhI2TSIbObr0ARWSTXrVsnkyjdtg4dPopo0aLjr6jRMHb8JIkxSZUyDZo0ccWNW7cxevQYJEmSHFeuXhNJnwuAP3/+ZELhYszFuXoNm9aZF23WvBVGjbYJIKwzYdJUnDt3HqtWr0GBQkW/GKh59eo1jBw9ThgStkeGj8IRBRnvEpmd5MlSomOnzjhw8JAIWFyEpkydZjVF5mHS1BnC1B06dFgWKTLnJOLBibVX777iwkUNydSp05AgfiIRSKhF5cRatnwlPHli05q1aN4K7draFtj58xfYGdl/cPWqc+wJGbxlK9fIBM7+ZM+RR/DlNerUcUGBAoXlGdet6yILlOkwBT7G/+zcsVNc1uLGTSCMx42bN1GkSFHUsD+L/fsPIGSIUMIcs+6tW7exeMky3L//QASaaTNm49Lly8KIV6hU1TSPDp26YvacuWKByZQxM4oWKWqdMxv0GXZr1RqJEiYWwYN9JmMUIVwE9OjVF/S/zZQxCxYstPnDjhkzFgkTJsH69evRrVsP5MtXQJgv+kTHiRUXPXv0Apk2R6ILXrduPWWsNbdblM6dO4fBQ4aBDNXpM+fwvwABUbJEKanGyf5//gOgfLkKwuwR34kTJyNO3PhyLzWr10CoEKGsBXPRkmXSdtQoUbFw8TJxZQz42+8oUaIUlixbKYJM7JixRSPG+6tYoZKNebD7cVMg4XuQO1ce0I0sRNBgKFa0hOMtWNtccFm2SJFin7QC0ILlqEmm9nLa1OmIGyceZsychfMXLqFokeII6D+gCEzU6g8ZPBRbt+8QofLihYto3KQZOnayafpnz5mPgoWLg5pwWuTIlJCB3bN3nyzw7dp1EOa9TZv20jcukmQQu3XrjiJFi6OlWxts2boNdWrXtcYfMR09bqIs1tSGk0mcv2ChaAFTJE8p7fD85s1b5fnT5YzCfdeuPcRtlM/IM9NGATlM6HDo3rM3Ro+ZIMoNCnzTZsw
unpouKVPPbuk3Y1qteUQ4zzCBwwECqUr+ioyvPGiRUHeXLldpRxYuT9iBQ+IoL/GUxctMiE0C/bK3FBT54iFbZt/9AsTp9918rVkDhJcly7cVPcD2hVYewLGepixcsIw+e1TzLUOXLkwrRp05EjR2707t3XaxXxJV6xao0sjF26dpfFhePmQs17QIZ6x67dKFGilCyO23d5YOHCxciQITNatLQJwVz0c+XJj6HDhmPM6DFwcSkhwixPtnXbNhGU6MJGOnjoMAoXLobT9kXWOqC3b9+B7gHhw0VA/gKFwOeSC2POnLmxYdMWbN6yFZkzZ4V767aOZoMGD0XjJs0xZfJUGSOZC7NIbNm6TYSdm/bgw0lTpmH0mHFYvHgJ+PwbKxiF4YGDh8kCSmG/StUa4uIwYcJExIkdF7dv35YFnkxKmjTpHQxCly5d0bBhYxnLjRs30KNXX4nn4rtYqHBR7NplY1gdgwUk+JrWO7r7Ucjn+1Ojps0SMXzEKCRMlBR9+w9Crz790b1HL2Fuc+fKI11QWKdAkC9/IRkf3QXatu8kTO20aTPESpk1y4eumxTAKlVyFXe0qdOmS19jxozDv9H/wZChI4QZ/O+/1Fi7boMc43OSMGESjB8/AVWrVgcZfNLsOXMRL048mR/I2FuJY2vapJkIORzL1es3UKxocYQLEw4LFy9Do8ZNQW0dNYFRI0d13KMRI0aJG9qoUaOFkc2dO6+DGaT7QYNGTTF58hRh2DJnyiKKE8q9R48eQ7cevbFu3XpUr14DVOJwIW/WrCX++y8Nhg4fhS5de2Dc+ElYu36TCCJDhg1HhoxZP3j2KNTTnYGupmvXrcf+A4dEiKpWo7bMB7du30aSxMnkunnNnGf5bnTo2BUrVqxCnlx5QNwpoJExSJs2A+hSQTpz9ixat2kvAiVj2KiMoBsiadr0mfjN32/o3LU7BgwcAjKcVk02BVJarSZNniYYZMmaQ55ftt24aTO69+iNVatWoVy5CnB1repwH6tSpZonIbVhwybo2Ml7FywqDah9nTplKpIn/w/jxo6XsXFN4jWSmVxCpVSOXMKA8SAF7Zat28k1URBLmy6DuA7z2Ny5C/C7P//yzq5fv1HeDx6ntZixjkmTJIOLSzGQkSbxnapeo5bE4vEeFizoAjK5dNHr3bc/1q3bIMqTVq3bSxnnhGh//4sxYydIe77PVCZwLixavDSme+N2e+LUGRFwKUgtW7YChQoVEWaYHTDeMtAfQTBlqs06+OLFS/To0RuLlnxoiabSa9SoMVi8ZAkKFi7qYPQGDRku7/K8ufNE4Jhnj7/h+08lkYl3W7dhEzZt2Yb169bL+TvZBa7Ll6+IcmPq1GkYOGCQzHE7d+6Sa8+VK48Irlxz6OKYLGkKpE2dVq6dMY/hw0dEW4vihesB8aDwtmbdBhEEGVdK92UKI6Sz586jdl03mYv53nJOTZkyFe4/sLlWrlu/QYQLo/iRRoD0mSVLNvD5mjF9JtKlz4i1a9fL4fTpMuCP3wPKPMp5IlWqNFi/foMoVnr27CP7PXv2FvdPxnPyHZkxfQZq1qglQrQRoPv1G4h69RuAWHCezJI5G+bNsym0Ro4ai169+2H+/AWi7EqcOKko8R48eoTo0f/FP3//i83bdmDMmLFIlDiZ4z0nP8Vnee2atTLWtes3SlzQpk2b0ax5SwQKGFhiNw8dPiL1JtqVqvSiCOD/d7k3FAD53Nar/97iTeHDza0RGNdI129iY1zDDWbmlwpIjmHQYM/uV3SHpVC6d/9BRI0URZSmZ86eN80cv2XLlEOEMOHAIHm6zv3z9z+g2/aJEydx4cJFFCzkgtWr12LsuPGYv2CRQyB1dKAbvxwCX00Aad26DRLES4hGjZuBkj0X0LH2hcCgNnrseJmIDx85juuWTEhdunTDsmXLpRq1CZky2GI1qB0LHSoMRo8ZK8fofxk6dDjRDrKAzE4a+0RGl6AY/8YUZs+cL2eO3GjRwl12ZXIvWky2qe1p1bqdCB+3b99BiWIlRDgyTJ9pT4vCth02JoWMZYFCLrKwm+O+/ZKB4ctKjbOVyLxyEaP2f/+BA9i/3xaINWOmzQVLBBBLg6pVqkk/ZOQoIFBDGyxwUPgkgFiaetqkKT3Ab/5RrVoNKT9+8pRYVkqXLO2o9+r1G8SJGRu5cuRylNHl4NjxE6JJunf/oWiFeF0FyEA6atk2qCEm8+sdsR9qeFyrVMetW3fEncmlaEls3LhJqlOQ46JipXd4J4zFlSu2YF9q5clUeCVqbCq5VpVniJaeMePGi8sbJ1VaeajVInExCRMmHLZsswml7dp1BJlFEgPl+ttd2rhd1CKUbtu23ZMAwkmei7JPfuCcjJMlSynMLPvmwhsmdDiYmBu+BxS4SR579iDlf2llEuY+F9OQf4VyaHv5rFDwpZDLxbRKtZoijHHBJxMZP258aUPtuGEYOX73Vu1E2Cc21G4ZYYmCY6069R3PT+fO7wUQWlBovTp46BDI8HOB4ALmlebNXyRMtSmnRYUWIZrhL168hMyZs6Fd+47mMHbt3O3J/XHHzt2oa18EFyxYhFSp0zvqMviYsWRWy5o5SEVF9uzvhWMyLTFjxAStIyQKmp072zTQtPqwb1KP7j0RI0YsMIsSBR4Kst4FS7LuiuUrxWoiDQF5X5InSwkyCCQyUGROaJEzMS85sufE0KG2RZnCasKEiXH8hC0pRImSZdC6jU0jyOcgatToYjFgX1279wKfQWI2ZMhQBPg9oLxrFKLousnATRJdKuk2Zq6HDKd34yejW9X+frPd0uWr0KZdJ4clo2fPXihpf9/79x+IKJGiYtUqm6a0d59+oqk+f8EWY0CrFV0tSVQE0UpgiElHGJ9GzTC17bTGUdAlPXv+wpMAIu9OEdu8y+O0dFOIJrPGQH5aDNhH9+69EDxYCIeF0qsAQuHPJwsIExFct1sIKRzVq2dTYPEZiBYlmmDKc2/dvgP//htLnv3Ro8eiYkWbNZjHmDAlbpz4olghtuynd+/3Vub48RLKPWLdggUKg4ykaRc7djzRwnOfFrxoUaMLHhRAkiZNDmJAOnToMKiIuXzlilgoqKkm0UpJCwBpz559DiFaCuz/jh47IRausWPHSQmficRJUmCGPU6Gguoc+zxH66Zhhq19cLtuvYYg00micsNjzz7QitW6bXvQTZnrYfZs2ZEgfiKpc/ToUXm3TTIZrmd8Vqmw6da9h6xNe/ftl3NbGVNaMLjmkmrWrucQOmkJKFashKd5ZfHS5Zg2Y7YInxReadEkUelSrnwlrFm9Rs5H5jl48BDyPNN6U6deQ2zabPNIoMWlStWajsx0TIhiVeRJh4Awutb7nipVamHAebxNm7YS27fbY49U79CxC5o1t/EPl69cQ2GX4vKs8mD7Dp0czwOTeCROlEQsW7RUZMqcXYK2WY8urnHjxJP5jM9Gzlx5cfbs+0QC/6VMJYocPitUbFAJYBJg8Nrn2GNO6XqXM2ceLF/+3sIvgwRQukx5UIFJ4n1KniyFCDDcnzd/AcKHC+9I4MB1lvGm
JFr002XILMHh3G/Xtj0SxE/ouEapZPnXvFkLud8DBw+1lNo2TYY6Wm3IH1itVaYyvS/Chgotzz6TSlBZbNy1yGNQiBo08L0F1TtFqulLf38NBL6aAEINI9056PZCV5rxEyaJ5s4KEzVIfDiNFt56jIzW7LkLhMHOnjWHHOJLlyhhEuy1a5qojWKMA33OSXQF4EJhiFoQq9DTqWMnYWB5fM2atY7Fl8x3w8bNxGzr1rARKlWsjO7de+KxF0sGTY10xVi0eIlo3tOmTQ+3+g0lhaA5p0+/U+0CSKlSZfH6zftgVU60DNj/J1p0UPvBxZ1EUzSxycYJyNKpsYAsX7FSJtR8efMjUICA4ILkEzFVI6079KEeMngoVq9Zi/37DyLgb7+LtobtOEEH+SMwyper4OiGAkjcWHE8CSCOg5YNxpIE/zM4rlx97xa0ceNmsRp45z5gmtKPtp5bYxyzj50axPDhIiJn9hwfYG/a8Hfv3v1YsmylaPLJMHtHvO8BAwRC6v9SO9wFli9fCTIOZMhJc+bMFcaJjCCpabPmjmeC+2TSKdRS08Z7tGSpTShmxg8y4tT0kMjw0jpw5fJVcWXiZDt48BCQqaFr0ps3b1G0WCnJ9sH61MZRM+thz9hCZo+ZZUhkKGgNsJJxV2QZ3wu6CpA2bNqMDBkygVYwujqUL18Bffr0xbwFiyVAlVnovBKFdgYGXrpsw4CuOFWr13IIJGTqmtkzSS1bsVo0ucwWR5M+47q8W0gYEEu3HUN0j2SQoaGGjZtj1Wqbpo5lzIxXqlQZcxhz5y1E/QY2qwtdBDJkyiqMAStQW20soI4G9g0u5ukzZBIhgEVULjAA3hDdI5gAwxDnD2oKmbGMi/Kdu/eF+aOrkplTTF3zO3XKNNECGmUENZ8UfA1TwHoUQClY3LxpS0JApp8uNCRmiGECCUPUgNawWwtYxmulGykX1wIFi4iFhVrtihVdZV6k9ZhZgvhMUKliiC5OIf4KJZYKr4K6qUMXVD67hkaOHitZ56hIIbVq1Rp58+SXbVrQGNRs0piSAaam2lDp0mUxcKAtxozuT3TbMUTmj2NfZmeGaLHjPfaOWKes5dkwdY4cOSYutZxPqYGlBYPjM26OVavWAC1Khho0bOyjBYQubQxyHjdhsihQ6tW1CSB0i0ufLqNDUUIGkIIjLQ98x1rarZ88B90TiblREnDMVmavSuVq6GiPQaGrF62rpJYt3YWhs87xDRs0Ehc/Hh83brzM61alDsu79+jjYBo5X/P6Q4UI7WCopXPLP87f+Qu6wDDHPMT1iG7NJGqUmVyEREXCVm+s0DxGfGk9iRQ+klgPWLZr9x555/je09LE+WWgnRGkkozvmLEscF5g0hE+19yuU6sOPPbsRcfO3RzKNBmE/R8F0rLlKorrmilv3txdNPFmn26OFGIp/NB6QxdXEt2s6KpdqnRZcdGrUrkqGrg1lGeWLqxcS2hRNkSlIuciEmMajbutOW79JeNLQTh6tOhi2eSx0aPHeHoHGjRqIpZkHqNSKXee/A6rF8u2bduBdh06SZB+zhy55J3mu5Yrdz5QAUnq1buP4MVtrgM8RkuzIbof0cODNHTIUNSp837+opsbk80YqlipCmbPtgmtpix3nnwSj2j2+UtXda4LpFOnTiNxkmTiIsl9ukJmy5ZT5jNinTVrDiywv7tU6HincJKOADRr2kye5cGW2Cweo6sreYKFixaLtZB8TNo06Rzu8aY9Fb3ke5gJy7gIm2P8peWLbfPnKwDyIkq/PgJfTQCh6XnSpPc+1fR3pX8xmb0b9oV6z94D6NSpi2gLuFgaYhYoarCp1aXPbvr0GeXQ0aPHES9uAuze7SH7TP1KbRP9sUlMy0gpmkS/zdSp0mLUaNsExDI3twYOVwFq2stXqCR1yUB279XX00NOrQv9b620YuUqcd+gBpNuI3SnYszIx2TeoF9qnDhxESxIUNByYKWWLdzlRTOLGI8ZAcQaA3Lw8FExUf7151+4bY8ZoIaXL6lPqVOpiXjw4BH6DxyM6tVripBFf1i2p4WKiwuJFpRQIUJh0GDPcQR0T6E7hm9ETTc18adOv18ouvboLe4jpp13bmlk3OrWbwS6rhlivETxYiXEIkBzvFciA0xmlVq67t16SOpVr3VMPAP919lXlMjRREggs033I6P9X7BgIeLGSeBIM8wYJLqTkdiW2r3Bg4dKoCGZEWpFSXRDoPsWtZckWj7IgD58+FDiPKglI0NTr14Die3golukaAnMX2Azu2/YuAlJk6aQhZrt6QNcumx56WvmjJmIECEybliCG8nMN7ELBWRG6ZNPWr9hI4qVKI0nzzx/f4Lmbi4qXp9LMs0nT50S9wCT+pmawvoNmjiEsh49e6G1PUZkztwFwsTJyez/7j985Ai+N+V0ealhd2liGRdJWrZITLfZtHkrT2548+fP92QBYTA5M+eZZCnduvWUhY9zAwV2vufeUfv2HRxzA49TC0lriaE69dwcuDFtJK1CFEK2bt0qcwMZZ5r/6arlUzYbuh9lyfI+DqtDh07iv27OwV8ygfQvN5ieOXNO9uvXd5P5gsoXQ4xjI9Pbtk07iUNgrAPfDVpPxAJoiSnjonvlyjVw3ilesgyodSWZuAXGhpQvV16EYzI/XolxC4yvMTR95hz07vs+MUjbtu1AwYI0ffpMwZIWN1L79p2QOUs22eY/WkBMnA5jO5hpzxAFLzJSFIpIFHoo0HhHtLCQkfJKHH/zFjatv/WYsSpRoWVcqXi8eQt3dO9hY/qt9RlPwaBwxk/w/WzSpKkIzqzD5B3UMJtsZYx3oABL4cerBcR2TXkdTDTjUhZZhCq+/2bOpeBmrKWdOnYWi6WJReR5KUzSpYz0+u078cenK1/efAUccxFjpmbOtrm4GZe1bl27i6KCLlJeickoqD1nnALpwcPHovAZY/cy4LdpOAfRasZ4QZ+Yb1rySe4t3ZE0WUpxu+L8McCLW4181+fVKzBFfImSpcVyyHbVatT6wPJLt1+3hk3Ewi2dW/7RaluiZFmHoo2HKOQwHsAQ5ykqNYxFyKzDVFBVtwjvpj5/6RJGN1Mr7gAAIABJREFUCwgVNIbI4FMwZiyiT0I65yfON92695JmdWrXdSSyoDsr47iM5YRzsJtdUbJl63Zx0TXfbqHCi/wG3R0p/NL1kWloSbREUwiggEt+wawDtPymSZvBYZFj3Xr13CTulds9evRELcv1MtmMub88zjmH6fwNMR4pdeq0H2S95LM7bJhNcKGHBd2iTSwK57dMmbI41jK6H9IaQesG51JaN30iermQ9xjh5fmkZZoWUsbvUrmWL3ce8bY4cOiIp65KliiFf6L946nM6w7nlNgxY4tL3T17PKDXOrr/6yDw1QQQatJp4ieZCZVMeJ9+g8R3lq4xh4/YXCWozUmZMjXov0jKlTuv5BvnNl/IuLHjSjkfRvrSb9iwUfbpb06XLGN2JcNCTTWJGqj/UqQSjSL3KWGXKFXWEZRM5pOBgyROeAwoZ5CsIWo4fPuOBft3a9jUU+Agr9NM6KY
f6y+ZvUgRI4NWC6PZ4/GWLVrKi9yv//ssVhMn2YLLU/+XSrqgqwgzL/GFN1YSHiBTkj59Bim3Tk7UAvLlX7N2gwN/61i43bVbdyRJkgxMqyexAXHiO4J+efzl6zcIHyYcMlqsSixv1rQFyLwYWr9hE7bZg1nJTDFIM1/efBg1chSqVa0m/rvefaSNcSiuVWqINp73gG44fEZItGQZNw5zHjIUCRIkFlc5ljFrRk17DIupw18KIDSXGzcBaor4jHBxihAuosNFhy5ZEcK/369Ff+0CNm11n74DULSYLUsaBYxw4SI4zN8UXunO57CAHDmGXLny4thx71My0qWGvu5Tpth8shlYGJ2+vXZtXZcu3aU97wMnWWoyy9tdXCgc58lbEAcOHJJLpJsEtVzU0NKdqmz5iuK2woNkvhgoS6I1kIGLrEfi88z4JcZ+cMGh+wdp+IiRCB7sL3TtZmPmOnfq7LCKUUCka5hVQGRsCeMHrMS+EyVK6nCb5EIbJEhQMJCeriGMeVix8n0CiBXLV8jCbjTxjRs3RZjQYSWeiszG6jXrxS2pU+euoLDmE5HpS506nWh4T54+I+6W2bPZrKVsQwUDM8GR+O4YDTd90qNGiSbzEGMSqCnesGGTpIskhlYiY877QXcdxjRR+58xY2aHpYB1t23fgdy58zlcFaipnTTlvRbQ9Mf5YcdODyxYtAT9+g+UoFKj+eMx3jM+cwYXvh+cM+i6kjlLDoegzLmTvvu3b9sUJPz+gWHWzLn4S3eLMqXLCQNK7SZdDZlBjYLyqzdvZB5iADW148xolzLFf45r4D1hbINh1qkJNQw3Y2UYT8fvDZGIDd8xYsfroEbcpMy1jofbdKXLk7cAhtjTdtI1aOPmrWA81cBBQ2XOMm2II6+VxPTKgywB7bSKMQjWK1FxQAaLQfWkVP+lRsniJWSbz2OUSFEcAbmM1aDigGO2uUWGd7yjFIjKV6jsiJ8jM8bYLRLnkWzZczkYb8YhMt6G7y/niuRJk6NCeZvAQQHStQqVM2ekL1pKTcIJxkaZNadJ05bgnE/q13+wxM9wm5gXLvSh2yOtGjFjxpH4HNajoq6yxHlJF/KPlu7f/QfEgoWL3xd62WLMj4nNbNSoKdwa2OIBGK9mrD9swjg+CiEHDx6U+YdxnCQKgm3avE+UQcs0YySpFOS1mmQZvI+cL3m/mYSD3+E6eeqsWM2bN28BWgystHf/ASRIkMThEshjtIhQGWN91idPmynlnJMY32QEMtan0rNylWpIkzajvOvW/s32oUNHECdOfDBgnsQ4D1pWSHTVZBp8o32nKzmFOhLvZ9ZsOUUZunnrdgQOFBSt3G3PI92O/o76tyhquK6dv3hJXNLommriS9gH33O6fxYpYru/fA4Zq8f4FhKfb6MQ4z4TaJi1n3wGhRkmMyDRGyRFilSOOYLXw/tP8TJb1hzoZXezo1X872jRHW59w4aNkFgwJpigKzrnOCbAobuiWZ/kBN78c6vvJnwH31tDvDf8PpiVGOtIvoX32CgHeZyKzb+CBhPh2Vqf21wzDV/nsXc/IkeO5rDQEVOlXxOBLxZALl2+CreGjcXFgZM2taPMTkVXiBQpU8sLSOadfpEmnSxfHjIg9JM/e/6imD7jx00AfmejfYfO8r0O+mmSoWCwJ4NkyahQIxDyrxAS5Hv5KoNSKyNokD8dbiKurpXlvLRY0G9+jj3wixNhqZJlECJEKEfmG7pgUFvCrB90x6IPIicEn+jO3buicVlqybJEjQuZLZO5x7u21MRkz55TJg9mXKG7BSe9YcNHOBhmuqRQkxE7RiykTpVGAjnz5S8orjPepf198uSZBLvGi5dQMqwwyJYapFGjx4EZenwiMiUMWGPwHJlaZvEwNHHiFDExp0iWQsbAxcRMCMwGEiRQUFlISpYsgwn2bCScXGrUqicTXJSIkRAi2F8y8XjnQnPz5m3x540TOx6at2wl95P+qD169ZEMJ/xwktEgmTHxfnCRpJBJBox+4PHixv8gSJO3bf7CRahYqbIEfs9faNPGsm3QwEHFPEz3P2bhCRsmrARHnzl3Qcz7fA6pkaclhkGqzI7UqUt3sRxRq8XJsHXrtggZIpS4FdLSRncU7jOLlleiBYqWlGhR/xatMTXMbM/vGpAJvXDpCoqXKCX+9h3sWX0onNJNikIYnw9aPUiMk6JlMFSoMOja1faxQGo2ixUrJUwx462u2bOj0WWI7gp8nosWKyGuZLzfpMlTpgkDxeB4BqozI83KlavFVY0xVDFjxBY3DNZlUDEXWvppd+7S3ceFnIGzFGaZcYtaQmYtYnBt9559kDRpMnELMMIlM70x2JhuFgxUprDId4JxSXSLauHeBu3bdRStMhkiCoPeLTpkSJj+mL7RazdslmxljJWgkHT81GkRHKJEjop16zfKWBiLQbeMHj374t9/YkgcBa+Rpv7ceQuI8Ob1nScTTCGsYeOmWLdxi7i/UenBMZHu3LuPatVryT0xWtQ+ffvJ+z185GjQYsJ7ZnybGVBMfMj80vee93eLPU0lBUrGrfB9pJuKiSniPEeBiUG2tPyRWVi3YTOGDRsp7l1Mg8kgZ69E33LeE/ZJ4YDaXiY/SJHiPwkEb9ykGajxPXTkGPgehwwRUjJhMUsf55sQf4WUZ4VZeGL+G0ueRwqlJMa4JEqUROYbxq4Yt0YGL0eOGEU+HOg14NeMjxYdWgDJXPHaDKNDwYjWQwrzzBRFwZbE9zFypKjynPL54PcX6JvOeYCWACsRm4oVKgnjyIB+ZsJjIo1dHnsETwpSTIZAgaluvQaenmcyd2TiqL3t1rO3QxlCxoxZlSh8tm3XUdwlrR+8mzt/AZIkTY7JU23JEBgzwm8jMKkFLXvMmkjiMzxhwmS0atNerJBMNkHF0phxEySrEl2G9+0/KK6ZTNrAQPbqNWo7mErrddLCwfTm1ILTJZN9erUY09OgTl03B77W9mabcy6VgrSUMXOcsRRQ8CeDy3mI98Rkx+LzyuefMSKc+yjA002Scwwte8wkaZ5FCny0jHFdpZBjfPiZiY3C06zZ83DqzDlJBMLYPKulkO82hXGTvcmMl+tyuQquMh/ROspnQdaxZi0RL14CiX8wWRXZhhZ9CiE+EQVfYsi5dsToscKrUBDnc8N4wOjR/hHlCb+3wvtDhZVJUEOPArosMRMZLWj8rgVjVjm/0NJGCwCJFiwKLrQMc61gjI+x6vAdZx9002zctAVoWeFzQqVAmjTpJI6V18z7FDZMOMmktWOXhwir5IXI81CxlDdPPoktYXIIWnppcWFKfLqn8/tgVHryPaeLY7A/g0vs7ZOnzyU4njwTrfAkBvsz3opzC9d5Pu+0FFuJQiTfcyooYv0bE6n+SyPfMeN7GiF8JJnXjYB64uRpSTaTIF4C/Bk4qLioLV66Qt6/dGnTI3q0v8Wt/ZSXby2Rv2KaY66NtDYyLohzGJU9NWrVxVWLu7d1bLrt3Ah8sQBCn3cy4lyoaLFgFgPur1m7TgKx+HLxQaJ0zgBs0vHjx8UdhZO6cWOgBopt6b
5CLcqqlavla93UPLBvanb37tsni8qJE6dEq8RjDNozPqMMsKb2kMIFNZyGOOnTnLtrl4dknjHlJ06elABlMtqGWTPHvP5yMmV8g/kWBI8zmJCMBhcH34gaFQb4rV69RnzEDYNp2jAbDjXdN27elkVw7RpbXaMtNfW8/vLr56tWrxZm0jfNsbUd4xNoEj5w0KYxNMf4IcKlS5aJhYVuKwx0ZWYKQ8Sa/u3MJmNSB5PBoXaQ7lxkjM+ePQ+a863WHtOeaUOZFYVpHekzbCwktFow4wqZMe+I947PE93w6NJH9xevC69hVo8cPSqawfv3balcOS4+c4cPHxVLA1MYcgHg88IJk9dEzR39ZEl8Fpl+l3Vohmf6YWpmiDPPe+rUKdGy8Tn38NjjcNewjpvXRY0x8eVzy1TU1LayDf37OdGyfz6jxMIQJ33GqJikBCxnW3lud3t4YryoVWZdrzEfFLSpcaO1j+6MVqLQTxcu63PONLJ8J5ipiddpiNfN+89z+0a8j7RKGmaFdYk37xUx4LUaomadzw5x5IJm3Bzo+sjz8L1lkgBmhGJAJbVo3hG/h0Is2QdxZNurV6+KVpT3e/v2HY4gT46DsUkUCvlMsx2JmmsubIxV844ohNDdifeSzwpjdwzjS4sf791uDw9xT6Hlkc8+XSMYBM73nPfl0JGj8jzRNWfTpi3y7vM55hzAVNQUeA0xToLvFjWkHBtdB4kjXf84txghiW5CzJ5jLHGmvfWX13jYLjSwnEwp29AN0Wh2WX7w4CHBjoIE51YyPWzLlOdsw20+49ZsdxwXYz2ssSkcJ79wzPvq27hoVaNLI2Px3lqy9JGhZZKINWvWOdx8mHWL7yWDm7m+XLx0SbaJh3fB91SEED8+c3yuaGFnYDuvgfEpvPcUuL273xw/g/tN8D2xYaKBchUqifWcz5DX+Zp1OD7jjsN9Mph0vWF/XonzDP3jzRzHOsSLLpNXrr5/DmgpPX7c9w8c8hmnFpzPnFdi/OXho95bZa11qbBh0hfOR1ailZRzB98NQ3zHOU8RA1oYSFyj+RzzufKKKa3F7MMIqKYfCp2cz/gs0wrBuZjrniFiYwKZTZn5JUZ08aMga4jzFp9PrinGRZHHOM/N8uNjwVQKsS+en1YePie8Hv7SAs2x02rFd4LPlHmfuD4YpRzPxXtIHMkT0CWMY+H8SuGXrnD08GC/5H2sWaHY96JFi2V9MddjYr+IC9+/s+fOyTrJdYQCFjNF8T3js8N5lWsgnwWu+4x1JP9AMs89lTscEy2DXNv5/vG+8V0gz8R+ONZtO3aK9ZJj5fPH9/D8hfdrAfskz8DngPwXFXmcY/ms0PWUzzDnO8OrMJ6HmUc5h548fVrOz/WP7yfnUgq8xNAIZOb6+UtegNYgurUZ4nPOa/RtbjF19df5EPhiAeRnumRqHcjEKCkCioBzIECrHdOxWomLztHjPidZsNb90dsMdKZlyyvZGJkr6Nvvw2MUmoxbjtd2uv/1ECCDRmvZ51ClytUcmYw+p/2PaEPBcL4v7lc/Ykzf+5y0Fhirzvc+N8/HpBXMWGclKsioLPnZiBnoTNZAMzYqQXxzRTf19FcR+BoI/DICCN0uaGpk8CS15kqKgCLw8yNA9wa6bNCnnv7vdA2ghdNZiBr9bDlyi2sjg1iZUpwuUNSQ05WHrh1l7fFxzKI0Y+YcscY5y/U56zjv3b8v3/j5M8if4PdiPpboa0+feLpmci2x+rB/bB/fu97cefPBzFLr1m+S1LXf+/w/+nzMItavb39xzf7RigtaEehizVi0vn37iXJi2fJVkuTkR+Pk9fy03jA7I2OdOFam5l5ud831Wlf3FYFvgcAvI4DQvaOVe2t5iXzyRf4WAGqfioAi8GUIMMkEF8EmTZqJAGKCsr+s1+/XmhpXftCSaSyZCc2aqYaj4Acb+a0K+ljTDUvp2yPAIGj6tzPDm0kG8TFnpQvc5KnT0KF9R4lZtLr3fEz7H1GHyTUYi/Cjme8fce08J11z27ZrLylvf9QYrOel+5ObW0O4u7eW7yH9jNYPM166GNar6yZB6IxJpIuWkiLwvRD4ZQSQ7wWYnkcRUAQUAUVAEVAEFAFFQBFQBD4fARVAPh87bakIKAKKgCKgCCgCioAiYEdg6vSZkvBEAVEE/EJABRC/EHLi48xM4VeGLie+PB26IqAIKAKKgCKgCPwkCDBVM9MMm48+/yTD0mH8pAioAPKT3pivMayevfqCX0dWUgQUAUVAEVAEFAFF4FsisG7DRvkWWKTwEXHBm1TR3/Lc2rfzIaACiPPds48aMfNsx40TD9myZP+o+lpJEVAEFAFFQBFQBBSBz0GAH5xm5rjAAQIgaMBAkhnwc/rRNv8/CKgA8ovea36l158/fwgeNBg2btr6i16lXtb/2DsL6KqVLQxzcbhwgYu7u7u7tDiUAsW1uLtDcXd3L0WKu7u7FXd3p9j/1r9PE845FKcX2rdnrTbJZDKZfEkms22OElACSkAJKAEl8LsJ7Nq9D/+ECYvY0aLj37D/IH7c+Lh05drvbpae/w8moALIH3xzfrRpT5+/QK6cuaUTCBsyFJycyv9oVXqcElACSkAJKAEloAS+SKBixUr4J1RoJIwbD/Fjx0GoYMHRqnXbLx6jO/+/CagAEgDv/8zZ7ggdIiRiR4+BaBEjIUrEyNi42fbXpgPgZeslKQEloASUgBJQAv8xgW07diFyxEiIHD4CEsWLL3/hQv8tbuDnL17+j1ujp/MvBFQA8S936hvb+fbdB2TJnA18+RPHTyAdQbC/AqNcObWCfCNCLaYElIASUAJKQAl8I4EypZ0QJFAgJIwXT8YciRMklCXdwNu2bf+NtWix/zcCKoAEsDs+ZeoMhA8bDrGiRUMSn06AVpCoESNjzbqNAexq9XKUgBJQAkpACSiB30Vg1hx3BA70l8ScRosUGUkTJhLlZ6TwESQvTOgw2Lx1++9qnp73DyagAsgffHO+t2nPnr9AqhSp5KWPGzMWDC1ErKjRJK94sRJ48+bt91ar5ZWAElACSkAJKAEl8AmBUaPHokGDRpg0eSrq1q0nsaeMA+nSpRvGjp2A2nVcsWTp8k+O0wwloAJIAHoG+vQbgL9DhkbcWHGQKH4CCQajG1bC+AkQK3pMhA75N9w9FgSgK9ZLUQJKQAkoASWgBH4XgQ9WJ16zboMoOxPEi4/7Dx9b7dFVJfApARVAPmXib3M2bd4C93nzceKkF4YMHY6/Q4RAwngJ4LHAE0ePn8Bcdw/s3bff316fNlwJKAEloASUgBL4MwksW7bCIoDEjY9r12/8mY3UVv0xBFQA+WNuxa9tyMZNm6UjiB83Hh4+evJrK9falIASUAJKQAkoASVgRWCR52JTALl4SWe/skKjq74QUAHEFygBIWvJkmVmR3Dp8pWAcEl6DUpACSgBJaAElMAfSmCR5xJz3KECyB96k/6gZqkA8gfdjF/ZlMVLllosIHHi4YLOw/0r0WpdSkAJKAEloASUgB0BFUDsgOjmFwmoAPJFPP53p60Acsn/Xoi2XAkoA
SWgBJSAEvjjCagA8sffoj+qgSqA/FG349c1xnOxxRTKGJALF1UA+XVktSYloASUgBJQAkrAnsCiRRoDYs9Etz9PQAWQz7Px13tUAPHXt08brwSUgBJQAkrAXxHw9AlCjx87Li5oELq/une/o7EqgPwO6v/BORcvto4BUQvIf4BcT6EElIASUAJK4P+WgPs8D4k9DR/2H5w7f+H/loNe+LcRUAHk2zj5u1KGKZQ/QKiaCH93+7TBSkAJKAEloAT8FYFNm7Ygb74CKF/BBVeuXvdXbdfG/vcEVAD575n/J2ec5zFfNBGR/42EM2fP/yfn1JMoASWgBJSAElAC/58EXr/2xsPHT3D/wSO8efPm/xOCXvU3E1AB5JtR+a+Cx0+cRJeuPTBg0BDcuXvPfzVeW6sElIASUAJKQAkoASUQYAmoABJgb61emBJQAkpACSgBJfCnENi+fQe2bt2Ga9dvYO/efVi2fAUOHT76S5v36tWrr9Z36/Yd3Lh586vl7AvcuHETz5+/MLPfvXuP1avXYtXqNfDwWICrV6+Z+2gN8S/p7du30tS79+7htNdZvH33zqbp3t5qzbEB8os2VAD5RSC1GiWgBJSAElACSkAJfI5An74DkCVLNnB6/NKlndCseUvEjhEbNWrWxo1btz932DfnHz95WoSbrx1QqJAD8ubJ/7ViNvufPnuOefM9cf/BA8m/e+8B8ubJh4wZs6B16/aoWLEy2rXviKlTp2P3nn148fKlzfF/4saTp89w8PAR012sd+8+4rq+Z99+s7mHjx7Htu07zW1d+XUEVAD5dSy1JiWgBJSAElACSkAJfJbAhAmTZJA7ZcpUKWMMeqtXq/HZY75lx9p1G9CkWatvKYrVa9bB03PJN5U1CvXtPxCrVq+VzbPnLyB+vIRIkyoNbt++YxTBgUOHESF8JHTr7mbm/ckrrVq3x6LFS80mXrx4CYOHDsPlK1cl7+atW6hVux72HzhkltGVX0dABZDvYLlj525UrVoDNWvWQZOmLfDw4SMx1b148RIHDx1GnTqu6NtvAJ48ffodtX5/0bnuHqhduy7u37doIr6/BmDgoCGoVr0G5i9Y+COHf/GYV6+9bWbA4JTAXbt2xxsfM6dx8KvXr41VXSoBJaAElIASCPAEpkybgaB/BcH8+ZZv76Wr1xE4UCBkzphJrp3fy6pVq2P/gYN4/OQZKlashBkzZplcevToCQeHouCvjhtp7bqNyJQhE+LEjIUaNWrj6LHjsuvZ8xeoXasunMo6mz9ITG1+v34DsGfPXikzf8EiVKlSDavXrBUBonLlqhg+fCQ4rrFOAwcPw5mzZ+U7njlTFgQNFARnzn061a7XmbNY5LkUL19+dAXjmMXRsTiWLlshVVJoad6iFbp26SZjhYEDBqNWrdrYu++A9SnFHap06bLo22+gTb79xsZNW1C8eClMmzbD3NWrVx/Uql0H+/YdEDex6jVq4cTJU7L/xctX6NrNDZykJ2f2nJg5a45M1tOrZ2/Mnj0Xb968xdPnz1GoYGGECRkKtWrVwfKVa9B/wEA0atwEo8eMk3rWrF2HsmWdsXzFKvO8uvLtBFQA+QZWb9+/R/36DZE0aQp06NgZ7u4e4u84asw4FCteCrv37sPtO3cRL0480WzctNIIfEP1312kc6cuCB8uAq5e+/Fp7ub6zNddplSZ7z7/1w6YNdsdvfv0N4v16tUXmTNlxf37D828du074c7du+a2rigBJaAElIASCOgEJk6aiuBBgmHYsOG4e/ceqlapJuOGbt17yqXTkhE8WAgUKVIUCxYtRZkyTmjevKXsa9asJfLnK4ChQ4chW7acoPDAtHP3XoQJGRo5s+XAlKnTcOPmLTx4+AhFi5VE585d0KtXbxRxKIYnT57i/IWLcj7Ww3Tw8GGEDf03/gr0FyZMmoo6tevI/q3bProdrVu/ERs2bpHy23fukv01qteU7a/9Gzx4GCpUqITx4yegVGkncND++vVr1KhRS+op71wBM2bOkfVqVT9agTZs3ITiJUpj1KjRqFmrDgw+9ucbPHgo8ucviEmTpsDFpQrmz7cwad2mndSZMnlKuC/wRPJkyZExQya89vYWIapp0+ayv0hhB+zcuQt3791HsiTJJO/UqdOgIpWCVrzYcTF67DicOHka3Xv0kv2VXCpLM06ePAXn8hVBAUjT9xNQAeQbmNWqWVseugULPW1KX7p0Ga3bdMDkqdMlv3at2kgYNwEeP32Gw0eO4vqNWzblufH06TOsXbsOj588kX2UtE97nTEtBteuXcduH80EC5w9ew6nTp8x62EQGP0rqXkwAqe4k76MS5cuN7UcxgGvXr3G2rXrceDAIXhbTYv37v0HRI8SHdWrVTeKfrJ8+/Ydlq9YaVMn22OYJ2/duo0jR4/h3fv35rGbNm9BjKgxUM6pHC5fuYJHjx9L+8+dvyjt5TFNmzRD5IiRsWz5Shw6fATHjp+QDvPMmbNSz4MHDyWo7fr1G2a9uqIElIASUAJKwL8TmDRlGmJEi45ixUrA1bUBsmTOijlz5+H9B8uVPXryDLly5kHiRElw4eJlyaS3wMyZs33GIZYBdoP6DRA8aHAZOLNQnJix4VyuvImHStNAgQLhyNHj8j3lOoPgmVKnSoPSVsrHQgUKIXiw4Lj/4CG2bdv+yXiHM2qOHDVGjl28dLns53fcOh09dkysNvfuPZBxwcVLl3DuwkUEDhQYTRo1kbqjR4mG2LFiy2EbNmyUenr0sLhrpUyWAo4ORc0qM6bPiITxE+Dxk6do1rSZlD1xwmLBMAoRWaqUqZEhXXqp39mpnJS7d/8BTnmdRfCgwVCmVGkpXs+1PkIFD4m7Pp4js3x49rGyrtStW1+EQ8NSQq+WJEmSgfeE6e27D0iaOBnKlC4Lb29LkL1x34w26fLbCagA8hVWh44eR/AgQZEre05fS968dRubNm+TfVUqV0GShIkwe64HnMqVR/y4CbBl63bzuAePHonrVoYMmdG8ZRtx36IA4lqvoQzIafYcOmI0YkSPiYb1G8rL3LBRU8SMEQuzZs+Veg4eOoJsWXOgSKHCuHnTErRGSb15i9bIn78QYkSPhf79LeZKCkgMeqtcpRoSxE+IZs1agIIH042btxEpQkTUrlnbbJ/1Cme34MuYPl1GMTFeuGj5NfVWrdshWpSo6OHWC0OGj5K2tWvXAR8+fMCHD0BFl8rSAWTJnAVLl63E6bPnkTdPXlSpXFWq37Fzl1xr1EiRhMXQ4aOQI0cuhAkdBs7OLlLmxImTyJApCxYvWWbdJF1XAkpACSgBJeCvCQwbNgLx48bHsmUrZdao53auThy0J0uaHNXtLAxUcFKI2OMfmvOTAAAgAElEQVTjptS3b3/Z3rff4rYUPUpUlCppGWwTULYsWcG89h06o1btuqCl4eKly3j/4QOSJEqCck7OJseM6TIgScLEsj1nzlypl4IS07Nnz+RbbowBNm3ZJvsbNmhkHs9RxeSpM5A7d14RGrJly4EFizxx4OBhRAr/L/LlyYuGjZugeNHi6Nixsxw30ScW
xnPxUvD4+HHiiXXHqDRtqjRIGDc+WrdpL25kFSu4wFBSGmWo7M3GoP44ceFarwEqV6qMsmWcxCNl0+at0s6OHTpJ8dIlSyNqpCjgDGBMI0eOkv3kYyRaNkIFD4FTp70ky8WlMhImSAQG9xuJ7la8DwsXeeLp8xe4c0c9OQw237tUAeQrxBi7wIeNkvDXkmtdVyk7dtwE7Nt/UNZpfmTi1HWZM2cVVy5uFyniKPEkXOf0dTxH3rz5QH9QV9d6st2wYWMZwCeKnwBpU6eVevivUkUX2U+TIVOxYiWlfXyJ+/Xtjy5dukl+ixatEO6f8LLet19/0ZYYpkK6jNH/sWaNWrLf+h9NlM7OFcxZMlzr1kO+vBZz7cpVq+XcfOmPnfRC3tx5kCBeAty6bXkJt2zbgb8CBUKH9h3MKlOnTIV//g4LwzWtVIlSiBkthvxYEQutXbceQQMHRVHHYuYx/PFEmmk1KQEloASUgBIIKARGjhyNOLHjmvEI9td15dp1sVA0sBrgs0wzH5chegwwtW7dVr7FxmA5YfyEKFmilFldlkxZ5Nv80T/Bsuvp85dInjS5CCRG4WyZsyJB3Phg2QULFkq9kyZbBJAlS5dj5KixRlE8fPQY6dJmAK0Zt+1+Y2zSpMly7NChw6W8MQ5q06adebyxwlgWjnsYc8GxS6IECVGwYGFjN2JFi4GC+S3jDjPTbuXlq9dIniSZCCF2uzBrjjuCBPoLTZo0lV0VnCsgWuSoYilhxtRp0+X8nTpbxkvMq1atBkIFCwHGsTC51q2PRAkSgx4cRuJsYCmSp0S6NOmwY9de0xJi7NfltxNQAeQrrOrVayAPaX27zsC3w6pWroKggYPIQJuSOl8uYzYIL6+zMjAv6uCI8RMnywA8VoxYUg1dslh27Njxst2oURPZXrPGMuNEymTJkT5NOvOUjRs3Qdi/w8j22XPnpOz06RY3MLMQAPonco7uA4eOwNHBEUH/Cmz6jIoAEjEyatb41I+TAfWBA/0FmmVpdk2cMDHSpkkvVW/ZshVBAgVGu3btZZtm3CiRo4BT1THt2LVH2sPgMiMVKVQEf4cMjd17LVPb5cmdB3FjxYa15sfgbJTx60B+o226VAJKQAkoASXwXxHg4Dzs32E/O7XrpctXEStmbNNrwGgX3bojR4qCIUOGibtWqpRpULJEabzz+c2KJImSonDBwrh245bkMUid44ohQ4ZKFS9evABdsl+8eo3YMWOjqONHd6cUSZMjXJiwePcBmDvXXY6bMsUypqCrtH2Q9cEjR/FP2PBIkzot6FZtJMN6MnHSFMm6d/8+UqZIhXBh/8G585aAdWNKW8MCwZgVpsgRIoo7GoURpmrVaso4hEpPpn0HDuKK1e+MSCaAgvkLSnuNGbo2b9km5Vb7jKvq1q4jRYs5FBXh4s5di+LW4FO5cjVwgiGmCuUrSF38IWemli1aI0qkqDKGkgyff6NGj0XwoCHEhdw6X9e/j4AKIF/hNWOWJTiqUMFCny353icGolrV6ggWOCjOX7iEw4ePyMvTsmUbOY5BTRzU16pZCwsWLgJnujhx0vKQu/sEhNMFi6l2LUsQ2IoVK2U7RbIUyJo5i6zzXz3XeqYAwpky2MnMmDHT3G+9smXrNtDNiTNphA/zDxYttsycQTNkpH8j+eqCtdnHdEmNy8JFizF//gKJ02C9nLqPFo6WLVuLtoRmXGoMjE6F7mhsT7u2HzUeBfLlR/iw4XD0mEVzkyN7TsSOEQsPHj02m7p9xy78HSqUBImd9jpn5uuKElACSkAJKIGAQKB33/7IlCmrKPVy5c4HBnfbJ86wRAGFSr9Fi23dkHfu2oOcOfNIADoDuznLlZE4vW+0qNHB2aqMMQndtKJHjY4yZcth2vRZ8nsXm7duR9w48UTIOXLsBDZs2ow4seJIXEq/AYPRqVNXhAwZCnXr1sP2nbsxc5bF/ds4j7G8eOmKuEwxsLtFy9aoXccVLpWqYtz4ieICZZTbu/+guDGlSpUGjRo3w+Ejx8AYjVKlyyJC+AgoV648xk+cIl4eFLDoksX06MlTFCtaAlEjR4VLpSqYOGU6HlmNGYz6KZgxXiZu7HioXKU6ODkQY1maNm2BkKFCo3ixkhgxehwKFyqC0KFCo2OnLnKoBOk7FkO8uAmkTZcuXxHXNwpLg30sOGvWrEPYMP+gQ6eueG8V7MH7NmGiRcgy2qHL7yegAshXmNFfsnixEjKo5stvnd6+eSMD79s+sznVd60v0+vxxaG/JAfivXr3k0M4YxW3W/jMZmFdz5IlS2Xf5MmWecENCwgtI0xpU6eROArjmBbNWyCCj2sVX6IQQUPY+HM+8Qlwp0WFAWB79x3EwIGDxTqzboOlw/N++w7RokRD/Xr1jWrN5cXLVxAqRCiZms/M9Fnhi0cBpH37jpJToUJFRAgXQWaIYAbny+Z19u1juW7mORZ2QMTw/5plcufKjXix44jZ1drNiuZjHrtrzz6fs+lCCSgBJaAElEDAIMAgcAZ58xvLyWHOnjv/yYVxUhoOqg8dOgwvr48T0BgFT546LUrBp88sgdFGPpe7d+/FhQu2U+PSo8HDYz4uX74iRa9dvw7WsXfffnBSG8aK0q2Lbkf79x/AuXPnZWKc48dPiCXhS7NtcqKa1avXSJD70qXL5OcIrNtjrN978FCm+V23boNkse2cbIfnPnDwkASt0/JDhep5H0sJC759916uacGCRfDteo36+aOIjC9duHCROf0v6+I1cRIfnosWGFo29ln9yOCjx09gxNA8fvxY1nnMaS9LDAjrp8BkWESM8zGwf/+Bw8amLn+QgAog3wCOPn/Fi5dE4sTJMG78BCxevAQrV67GXPf56NGzj2gVrt+4idQpU8sAevrM2ZjnE9dRsEBh8CFngHbnzpZ4kpYtWmLZ8hVY5jN3tJubZWo3Bp0xTiJf3vxSz/gJkySIi36MoUOEkk6BPo/ZsmaX/QsWWGblmjpthmzTLYpmyK0+v9rZokVLye83YJDMDc7BfbdubjLwZzlusy76dNqnmT7T4nH6P7pxHT1usdYMGz5SjnMo4oD9Bw8hWZKksj1j5mypgrN2sN78+fLLjF+0cjDgjXlTplrm6G7brr1sj58w2Zz9iwfT15RTBZKXJiWgBJSAElACSkAJ/AkE3r97j1OnvOSX4CmknDmrnho/e19UAPkOgmPGjkelSlXlR2k4fR59DQ1T59ZtO8Cg8SaNm4C/e8GpeWmpcHWtb/Ogcs7qokWLoUHDxjInN2ePGjR4GBhwxtkh9uw7iJ49e8txNEWOmzAJjRo2khkxOM3utes3xbTIHyLkD+kYafKUqSiQv6DMGMFp65gYAFazdh35EUDOntWmdVv06t0X3t5vMH78RLFw0Bx52ssScGXUZSw57R+DwvijizSZMjEmpK5rPXTv7oYpU2eKJaR+/QagEMREMyVn4aIwdfnKNTHF8rdTqlSpCkNIoTaE19++QycbsyZ9PxnEbj1dsFSq/5SAElACSkAJKAEl8JsIcMZSzkiaNl1G3PGZAOg3NSXAnFYFEH9
3K40QrR9vOIWePzHR9Gz966l/Yhu1TUpACSgBJaAElMD/H4ERw0eCv7D+8tXHX3n//6Pw665YBZBfx1Jr+gEC/A0TWkLmz1+I7Tv34O69ez9Qix6iBJSAElACSkAJKAEl4F8IqADiX+5UAG0nXbacnSsiTux48sulEqASQK9VL0sJKAEloASUgBJQAkoAUAFEn4LfTuDN27cyK8crNWv+9nuhDVACSkAJKAEloASUgF8TUAHErwlr/UpACSgBJaAElIASUAJKQAmYBFQAMVHoihJQAkpACSgBJaAElIASUAJ+TUAFEL8mrPUrASWgBJSAElACSkAJKAElYBJQAcREoStKQAkoASWgBJSAElACSkAJ+DUBFUD8mrDWrwSUgBJQAkpACSgBJaAElIBJQAUQE4WuKAEloASUgBJQAkpACSgBJeDXBFQA8WvCWr8SUAJKQAkoASWgBJSAElACJgEVQEwUuqIElIASUAJKQAkoASWgBJSAXxNQAcSvCWv9SkAJKAEloASUgBJQAkpACZgEVAAxUeiKElACSkAJKAEloASUgBJQAn5NQAUQvyas9SsBJaAElIASUAJKQAkoASVgElABxEShK0pACSgBJaAElIASUAJKQAn4NQEVQPyasNavBJSAElACSkAJKAEloASUgElABRATha4oASWgBJSAElACSkAJKAEl4NcEVADxa8JavxJQAkpACSgBJaAElIASUAImARVATBS6ogSUgBJQAkpACSgBJaAElIBfE1ABxK8Ja/1KQAkoASWgBJSAElACSkAJmARUADFR6IoSUAJKQAkoASWgBJSAElACfk1ABRC/Jqz1KwEloASUgBJQAkpACSgBJWASUAHERKErSkAJKAEloASUgBJQAkpACfg1ARVA/Jqw1q8ElIASUAJKQAkoASWgBJSASUAFEBOFrigBJaAElIASUAJKQAkoASXg1wRUAPFrwlq/ElACSkAJKAEloASUgBJQAiYBFUBMFLqiBJSAElACSkAJKAEloASUgF8TUAHErwlr/UpACSgBJaAElIASUAJKQAmYBFQAMVHoihJQAkpACSgBJaAElIASUAJ+TUAFEL8mrPUrASWgBJSAElACSkAJKAElYBJQAcREoStKQAkoASWgBJSAElACSkAJ+DUBFUD8mrDWrwSUgBJQAkpACSgBJaAElIBJQAUQE4WuKAEloASUgBJQAkpACSgBJeDXBFQA8WvCWr8SUAJKQAkoASWgBJSAElACJgEVQEwUuqIElIASUAJKQAkoASWgBJSAXxNQAcSvCWv9SkAJKAEloASUgBJQAkpACZgEVAAxUeiKElACSkAJKAEloASUgBJQAn5NQAUQvyas9SsBJaAElIASUAJKQAkoASVgElABxEShK0pACSgBJaAElIASUAJKQAn4NQEVQPyasNavBJTAbydw9NhxbN26Ddu377Bpy7Xr17Fz127cvXcfN27ews6du3Dk6DF4v3lrltuzZy+OHDmKJ0+f4+LFS9i9ew8OHDiIy1eummW+tuJ15ix27Nz1tWLm/tevvfHo0WPZvnP3LjZs3ISDBw+Z+9++/dg+M9Nu5e7de7h0+QruP3iId+/e2e39ts03b958W8FfVOrBgwe4cvWaTW03b96Sa7//8BGuXb8h/A8dOox3797blPuZjffv3+PkyVM4c/Ycnj57hg8fPvxMdeaxrOfDhx9vJ6+Tz6MmJaAElEBAI6ACSEC7o3o9SkAJmARu3rqN9h06IUvWHKhYsRJcKlWF5+Jl8Fy8RAabW7ZtR8GCRZAhXQbkypkbDRs3ReZMWZAje054nT0PDkPbte+EPHnyI02qNChUqAgaN22OylWq49/wEdCwYWM8f/7CPJ9vKytXrUaO7LmQPn0mdOve07ciNnn79x9E/wGDRcA5c+YsqlWvhSJFiqJzl+7S9tlz5uHR4yc2x/i2sX3nbiRPllKu6+btO74V+Wwex9+HjxzD06fPPlvmW3a89n6DRYuX4lsFGc/FS9GxU1ebqrfv2IXChR2RIlkK5M9XAPUbNEb6tBlQuGAR3L57z6bsj27wet169UH06DFQo0YtvHj56kersjlu3YZNpiBps+MrG97ebzBpyjSULlMOEcL9K/eQgqQmJaAElEBAIaACSEC5k3odSkAJ2BCg1SNB/ERInz4j9h84hLfv3uHN27dYumwFcubOh/ETJ0v5CxcvI2K4CMieNZsIHDt27ESo4CEQL258nL9wUcrMmeuOQIECoXHjJuBglQJA4UJFJG/o0OE257XeoPY63D8RMGzYcNHex4geE82atfisRWL/gYNIniI1li1fIdXky18Q4ydMMqscMGAw0qRJ/4mVwCxgtUKtfsZ0GRAx/L94+uy51Z6vrw4dPgoUdH42jRo1BqVKlf2map49e47lK1fj+ImTn5TfvHmbsC7qWEwsH7t27UHoEKHk3p45e/6T8j+aQSGUwiitXT+bZs6ei959B/5QNStWrkb06DFx7vwFrF+/ASWLl0Ldeg1Bq5YmJaAElEBAIKACSEC4i3oNSkAJ2BCg5aJQwcIIEugvXLl23WYfN/btP4ghw0ZK/oNHj5EoQSKUKlnaLFehQiUZ8FL7zrRmzTrZ7t27r1lm3PiJktepUxczjxaLZs1bYpuPq5erawMEDxIMO3fvlTK1ataSYwzBxjwQwPMXL0TjPX7CRMl+/uIlEidKgl49e1sXw+SpM3Dnzl1cuXoVzVu0wtAhw3D77l0RrNq0bYeVq9aY5QsVKIRC+Qth7/5DaNy4KXr0cMNrK7eqkydPo24dV9Sv3wg7d+0RAYzXFSdWHGTMkBETJ03Bw0ePMXrMOHTv4SYD4CdPn6Fjp87YsmWbnOf8hUtyzQ3qN8Tq1R/PPXHSZIQIGgwJ4yfAyFFj8dLHqnDm3HlUrVJd6jYbCoACY3c322s19h8+chQRwkVArVp1jSyUKVNOWC5evBQPHz9Gx05dMH/BQtm/cOEiNGnSDC9fvpTtNevWo1q1Gujarbt5vLGyes06sWRt27EbdevWg4NDUREQ6T7F+z1w4GDQkrN123Y0b9ESU6dNx2tvb+NwrF6zFlWqVEeTps2xd98+yZ8+YyYihA2H5EmSYczYCWZZ6xXvt2+lfl7TtOkzzV179+1HlsxZETp4CDRt2lzuCXfyeso5V8C79z/u0mWeRFeUgBJQAr+ZgAogv/kG6OmVgBL49QTWb9yMv0OFRtbMWcwBnPVZ6BJ04eIlyTp/8TISxk+IIoWKyMBy3dr1CBE0OJImToaLl65ImWXLViDwX3/JgJDuMUxly5ZDpgyZcfHSZdnmP4/5C2VQ3KOHxdWqatXqsk03JKaePdxEIDl2/FMt//TpM5ErVx6zLq64uFSW40uWKIlp02fY7Ltz9x7y5skn++kuxRgTWmlKlypjlnMq44SoUaKgZ88+6NdvAP4J+w8aNmwi+w8fPoIsWbKjT99+GDFyFPoNGCys2HbWU6lSZSxZuhwvX71G6zZtJW/goCE4dfos4sRJAHeP+SKE5MyZB2PHTUDxYiXE4nT2nMUiQQtAuL
/DIEumLFiw0BNv3rzF8uUrUKiQA9zceqKiS2XMmDnbbCutVMuWrzS3rVcoHEUIFx4VylcUQW3d+g0IGTwEsmfPicdPnuLJ06eIGjkqkiZOCgoSnbt0Q4zosXD9xg3s2rUbmbNkR89evZEvX35Uq1bTrJr1/BMmHDp27IzZcz2QIF5CODtXwHuJ3QCqV7cIjBTqDh46LAyiRYmGZz5udxS4EiZIhJ49e8PRoSgcHYtL3SNHjhbhq6hjUbl284Q+K69ev0aFCi7ImSM3hg0bgUwZs8DNrZfs9fI6A2fn8ogSMRK6dOmG23fuSj750DrGmCVNSkAJKAH/TkAFEP9+B7X9SsCOALWzt27fwa07d8FBqn/+Y+zCg4eP7K7w65u9evWRwWLFCpXw9t2XA4rpgpUtSzbRVjdu2gIZM2SWweGlKx+Doem2xcF0poyZUKa0k/jkDx1usaBYt+bhw0fYu++AWCiYf+jwUUQIHwH58+WXmI7GjRojRJBgoBXAPlFwyJs7r032w8dPUKlSFbkWCgW5c+Ux3cJYcNq06bLv1GkvOS565GhwqVjJrKOck8VKYFgGXF1dRQBigb1798mxi5dYhCMjqHvNmrU++cvMehgEz/O3bNUGL168lGeKO6dOnSYxM1yfNWuOlBkzdrwc9/b9BxHsKDQYqUghB8SOGQvv3n9Am9ZtpDzjdHjuzl174MFD3+Mc9uzdjwTx4iNdmnRo0rSFxIDwOp89t1g4WH/1ajXELWvMuAl49dpbhJJLl68ierTocCpbTpqwdOlyOadhoSqQvyDixoor+ygU0F2NghSPZ1q2YjWC/hUEUyZPkW2eP2niJBKkLi5uGbOgnmt92cd/Bw5YJgrYvWcf6G43aPBQc5/1Ci1r5DnRx72uWtVqiBwxsgh7LDdnjjvC/xNBJkYwjqPlKl/eAiLsGHnfs6TQRNYf+4P7Vuv+u5/4eE1fvw5OOMHyN2/dwbPnP+9q9z33QMsqASXwkYAKIB9Z6JoSCBAEtmzdhiSJkyJVipQyqE6SMBGSJkqCpIkS+yy5br3NfGOfsW4sv1bWer91Hb7lG+c06jbK+1bWUuafv8OIRvp7bwy1yhzgMU7jy+IH4HXmHNKkTotGjZrg2LHjMDT41udc5LkYQYNYBqLky7o5eP2WtGPnbjB2oWmzlkiTKjUyZ8rsaxB56pSpbASQl68+BkIfPX4SlVwsgkiCeAlw+44lqHzggEEIEiiwBNSzLZH/jYga1T9q+Is5FkOEf8Jj206L1rxFs+YIFTwkLvnM4DVmzHi5ljKly+LZC0swvbv7PMmzjj2hSxOvefacuZ9cMi0k7h4LRSgLHiQoJk6aKmU4S1isGDFRptRH1zaHwg5IljgpmjZviSpVqsHRoRg4QxjvEYPsKcD5ljZv2YqIESKCguXp017wOnPGphiPL1LYAenSprOJd6FbF9vdoX1HKb92/UaEDhkKbdu2g/fbd0iXNr1YnXg8/1ImT4kiRRxlHw+ghSZUiFCYO9cdr7zfIlWKVPJecd/BQ0cQKNBfNu5TRqMWLlqMSBEjo73PeY18Yzlw0FAEDvQXPDzmS1aTxk0RXu6TxeVv1KjRIoAwJshIdANjjAotND+SBg8ehr9Dhkai+Al96Qes30lj3XgvjaWR/y1L4103lp87xn6/9baxzqXx97l6mG+U962M5XhLX5hYJpSIEzOOuBb+CEs9RgkogZ8noALIzzPUGpTAH0VgzZr1+CtQIIQOFlx8+ePHSyBa59gx4yB2zNif/sWyy+O2/FmVN8rEimPZx3qMPFn/Ur5VPcb57eu3ybe0J07suDJ4LOJQ9Lv5XrtxC7FjxJKZqp69+Kglt67ooY+23evsOSRNklzclKz3W68vXrJM2rJ0icUqwJgGDmydnJzxniPXb0gbN22WgHS6afmW0qZOi3xWFhAGsJ84abFsGOUZ70CXoTVr10tW//4DpB3nz1uC5aNEjIxatWoZxVGiWAmxeKxYaYnNaNG8hZSn0GUkd3cPxIgWA2XKOkvWpk2bEeSvIDYxGp6ei+W4xYuXGIfJkrEoFHgoyIybMAn/hPkHU6dOl33UMMeOERtlrFzCaOHJkS2HxFRYV8QB+5Gjx62zbNZ37dkr1z15skW4sdnps0F3tAL5C9jsOnnqtLS7i0+cziLPJcKjT59+YoVhnAtnPDNSssTJxLXO2GYMTIigIbBokadkUWBJlTylrDOOh0JDzZq1jeLmct36jYgWJbrEpZiZVivjxk8SxkuXLZdcV9d6CPt3WBz2YTBp8hQRYChAGentu/fIn78gqlsJmMa+b1kaVsG4seNKv2C+v3ynjffvS0t5Z42+warPMPKNY637BebZbxt5Rr5xfvtto5xRr7HNcr79GeWkj7Lqj4z8mLERP258xIsTF2FChpTngm6JmpSAEvg9BFQA+T3c9axKwM8IbNq8FWFChkbMaNHhscATp06fkSDoXXv2wT/97dl3ABs2bcXe/Qd+iBX99oMGDooK5V3wxup3MPi7GgMGDTWtBjdv30XcOPHQrl2Hz55n1WqLW5L1AJgDQQohgwZ9dLPhoLRL127Ytm27WRflkyFDhspUsi1atv6sRYYB6/nz5jePO3vuAjJnzo6hQ0eYeQxQTpc+M074zBTVu7fF1Yy/YUKLCdtTq8ZHASRPztySd9rrrNSRL18B0aLzd0T27T8AzrbERFelFClSg/Et/O0J1jN69Fjcu29xiVqxYoXkUQCyTsagdt2GjejatbuUoSsW0ytvb0SPGk2EIOOYCuUriHBszFxF68f2nbvQr/8gMID/c4kxLkH/CooRI0Z9rogINpkzZrbZ/+LlS2TMmNmcYKBrl64IHCiwaQGiFSZM6LByDIPrw/0dFuHDhjN/s2X+AovlhwLI1es35frSpEhlnqNmjdqSd9ZHALx8+YpYcXh9jBVp3rylWdZ6hS5ztJ4M93Hjy5UzF5IlTW4WGTBgoNRrLYBwJrO8eQtg3Djfg9rNgz+zQssepwXed+AQdu/dL30BJ0fwT33Cz7SV13royDEcPHwMdV3rC98RIz51o/wMPs1WAkrgFxNQAeQXA9XqlMDvJrBpyxaEChYCUf6NZBMg/bvb9TvOv3HjZuTMmRuOxUqgUeOmqFe/IcZNmIx5HgtBH366UxUvUQrx4sRD0iTJQNcY68RBffcePcGBe8QI/8rveQwYOESKvHzljdSp0ojmuq5rAwlgX7tugwxsunW1zLY0fMQoZM2aA1kyZ4OHxwLrqj9Z59SymTJlk+l6uZPB7ePGTwBjTRo3aSazOLm4VLUJamYwe4rkqcBYlznzFiBe7HiIGjkadu+xzLpFdx0KElOmzhAByLm8C65evyHnpjBTqrQTatSoKYHpc+bOw/v3H+S3P4oVLYFkyVLiyLETMn1x9RqWYOyCBQrbTAE8bdoMESjKlSuPvv0GghYY1mnE7RQrWlx+x2LxUoumn4ITB9pJkyZHzVp10KffABkUM8bDmCXLHgxnLCtatAT+DRcBWbNmR6fOXcEZwqzTwUNH5V6ECh4Ko8aMs96F8xcvo
XyFSihd2glp06TDuHGWGBUW4g81ZsyQCY6OxeDWq68M8CtXropjx09IHddv3pL2pk+XAYOHjgAFnAUOViQAACAASURBVDChw2CijyXm+vUbyJY1O5InS4FOnbthw6YtEntCAY+WFVofDx4+atMeY2MI68ucDS4uVZAvX0HQzYyJ8SGJEyZB8MBB4eRUHhREmVasWIUIESLh2DFL2yRT//0QgeEjR8t7Oniw5V3+oUr0ICWgBH6KgAogP4VPD1YCfx6BTVu2yhSekf+NBE5f+v+e+Jsd9LWfNWu2TKFqHeNx/PhJzJw1WzjxBwNp6bBO3m/eYOFCTzAGxMvrLJYvXynT3VJ4Ybpx8zZWrlyNuXPngUHMz58/By1Q/PVuplWrVoNWk5cvX8v21/7VcW2AKlWrSzHrXztn7AUHzkd9+VXsK1evy/Szp73O4Ny5C+CMXUaMxFWfKYj37TuACRMmyS++W7eB5UePGWu6GBn7OPUu3ZVev34tv51C97H9+w9I3betftSQvxq+Z+8+0G2LMQrU7K9bt8H88T1aZsjVGNCz/us3bsoPQY4fPxEcwDO9sop3MdpgLHm/FixYJL8IvmLlKrlW/lK8deKvp+/Ztx+c1Wr7jp3Wu2SdwdcTJ07Cps1bPtnH39agIMVZxG7dtsw4ZV2IguCChYtw6tRpEazoXmUIeCzHuJUZM2ZhnscC8LdMjHTx4mV5nm7cuGlkfbKk0DFp0hSbuKMTJ0/J78AcOnxElrwXTHT34xTPmn6eQA83y0xvgz8zScDPn0FrUAJK4GsEVAD5GiHdrwT8GQEOgPkbApzG85AKIP7q7t29dw81a7tiwqSp4lLlrxqvjfUTAvw1+s5du6Nd+05+Uv//Y6U93Hr7WEBsLZ7/jyz0mpXA7yKgAsjvIq/nVQJ+REAFED8C+x9Vy5iR+Qs9cfzEqf/ojHqaP5kArSTde1h+I+RPbqd/apsKIP7pbmlbAyoBFUAC6p3V6/q/JaACSMC49YabV8C4Gr2KHyWgz8GPkvv8cT3ceqkF5PN4dI8S+E8IqADyn2DWkyiB/44ANaahg4eUIHR1wfrvuOuZlIAS8B8E3NQFy3/cKG1lgCagAkiAvr16cf+PBAwBhEHoDGTVpASUgBJQAh8JqAvWRxa6pgR+FwEVQH4XeT2vEvAjAiqA+BFYrVYJKIEAQUAFkABxG/Ui/DkBFUD8+Q3U5isBewJGDIhaQOzJ6LYSUAJKAFABRJ8CJfD7CagA8vvvgbZACfxSAoYAotPw/lKsWpkSUAIBhIDGgASQG6mX4a8JqADir2+fNl4JfEpABZBPmWiOElACSsAgoLNgGSR0qQR+HwEVQH4fez2zEvATAkYMSBQGoesPEfoJY61UCSgB/0tAXbD8773TlgccAiqABJx7qVeiBITAJpmG1+eX0I8eUypKQAkoASVgRcCtZx/9HRArHrqqBH4HARVAfgd1PacS8EMCq1avkY9roECBsGfvfj88k1atBJSAEvB/BLr36Cl95KBBQ/xf47XFSiCAEFABJIDcSL0MJWAQ2LtvP/LkyQ9Hh2I4dcrLyNalElACSkAJAGjcpLkIIF27dlceSkAJ/CYCKoD8JvB6WiXgVwTevn2Hp89f4NmLl3j37p1fnUbrVQJKQAn4SwKcqKN7dzfs3LXbX7ZfG60EAgIBFUACwl3Ua1ACSkAJKAEloASUgBJQAv6EgAog/uRGaTOVgBJQAkpACSgBJaAElEBAIKACSEC4i3oNSkAJKAEloASUgBJQAkrAnxBQAcSf3ChtphJQAkpACSgBJaAElIASCAgEVAAJCHdRr0EJKAEloASUgBJQAkpACfgTAiqA+JMbpc1UAkpACSgBJaAElIASUAIBgYAKIAHhLuo1KAEloASUgBJQAkpACSgBf0JABRB/cqO0mUpACSgBJaAElIASUAJKICAQCLACyO0793Dp8pU/6h6d9jqDJ0+f/lFt+tnGnDh5Ch8+fPjZagLE8WfPnsfz588DxLV8z0W8ePESXmfO+pvnYPOWrTh+/OT3XOJPlz1x8jT4A5G/Il25eh23b9/57qo2b9mGQ4ePfPdx9gfcun0XV69es8/W7V9EgM/JmbPn/PxHRA8eOoxNm7d8tdXXb9zA2vUb8fQ/+nbt2bMPI0eOxuUrV7/atv+nApcuX8W9e/f/00s+cOAQvL3f/PA5b926jXv3H3zx+Nfe3li3fiPOX7j4xXK688cIcHR2+sw5vH792qzg5KnT5vrvXPluAWTnrj3o0aMnZs6cJZ3EwIGD0b/fAHTr1gO79+z9nddinnv7jl1IlTodRo8ZZ+Z9z8o8jwXo2qUbOnTshCNHj3310OfPX2Dq1Onyy6ojRoyCh8d8jBo9Br169cGwYcNx/8EjDBg4CJmzZJcPy1cr9AcF3r1/j2pVq6NggUI2A8/rN27C+82Pd1h/2qV/i3D16rU3WrRohdy58+HO3XvfdQkvX77C4ydPPnvM6tVr0LNnLwwcOATDR4zEIs/FmD9/AWbOnC3HnDt3Xp6zJUuWYe5cd8yYMRN79h7AmLHjMWvWbCxfsRLDh4/EgAGDMG7cBDx9aisgbd+5C127dkPfvv3x6tXHDuqzDbLbQSG/UGEHNGnawuY5sCv2R22OHT8RK1ev/ek2sUO/dfv2F+t5++49atWqg4IFCv+SAeXChYuQOHEysB/+3jRu/EQsXrLsew+Dt7c3njyxKE7Wb9iI9BkyYYbP8/fdlfkc8ObtWzx6/Pnn/kfr/S+O4wfdr3Qu9x88RJky5VC5SnV4v3nr6+U8fPT4l/SxnouXSD/h60msMo8fP4Ehw0bizp27kvv+/Xuwn+fyVyf2hQMHDUXxEqVQqHBRnDt/wddTTJ8+Ez16uKFb9x7YuHGzWca+TRxoDRs2AjNnzMLFi5fNcsYK36Mz587LJp/tkSNG4eix48bury7nL1gI9sF+mfi8jR03AanTpMe+/QdtTkXhYPKUaejbpx/27ttvs89648aNmxgyZKh8B+7d/zYhZvjIMeg/cIjZr0+ZOh2NGzfF0qXf3odQiOzUpTv27N1n3Ryb9RcvXsjztWv39/Vp27btkGfAY54Hho8Yhf79B6J37z42/cqVq9fQrbsbpk+fgekzZmLQoCHo06cfhgwZhpdW37vVa9eDY7epU6eha7ceOP+Z586m4T4bc909hIu7u4e5m+OAESNHYfy48TJeNsbGDx4+xKCBQzBp4iQMGjQYhw4flWOePHmC7j16okuXbnjy9JlZz8+usI+tVLkqypV3EUGS4xkXl8ooXrzkz1b9S47/bgGED4mTkzNSp0qL5s1bok/f/ujStTtat2mLjp2748bNW7+kYT9TyabNW5EhY2ZMnjz1u6t57f0Wnbt2R7NmzdG4STNQA/C1RO1QRZeqcOvZCw0bNUHiREnRuk07eaDq1W+IvfsPgoJagQKF4XXm3Neq8xf73717j/LOFVG4UBGzve8/AOs3bvGTD5N5kv9whZ3SkaO+f4yePX+BhZ5LpDVv3rxF02YtULBgka9qe+ybT2F54SJLPfb7zp67
gJgx48j7Nmz4SPTq3Q9JEidFoECBkD17TilOAaBkydIIGSw4ggT6Cz3ceuHAoaPoP2AQ4sWOh+BBgqFdu47o0LGzHOfgUBRPn1mEkHkeC5E+fSbUr99IOqn6DRrj/oMva6vs28gBQsGCheUc9vsC+jYtEbv3fP7Dyut/++4dXCpWRqGChX/JoHXWrDmIGzveFz/ov5o7LbezZrtLtatXr0WGDJkxY9acnzoNtZ3Tps/6qTp+18EeCzzBQZNfpKdPn6FUqbKoUbMOXn9G8zx+4hTc/4pW2S/aZtT57PlLbN663dj8Zctbt+9gwOBh5nXPmDELp0+fsan/xq3bqFKlGqJFjY4OHTrJgM1zyXK0bdcBPXv2tinLDQ6AHYoUlb5v5kzbZ3bipClo2aotrl2/jtmz5yJE8FBSLmnipNi5+9P3+tmz59ixczcePX5snmfW7LlwKlcB++0EA7PAL1oZOWoM0qXPiMNHLANWo9o3b99h/ITJlnYnSW5kf7KsUaOWlOnUuRs4CP5aWui5GC1bt5PnnEqyYydOYdTocahduy6yZMmOmbMsCjDresj64MHD1lmyvmHjJtSoVRdnf7Ggtn3HTnlX4sWNj6ZNW2DI0OHImDEzMmfKits+wrIIQJ27IWmSZMibNz8GDxkmY9Yc2XMhe7acoHWPaf2GTciTOy8SJUiE9h06f3NbqdBp0qyFKCD5HPTs1Ufqo5KA4+ME8ROhfHkXs7/ms8PvcbQo0ZAzZ25QQOZzNWOWOzp27orcufLK9/TmzZtSz8/+e/DwEco5V0DVqjVkXEYBpEwZJxQv5k8FEAK5dv0m6jds8onmllL2zVu38ejRY5uPLU3K7Fhfv/Y2eT5+/PiLgx1rU+PLly/x4MHnX5rbt2/D3gmoT7+BGDPW1gJCyf9LLx/P06VrD/CDa5+oWbl37x6ePftUOj1w8DAuXLwkh9y+cwelSjmZgzxm3r13HxcvXUaTZi3lxWBdz3xx1fF+7Q0ytE/WbbbW8Dx+/MTUStkfY7/98tUrUzBgHex07RM/qrwvRuK1vrdS9VELSs2lkfbs2Qsnp/KySXYtW7XB+AmTwHMZ6e43WASsr/nNmzemtpV18IUxBsxGnVyynPVxtvve4uYXBGEe99bqOngsr5UdrZHIvGLFyhg7djzoYmSdyG/ZshUoVbosqGFhOnf+Ipq1aIP79++LqwIFNPvEZ8PQJHMf35U6deuhX/+BeGXFzDhu9Zr1mDp9prGJk6fPIEL4CAgVPAT2WwnGJ73OIEK48IgVPSbuP3xklncqWw7BAgfF3XsWoWLosOHyEerdpx9evHqNGFGjI0XylGb56NFiyEfdzPBl5RUtNnaaa2qA2rbvhPeUQH0SB0gPrdry4uVLeXZevX5tmoJpQWC/YDxTZPMtAzu6H3Fg/y3pS88fz8928RHndTHxuWDfY+1K9/zFi0+etWPHjqNO3frYvGW7jeWI7w87fevEwUlZJ+evujLQFcHaesiPEDXi9qlatRpYvnylTTYtMdbPr83Or2w8ePAAtGj6ltiXc3DXpk178xnt0bMvFi9ZLsWt+ybr4/n+fc6ixjo7duyCNm07+N4PWllQKRDbP2/W5/FNYP7Su8/njRZr+8T+5Pp1y2DEfp+xzXuza9ce0R4eOnRELEPGPusl66LryecS3XB9c8fjs/j+3XsZ5HLwx7ZaJ9Y7f/5CODtX/MRFi/eQ/YuReD+fPnsm95XfXeO7wXti/R02yhtLvrf2z6+xj0u+G40aNcHMWXPkXWUb373/IH2kcQ5+C/h9e/eZd5R9nT1rttd93nyxfLBf9M3y/OTZc+TLV0D6sIMHbRWDrVq2Rpo06cB3yEhGv71jxy45xsNjgbELHh4LkSdfQdy4cUPORevg0mXLMXToCCnLgeg1u+eB97RHr77mt96ozH2eBwoXKfrF8QzLfsk7gBpw6++mUTdZsh9l19q6bQfQfdS31LFTV2n3pElTPtlNJVWCBIlRp069T/Yxw3qsxe3nL15i8NARuHvPYs3ndVsLDxQMs2TJ9kmfQcXv6LETfD3H1OmzPrvPOMB4foxtLr8maF+4eBlFHIvbjA9TpUiF0qWdrKtBnTqu6N9/kJnH94CCCz0XjDR79hzUqlXX2Pzqku/XqjVrwW8aE/udnLnyYfnKVbLN97xRk+bynbevzMnZBYuXLJVsultae9rwXhkeDvbHGdt3OJaw6x+4j/0A/6wTXW/ZDgpFTNu374BzuQrWRWw4+3YfbAr/wo3vtoDw3Dt37UVd14Z4YTVge+gjVfNjXq16LcSOFdsEXLNmbeTNWxAXfWIyNmzcLBrXrFmyoVOnznI5/Mh06tRVXEz27N2PPHnyY+iwETJAWLlqrUi17dq2l7LsnGguHTBwMFatXgvn8hVRrUYtPLQaGLEDtxZA6PtcoWJlZMqUBYs/Y0Kkewq1uSWKl8KKFZaHyGDNTrl0mXIYNXqskeXr8viJEyharMQnpmMOWOo1aIzDR4+Jaxg12NYmU0rB7ESoXeQH3xjM0X+ybr1G6N23v/jZ0x2C6eq166hdpx7SpkkPdp72ycJoE7p1cwM1BTTpG+4X5MYOpHYdV9MMyY9f8+atRPNGsyBdR2jmpjbBiKUZMWoM0qfPiC0+neCKFStRtmw5OTXNj8GDBhdNQ/8BgyWPQhc1H5OmTMeJE7773G/bvkMsB9Omz8TFi5dw8dIVFC1aXEyarIQCEM9Rv35D8xL54tV1rY/cefJh9px54o9uaAyoNezSzQ158xbAnLnzcOXqVRw7fsI8dszYCciRIzcqVapiDu44bO7arSeat2yDU6e85LkeN2ESokaOgnRp0mHYiNE2ne3lq9dRokQpRP43IsqUdYbX2XO4fPkqOnTqKh0SO/pyzhVtfFpXrVmHkiXLIFu2nOZHhNqouHHiIVvW7Jg241ON0mmvs2a7r16/gaSJkshHpp9VR8oC6zduRvQoURE/bjwRhJjn/eYdihUtjgj/hDe1QatWr5HjGzRohKfPXyJzxkyy7TF/AW7evoPoUaKjfYdO5jmNlcOHj4qJe9/+A/JOUgNGTaCRqJ1r176TqXRYs3Y9ChdxRKFCDmjWvKUUY6xQ+nQZ4VDE0YwfoJtYWafy8qGnG1utWq5iddx34KD5zBnn4JLvBIU13j8Kvgd9iWegUEF3MlocGXfB+0ALhPGROHP2LGrXdsW1a9flw5ApYxbkyJ4T1KIynTx5Clmz5QRdK5g4CKlRozby5S2Azp27SR7/Va9RC6FDhBJL1IqVqyX/1Okz0sdkSJ8Jo636CVoN+EE0BC2zEkA0mh07dcGWrdvFijZi5GjZzQGeo2NxEXInTp5q9gesgxqs1WssLmS83q3bd4qGq1jxkjh3zneXFS+vM2jUuCnotsA0ddp0dO7cVTSb9eo3kvvim/V64OBhSBA/IdKlTQ+6rjG59ewt79bmrduQPXsu6c+s9BSYPccdBQoWRslSZXDOF79u9iNx48ZHqpSppE4OzPr2HYBdu/fKILRJk+byLK1bvwnlK1QC79HQocPk3By4tm7THtS
qrl2/AVmz5rDp/+jq4eBQDFOmzsDhI8dEkORzS7cePi9ubr2RJ29+6UulQlE+PJfnNFfOPGjQsDFo3WQ6eeqUvA+nvM6Abrl58xVE8eKlkDhBIhQrXgpr1m4wqjCX1Lg2bdYSjg7FUNSxOG75KEKmTZ8h1smz5y/IudgXWPO+c/eu9A+9+/QXTTMFentBic8IteAJEyREXdcG5vHbd+5G1Wo15b2g2zATn5PmLVojVszY4i7NPLrMkiXdQyho00WYbidG4vtSoWIlZMiQSe6hkT969Bj07GWxLvD9Cxk8JPLkzodhw0eBz0fa1GnRoEFjUynAb2/9Bk1MjkY9XPJ7xv4xZ8488p2j9ZjpyrXroHU2aeIkcCxaAkuWWt5H62Np3aX1d/qMTy1n7MM5TjAEeD6DqVOnRb8Bg8H+KXTwEFi4yFOqu3T5MnLkzINtO3bKtr3gXreuK8KGDgtaOO2T+/xF8m20z6ebZY2ate2zZZtC0chRY5EvX0HR2F+7ZhF09x84iG7de8qz3KZNOxQq5IgDVoIVFW90ba1brwFmz/VA4ybNwfgY+8SxEwf49EYpXNgBVJgYiYPRLdt2YNqMWSLA0I2Owh77R7ojHT5yXN5T9pGGUqdHj16YMHGSUYXZ9xgZ+/btR6nSTqZQa+Tze792/SZj02ZJBp279rC5PqPAzp275JmgsoguZRQU+G2gt0jZss7iVeKbcMbj+R0qWqykjTI8ZfKUKF26rFG9LCtXqSYhA9aZtDZUrlzVzBo7dhwqV65mbn9thX2vfRo2YgyW+4wdGRdZq0598z21LutSuRomTpwsWcYza+zff/DwF13AOOYl/6xZs5sxXBzvnT5zFs1btEG2bDkwf+EiozosXOgJ1/qNTNc0T8/F4lVhFPDyOiv9x4iRY3Dp0mVR7hr7/Hr5QwIIB6jVa9TGuHHjsWLlKol5WLZ8pfkQUJpLliwFKlasJB+SkaPHYt68+fKA8wNZtFhprF23AUeOHAE/1tQiHz12AjGix0LUKNEwc9Zc8CHnILlW7boSQ7Fo4SKkTZtRbgxfIHaU7IwMfz2ey6lceVPr0qpte0zwucEcII6fNFVMp/wYFSzkAD709omDe770FG54rj59B5gvJTt0+oZy4PKlRH/Z4sVLgwHJ1un6jVvIlSsP8ubJL4MO+iHmyp1PXmJez+Rps3D46HE8evRIhIAhw0bI4WPGTZSOcOXK1eBAgenRkycYO2GyDCwvX7kiHfb8BR8fOJahabZZ81YIHjQY+GAtWrwUe/cdgLvHArjPWyBa+mbNWsjAheX54eWghx+IosVKYc26DaDFiIOPBT6DMX4ws2bLgREjRko7li9fYb7o1FRQm0/3PAosvKfuHgtFwOFHji+MvVaLg9LBw0aKxmXq9BkoVKSYDBTd3efBgR9vHzPqhPETRRCSk36AvCx9+w0Q8yXNnHR5o5RPyb19xy4YO26iDCQbNmyCxImTipmdjNk5Ll+xWjpoDqIaN20uVdIFauu2HThz5hxq1nbF1avXQYHauZwzunfvIZqNj7p9iJZt0aLF4lZDIZIfMD5jHHC71msg7SIH46NNYTZjxiy4dOmSmGLpj3nmzFnRqjdq0kw+HLdvW/yrpUG+/DM+vrly5DYFf4Pnhk1bEI0CSJx45sCKFo6yZcoh3N9hMWv2HMzzmI9IESIhTuw4praFg+tcOXPJexQ7Zmy0bdveRtAymtGjZ2+EDhlatI8nTp3G7DlzRVA+e9biTsgPPIVnpsNHjiBv/sKgq6aXl5cIXLSUUeNLJrVq1jGqlffA03MpOAipWbMONmzYhPMXLsClcnXs9sVnmG6NHLicPu2F9u07ihBoVuazQsE8WZLkCBYkGMZNmIxTp71QIH9BicNgET6LFJSPHj0mVlM+y+nSZTQtSnQDYQfPDzffBQow7u7zcerUafAjRjM7B2/0EabWk4N4DgDYNyRMkASbN28BBbbkKVJjw4aN0irG8dCv3zcBpG//gQgeNAQ6dOqCZStWgf0TPxgcxJLv/v0HkDBBYnFzZWW85xRA1vgIIHyv6RJEixs1vMVLlPZV40b3hChRomOHz6CrqGMxue9UFvBZ5ICZz4l9onBAqzCtm4ZlgYOl2LHiYMFCT/kApkqV1lRK8B1bsGiJuBXQb71c+Yp4+MjWIkQLVtt2HUUJwEEHFSMR/42IBg2bSJ8h9W7ZhhKlnHDk6FGsWrVKPra0nFC4jRwpChIkSCS8GLyaMlUasN/lgH3w0JFyblrlHIuWxLUbN8UHPGTwEHIvqckdMngocuXKK+V4vXQ9HDJ0GCiktWzZWr4PtMZR0xsxYmT07N0Pc93nY85cd1Ee0OWR95TtsU4cPNWt11DOx+sqXLAIhg0dLt9AWsBChQglgsPxEydRpUpVUDHHxOeioksVGXTRNY31V61a/ZP+ks/ZHHcP+T6w3+FzSNeRLt16gn0prQK5cuZG69ZtpV4q3dKkTY85cyzucyVKlMaggRblEAcrQf4KilYtLRpgPgMcfFFZw2sLGyYcOFBhv5coQWKUKVVG6uT3j5Y/9rG0JFPgypkjtwzypQAg3/aVq1abCgkjn0quWrVdxTWa7zC1zUWLlhDNLAdh9L8vUbK0KObs4xTYfzsUdkDY0H+b3wWjXvslFW6RI0eFm1tPnDzlhdy58iBI4MASD8eyVA5QgPpcopuRcW/YB/C9YrzMvHkeaN++E2bMnCWWqM2bt5oCD/v4tOky4LIvkzPQeki3ulu3bomwzneb7zHfPVqdK7pUln6qRfOWMt4x2tWhY1cRfC9cuCjKQY6pfBPoKVTQtejdhw8iIFOwNBJdfEePHS9Kn5at2olCl/eBlu+/AgXCvPkLceTIUVHm7d1riSGpUrU6Ro78KJgadRlLvnMUqJjogksL0LJly+UZoPDCuEN+w7nkM2qk6tVriVLT2DaWVIhFCP+vWFgpHLM/Dvt3GPTtN1C+VRxQM4bRt8SYGCobGedD6xDHOjlz5rJROvK4hg0bo5yTM5YsXSbjGQo41arWxGErJdaYMeNQ5TsEEPv2cHw1drxFqOC+K1euolLlaqDnAS3WPPfKlatAZW3+/IVs3NhorWOMChVbX4rvo7KLimx+1+iOy+8MhQb2zWPHT7KMsc9fEAUJFV9MizyXyDFGzN2iRZ4oV87iuUKhc8LkqfIO0iWRVunPWS7tr/dXbP+QAELNrMVSUFK0GLRuuPX8+NCzYQ8ePkaa1GnR3MrExXx2sLSgMM2eNUe0FBRGmFq3boeChYqYkjU14dWr15R9fGn4QTEG2gzGpc+9YSbctn0nIv4bWQYWPKBp81aY5BMDwsERX6ply1eIZM+Od8KEjxK+nMDuH82edMGZNm2G3Z4vb1LTX7p0OZyxE0AuX70qJmIGBDPxA8LBFDsiDjr4cRg3drwEGKdOlQZZs2SXcnxQGjZoKIN048wc8LFDW7x4Cai9/idMOLRtY7EOGWW4PHb8pAySjAAomsr58WjarDmoDa9apRqiRIoih9B/tnSpMjh29BjOX7iEY8ctsQ/FipXAooUWzR
EL0vJAwZNp+fLl8rGUDQDt2ndEUx+NN13S0qXPjAH9B8ju+w8ffvJB5UeaGh5OakATs+HGdvLkabjWbyxWHh7MD2sZH0sLLUXUXhlBYgw6K+ETUEXTfeTI0bB+veV5YsdYvGQZMW1z0FPO2QU9uruJwFzeuQLCh4uAV95v0Lt3XxEmLvtY6IxO09W1vgjOxvVZLzmA5WDUSBRgaC1iZ87Up3dfUJPGxE5u1SqLlpyDvCyZs4HaLyYG6HGw9KV0/ORpRPo3IoIHC+6r7zUtIBRAEsSND2rhmV6/eYvy5Svi37D/iKWLMSrO5cqDM99YpzlzPcTvlcI8LUa+JQ7aqOkeb2Vp4/tPHWeEIgAAIABJREFUgYWJFjj6zTJxIGNoLznY5YeO7x0TPy6Nm7TAaZ8ZOLhtpGrVaqJ6tRryoaTw7JugT7M0By5M1NhxgGefOGBisCWVF4aJevmyFWJ94TaVDHx3DAsCj69WvaYE8HOdriOGJpDacj4zRmKnzz7GECxKly0Hd4/5snvr1u0S5M8NDlwjRYwskwBwmwM6sYD4ElR84+ZtlCxd1hTyWZ7aY2vWtOjwI2tYRZ3KOpvPOANQ69VrIJZj9i28j9T22yf6RDds1AyrVq2RXXQ3oUKE7yATn0M3q4GL9fHDR4zGoMEWCwTz2QcYHzBusx9eutTiTlCpcnV07NhZ3Fn40f83QiTTKmddJ9vYomVryeLgsmSJUuagj5nsR42A26FDhiJjpixmPBYHMlQ4GYmxdbTG8r1t1aa9TIhCYYDCKC1f9JvPlj2naZHn/c2cOZs8pxygFXYoZlpDHz95ilix4khfzPr53WAQq5HoCkxh0jfXL1pO1q6zCJ0nTpwQTWSr1u3kUA6KU6RMLcotZnCWp7JlLG4iHAxVcKlsnAIDBgyUQalxv80dgEz0wm+bkapWqyH9p7HNYNtkyVKafScFalqMevWhBf2jUoxcqC3v3q27HMoJWygcGKl7dzczhpLa8ipVqhu70KZdR9Cia6RtO3aJtYUCIJUJn3PhovcABWTebyYqqaJFiyGCB7epFKQg5lu6c+8BsmTOihhRo311oo+2bdrJe2AIvnSz4XthCO38zjkWcfTtNFi7br0EexuzBFHxQP/5unXro27deijrVE7iIJhHBaVhpaJQGTNGbEya9GncKb/T9ICgOyUFXAr7tBjyWeM7SOUQE8txzMNEF1t6khjWDPZX6TNklmdZClj9oxtkpy4W6ywHuFR2Ge0aNGSYuBfze8P3nwoFplGjRiN//oLmYNO1XkOz/+czT2HQt8R7t3HzRzcwPgculaqiceNmoGsoXXto9WagMy1w1m6YVCbTq8I+8fvC66byiYnPYqZMWU03RvZvTRo3tT9MtmmdZH9OYTZVqjTmeNG+MJUmyZMmF0Urx5S0QPbq29+mGAUQa4uIzc6vbNB1mFYoa9dovnu08FNh1KxZS7FAt2zZRizw9FyhoslIjG2qV68hYsaIiYjhI+L4iVPGLnPJ+1y+QmUbzxnGhXISBD5ffCY5EQ2F4RjRYoiQw4PXrF1nI4B4UgBxcpZ6eT/JvpdbL7x85S3ulIa3gHliP1z5IQGEA1pqqI0ZKm7duoPFS1fYPGxsc7eu3eTF5/Rx1omDCPqhM3CIGpuNmyxTAbKjpzbKSKVKlpEXltsUXKhx9Fxs+dCxo3Au72L6TVLzQPelrVstL0eLlm1lIMBj6YpSp3ZdUGverl0HmbWLnd/XUr/+A9C3n+1D+rVjqEEqUbKMjc8kj6EWI3+BwuZgjIMyWkOYJk6chGRJk6Nb1+5o1aoNOnfqjAU+Fo3Tp0/j3wgR8XfoMNi12+L2Ur1adZQuVVpcTagJ5mwgfADtEx9ium/xBWfiB50D/p5uvdC1a3dxf7P2i+3cxXK/OLMIP9L8oNPfmPEORqKmmh85pqVLltlooRmAz8GJkTZv2YJQwUOJRnqvLxptlqMWPmb0mBJcbfj1rlu3HtVq1DYHR1u3bkM5n8E+20VTNgd5TA0bNDa16nxx6jdoiIGDh8g+Bso5FLUEW+3bu0/O0b59B7Ru1Qb8SJH76zdvxH85Z/ac8qxypigjubhUwbjxH7eNfC63bdsOx6LFTasfBWAKkUZsDWeuYodsJA746Fdbv0EjJE6YWJ4H7qO7giEoG2Xtl9SG8gNK32sjUSA3OjtafxjPYbGAWKwSL197w9GhqORbx4UYx/MDxfbRHY9BpSVKlJRzcEIJ+0QNW4lSZbFw0WJzFz8uhrmaAghdRozEQe2EiVPEbStN6jTYuPnjLDUMrjV8XydP/SjcUwtco6YlUPJLs6zQvL10+SqUdy4PZ59O1DivsRw7ZhwKF3bEG584HGrHOaMOg/qZaIkxBiPc5gdv8JDhso8aT0MA5QQLrq4NJN/4V7JEabjPtWiU2R9RE2wkWno44KQWiz7kS3x8fCmA8CP5xvujNtA4hlbFBo2aYtcuy7tNIbqIQ3FzUMJye/fslYBFYzBCbTotLUwMDKW2lq6WnEGFs7scPfbpzH306+Z5jHsobbJyU6C1lH++pd59B8gECMY+9tHUlDHRdY4zfFEo433Pli27DIg5KyL7pilTppl9tHE8l7SqcIIOI5EZB+LWif0dtaAc/DGAlAoVJg6WOnTsIuucNYnCmcd8yyCOCilajOLHS4AdPlZu+ljTT5zunkZyqVwVo8aMw6BBg8Rly8jnku9bn94WhRoFGsOlgvtoJeF7/7mZj/gcDxw0BHSTpOa9S7ceUjWfjfwFipiTkFDpwUEaU8UKLqbVgtt0BSEP3xI1oFQiGbGIRYo4yow6RtlDhw4jb4HCYtE18ipVqirvNq0V1okDoy6dLRy7dnNDtx49rXeb60OHDhdrFTMoYDRq3Bwe8z9a3Okew28ereB0RV1g1U+YlQCo5FJZXGqs8yjMDfbpr6mkoaLJ0NRal+NzVrxocYQIGgwnTlmUENb7rddbtGiJYIGD4MoVyzT8dHVl/7lli+WdyZA+IwoVKGR9iKzT6tKseWtRCH6yE5DneLb7fJsgdKMclRO0EnftarnfRr6xZB85dsIkscbSDZWJLuwlSpYVFyxuc9bCUj6WpnHjJ0mcreGPT68Mjh/oLmOfKBRTycr4Qya6+U6eYhGE2vm41NKt3bVBY4lFZZnx4yaAz4WR6J7OiU6Y2K9/TgCh+zxdi3xLR4+dxJJltrFp1uX4Hk/1RaHLd5buo6dOW6aGJSsKqoYiiO9j82YtrKsy1+mW2KBxM9mmJYBuq9Yu10ZBTlzA8ZyRGLvk4OAoAqARAzNmzFiZ4MAoYyz5TtMVnApNCqEUiOgybiTeI7r/2U/MwGeCHhX0jrBOVHo6lavoqxKcLqYcC3ISGfvEqc8LFSpiCmbW++cv9BTLaKdOXdCpc1f07tXHtPhx/EYXLEMg9/T0hFM5iwDCOujJwj4zTpx4YAjBf5l+SACh4MEBiWGSNxpM/3sjXoBafUJkUBQHvYY2kmbL/PkLm9Jf+YqVTQGE05AZbkask1o2Q8tKabJU6XLY6vMRoZaVQhADgZj4gmXJlhNnz
lg0wN3deovJi/voLmav+aXJiw/WlxL9W+lL+j2Jgg0149YzZfB4ds4OjiVEM8htCl38eDBxWlV2RNbp0pWr8kAzuPnhoycYOWKkaO04Kxf9cBf4+LMax/DDYMSHGHnUANZr0ASGZp9uIm69+xm7zSXjLoyAYQYo0b2BmhomDsIpEBiJvsZ0rWDasnmLWIk+7qsh95rb1AjRXMwXvWOHjhKrwhfSOpEJ3RioDaLWL2mS5BIHQoYtWrUzrT6cq57Txhm+ujt37RZfe2orGjZqahPQyGumD3jLFq1FI0DBgIkDvNx5C+CRz3SizHv67AW4ny6FHKTTNJktaw4ZkHI/XW5odmcy3J1kA5ABID8WhmsW3R3KOlUwZ9WgqwNd3Jg2bdmGylVq4OLFizIhAU3MV69aBgNNmrUyB4W+mT459SE/njlz5AKFCiPRP5YfDaaz5y8iWuSoSBg/AW7d+WhVoIaVFq43bz4N2GYnzXrZqTKdu3BJtqklsk8UEvPkLSAmYmOfi0tVMx6KwaPGc0W3K8bF0MLGxI8H3TGMRB99CiFUYtDlzeDKjxAT3T7Ix7cptJnHuCdyonmfgoRviW4vvI+0+DFt274dhQo7mgoSl0pVsNtqykf6J7Nemr6tg745sOesY0bivgwZspgWEMYE0F2IiVYs+pyv9rlW8lq40BJHQgHa+mNv1MclB9DNWrYBg5qZOJjjR7RzF4tLG/PYP7D9xmCkapXqphaMrkyGUCEV+AS7GuvGkn1z85ZtxVWReRTAKBQZibE/3Xv0MjZtlt3deoFutEZq2bqtKYDww+zoWAJ79uyR7wE/uPaJ7kwfzDfFspcfyVatLK5CzKEwYj2zFvt38iU7WvPYPxqKFH5POnexCMpUOpCXMbg0XBn5DNASQIscrZ+cAn3tWks/RusIv1+MNaI7BBU81vEYufPkx4T/sXcW0FEkWxjGJciiiy/u7rK4u7s83J2FxR2Cu7u7uzvB3d2DOwlJsP+d/870pDMEFlhmidw6J5mW6urqr7ur6krdnmIRLKntN/oc1pOacQ6WjEGL+Vo5eKD2uGMni9WD/R/vDxOtoLyHfOaZOKhvYJ3wygh15kmzDOFOi4g5TKgcxMHC+o1o2doyr4rbqPVsaGK+Y+dOcTUz3icK1Gx/2rRuI/ebWncjMTrOwIHOskrXFbobmhP7KL5BHIxyEi8ThXPeY7p+MRltFgdkf3XoJINT4z5JBtM/WsbixY0vQis3U6jgfTGiVbq4uIjLiukQH4ucE8o2q2pVb6ukkYHjAEPA6tK5q+R7/tziImdEADRcicuXq4gSxUsYh9p+Fy9bIW2psYHjG078NxLf1ZFjxvt4Vox9tNjFjhlHgrAY2/jLeQJUEnD+BhOtzLQYGqlshcrYs9cSUYwhtmtYLUAbNm4WgcOYSE+Fa5asOXy1gPCeUFDnRHYmHpsyZRrxSDDuN/snvv90MWeaPNmnAELrlzGopjWa837s0/qNm7F2nffcWOPeG/no1TFn3kJj9bNfWh44F8o+0d2Zgqhh9aaQUrZ8JZsAwjlDtMj5lui637xlG1vbzrYwQYJEn33ugM8v57yYExVEIpju2SubKQDS/c4+3bv/QLjRksB60GPDsPIzL719DO8N87FUyLRs3Q483j7RUmxYvuz3TZ0+C9NnzLbfLK6+fEeNd8+cgUpMzh8zJ8PDgO8xx1NG4Au2/bzHTEb/y2Uqpml5+t5wyOZzfu/ydwsgnNnPlyhN6nRiMqTUyT9qhjgxjn5wHOxx8iC1eUycp5ErV26R3KihYyNErSoHIXxYKPHeuXsX1WvUFO02Xzpq+1KnSgNG8qF/M/OmTJFatGR88KnNSpc2g7hkUSDiAIJ+zkwUVjgxnS5dbEAoDNWr31jmlVBzxQaZ2lAzfB7HOrEhuXL1qlwjNWTGhwM5wD937sJn0XDkhNZ/FGio6aZ5ja4ORoPA3Tt27sIffyQUrTDX+bDT/5MRt2hqJ7uOf3eRDvPAocNi4uakLpqk6W7Gjp5aZnZAbFTYCWzeskU06RxEc8BvDFBYPpnRl53zTMwT6qnp4iRPCmrkRp9+Rkfo2KmzDCooDNAKw3jfTJxcx3kWFGJYXriw4UANEgdt1ATzHhkRX/oPHITixUuKv/y+/QfRoGFTHDx4WF5eNn7GIEsKBkQbw0lajFLCzohaNPp8cwJ8hUrVJAIVBwYMh8yGgoNxJk4unTlzjviF8jkwhFAKfcOGjxaLAl0cOE/D7CrBia4ctNM9gqZ1Wu0oMNNFgcIv04RJU2TCP5dpNaPFhANPe+GO4QYLFiwsUXEoGPE7HEmTpRDLCDvt2rVqg372NEGzfA5AKeQxXnnokKHFZYTnoCWhceMmuHbjps3iIxUBcOrMWURwCi/XTksFhTXO+aEQmiZ1WlSpWl0GAJy7Qz4hgwaX54zn5PPNyaHcTqHf0J4bZXNQxlCW1avVELcnDpYSJkzsQ9tr5GUUrcyZs4AuaeTFuVVFi5WU+jCiDucH8DnlfaCwET9+IhEkOdjiHJxx4yaI77FRHsMfsj0wJ7ob0I+Z7KgZZlQb+1S0SHG0bNFKnld2FIx2Yh9Nh8fMmjUb0aNGl2vhPApaVo3BNfPzmeWgx/z+16vfUOZS0bJkJGqc0qfLIGZ9tm/0keVcDcNETQssTftPnjyTjoZukxSq6Y5Hf/9BzoOlE50xYyboVnn7rk+rK8/P54gD06nTpttcrGhhpAsX2xJOmB0xcqytQ6LfPes0bZrF15j+6JyEycmpbId5bkajsU+0HhQtWkLuBduyiRMmIknipFJfCicUpvk8GS5Z5uPpAla5chVx67l+8xbKli1nsei8ey8CNa0Ng62WYk6Y51wdcqdGc89eF5lLZi6Py/TpLlu6nLjj7t6zHyVLlJT22hhwUdPIUO+ci8PJwwniJ8KqVWukHaLihu6F1JCyj+H5p06ZKt/NaN6qnXTQFDKoCDpx8iRevXGT4BjUDpIR3U0NiypZ0KpFzSK1f/SXplWS7QYVBnTvomXGuOd8fmhdY9tiRJUxro33IEKESFi5cjUYmTFPnnwSQINtJwde1DBSk81ElxLeR87roMKFgQ/IhPeXbS/bCFpM2e6bE63YbHfWrFknGnQOFpIkTSHPMwNuDBw42BaOllbxpSZLBec9cJB87doNuR6en64Y7CMZDSpt2gzo1r0HGKjh6rVrMq+JZ2/Xth2yZcluURYB6N6jj7xPVGJQecXE6wjnFEEGveb6mpfJi1b/ShWriCslvyVDAY/vDPt0DnoZRpVR4+hqa58okHGiN9s0tkXsyxm4hO5F7dp3tCkZGUCCefhdiOs3b0ugCK7TEsJENyCGQDUS3S7p1sznqGK58sifN58ofDjB2dxGsJ6ck2kMlI3j+cs5EKnpj2/n7UHFKwXp4cNGiGsarU60gF+7dk0+Csr5U4Y7ON2VU6VIhfsPHggP3i+6SfOZZXAKXgPdZYwxiXF+uhgyyAzdQA33tiyZsorrD/NQqcHolJyH6eJyUCz+bF8zps8o/RoFK7bHdJ3ic07vCMMawuPZ19Nr
gG00w9bSm2P9xk3YsdNnO8Nnk6Hdv5RoNTVbIY18dL1LmiQZXA4cEIGNLnMpU1rcCHk9BfIXFPdwszsXj+Uzx2eIAh3nfxjvSp3adWVOCZWpHFeSX/58BdCwYSMJSsP3hBbb9Okzyhwk9u/sHxnkKGuWbPJccWzE+0xFRuvWbW2CtlFn/nK8RWUo+18+i/TS4TtHF3LWm3Wi1YpKDuMjnjyGynJGbKNAzneP4wcqZzn2ZJ/D957CnG+J47+KlavJXEv2SRQueC4eS6UXBSNeLwNKbNy8TdpIzo+hUpb1YxozeowEFaEShccyoi2Dk7DtbdW6vQiwrBcVCd8SldK3en7rtu8WQGgC5gQaugDRL5QRZerVbyQmM0Zv4iC8Vas2opVyc7eEtuRk3N+jx5DBFxt8Ttzk/IdOnbti8tTpokWnD1/5chXA+MRHjp2wWDSyZhe/dUYOoR9yrpy5RFJ2dX2A8xcvitaME6z4YnGAZiRGMOIH8jjQMW4k3VTKlKkgE2mNmPZGfuOX/vAsq2ixEtJYmcMVPn36XCwwDMv6pURTG/3Jad7NX6AQVltdl2RyYuOmyJgho4SzpEWD7mDUrtIVgYlcqM3n/AC+qEa3wwaW/p2MCmZ2UaDvIwd95Ej3EcNtxKgbfctZXhYZODazxVZnY8Jz5syVB/UbNLG5pfB7BoxWRX9OCiNGhDNq0LiNJtG58xeJCxsHeOz8aVLlYG78BAsTuvVwIja1m0wUHizzeoqJidKom/FLLR0FVt5bulVxgqmRGLOdAxDGaecLzsbRsBpwgEONCRtJ8Q9t0178gtlZ9OvvLM8fJ+LRikMN4XGrdpll0zWCJupatevi3j2LPyx9K6kB5KCa81gMQW7hwsWgNtQc8cmoH59zPrNdunTH5as3ZK4T3Y2oZWH0FM6d4YRxDgY5aOHEUkZHoxm+das24trGsqiJp+Z8zbr1nzVyjJCSNXNWeQfo2kYhle4n9F/NkjmrDPI4yZIRh8qVKYcihYvI4G/Hrr0yIC5VsjT4R60JJ6rbJw5eOPDiwJONOIV83xJN7hQyGPWGfrwcAFLgZaKbC2Oqc1C0cvVaEXwp5NK/eNiI0TJHjO8Uw9UaiY02Y9ebQxVTC8gQjsxLv1bfrJOcBMxOg65Cs+cukHrQldM+8WNifDYZbYnP1cBBQ2wdMydPpkyRSqKuPDJ9VZgaQpbLIBDmRGUG72XOXLlBf2pzovmfUeEYgIODeLrIcWDD+Q19+/aXAQAjmtCKS4FxpNXFwSiDwSS6du+FnDlySQANnstIdEuhgoUWx23bvV3Y+K6lTp1W2l3DpM5vEVC4oBadkarMgyaWx4673wBneR6rV68Bl4OHxeUndarUYqk6ceqMzAegm5M57LNRF3aqnN/E62Q0KD57tD6xDdq6dbsoJDjvzdC4UXHDgTv9n48f9/3L6+w8GXWKlqO58xciR7acIgRROcTEdofubxzEU3jk/AEO3hi5jy611N7Twsr+iANpWqs4N5HuVmwnef7Vay2RlGSA2qETRo4aKxZduvkY8f95LrZltHbSd96wrHA7Az+wbA7uGCDESCJwV6spwpexjb+UFagkYZvMyGnUKLMv5FxCWpI5kKIVjwJ940ZNkCpVapvfPeei8Lj27Tuhb39nmcdCRkY/YJyH65z/xL7XeEfWbdgkAwyGBWffZ/QFbCt4XiZaMRn5L1mSZDKf58ChIyhTqozMU2EbzETrJOd6MYId7yETlSZ0hc6YPhPmW+eIUWDiJH4+U+Y+csnS5bY5UXKwL/9oEedzyuhl5neXChi6+xYqUAjFipeUwbQvh8smvndsc6pWrSGKHSoCxFpjEtYGOlu+mcS+jlHmhg8babtfrvfvi4WAAj4To0TRmyJnthzIkS0HGEWJ993ekkMhiYKRveafZfD542Ry3xI13VmzZpf2e8q0GWjUsIkoaOlmV7hQYVGqcG4GJ2NnzJBJnlOWQ2GU7TMHj0OHj5R2kRZ5Q5PNPPxYJd2sMmXIjMaNm9usM5yLxEE0E58ttu98b+gCzKAVdEmiUMm+kX0DI0KxDd+9d59cIyM/GpGnOIZim0llVYe/OonbNT8pYG6vpC5e73womuTk1n8cOI8YPc7WDpv30W0tebLkImSxLnxP+Ldm7XowzC6jyXFcROWKOTF4Bdt35qUF58lT75DljPjGa2rctLn0WRS0qbSgopr9aN68+aV98Xpv8Q5gwBzOzSlbppzkp6DbpFkL6W84b9S3xKirnFdDhUSDRk0sY78ixaVP5BiCln7WjXWk8paJc/E4nsqXJ598n+b02XPg+JZjObZZtGIwcAYFgy8lae+LlZRxKkP+Go89PyrM6HxsByjY8zkVj5CiJWSqA9sGKq+bNW8h/REVakynTp0RzxWx6C+zWPTZjjZt3losvl+qx8/Y/t0CCH1PDTcFamQZf57aVFoPjMSXlA8vB7tM1DCwgzZHDaF5k5I3E030fNk4UKf2m3HCeTwno/KPy5TE6FLBBowdBs3p1HrwWJ7fnGh5YMNIc6NZk0IfT0rEvjUgPJ7Xxuuh5Gs04ka5HCxRq8XzfSmx3rxGSuLUJBqaeT6M1P6w/hwMUJvO+rHxoBRvJHYujJxgtpxwH+tCLRr5mBM1Idzu2zckKMGyruTOc5gHeyyDkjbraAy2jXLZMVMYMieus16GhsHYx/vH6CXma+C6mRFN0ObO3jjW/Ms6+KZV4nNmf2/ZKdCaxEnoNN1SiidzxsDnc0YtGbWmjJPNXz5XHKwazyzPS4GA33SxT2TC58ucuG5v/TD203LF/cL6Nb+R8kEm/zF2Oy06/DPK473lNX6yugWZv5/AZ9S3c/B6jMaFy3z++Gd2/+B7YbjPsV48H7Ut1NZZTyXL5vfAqD9/+QzevXvXJnCa9xnLtBbRlEwNCp8Zc1lc5h81Z8b7zOPYHhj5uN2sSWGAhhcvfX+PfHsOjHrwlxN9DYsbn0c+A/bJ2XkQOnexuL4Ic1MG451g3ezfJ2azH7wbh/L5f//B53vBfdRw8n4bic+bocXn+8/7QQ0Xz2Xmw/xsx/jucB/rZf8M8B7au7n6rL+3Sx6fCT7XxrmN+vCX75dxHl432w3Wie81f1l/1oFtk/ndNZfB94ptFtsaKic4CDLaabbZvC+GlYDHkbu5TzCXZSyzTNaN7fHbt55SFz7n5sRyDKsUfd1573luDr5ZX9bfcn43eHp6f8eHxxmJwi19+6mUYTthHjQbefjL9998L8mM7zCZGYMxI7+5zTO2Gb+0xJrvG+8LubINYH3ZJrKdtLTN3pG0qMCxb++MMs2/bBPsteDs6/iMmhPbBaNteOvx1nYNfC55LrYPrI/xnvJYsiE7o+8SDS3vvbUfMcrn9RjzUOQ4Ly8JvGIIRUa+L/2yrizbSHyW+K6wj+Q9MrcXRh7zL/PxWSBnvie+JfYL9goFIx8FaWqMqS1mMlyTjP3f87ts+QqZ32NYg3w7lhp2tg1GstT7qbTvfL753BljHvM4yRgT2I9HjHL4/jA/y+e9tm9DmI/fNKKygm295Zl4Lu8q33/
PDkHcCbiafz9NlzOtbato0Ue1wHz9Qqoq3KbXwfOyELAjsFosc6Vzeknqx2VvamJkxo/TBhHkCLFi9RuYH5Wb58JQEOaSf6GhnSDm+Hh+PRvPfgU+WxnTp11lOAy8KbsmfNLm3bddCK8BzAmATc1hByADIHY3zRkuUydNgIC779j+++kx49ewlyx+Wr18wpyW4XL10prGMlvX1SPKjOXnoiCgjwAixfKAYoEsHBtSTeuVhMnz5TXk2V2gpEAwJQonhJ4ZwtW3fIW5kyi3eJkrJ0+QqBYZcq5asC8+kzZ6VuSKgye1xXCIHto7qoQM8A4Rq7d+8RLFI5c+SWdKnTypBhI7SQXdmy5RSO4WkkLF22XIKDa0vinLkCw+d+CMNguH1K+UqDBo1kyrSZ0jOut9bHYMGuU6eevPvuLKGkfIXylQSm9MP3P4h/jUBVBiha5ONTWiioBJUp5avWztFjJyiTQug1ebxhelhCwTtDO3bsVPgHEIuxY8cpg795M2khLPqxfPnKOjnaR0ZJn779NZ8yTAhhA2bMtZs2ba7CH9dGSCtQsIhCXIwQAQOi4Ni8ufMltH5Dhc/RdvnKVfpcvmX8ZN2GTTJgwCBVuugbd+I9C+QrqEI4VlUmrVp3h42QseMmaHXP2bMT9V0QSgL8A1UJ5ToTJk7Wtq3btJMjR44pI+3UqaveggJxKFngwMMaNVU8PgdqVA+QvzzzF2W4S5YsVQhIXK/eeg649m7de+p3p7Blrlx5LYtqbFy8PPPMs0KV1YSEkVpRtldcvJ6/avUaVcqiY3rqOTAkijaVLl1WUr2cSlasXitLlizTwodGEUMo7di5m8YCjBw1RgIDa6ryieBup0tXrsnrGV6X19O/JivXrJc5cxIlfdoMlqv5wsXL0qffIBXMBg8eqhb4zZu36iU++eRT6RrdU2uhIBzUq1dfxzeLLsQiy/VY1Khb8Nxfn9MYn23bduqCg9UZYr5s2bZTvQxLlyyT0Abh8uHN27Lnvb3y4vMvSgkvb1mzbqMWTixS2EvnEgtJhw5RUr5cBV1wWPBZFLCKf+UstkUfDBw8TBYvXiJTps6QLFmyWpZi3mvt+k0qgLFIto/q7LHY3507d8WvbHmNuWCMgrlHUeRdDfRIX8L5H5DF4SPHWEYGc4y5ybgIaxiu/WH2m23i3AVqGEkuoL9nzzhpFN7YNH+s7aHDR3Uhci9M2K//IBk9ZnySa/z47x81nmf/gYPCgpU1a3ZhTtgJAwVwMUPfff8vKVzYSxdmsw+LOzzpgTM+xOxncWcsnT591uySkSNGySsvvWLFpDD3zfw/evyE5M9fKAkMYNSYCXL/wadaGJZn+dwZB2Nd1PkHQZhA3vZ4ME5Mmz5DlWBzDjyP4pwIbMkRsKu42F4C9O/KlasqeMJL4al2AibZf+BQj0YRe7vNm7dI9uy5dFyPHDlaqlbzVwgK68nfnv2r5M9bQHbt2Sf9+vaX8uUqWoYcvPjdonsKPGTM2PGSPVsO2brVVcDhPijwwYHBCpXbvHWHdOvWXQKDaul1mD+lSpWxeN22bdu1WN2uPe+p0pkh3WuS5e0sur5ghGGdbNO2vWzesl2mT5+hbekDqH//gVKoUFGZmzhPeTqCIMSaBa9t0CBMYx34Fnj+PBEVh5mD8+Yt0P4oVbqsbNy4SZsy/yhwt2vPfvW2wRdINoHRBd49fuJkNdJ17OgQ2Fj/MBosXbJUlQKMdxA1vHieyMiOwtwICAhSIyDH8I5lz5ZT+Yk2FpG+/QbK6NFjlWew9k6YMNEcsrZ73tsnNWvXU8Vj8KDBUqFCZeXNGLS4V5kyZWX12g26RpXxLWcFDVMYsLiXt5gaCxhK+g0YIsuXr5TobjGSL29+nRPw+SKFisiLzz0vEyZP04KDPj6lZKtz/bYeRESVO/hL7Vp1FJK5bMUqKV68pKD0U2Rt8tQZqkAwBpYvXyGjR42R8MbN5KuvvlZlM/PbWdQ7SowhvJ4+RIklkcaqNevl6NFjWosLqCRKXf8BgyXNq2nUkMg6i3GiYVhjy4DYMzZexxdKGYYevheEUTVHjtxqOBo3YbLEx/eR8eMnSLWq1WTKlKnKVydNnqr9By+CItq0k1dfTa0eb5Qp5JkePeP0GMZi4JYYPVhvUBQqVqgsa9a6xkX9oGtEW70ucgWyWfnyFSXMyVc///wL9V4Sh7Z40RJdX1u0aKX34D+uP2GCow5czx6xkipVGsGjC3362WcyaNAQIf6M4op8e4T3A4eOypAhQyUkXonrAAAgAElEQVRN6nQa60fb9Ru3qEFt08ZNem/qq0BlSpeVZ595VsaMmyirV6+V3DlzS8eoTlpHbPSYceLt7aPjkbV2246deo9RI0fruTdv3pJWEe1UVty4aYsqYjE9YvUYisnGzdvVw4wCReX5RxXTJi6JtREq7lVC2rRuq3//2f97IgoI1oCixYrrwkeHrlm9Vhc5BvHfv/pKKlWupoOIY0w8KiECpUDbr1athtSrG2p9h3cyvyMjRozS3wg73iVLC4LpP/7xTxnlXODxInDeuPETdUEYNWqMCnzbt+/U83r16iMhIQ+vaS7Os2A9hsEa8vOrIK1aReh1GJAwCLt1EEVngzMYkUWxcKEi+jxffPGF2CdSKZ9Syiy4bt++/STzW5ktLXfg4KES09MBe+BdsEbBrCCsHCg2hijOaGAmZh/bufPmW5bqxLnzpH7DRvL1198KQaNFChezLDiHDx/RSYfAgSVo0OBhFgyFYE+v4j6WRY4F4pWXU6ni89lnn0ulipWU2RuMaAnvUiok2p/D/I3ljYBpCMXnrbcyq7Jljg8ePEQQBqCI1m2krK+f/k3bNKnT6kLPjg0bN6qVg7+vXb/u8ODcu6/BWqYS+ciRY6REiZLy2WeOQGIUnlw5c6vVguJVhmlyjdGjx0jRosV1Ebp46ZKUK1dRCEQ2xILnHxBsfqo1sXKV6lr9mD4bN3a8FCvqJXjboMj2HQWPEdSv3wC1nOoPERVu+vUf6AJ94RhjvHVEG6lYoZKOW/aBr0eR+r9//EOaNGkhFII01KF9B7U8s3CiOHTpFiN4UCCsaM1aRFhZ0/AwYLWDsNC+lelty9INtAmL/oULF1XoA/N/+fJlTQ5Bu5YtWgltqlapqgYDg5XmWbE0QoxLhEJD4HN9fcvpc7OvYXgTwZILoZRky5ZDpk+brr/xBg4fMVKfFQGLBWPr1u16zP4f51WsUFEND3j9AhHoylcSu3XQ3v7s2XPaB3asrjnOfd7O9LZkfO0NiWzXXj2t5thPKSDEMSD8Py5h/U8YMVoFEfdzUEDGeAjGRxCZnbjAaj55ynSNj7DH1Fy/fkPxxZ27dJN9+/bLxImTVfi0B2RrscvBCSrsWBcTETwDCJlYog0heGPdM0qrXQG5dfuOziUUckOHjhyTrdsfFlyD506e4mq1Nm1RQBgrnhSQpUuXS9UqePR2yZYtWyVn9pxSsWJlQUhJjlAC8+TOox7gWjXrSJWqNSQ5KBtCL33zKJo1e44MdMLw8CK+kyWbeqvg/b6lfdUYYjyW8FCE
XYi4JOPx+OyLL1RoXLUqKe4fb5pvmbJSp3aInof30at4Sas/MLDY+REeODz08OJOHTtLtqzZ1eDFyU2aNJNyfhX0OvxXG+vvase879y5q3XN+PjeFn/AAIEXbdCgoXoegk2duvXl44/vWdcxfwAjw/JsCMX0tfSvC++3adNmKVrUyxL2aMNzI2hCeFLhGfBTqGvXaF3X+ZtCrX5ly+l+rpM+bXpJTJynv1HasFrj5eEfnlliRCGESAR3Q3jsjaHS7GPLGoFnFyKxAYY3xi3KNzy1QvmKYowKKFV4haFJEyfr+sHfjPka/kGy2AafRIkoUriotu3du4+OT8NT4uL7qPCvB93+69u3vzWmKIjr5VVCwho2sgqqMYZmznQELuMh53scPHREUR5Yx4MCa1pXROk6e/asxtXRFwYJgYAL/0fp4LvMmOGIh8PA+s472TWxA7BT+CTB9hDzO2PGTKpUkjijTkio4H2g3yHaY2Q0hILpHxAkVOGGiMGAT0x1rm/w0hrV/fUY5xYsUFiQCyCMBMxtPPR2op+Z94UKFbHkFsZklsxZ1cjMe+CZNwRfyJ4jl2UcAR2DDGSoa9fullIK9PL2Rx/robt37+mYi7Txa4yu/fr1V089iVuQN+7dfyD1QurJyy++ol4VjD28xzVn8hDGVv78BfWaGJ2Da4ZYmQZPnTmr69m8uY6xPHHyNFXozLMxX1njoOUrVkrr1u2EQPoz586rIRDjb3I0d/5iwcMKbd6yVSpVqmoZ9ZI758+w/4koICdPnhb/AFycDisc2i446S+/+lotW6VL+1q4Yj5Yq9btrN/8bVxsaJxopCYjEVlWyles4gLD4uNR6TNHjpxWJes1a9aIt3cp+f4Hx0LXpUu0NG6cFMcO1MWntJ+cv3DJ+rZor2bSrd+wSXrF97OO8QfeEsMMEhPnJlFs3tu7T4NUWRQ6OwOUaIeSYQgYFhPEULt27WWhEx4xZtykZAUv095sYfZYgrGyhdQNtayatUNCLeUEnHS1av5WoNmKlastBWTqlKkuwjfXZVK1jmittwCiRPVUiMmJJ4nsOe707bff6gTiO0P0Z/ESPhYu3LQHikZWHj9fP4VrsR84Sr68BeSME8u/cNFiCQqubcGcWjRvJa8Q2GyDADAOcMcbwtJerWp1WbNug3pZsIQbwppevkJlS/giGJasPYZQDh2T3+FqRkgJCKxlMc8VK1dJeHgT01xatmqt3hN2rFmzVipUrGIdC60fnmxw4qhRoyXCtvijGBYuVFTGjJ9see/MhbDUFSrsZcVQ4WWBURpCAUORhJhjKPzQ6dNnpGTJ0lZ2I5QXICRYClGWsRZ16tJNLZy4trFAQcDHgFEYmj0nUQOk+Y0FuH5oA3NILc5VqlQXLNAIwbzzkSNH9Tiwv2rVA2T6dIcCglAWHFxTA/WaNWsprVpGqFJpXcz5x0d37kiFCpVUYUWpJOvT9h07BT5BILk74SmrF9pQgDkYwriBJdPQoAGDNDgYuIqhxLnz1brIs3sivDvEWzwuHT32vuDx80RU0B49NqkHhPbtO3SSqVNn6FzCw4RX4IMbrl7Ow0ePafB57z79pGSJklr92H4fFGL1ADgFZnNsypRpCiEBfmYIJTrVK6/KB85Adgw2CAoQi2ChgkVkvs0LA8aduY8gB5SCQHLGiydi7gNJMF5BextgJiW9S0mTpi1UoWjapJnlScGyP278BBk/fqIK+kYJgE8PHjREg+exeJINDQETGBhWSTsB7cWa+1OE92bbjl3SIaqzZHk7swU9atgwXI0InE86UpQdlAzGEl6/D51xPShzvCMeUHfCgAD/MEYJPPllyvjJPiekFgtpfG8HVAzPHYH6WKehsWPGuRgwUIAjIx2eBAQ9PKogCAwhHG/fuUf7g7nxz+9/UGMHHgAD84FPlKtQ2VpPzblsq9cIcJlPJDRgXb565apCd1EUMHAYYgzDLw3hvbAnEdiydZtas/PmzS/BQQ6h+sKly3rN06dP62lLli5Tfm4yyWERb9HKsb4gaKIwPw4hcPJu9FE5v/IKLeK8pqxRMQ/XKAxM8+c7FPxDBw8pWoF2jFPWyZMnH8I8589fKG9mfFMNOPA5DB+G2rTrkCzEsWu37tKpk0Mxo32DsMYKozbnsj167JiMGTtBvy8ePIxAEPx35CgH7AihHsitMYgBN8ZI06RJUxWcaY9HuHadUIXC6QVE9JwB/QcIEMJ6oWEuRhZgViiWUN/+gxXyZc47ceKk1PAPFHsGQNYzI19hxQ8MrmM9K8o9qBMIQ0O5cpUU9sxvPH3G+m+ub7bnz1+Q4KDa8g+nEeSrr79VTxjZUfleMbb4GcYb3iIQCBAKSIItBozsefAP5g7Jc/DiGUJWmTDe4S1h36SJk9RzPO3dRAmpW095TsdOXaV5sxYS5YT9HzxwUBVRwx/v3run8C3G1+Ur16Ry1RoCFA7iuTFIMzagkHoNFD2hP9R7N0CA5kPMi9KlfKVjpy7qgWxQv4HMdCqNpr3Znr9wQcfl0GEJsnjJUoVjv/D8Sy5z3bT9s22fiAKCZs1ANkwYeAaeg2++/T/VwoG1GA30wSefSZduPcTAjJo2b6WeDDqewYZAtchp0YABVasWoIsT+eBjY+OVoQMNIRtJgnPiISRjrUdohlBAcJu6E9aJN998W4Udc4wAKPChEHEkg4a4Yl/B1yLUsVBgrTVWMSbH4MEPvRdYtxj4EAIx7koz6KNjYiXC5nJr0TJC01TSFlwzMSF2gmEYgYH9LNZYKFkwEKaxYAQE1dJTyDASXCtECK6GNm3aojhIg3tftHiZDBjosJbNnj1H0qd/zQWfiasUdy2EpcIIpriDS3j7CJAnd8LCj+VunfOeZ86elyJFvOSgLTCTZ4RRM9HxiOEKhVhUc+fOp65nfjMhiWtBmDXEO+TOlVctDPQhwjQwOEMIpCzCYES7d+/hYnHEpV2xUlX5+uuvVakBk332nAOjzflxcb3UlWpEV+IAagTUFNIVQ6RitVtssHiMGOFwyXIcCAGQMix3eAqAXXii+Ph4ad68pXUIxurnV14QYn19/SwrDw0YdzUCgjWdKb+79+gl22wWaYQchBOsgnZIHFZoLy9vS3nCcwUO+fSZc2qxG+W0XloP4fwjLCzcsnSyi4UisoNDEEfAsyvvpCJFmWY8MtZ4jkOHHDhmvhn4YKxBEPhbvrc72ccyxz759FP1tB0/fsKlaWjdUHknc1aXffwAlgUsDQuVISA1ifNchRngeCwgKEoQ1k9Pmal27d4reH+w1BOAfsNNGTD3QCg0z86cZJwnR4OHJsikKQ5FzN6G2A+sdswrAlmxZvvXCJBeTiHV3pa/8fQSYwJu2U4IzAOHJFiLpTm2Zdt25SF2yBSB3CggkyYnVebwEGTOktVa2PGKArVA4EfQQrjr32+AQoyIC3EnPDfEgGBccCe8Z1gFPRExB8SuIZAQeIqyDBH7hhHITmvWrlXBzK6EczyqUxeNH7O3df/76tVravXHUIV3Aj5hoFR1atcVA91kDALPwiqv2WmqB1jKJZZoYkgQEN0
JZQ7L6wgnTOPqtQ/EW40vDtgIMWPGqg/vwvNtYryAjXCuoUaNmkgrp3COsoUFmzEJ8VzMZepFEVNSsmQpIaySNRII0GYn/h2oCR5ce1IEc/1SPqUlopXD+8A+1j/vkmWEGCy8ihhaWEsNkcSA2BS8L8TmwXOJNWT9GThoiM4thDXmVSWnIYY5lzdPfk2nzHWAOyG4GaV/7tz51pgAAuS+vjK/GPN2AnLDmgBf5H1Z9wy8hZgJIwTC/xEClzjlBWICgCtBPHORIsXUOm+uzf2BtaFwwufsCRMiO3QUrO+eCCGZdQaCbzQMb6xxg6YtBjjibFDgQSRUrRZgGU1Rguk7CKWMGBrez3ifUZDgByjuwJV4z8Cg2hoDaK4PaoMED0AaeX6zdnEcY+OkSQ6vYN9+g2T9hodjlhglYkXs/UuCkdVrHEouxgjWHdpB8AsD4eU3XniNx4rqpDCtw4cdhidtbPsPuY81wmTFpHaRj08ZNTbOmOma1ASeiuKP4Q8CWgjyxE4YbIjDPHLsfWtN5DhjEvieoR49eioUEagxSAVPtGjhYk2b7QxJlosXL2kCFCC0GDtQQEyMFt4dnnvpUofhgTE4LOFhggxgxX37OxKuEPs4dpwrfNAgE9yfg7W8caMmAlqDVL4o4uXK+rnIGe7n/Fl+PxEFhMC+smUraG54Oo58+QSU3n/wiS7gWI0NPIqJlDp1Wg3uJUi7Vas20q//AO1vsjbkzpnHcucuWbpcfEqWVpciisDzz71kCWDgdwcNdkxsFjDSYH7yqcM9iGUzKCDI4zfEwg404O9//0qPE/A73xnkNnvOXMVZ2y2rWF2xpPaO7+Ni9WOhA2v88ccOFyHWZgYZNHLkKIVqGSwqQU6NmziwmhzHFWwCL6dPmyF58xSwXJtAZBCGjeBDeyArvn4VLKgNaSoLFiys98Jah8V/gfMdiCUpXaqM0LcQ2PoePeNVGcKFDf7QuAoJhCYtHO8C4VXge0DcM3euPDJndtK0nSwu4HKZhDdu3tagzTy582o8i54soovVNCc0p2XLCMmTK48eIsgxXdr0sm+/I1APfClMGSIQHCgLhMu9SfMIDSA+sP+guooNNA0GZaACGzZukldTpREEHAgrkWFGLJ5YK2H4hoYMTZASJXysBAPgtOk/vCoQWOcqlaua5qp0DnBCOvbu3St4I8ZPmKixLp9+5lB4rca2PxIShitMDKsuBD52wkSHQIirO1fufEJqPwjrkrkHQgCxTnjj7MQCmDdfQcv7wTFqXaROlUamTZuhTREmQ+o6vBcsbKGhYXLOabFHUNq7zyEkNazfUK3cRkklHsdcY+q06aqAvLfvoC7+QAQQsL76yjFfiPMCBgHdvvOxQh+9ihbXgHnGWk8n1JDjO3bt0Qx52tj2H4ssMLet2x7CswhghGcEOxVrW3P9k+9mD4xFEcubt6BLznZw7CzIWKmhWXPmCjA0DCGGgHUOGDTM8hAilGZ8/U0h05Qh+NaKVWuthQmBjXoGZoEhoQJKnhGyOA/jyHCnUGquA/8iyNOdgNHBr3gWO2H9xAI92wlnsR9DgWXcG3iFOYYw1bptpGzY4KqwEJ+TOXNWjfkxbRHK33ori6YdZh/wMDyqx2zzw7TF4gvMhhTMdsIyyvzYvTtpggqgoQQh/xwKqVNPBg16mLkMHlW5chWFHNmhsAgJg4cOFwPLTO4effr0VYGD4yi4QPOw3EN4VkwQKoVHiR9AYIaIx0DAga598KFkyphJ29sVO46hOPmULGXN18uXr0qunHms9Q34FwIrhLGN1MRcm6QsCcMSdC7pQe4ZVFPIAgYB6YWnGgWEINsEpxeYd8qVI5cmMrn90UeSI3suC+6KQcyruLdcdPIvc222xAkAjTWxiQjCJHGBUMqI1wDiA/373//RNQihD9w9sOZ/fudIHELsGJb62bMcawEe1Jw5cul5rMsZM2S0rPrEm8DPv/7GMedmzZptwaJQov0Da1oBzfQtY95AfvWCItKsaQvl0fw+ePCQ5MtX0PJo+1f3t4x5GBry5ysgc5yQarwtKI7MCSguLl5K+pSxYETqzXYG+sfGxkmZ0r7mlopQQBH3RGSNi4rqKCdPndV1AkWahC2GmrdsbaVnBQb0+mtvWB4p+I/JoAVfQZHEiMg4GjZ8tLVOM+8xtGBExUuFpxAC3oRCwnpO7RwgfB2cySiQE5o2j7CMu3jO7QHYrJd4r+2F9WrXDrEgayBXSC1LshSIbHQGDYKi/f6JUzJ12gwBgoYRLDnCGMR3AD2g1xkxUkLq1pd//+ffKsukTp3OQlKwRmGYxMMBNW/RKkkWLBINlClTTuUK4ymlLV5ZgyxBPsEbb5Qn+o8MhYZmJ85XAwTGsvTpXtOYSo6Rrr2DE2aI9445h+cWJZJsfaAUTFwS75Mvf0FLPgqtV1/Spk2n0DUUqFp16ioEi+vCnxLnucb2sZ95Nnq0q4LFftYOjId45f7M9KsVEJhKeOOmkumNTGqZePDpZyrgvvLKq5rliM6FqVInBM8ERcRgfAj7YH3JA4+34ObtO0KmlxdfeEk6de4qH318T70dGdK/Jh06dNLBBN6e9J1k0sK1SV7+iNbtpHp1fyF2hFz49+5/onnlgTkY17f9AyN0sSCQQSeiTaRmauE4mSjQfvPlK6BZf8w5LMS169SV6TNmqWsdC8KFS5e0PgLWO4LepkydqVYe8KUdOnZWrOSbGTPJpi1b5fqNm1LWt5y88UYmzfpx4OARDerFFYvGjaKB8FK0WAmFGSE0eaq0iYCIQoDHok/fAQIjwT1N7YB8efNpxpdPPvtcsbqpUqWWOYnzNVgLCzXvNGGSA7rAxMUtW9KntNY8OOu06hJXQWwGWavufHxPrR+kRUWAB1fpTgTBg0fdumO3pgDlO/FMXB9iIYZZ8p1w7YObhnETOMdYAVP88b37KnASLzM0YYRmTYKJYHlr0DDcEtCw9oGlHTl6nAQEBuv72xctYn4KFCik1i8SCNCvFBrsHhMradOkl1at2lqC2/37n6ilnsXm62//T4DsAJ8jkBOhGzcvChLCLMFwb77xliYquHj5qqxfv0HHCEGqLNDEAJHVw64smn4iWJSgaizjKHkoH1hkDeGuJ7MaMAms9AiDXKffgEGqbOFNQCg3RPIE+sROPCOB6CgvLLwEMjKODQEPrFuvoVSoWFkVIDKmQfQ96WeBBoLRx6vD4gahuOTIkUuhFggIcb3i5eWXUwnpdiFgBMBBgDegLAIFJPMRWVgg3pfnwFMyY+ZsyyqmBxX/uk2Ca9bRIFCyiZGBBpw6lj2yCxFU7YnwlLHAGuLZ5hJcO3matG4TqVBJFj2svNCCRYs1qBdFh2xIBPsyXwsX8XKBt2GlxsNVt14DPQ7kkgxgwNxYhMkWhgeU2DAy7BGjhZX46LETVnYteBm4YjKIkUgDwoL94ouvSM3g2nLOCcfQA9xv5GjlV/CgnTt3y3mSeAQE6zhxt/qbc1D2jSXV7DPb+N79ZKpbJimOYRHGiodgjWLQunWkJmvgGIYLPC2+ZctJ9x6xloWZYyjz8L
W/PvsX7RPGJoQSSz+SgILvj0XZEMqXV7ESgiGCLHkmS5I57r5FuQBqQUwd8XyMAww11AEhQBrh3U5YPo03C4GndZv2HoUi5gSWb+bFxMnTFfpJsTPSGBfIX0DHO15LMqRlfSebehNQZvHQE6TMvBuaMFIDcrt1jXZRXnmehOGjNHCXLEMPPv1cZsycJfBbBCC8Q8Ba6HMgoyTzwMswdux4ef/kaY3Vw8BGzM3Z8xelcMHCqlicv3BZDQDp0mVQa//nX/5dExWQfalXfF+FX7K2MI7599abrLXdFeKMQsFax5g0vNfeb6yBBQsVkarVamj/OaAt/9SYlwwZXpchCSOsGB2CyOvWq6/ogoGDBqtBDWEQL8mwoQnaP0BOSHXNmoY3gvckBgsPGokxiOMhuJ2g3xOnzioPZG0gcQuEAMpYL1e+omaoQ6F1J83OV81fkx3E9eqraxyxDIy9/Hnz6xp1+85dWbVmrWRIl0E9ciT+wPBI2mfWErOGxsb11nsFBtVUXsG98KoQU/h6howaA0QCAmBTmd7KbGW6tD8ThTWzZc2hAiYeTRIZMP7JmgnpfCnurcJxvwGDVf7B88CYRlnF440hE+jTK6leVWjguQuXdBwwnvDEkcEKQqGFn4FaAMJbpWp1lyxnKC+MC+DRBKSbTIP0d968+TRe7/CRY3ot1kiegTpC8DlqI6VNm15hxfsPHtFspcT5oGAhOwC9JU33CmfsE5BFguAZA8hkGPaMJ0pv4PwPoyLJEHgXvFW0M/GaNCGOEBmE9RAPmIGfbt+xS9PSItPxPIZQijESkqDHTm3btFVvLbFKjCGC+A0xhwn+B/GAvIYBD/6NYSYoMFjRMngfSedtj2NB2QWdQZ0o5spbb76lvM/Ez8HLUAiRExh/zD9ThBQDE6EH8C88XXaDCc/FOPPyKqnrJ/E2dsLAVSB/QVXkiSu0eyLt7f7X//7VCghuULRQNGTw/QT7Acnit4Fd0Yngxnfu2uXiDgSvj8CDJR73IxYAMNNYubBuoFXi4rW7/kiTeuDAQbVGgivGuoUlGOs5VZFZLLkv2rvJKOLpIxLchoBmCOEM+M7775+0FjZg0yy66zdsFNIJkrqPyr0XL19WGBPMc/v2HQIGEusobm3c5Wzpizt37qhF49KlKwqdwNOANYIJyz+zsPMMKGm8Cxah5IiFF4s/Fl3ORbsmaPXqtWuK49R3v35daGfuhcscLCjPaIhvhkUVz5UhMKu0pc9J+8dzApfCTWms36YtW9y6HGdBAybA96ZvjPWZNlgf6DOCUFFiYCiMA7DI3A+XNM8KRtJeyXrXrj3aHyZQd+7cBZoikAxRMBS7h8o8EzEW9I2B32ApxgJEnATvZVzetAcHynvBoBmruGVpy3Px3fg2xCnwj785ZtIeMz63b9uh3jCwvDAtO1zKPA/BtQQDE9cElMMTg4GRURUceCGEAoJFkTnBWLRnPMJahdBkJyx+7aM6KZ597dp1VkYQexveD6/jlSsObwvHWODWrt+g9+E8k1banEd/mWrczD/6kO9kCBwv8TAXLlwQUlG7E/EcGzZsEvdMUbSjjxESYfAE1JLJjjECrO6naM7cBer2Zlz8598PgQg7d+3WcWG8PVyHb8r4gy8wR7gP4w9e5akqM9+Y+USclfEIch08kggYXAsesG37dsvaa56Xe+Fl49rH339fd/PdOY/xjpHGTnxfvgswplu3bmnCA6Bu9j62twcyMWz4qCTCsGkDfAehGT7lTrzLps2bdW6YMUwbPBvMOTxcZMUx8AmOwUcZA2SGw9JnvCDMVwLjWezpC/rMEO9PW8bLsePHLYiVOe6+BSLJ96AfOIcxwHcEEmk8TeYc7jNl2rsWbyQVKAs68WWeCB6GZ5P5Ay/j2+3es0d5G2sOXsZbt29r9jWe2fQL34k5RZzi/yUTNwRvZ46SMAOlm3WD56e/DL9i/QJGynVNWmw8HIwHng1LOLyMZ2HtIzEJ34nrsv4YoRwewFoHrwXawxjGIwC/Z42jHdeDlzLOzHdy7xPmL3wG2DBEO56X8cY98Rjw7Cj/eNAJFOZ7sEaMHT/JWrPpK74RvJV3sPNz5gBxSgjI9AfPRGD8+fMX5f0TJ/RdzXPxzlybNskRfcJcJDsaAinjlGeCPzvWqL9baxQ8+d69B3pfxgrvZBQQro/MgPEIgRTC2AKvOHfuguAJA46HN4H5wHu4E2sH3jT6j+ci3Twp4ZnjhhDM4aXMHeA+Ki/s3qPXpQ+AcDJW6HP+GSGY+Q/c0owdvBHUgQIehZfR0/MQhE5cpJ1PMS7hK9TssfMbDDWMO4i+ZHxdunxZ+S9yE2s44xDZgT5gvWN8QXgF+AbEe8HTeW4MM/Y0x7SDJ5BuGp7O2LHXI9ILEXt29rzOLbuQDmyQPuc57F4azqG4KePETkCDyRbJe9K/7sSc4nkxvJj+BIWBckw/MLdNv5tz+W0UCtAs9Ad9aI+bYf3gfvbUwuZ8nn3VqtVy+sxDXmiOkTKbecZYMWn+zTHGH+MW2Yx1Aw/kn5F+tQLyvx+HB3MAACAASURBVNxpWEbtbj3eFcZGhgv3AMn/5X74o9+NnPB4Mf5IQunDa2EWcfMsQHFYEN2JbFAGJ+5+7Jf8Pn7ilKUYm/MR9LAi/1wCyrPRmabw5577R7aHSUd3j1Vrv4kh+COf57e+N95aFFw8RAg+jyIUu4jWkZr9zJNH7lHnPq3HEBhJujBsxGgLesuzEmdEYUTS16bQk+kBhE9iUNwJAxwxfyn0+/QAHrSY2N5JjEK/z90f3gWUCd4qOxG/gRJiRx9w/OrV6z+reK39mp7+RoHEC+ZOQLk9Zd9zb2f/jfGGrG0p9HT2QIoC8ojvQqYDv3KVFEsK1IYAw67RPVxSFz7i9JRDT6AHcGMSGAcMyFNQ6BO4xWNdgoBrMtqwSBOLQEpeoHD79rvi+LkYXizcq8BLsCL/GgJ6B1QKbLadwJDCkKmxA7zBbvGzt3P/G0sWbnjgch86IVnubZ7235u3brdieJ72Z/01z8c3pgCWSbv5U9cCjrFx81bL+vdT7Z/24+C/jxx1DUTlmYnHISvc/4qi9TR8Bzx9wHuJGRg6ZJjCOvG4YgU31uSn4Tn/l58BKz3JKkilDvTOjpD4vd+bBCbERpLsZ8TIUUJ2vvg+/V0MATyTPnO37vLCCy8lSabzc58ZSBYwfZQfd88PiRxefukVadggPFlPsPv9vvzqK4Xbs0buP5h0nXZvn/L79++BFAXkJ/ocNyDZYQjEIpsN8JEU+v16AGEeKBPZq+wpKn+/J3C9ExZpMlkQGwQ21hPhFUFBGTBgoJVZx1O7x9lHrAlVag18wJwD3pvq6qQVpgq9Cbw0x5Pbgo2lL/HOEAydQik9kNIDKT1geoBieTHdewqJTlasSJpAwbRL2T75HgCmTIFbii+SGcodLvTk7/joKwJzBL6E4ZX1jmr37sQzU0iQ7H5kPPs1RC0U0rcDW3In1i3uQSyYyXbq3sb9NzBB6m1xHoHmKfT09UCKAvL0fZOUJ0rpgZQeSOmBlB5I6YGUHkjpg
ZQeSOmB/9keSFFA/mc/bcqLpfRASg+k9EBKD6T0QEoPpPRASg+k9MDT1wNPXAG5e+++lZLz6Xvd3+aJqBPwuETGIXsmiMc972lvR7CoO0zo93hmYh9MyjzuR2YVsp78VkT6UrJj/ZmI7DieMlr9nD4guNI9ePHnnJ/S9n+3B0j7/WvH1/9u7/z0mxG/QeFXslD9GoKPcq0UerI9QIrbx4UNJXdnEnAQv5dcHRzixTwVgU3uer/1flJd80y/hghEf1Tx119z7af5XDJ5khH2z0BPVAEh40DCiDFWGjaKe/WMjZMxY8dZaRlJ4Ql+nQqZpAelQOGIESM19dlPdfj06TNk6NAErST52eeOooNmy7mkUJs8dZpmavB0rZOnz2qaR0/H2Pef//xbjhw9/khBiZiQd2fN1nbmOgQIT546w2O6WtOGLYOqZ2xvca8wTJo7cvxTAIo0iI+iDz74UPOte2pDmjnyt4PJpCrxvHnzZcSIUUJxOdIN7t13QJ+ddrShVoc9laana7rvo489pTcmWLZX737uzX/T32Bky/r6KU7V3Ig4kSHDHlYvNfs9bUlH/HOUJt6dApBTpkxLcjlSKJP5inSPU6dOVwwvsSuXnBXW3U+gDTUjjNCAcE7tEvYvWLBQFi1aonEa06fPdKkf4n6dX/qbAF4K6v1UIC8FFcnTTrDhLyEyOXXq2Flz4X/prCfxS67j6ZwhQ4YJudjtRJHHmJiewjyxEwIAcTn21JnmOHFGJ20pZc3+5LZr167XuTNx0hSNw5k4cbKm3zTtSdkZ37uPUJTNThQ/ZT5S7NITUW+AVJKPSyh1AwYMEmrOmKKn5twPbtzQ6s0zPdxrxcrVWnPJtDVbgr6pBUG2m9+DyCwX16uP1Ampn2z9F56DNMHEJgwaNNTKBMe8XbN2vQwbNlzGjZ8go0aN1hSdJt01qb0JnLUbJ8w7IWST/vOnxr5p/zRuSfFpeBdjmvoGvybhxchRo7UWz2VnUdqn8Z3/G59p4cIlUq5CZdnpoXDnz3kfYvyo92NiUEkMYIqSsoZTS2T4iNE/55K/qC3ZPx+VgVAL6XboJPUbNtIU2L/oJs6TKOyXXKptT9c188HTsf+WfaSmp7DppEmOosV/9HOj+Jpx9ls8yxNTQMjP3T6qs9bA4EGpC0H1Z3I3U0Bw9Zr1WqCKHPIoIRRPS5Mqtbz0/AtatIzMJskRQh0F8igGRkVoKke/O3uuFj6kGBlETY/Ur6bRiq2vZ3jNpSAbVa4pSJQ6VWotDJbcfciFX6JEKVnuQQlgsZr57hytckpVWIrs7bEVfaMg2qQpFG1KWheB+1Evg0q+7gLBsOEjpUu3GC0O17lzVy1ON85ZBdXTcyJclSr1sIKrafPP777TokPNmrXQgokUWqLSLgUDqQBLBottO3aJd4mSkuXtLDJg0BBp0ripvPH6m1qMyVznUVtydVNQcZaH6uic13/AYF30H3WNJ3kMoTJP7nzStcvDAnWPe32Eb0/pcx91Pl6ejBnfEBRhd0L5pkorFYLJZIKw26Jlay10NtnGTMjBT+E/Cn5RqZfq4cwTLEaORaavZH47i1SvVkP69OmvhQ8pQHjjw4c5593v/Ut+8y1HjBr7k6cy3yiYRj2FX0LUbKG4I8XlvviF13C/LznXKVpH1h4UDkMTJk7WLGUtWrbS9MSmzsD9+/elW/ceQvHPiDbt5Natj8wpup07f7Hs2OkoKuZywMMP8sMTKEohx/j4vjJkSILyJuqeQIbXtGvXXpq3iJCZsxItQZEF1VEoq55WibdfnuKWvNPjCv/UWqB9x46dtRgciRFMql7em4JZTZs212Jxdn5CcdTAoFpaAXje/EX2R5CLl67I5KkzrboYLgd/gx8IMrwDmeUePPCseDFPe8b11lSaFDg7/v5JfRKUCNJiR0V1kv79B0pYw3AtBokSj7GlQVgjLSrYvkNnl3o6nDzj3UQtYvbfqoB8+49/yJWrSVN//5pPFBvXS6tAo9Sm0JPrAQpVUnB2zxP2yh84cMgqmIzRoIxvuSea9j25HoC/2auTu7cjEx8FXxuGNfZYN8O9/ZP6feL0Gasw7JO65i+9Dsldbt/5+BedTp0dCiPPmp34i85/0idRq2ryFM/Jdp7EvZ6YAkIFaiaCoaCgWtKhQ0fzUwvRtG7bQYuymJ1lfEpL6lde1WrhZp/7dvGSZapUNGgQJv/8/gfrMNboBg0baZpKmDHVJBPnztPMHc//5W9StEgxawKQI3/48FHy8gsvanVX6yJufyTOna/3Ir2pO31487ZMmDRFi85s3rxNXkv/mgoe9nYdO0eLqUJq38/fFEacNNVVcEUwy54tpxbkM+1ZALLnyCVjx00wu6wtFv+oTl0lQ/rXXTwwNAD6RtYHQzE9462sUVjXqUoNTZo0WYUP0y46urs8/9wLSa5njtu3V6/fkKxZs2sKVwQDTxTTo5dW/fV0zOyjiBxpbQ3B0JLTsk1hQdPWbA2cZ/HipdK2bXuz2+MWxfJfPz4sXIcVB2vS9p27LeHQnOip2BDHTHEwPHBY3j3RhUuXhGqrdiKNYZbMWa1UqmTOahXxMC85wmLD8KYuCnPT5hGWYE1V5HeyZJX69RvaL6t/o9Ta6VGWKfP8tEdJnjZ9ho5f9tstR/aCdLT99LPPhCrExnOAkoSya78Xrnb797Q/E3+TUpFFiSKV7pZ6Y7E2hZh4FgwUyY0HXPLFi5cUvA52OnT4iFSoWMWaSygJoaFhWuBp2dJlUrNmHW1OP+L5M7Rh0xaPhSTNcfctWV/ciYrPeNMo3linbn3rW9InVarU0EKdFAELqdtAT6VInZ9fBWtM7d1/QFM6uxf4cr+P/TfKLcW/DNEnmzY5lKDI9lFa3Zhj9CvWUQqXQVTtpaAdxfZKepeyMu1QQPbd2fO0IKy55uNuk5sznG8fJ/brme9LnYnuPeK0grj9OH9TgI+FmKrRdmKM0N/uNGv2XN01LGG4VnnmR1hYYxlvGytUU587b6H7qb/oN4UI3cuHMb7dC3vaL04/e6LkFHz6z15gEC8aXqN9+w96zHxn5wnGGMbzMG/NPKcQp/s3o3I9VawpimYv9PovZ6FRzmX+mmt6egf7Psa+nedyDIWPPjPk3ndmP1v35zPHON9e7NbsN1v3+lyPmlOML09FSeFvj/qG5l5suUZyUCPznFT/JouTnegL+3elf/lGdmKOcH13Yn/XrtESHt7Y+lZUaJ89Z5429XSOuYb7e5l11Bw3W/1Wtvov8OnBg4dpxkX3lO/u6YKRgaJj4iyDCF5wvoP93qw77gVHzb1/amv61bSjkCLGHhAm9pTRPHNy882ca7aKhvAwGPgmyc1NzqWf7Gsac4eq6Xjm3ecKbe1ETTHQQO4U0yNWU/2777f/pjbPP7/73r7L+pu+dV/HzUFkLftabb6/feyZscgxsuFhyLK/I9fy1K+/pF7QE1FANm7eIqPHTnDRjFEYXk31qhw9/rB+AR124KCjuA2fomxpX8n4WkbZf8BzjmY+TqEChSVd6jTyo+u30/4Ej79sxSqNqbC/PF6A
XDlyJfm4xYuVkJA6dc23cNnCuJevXCOvvPSyZM+aXW7edsX52ycZqVhzZMuhFUDtF9n93l4ZN2FKEqYBTGzAoKFaRdS0v3DpsryRMZMsdYOQcBwoxjPPPKtFxUx7trvf2ye79x6QiFatkwikWP4MMVgiO3SSOYkOhsR+MxkGDRosZcuWl6++dhSXQnHBozN8+KNhS7Rbu36TLFu+St584y2tJG7uZ9/iiWrZqo19l/7N9+nXb6AsWbpMdu56T7xLlhYs1lDivAVSooSP0H+GeN6x4yZKSZ8ymprPPmnwfNSsFSLDEkaoMEnVcYiKtniUgGYYmjR5qtSqXVfKlCkrVEWFuO8rL70iRQoXsZ6BidkzLl6KeZWQuF69XQQLCiNVrVpDvRUwB+Brnmjzlq3i7++qgGzesk3eeSe7JWBx7wL5C2klZXOND27ctKqxwlzDwpsKFnFDzZo2l7y585qf1havIv22Zu063UeqxMDAYIWdwPB5d6BhKL8I58BYoPdPnNbzcmbLIW0jo+TuPYdQ3blLNylbtpw0atzMStF7+dJlCQqupYoI5/boESely5TVschvFru4+L7i61tOVtv6XW/k/I9qzuGNmiiet1//QerV+fjuXT3aPSZWfEqWknHOPiWeqkKFKjLZA8yNBbtJ05YKweRvo7xwIQRLavQYIgd8pcrVtQItVZdDQx3Cf7MmzaRHTE9ttnLVGunRs5c55Rdt5y9YKDOcXpjDhw5LuXIVxK40d4jqJDt27tJKypWqVNd7bNiwUbK+k90aY1R4t8/fx3kQBHO/chUFAR5au36jwGf4B6QJ2JKhkaPGSvPmrfRnlSrV5PLly1pQs3ChogoBZK517tpdNm3eak55rC38d+iwEVo7olatOpanAUjpqNHjBB4XHd1DatepJzdvP/Q64akJqVtfBem58xdKbFwfrfTtftOvv/5WgmvW0TENbMwQC7n7Yr5p8zbrGiya3BfC+9vfqXC+t++AVvY213mc7dZt26VXr96K4e/cuZssWODwGqEA+AcES+06da0K7Vxv4qSp0qlLtFacRsBgMZ87d57CNletWquW6m7RPbRaNu0RFPCMValaQ71WVP02hMEpok2k5C9Q2PK6klb05RdfluJe3vLurDkqGAAJNAYm5iBzODAgyIpVa9KkhXpjGWMoMPBIn9K+ikww90IhrVOnniqgQAhz5cwtDeo3lGvXHR6RtevWi4+Pr45jc05y24WLlkjp0mUlKLi28LehVavX6HfftHmLegrdv6FpN3HyVClVpqzUq1ffRRFZscpxfg3/IBk9dpxpLnv37pNe8X2Uz8EDgoNrqxeM8VmrVohERXVWuCknUDGcdYj4AhAJIAXsc4U5AC+jHsr0Ge/qPRCchw4drhXLgfjixYQoXt2te08p61deSFlrCAUhNjZeeR4QIhRs5oSdUM6GDhsppUqV0crtnFMvtIGULlVGK6DTlr5j7QJaRzVtjKwY/jZs3KLfp2CBgurF5lpAoBcsWirUy6Dvp05zNXaae3fpGq3zyQi+1LKqWqW6GiIQ3uG78DRkh6JFiyvElHN5v1dfflVy58rtArVGGS5Txk8iItpYCifpdDGUQqyrlatUV9SGMc6yntYLbSjFi3trn5pns2+BebdtG2mlsWfMTJk6Q3lKs+YtdY0y8jype9O+mlpKl/FTWD/XYV61bhMp3j6lFW1jv7b73xjO4UfwmvdPODystAGVEBISKpUqVZWBg4ZacxZ45+ChCVqpHe8yiBQqyEMLFi2RtK+mkXx58ipvZJ0idfDqNevUCF63bn1td/X6BxLaIFz8/YOka7cYF6W6bbv2Qt255Ah5mTHOfan7ZKfde/Yq/4c3xcf3s8Y9bVDq8SLDT8aPdxi4+d65cuVWpIy5Tu8+/bXdwsXLdG0uUqiIDBqSYBkPGNOgkeJ79zWnyCeffqbfZFjCSA2BuP8gqaHOamz744koIAiAMGk7YZH821/+psJt9ao1tHiU/TiTxrdUGcmY4XU54FZx07RbunyF/PUvf5Eqlatag9scM1u74mH2hdQNVdiH+c32iy+/kjy58khoPccAsB/jbxanO3fva85rBHIjrLm3A2bF8Qzp0svHbsHnPAsTC43cTgxOmLG9inbv3v0kf76CQoAaxEJl6PZHH+s9Bg4cZHYpLGKyk6msWLFSXsuQMdmAQRS9qI5dZf78pJa+Pn36CgzcUGLiXL3Xli3bzC6PWwb9tBkO7DpCdocOUR7bHdh/QAVWu8JAw0NHjsmrr7yqBfqAE61ft0Hy5smnAgEeKmINYHgfOV2XsxPnC8LJp59+6oCSNG6q92Ogt20XpXm9jxw9Ju9kzqp50zm4aMkyyZw5my7M/FYGXidUTp8+I3PmJCojQcDnHpUrVdExQlDfP//5Txk+aqwqfECTWHyMlRyGhJB+8NBhQQjK+PobsmjhYn0W9/9wi1eqWFkWLVosW7ZsFWJtOkR1lO7RMZawjKWhefOW2ufdukbLg08/c7kMFj4skSwChkoULyEVK1Y2P60tcKDq1f2lW7do3bd//wG1rBOnBFwsbep0kvmtzLJk2UrZt2+/Ci0TJ05Sa9SUqdOkZs3a2jcorAjoWMdPnTolTZs0lw4dOuk1L168qAsjP4hPGDd+kio8WEC4z6Ily7Uw1PnzFyS4ZogGSloP6Pzj6tVrqrAjBF+8eFnHDkow/U6fZcmSTRY6+5T3p+Ck+xziUjNnzpK3Mr0ts+bMkyZNm0u1av4qcHNs2vR3pUCBQvLNN/+nd8VegSBx+PBhuXXrloSGNlSFjLF75MgR9eKMGjtBwNIzL1evXuPRqu58hWQ3BP+uW79RjwPpy5kjt+za/RDOBcNHOEKIQYAYP36ioMROnzFL5zyeY5P3nlz1V65cS/Ze7gcQkOBFwL0MEVPEwkTdIkNjx45XryW/8eBhPUUQYaGDEC5YIKG9e/fLkSPH9O+f+g+lhYX75s2byv8RbLF2UjgTIblOSD05ceKkFsw01YgZa/CmxYuXCGMmPLyJLqaerIy0ZSEjHqpAgcIKMXG3fppnZI4i2EAIKwgOU6ZMlXr1Gqj3DmEFAQ168MknAg/9qeQheI8wWAHdxfiC8L9y5WpVpHfsek+Yfwh64Y2byffff6+K/87dexXiVze0oRw8eEgQggsVLCLP/+05WbFyjQqSxYoVt6CqGEtY6L/44ktJGD7S8mrdvHlbeqqn44A+a/HiPrJ//0G5/dFHUta3rPbpnTsfq9JVrFgJ9fTwbngsixUtJk0aN9F35T/GqDFSTJg0VQ4eOqJeueYtWilUlDZ8QwRrFBHmYK2atRUajTcAQkhjPfkqGQ+ONhKROYlzJePrb8quXbvUuMY8Z34Ri7Ny9TpVgDp26iLVqvubU6wt33bchMkydfpMOXL0qAQEBEv7KAcf4vljevZSfrV3337Jnj2nxj5xcps2kToPEH6uXrsm9ZWX+cvJ0+fk8OEj4l3CxxKkMdIwZ5o0baFGIXgz/IdvfezYccmQIaNs3LhJ3ntvr/iUKiN3793Tds8+86zCyfcfPKw
GDGIxGBMIuidOnJLgWiFW5XAUfuKPUHYQVIsUKeaCDjEvfP36B6pIzHQqOrNnz5G33soi27Y7alYgAGO8uXv/gXqhfcuW11g8YJrA0PGAcA3GNvFoeMqnTZ8pO3fukiJFvfRccy+z3bVrt+TLW0BjoNh39tx5/X3mzFkB5ZHq5VSSOVNmlcnwlGbPnktWrVqj4xOYY6VKVeTcufPqealSuZrynhs3bkiF8pUso9nadRsEwxKEok7NqR07dso333yja0afvgPVO4JhJl++gro2mecz2x49YuX5517U8YjHBUgX8mRE63aq3AcH15L4+D7afPbsRCnuVULX3Hv37mkfNWsRIaBnkENZI1GQ4CfuRLHNMeMm6rfielWq+itfxiiD8oeh5+TJU1KrdohgVIeQTShuWKRwMZ1LxJ9VqlxV+4QEQ0ULF5PIdu312Yn9+eszf5HwsMbqGR83boLsO3BQQwJ4Nvhjtao1pEaNAOvR4JX0mSfCIBLeqKkQK4LhoUgRLwsST5ICFD+UVf5hiGhQ3/HMyDUYCoECrl27Top5eauySpw249MOZcbAvmHjZkXUUAOmcaMmcu3adV3nl61cI9c/+FC/X4eozhrHCs9fs36jGENq3nwFH7to9BNRQLp07ioMGHfiZYMCg+XZZ57RSQ8221h4fvjxRyntU1oVEAapJxo0cLCe17Bh+GPj+xCKqlUPTFKxEwUkd87cySogf3e6wv7+1TeS6uWXpVKFSvLVN0ndY8BDEhPn6XOV8PJ2UYywwDOYzpw54/I6CFlounbm3bRJM8maJZsuUDSGARol5M7de/J6hozSv/8A6zpYbOYtfGhNwrJjPAhWI+cfuIQ7dY6WBR4EZbTxrO9k03cgIDbTm5ktxuF+HfObBQkrLUGj0M5de5RxsN+dEDARTNyDP5n8bdu2k1Y2eBsCY69eDiZy6dIVKVigsAoLuGaBmPD+LAYNG4RJhnQZ5IMPb0n79lHqFjT3DWvYSIidgVAAifWBqUBYgy5ddhRPQsGpUKGyfPKpQ+GrW5eK5g6vz/oNG1VYnTVrjjKxUj6lxcurhFptsVhQjdxQhQqVZPJkzwFiZ89dkJLePhq/AbwuJKSeWgUYk+4U36u3ZM+WQ5776/Mu1n6Ux6bNWkj9+g3UctqqVWspXLiYxji5X4PfJC8wwiZCN32PAIw1q3VEGxXSjfcLQbR0aUf8EMpbM6dVnOugcJuAeJih+U64tfGqEB+A95JxaGjDxk1aaXbNmrWqeLyW4TW1KJrjZsvixqJnt2JTZddYR/kO450eECrfcy9P1Lt3H8maJascPnpcwBojvAYEBKkXBgEQgRclCiGIhA65c+dVRs214DG8u1ncN2zaKgZO1bVrdylUqJjExfdRxcTTvT3tow+nO6GNHNeFsmG4KlvAQQk2L1HcWwVojgMb9Q8IkrZtIvVy8xYsktFjHJ4uGH758pWlevVAy+rn6Z72fSz0/v4Bki5tessyz3EWmDx58suA/gNVCEQpbtHC4QHhOAIgcUUQVnbjuSIwu3z5Smqx3Llztx5/1H/cH08WHg2KtaLQYNUm/g/PHEI+RDIMBFpozdoNEt394VpB0oWAoFr6PbVBMv+hoGBYQgl1p1u3P1I4JbzZELGBpUr7qlEBRXfY8FHWmkA8XKHCRbXatCehxFyD7eEjR9Vjt3Xrdt3NHGN+xnTvobwpLraXrgWMhZ2796hhA4v1Dz/+RxVsTiLesV7dUOuyKKHwGBRWvJRDBg8VvGK9esWr5/vWR3d0Po6wBRVjWQW6ByF8jRw5Rv9mnuOVRDBjDYEoTDrKOa5+/PFf1hoD/8a6iRCEAF2rZi1JlzaD/PCvH+XLL79UT5XdAhwYVFOOHz+u17x9+6PHik/Cw20SqUydMk2KFfVShYaYHXj64sUO4w2Cr1nv9AYisnr1WrVCm98INMCeeavWrdsJ/MsQf2OF5Z0xVjC33z9xQg+jeGbLmsPyRBKHGhHRWo9t3rxV8uXNL/bx7VvGT2GJ8CnWdmjr1m2S6c23ZOmyFcpLg4NravypHsRrnzhPyperoN8Nz/dLL7wkYQ3CBMgsCjaJLaBTp06LX7kKsiuZIHRiNFHYIXh/2jTpZZRT+MTYZggDYbv2neSmM36Ntb+H0/NPm5iYWBchFkVh/rykhQGxyDdoEK6Cubk28NTNmzert4I1uKdNlmvZsrXUqR2iTfEc422AiEPqHtNTFbBLly4rcsCsw3j9Bw0dLnv2HpCNm10Nm8R8MX7xzONNeunFV4QECO6EwMxz7dt3QA9RJb1wkWKyb58jw+Xo0WN0jeYgCgJGQ0PEHKM8GMKgAqIEI5M7YUDBQw4xRuHneAqN98m0J9AfKDUKA553vDoUOIQwaObInkuVY36j8DDHoPsPPlUlwY4wGTxkmIDSMXTg4CE1sMA3IQxKnhQQ+BieDbt3BON/5rff0bnUt98ATcBirrtp02blw8jcWhjZBj1GyUBBg4hlbhcZZcHX7PBHPDwGYQIki7FDch0U2YYNw5T3fXz3nuD1IuQC2RN+bAzJ5lmS2z4RBQSrQmysAwbDje58/LFLFhoshLhzsTyQKQjC3l/ap5S88QgI1qLFS/UcH28fC66Q3IuY/YMGJ3jMOAEsAgUE64g77dt/QC0MQHr48BnTv6b3Bd6VHAHDeT3D63Lj5sPgYBQQhDUsH3bC+owCYsfl4crHWmogFLQ3giJ4e/rKDsMh6BUrLoMX5psndx5119rvY/7GktShYxePCgjVswlEj42LV88C2cgMXbl2Xd3yBLYSMwPsCsKCjVV18OAhaqXDLcCKmwAAIABJREFUgps+3Wse4WPg/Qmqd1dAuI4GjPZzWGZhrsW9SqpFmGO4DrEMwgSYiAgPHdpHCRCWjh07qTC+fed7miHCYN3NNbEGGerbf5AG/prfCBDzFixWaxHxGZ85szkFBgYp1Ip2EydMkpzZc2qFc56xe3R3XURR+GDIH374MKOSKiRTk2bB4joIKYGBNeXrbx3wNvZh1Xo709sy17kY2D12WEc7deqsViYWbggLPgsSHgms1Hx3E3irDdz+ow1BuBAW2dZt21sxCNHdSALRzDqDhbJi5WrqlsWyBjPBemGIhWPTlm3iW8ZXiA2C8GR5FfXSOCEjqJr2eCpQVhA+o7v3kNiesbJ8mWtWKtqyQCCQ7tn7MD1y4ybNVODnOAwrLLyJWq4Q4pN7X8YCVmhDWIGyZs1pvS8CFkHzxH/4eJdSCIcxeJhz2BKHY5RUYmHwpjBnsHAlznXNXGU/z/1vICXAaewEZh9jTK/43hogXta3nMCg3QnFbtFixzdH6apVq64ybbJa1UkGJmqugZA/J3G+CtDsA8aJwIWCYwjrmFHWUXQ9CUBkyoqOiVVPAIIsiyqePpR+AtU9eSXM9c2WhRZ4QtmyfhLRuq3uRgAPDK6tizU7EEQxIkF4ykm6YAgjSYOwxpawaPZ72i5cvFQFQvdjMT3iLMXS/Ri/x46fZHlF8ZSRLQhhD8/FDKf12dN57Dt3/ryENgizUpwC1UXJZT1j7rWP7C
ADBwxUgZP4g9C6ocq7+zn5HNcYNWqMREZ2sG6B8IsQtm3nboXp4GXRud62vUyYMFHHYqfOXWTJEofHxjoRSMu//qUCDkKWIbLpATcxsRsISCSEwfMLGWH45KmzFo9DeGWuGC85ln6gcvasiPAVBF3WJbzU9vgSc29PW+AlK1evVX6PooWSC+EVRhAMCqqpFlX3cxcuWiytnWPIfoz4lKrVAlzWG4Q2IJbMtwcPPtHYO+PRIjOal5e3ZewMC2uk3guuCS/yKVnG5T1BRZBJDsIosXjJcsXgv5P5HVm+3KFEE0tkV4CAb+JNZS0k6QhC+6GDB1WZr1uvgSIuuB5rWe2QUMs7ojex/UdMG/2O5R5q0TJC2ju9z7t271UoGfuPHDsu7aO6WNn2yAgHPzQEFAo+bAi+TDZFd8JrhmcJb48hjDg7dzggZHiLRjplNI5PnjJV6jiF+aiojoLhzhCKKx5VjH7eJbwtr97JU2f0e7zw/IuW0sw56lWoVUdatmil8B0MIcRIXvaQKfK7H75X7yVWfYgkRUHBdeTmLYe8NTxhhJQsWVqPAW8rV66iGJh8o/DG0rFjFz3GfyiwGFYm2WLBzEHQBubdzT62rI0GRmb2A8Ne41Ri6jdoZHntmTP58xdUzxNtSUZjjIKsn0DDMLQYAqJuRwzx3DUCggWDDgSvGDUmKQSLeQTczQ6VhV+XL19R+We7yA4y2nberdu3VWG5cfO2Pg8oEE+E17RRk+bCPMMoYo/lhreYviQTJnGHwFp7xvbSdxgzeqwVeuFbpqykS5NeiMt9XHoiCgiZmbDeGDJ4Rffc7ljem9s0v3Jl/VSIP3PO1UKMsgBuHbwjUBk8KBedlmxzD7ZcH7ehofUbt8iadQ44hDlujrEFex/uXAjt+1m4+/TuqzEFTDjS7L74t+ekaxcHtIW2ZLPYstXhGuX3jZs3paR3aZe83F9//bUQfA+0wE4ISgGBwfKtTTBFWCKYHK+AO7G45M2b3xJcyB4Eg+RfQsIITfE6btx4VUBIG+yJunWPFQQkd2JRsSs29uPgfYcOG67WPYLgGZAQAh1uRiAC3J80tH379NMkAPbz+fvIkaNSr35DdbW7H0O4xyoLoU2T3hXXMQQO19u7lFoNEc6CaobIZ5+7pn5lPIB9ZEE31KJFhLqjze8hw0ZqXBC/Ef7BVuMyRjkEImSspExaA7PD+o57250U7uBXXtY5GQPHUTAmT57q3lR/852DnZZee4Nc2XOq5Z59wMrA69uJyYywCLGg9urd3/Lc2Nt5+rt7954WXIxAunbtO8pep0eR/gaKYghBtUmzlmqp552aNnVYYWDQKN5GMKTfDKwH6FDdkFAV0on9sGdzmfHuLOlrE4S4D9hi8Mx2Onr0mOTMmceK9eLbEzSIQmSIRAIoWwcPHU1iGTVt6Cd7jA0LV8fO3VQ4Mm3YwihJA4mL25004LvfQIUDcKxLl27SpYvDg8ZYIonC4xAel337Dz3yOyHsEh/C+9oJpW/OvAWW0E2fkgkKwkoHD3kUwTsQ8BlLhhBAgV+4E8+AQcCdEAYGDh4mGzdu1kMYjcD4sxh+8MEHUtLHV867BX/brwFkCMGeuCGUFzwOxPlAQPVq1amnMBp+I+QbDwzWf7t1ksQfBO5/7jbX7fcyfwNXs8d3mf3EirEQe6IzZ87Jug2Od+Q40BujsMMD2ngQeO3XgZdzjkniwGJdxrd8klTqnMP8+/vX3+o39CtbTrHd7EdwxeNtiLUEIRBFof/AIUmuxffF8uxuGcZjAISTeDZ4siF4Np5PO3WL7i6tWrdVgZrvA527cFHvZ28HTwWG+MUXXyg/B9JjCMU5IqKtbN+5R7baYhzMcU/bPXvek/oNw4UsbRjcWPcwKtF/8BkEYPoCXm+38HMtxj4WXjvxzihdwMN693mIOact14awtqLU3LvvMJiBbyd20ATWAjUz/BXYD7AVeLUh4GBAroBqktgGxAKEULvMaVBBIJ1i4/vjJ06SoQmucZM4oFD0WUvw6ENAKqtU85f39u3X357+I35q8uRpyrc4TpISDJQYTwxsG6E+ukcvK/gXA6LdA0I8C8HPhmrWqqMp4c1vsyVDXP0G4VZMIvtRqHfscMg2hQoVkcE2SzmxVCZ+rkXzloIyB9FXJLeIcMZ7Dhk63PLknD1/UfCe9ezRU2N88XRAGE4c8WmuMb/Ie2Zd1oYiClnFKEVSBOjosfcluJYjHobf8BESeUCMuTKly+rf/NeyZSuFd5sdGFJLlvL1KA/hNTc8kPbwZsYN/evj41Bw2I8sxJg9dMjx7EDCUJgh5kzRol6WwcWnZGlLocXQUa58JQFdYYgYU7uR6e7du1KxUlVrzACR9RwD+Z1mXwU2aAgjfXX/IF1zkcNRDgxhPMEzhBJEOQw8sXayJ2iAD6PU8u1On3loQCezKc8LYYSwJ9Ax18KAZtLHo+RgWB87NmkSJdPevn0iCggBS7i1DZGNKSCwpi7mWBQcNSj2S/Uager6hRGBb8z6dha1FjFAYR648rFQEAS7bLkj6BDmUrhQEcmbJ79aUHH5c73z5y9q7nOsOzAphGK8BkEBgeoKxfJosNm4Hcmkkuqll6Wcr59aXA1TRiiDgdvpH9/9IMWKeTmfbY0uFATxcn2sUgxogl7nzHV1cQK1Idgcd7adGGBglI0lyhxbsmSZ4k/JkEERId4NLdi3bAUBzgPhDcHq7gkTj9UZCxMDA4EIwiXNOcSiDB4y1AVSwsLWvHkLadK4mTVg9KRk/kP4XLJshWbocW8CBAa8KG5Ze+YR4hCMMGU/BwYTFBgkjcMb626spOnSprOYKAJx+vQZZP8Bh8sVdyLu0Rsf3NDvbbTy+fMXqJv65MmTuoChdb/04ktaxwHvAtYFrOSMCRg7CiwCIEJ13rwF5Ny5C7oQIizhHWB83nvwiQbpIkxhYQEHDcOHcD8SkMk+4FxZ3soiObLnlA9vPVR8aQccbcHCRVK4cFG1rjGWWcBRzF9/LaNluR01aqwULlLc2ea+WrQQfE1tEcY1iyDC5ONYHIFgcT4K1plz5zW4mfECDRk8RLJnzSHnL17S++BmNbA9mG6Af6BmqOL9fX39ZOyYsQrXqF07RIKDgjXwVt3D+QrIrdt3NE4g1Sup1WVNDAhCBtY7IBdYId9//6RMnzknSVYYFuNMb74tCcNH6DxftXqddO3Ww3L58qwsNNmy5Xpkyj/idcqVq6SQOPgF7mFT8wXFDSsi1uWyfhWTzE3uwZibv3CJS/YV4Cp4FlGcgfhgceNaYF4JoEyO6Jd2kQ+z/NEOnoIVllgELEowbuPVNNfheKfO3RRja/Z9/vkXyvNIwdiuXaQKfRzjHT3FPLBIksWNoEn6BGEAXLCBlQClYx8KG2MBodlOCDVbtu2QfbbkH8wdBBMWNBTsxk2aaz/cuXPHY6wEfQmOeNWq1SokBQYECzwXPvbe3v1aU8Jk3ho4YJAGM6KcMk7wplKviEUrLq6X/OXZv2owL/1uJwTYW7du6z88pDPenWN5Iky7VWvWJZvFDOGPdO12zzMwwEGDh
ykGnneE32LIAvLhidatXaexU0CGDBF7Q98TKMv7kryE4G7WG74ttHnLdl2f+HvpkqVStIiX8nSMK1jI5813eNow6JAy+cMbH8qDBw+U71D4k+x4GEwQmFnvgBBev+HwxCIMM+eJBQDygKEBD7sRArgnmSGZc3Pd1igy2vXt11/4rtyPeJBPP/1cvfbAToFvmuBwtq0i2ugct6dJht8b77i+rO0/hCASMfBceLcwOiKgUV9k6PDRyl+Ac6HUuXsGUX5ZJ4GJoahgYNh/wKFYwmPSpE6n8T14eknBTMwHtGPHLhU4jcenY1RHeTPjmxqjiTAJVKpKpSo6F5mbWTK/o7Ge+u1WrJLI9g5PwoL5CxTXj7JEzGPaNOkUSkO/+pb2lc6duljGEeI6A4Nqy7r1GzT73aFDR2TpspW6tgCbxavAewJNYZ0ksUZyqZMR3qvVCLLgVXifkTUMjIW1bPnyVRpnQoIACGNkWFi4fHz3vty7/4nCkFFqMf4gWBJfClTGBGrrSU6jbdNmrdQISXyViSMjvgPq0aOnxnMgVGK4Q2YjsQwUF9dbE9iQ1hz+kSZNek0AwNpHPASeJJRK6h9hqYeAK/MuxEXCH7lWw/Amem36//KVa3L16vUkSTiQlUgQQHp7viFwIWILjHLYpXMXyfRGJlXIuA4xkiBN4NmUekAhIDU5hhWMa8QruPMXno/AdvgRcideQBRtIJAk8wDuSywH1yD5DrIdMhG/vUuWsgx/eGDSpk1vQU6B/JFA4f2Tp5XHIi/EdI+xxg4G7/TpX9caRqzb8A0QKxAxVxjZSKTBvdyJfiSBCUkh4AtNm7WU2XMc2f+o80ZsCvEcfAf6jLgpCGQK/ARYFvIoHiUj43AcGN2zz/w1iXGVMQQE/qOPPlYFuWdsvCbrgX9wf9ZTZFUSaWx2wlT79BtgJXCAfzNfk6MnooDAbLCGGaw7bhygDAgeCcNH6QcGr23w+MQ4oEkxqf1r+KsAAD4Uq0/FCpUV+2fPJAMmDVgQbjSwy2ADp02fZWGlsXSioHh7lRDv4t6SKeObuhiaDB50DJbziuUrin+NAIUXECyEdkptgEyZ3laXMZ2EG5tMPUCpgJ6gWR86fEyFl5LeJcWrWAmpUzdUJ6B7p8b3HeCSDcN+HCwki6E7nTt/UVq1bqeWaqw8uARZ3A3xLGlSp5WOnbqq5drsx3VLf1GjAUH9/ZOn9BDMEu2cAn3ly1VUSI05h5oEMOPyfuVVULf3sWlj327dtkO1d4KVCQy0E0F2hQoU1DTCs23ZtrBGrrdZHM05S5ev1Iwf5fzKKzwLCFvuXHl0Ib1+46aMGzdRcuTIKbgRDWHhYeFmkpk6GEB0YArgeGEIuMBZ9LDIYlkkoxJej9NnzqoAjTKERRkLEa5yMr2gdNy5c1eZ6fgJjkxcMC4WxWJFi6slxwh+CBYI5Fg1iFUA5zh06DAXpYvnnT79XR3nxDwh2LRq1Ua/AxAuYkwMkUcdizfzgmdj/IEDh1jYyfDBAk5Qn3vqRnMN+xahBIUdoWTNug3SpUu0LFu2QpsMG5qgCh5MA28TC7ZRVAnEZXwQIM27E/QOxpYaJQj2WMVQ5lDSyIJh4Adk5CJ2yeCWWUAIuOU9aM/c9kR4ZQjkIwZi6rR3k+DJMRJg8QKH/ShiscUNDOabQEcWCggli7gooF12GIm5FrhemCSeVXfCAJE/fyGZ7HSTgzv29S2vhTsZb54IXkdwpp2wJKMIkHAiuVSvfAM7htecT5IGjBzEF6AMIPyRTYlMR54IJQTIKBYuoIV2tzy4eYJ/gZoZy739GsTxwIsQbOzEos/1yDBHEDTUuk07xQeDP3YncPBFChfV8U4dJLwcZPIBroJhgPlLog68DMTjmHfh+5Cph+84YOAQtTaC8XZ/HoQc+CHzAaXB/TjPg4fHQBfsz8eYpn4I8WruNHL0WDVGGE8wQfj1QsOs9cS0Z8y0aRspefLklR6xvawYOI7jfapYqYr2lwngRxDDE8RcAOtu0qJPnzFTYxjoIzLq2OtMoQzGxvaS0qV8dezYvyPB40DhsPRSs8d40k6cOq28hvteunJN5yrC2sjRDzND8YxY/QkkthOGETyffn7lpU3bDpb3kKxNuXLlkaiOXeTBJw8FBgQW1kI7obQ0atJCTnmYq3xbxlD9BmEaN0jsD7FBjO9rH9xQ2CDJIyje64nUixsZJXgfmEtmDactVlWs0BiPDDyamAv6Onv2HDregX+V96ug8aUIR6dOnxP/6gFSqGBh/b7AXoFaEnuF4QFPLAofRHAtPCTAP1jnR7u27cW/RqC0jGijiXDIIIfRxdCBQ4fVos9+1i0jaPEOeA2A7LB+o1AlJs5PNskFyi3ZtAxCgrULY5GBVCIPYJQinsx4qVFQ/7+98wCvqtjacKSHZkWaBREEC02ld2lSVLxXRUC8otIu2BVEpBNaqGIoKhY6iEAgBEQpUhWNKAJCwNA7hAQCgZDw/c+3kn3cOQTEa0IO5//W8yRnl9lT3r337FmzZs1wFjv6VNGfjdY8+rJEbPjVnnl+W1kW744y5p3KE9tZrOP5zeQ6WrNS/LXoF9q5y6u2DAB75de4rMj0Z6CSQ+tAYtIFc36m4tGxY2eEjJtgXHlv2PimUzidw88mnDe/yGLFins6XWiFZp1FH55PUhrPDlPnl3Uy19RiJ8rS5SvtO1S+bHmz/O3df8C+yaVL3wfOjEbh6AxaUjmBD4UNcKbB54W9+I41zE56/aPlne9lg/qP2jPE956ykVO+V3jI3pXhrmUR6MxN3yY6eB86chRDhgbjzjuLmR8Or+MIoEfqN8SsL+fYJC10IaBDuNM+Zhj6ItGSyXbpsBEfWJ3P4/SrpL8k3xFnJAOPu4XvPSfsoeWOQwHdwjqZs4uxY5CTBrknA2InTtPHmtu3gR3o3nX6mJBxNtOnOz52DD7wQDnPcSpFXPSUShLzyDgpbNvznWQbPXh4cj3EsOxscepZd7zOdrooIIyMGhV7I9kL6f5QHDt2DL9v3epxcGVYKiz79+/39ETyxU3u6dpt2iU/rmkJK3TO1sGxy+4Pa0xMTKr4vecfZ8PA6eGiyZlj+qkN86FkLxJ7DGmCpjDvrKSdG8cH2e08zvBM31vYQxU0aOhFDSsnHHuMOdNSWgsussHBnkzOJsGyuIXpsWeIv46TIc9zRgn2nFKJYk+LY8Lkh5dlZZy8xm2NYaOTvZA0J5I3hwBcTvhB5TW0zFBTdwt7MlkJMT7OVEXh3NdUptJ62ZlXWmAYJ+8d88UPKvPNffZC0uTM3lqnB45xslG0d9/FvZP0MWFPjFu4DgOVVabFdCgsI3vgnLnRGZdjWWDeqbg4Qr70d2HPoLcwDjdL7/NU/NgryGePPTLky2eVDeu0hPeHa0PwvjvCe8dreY4WJg5duBJh2Zg/Z8Ya5xp+SPiBoRKV1ur1VHic557X8D3ke0nh88+8scfeGabhxMtjLJ8jZB0ZGWnvjXMsrV/eG/L1HqLFsHzvOBsQ791fCZ8X9ug4DTKGZ2XKNXRY1rSE8fM697PlDue9OCHr
LSorznvlDnupbfbw0frInrRLCeO9lPDeu8tkiuGUGfb+Xeoa9qgzTbfwPvLZupSQlTO0wzsMr3W/E+yQ4YfT3SnivoZTYrPeolBXi9q502au4kec94K9seTOZ5pxO8J9Pl+Xq4NYx9E/iNaoS+WXzNP6XvB+e1t+nLTJmPWHW7nkM82FWp26gWEZL8vHZ4B1qXdeWe/yvfMWsme96QgbR7SGMV3ySUtoYaX11bu+ILOt27Z5hgw615ItGbJDgQ1e5tHN1wmX1i/LSKbsYXfeB5aP9TN/3T2vfFfZK+yEc+LjMA1nVjHnmPObzJdrMiSZRYD1Ir/5lOPHj9v3ygmb1i/vC+srpy5yh+G32c2caTl5JwfWafRdYj3M7zYb9ay/+M3jc09LE4cRsROKvL3XM2Ce+U453wuOu+f7xfYCwzpKgpMnq1sjI80K4hxzfnlP3W0H5/jf/eUzwWeZ98d9j9mByPeCf8nDmPiuxZnFj88DVyW/1HvDNlFa3wTOfjhi5Ch7tpw2kzu/ZOkMdeNx1ll79iR/C3gvWGYyYzim7wjbUe66j73nfAYvVb/yntJyybYPv1H8Ljj3gHWK3c+z51LV92wfur8trIPYs8969K+E+eG3yZsXy0FOTr3ANgLfbT4ffN74y3qKeeK+IzzOuon5YRwcFsxn0y38bjJu51njOda9LDOftcs9O2xvbN+R9mKkbM+yLO732EmXHDmahml4Cydicvy13Of4bLmtyDzHupv32ltofHCs/nyX6Ififma9w6ebAsKIJ02ehnEpswh4J+TP+3Q0e79XP7vplysnP+Q9evZFVNSuywW7Js+FLliIF9q2u2xj6Zos2DWcaVp8xntZrny1OLRI/hix4Yo+FhldhrVr11kPNytp74ZXRqftxM8P3qTJU83Z/EoVUefa9PqlQssefY6/dj4q6RW3L8XDXnvOp0+lhI2G9JaPPp6YykE4vePPyPjYw8rGnlvYqONUs5yS9loTdg5wamZv/9RrrRwZld+uXbsjeNif/kUZlY7i9S0C7Iz4fFLyGkdXM2fpqoAw4xyS4Z7G62oWJrPS4nAoZyjIX+WB2qC3L8hfXXMtnKcfD/16JL5BgGOSucgnhyq5x3r6Ru7+zAX9Fjj0ijOVOcPD/jybOVucEnfsuNRDDq92TthAGjJ0uPUSXu20nfSYB84YxWGi/ixLl6+w5y8jykhLAYdf3HJTAc8kDBmRTnrGScWb951DSTktqbtXmenQiu/4kqVnuhkdFwdTcoKXrFmzpznNaUan7+vxc7gihz2VKln6kv4qvl4G5e/vEeCQTzquc3j5D+uT/Yv+Xgz/LHS6KyD/LDu6WgREID0IcOwm/Wy4bgIXJfRVoaJEPx73sApfzavyJQJ/lwCdnvkOcnpdTgl+LQiHinCBRfoD+JNQAZkxcxZGjhpta1D4U9nSoyycPIIW8zFjQvC712LK6RG/4vA9AnRcf6drt1Q+hFczl36rgLCXhjOkSERABERABERABERABERABHyHgN8qIJzGjAvV0ElbIgIiIAIiIAIiIAIiIAIi4BsE/FIBiYk9heLFitv80wvCFvkGaeVCBERABERABERABERABEQAfqmADBs+EvkCcyNnlqy2GF1CBsxsomdHBERABERABERABERABETg7xPwOwUk7ky8LTqUPzAQtxUshDy5Am0e87+PRleIgAiIgAiIgAiIgAiIgAikNwG/U0AGDwlGvtx5cEfhIihR7C7kCwxEk8apV3JNb4iKTwREQAREQAREQAREQARE4MoI+JUCwtUmKz5cCbmz50DJ4nejxF3FUfiWArgx3/UIX7zkyogolAiIgAiIgAiIgAiIgAiIQIYR8CsFpF+/IFwXEGCWj5J3FTcFhFYQ+oI82qhJhkFUxCIgAiIgAiIgAiIgAiIgAldGwG8UEK72XKRQUZv56ub8N4AKSKkSJVHgxpuQJSDAjo/5cOyVUVEoERABERABERABERABERCBDCHgNwpI6PyFaN36eQQPG4F33umGu+64EwWuvwEvvdQOIWMnoG3blzB8+KgMgahIRUAEREAEREAEREAEREAEroyA3yggiYlJnhIfPHwUd9x2u1k9li5b4TmuDREQAREQAREQAREQAREQgcwl4DcKiBtj1M5duL3IbeYPsvjrb9yntC0CIiACIiACIiACIiACIpCJBPxSAdkWud0UEPp+LFiwMBPxKmkREAEREAEREAEREAEREAE3Af9VQIomW0CkgLhvt7ZFQAREQAREQAREQAREIHMJ+K8CUuQ2m/1qQVh45hJW6iIgAiIgAiIgAiIgAiIgAh4CUkA8KLQhAiIgAiIgAiIgAiIgAiKQ0QSkgGQ0YcUvAiIgAiIgAiIgAiIgAiLgIeC3CshthYvaLFhhGoLludnaEAEREAEREAEREAEREIHMJuCXCsimLb8jMEdOWwdk9uyvMpux0hcBERABERABERABERABEUgh4JcKSFTULjRr9jiqVq2B71au1s0WAREQAREQAREQAREQARHwEQJ+qYCcP38eR44et7+z5xJ8BLWyIQIiIAIiIAIiIAIiIAIi4JcKiG6rCIiACIiACIiACIiACIiAbxKQAuKb90W5EgEREAEREAEREAEREAG/JCAFxC9vqwolAiIgAiIgAiIgAiIgAr5JIGDzlt/Rv38Qpk6ZhpkzZ2HQ4KHo06cfli5bkW45Pp+YCP5dTnbt2o3PP5+EgwcPXS7YRedOnjyFbZHbUx2fMmUaevToiV69+mDkyA885+Lj4z3bvr5x+vRpy2JSUhKmz5iFteu+T5XlQ4cOg2WXiIAIiIAIiIAIiIAIiMC1RCBga+R2vPLq67g+b34UKVTEFJCnn26BHNlyYNToDxF/9tw/Ks83S5cj4ucNfxnH+PEf2bS5K1as/MuwToCYmFhMmjIdmzZvsUPRJ2Lx0ovtcMtNBfDuu++hb9/+GD5iFN56uyu6deuOEzGxzqU++5twPhGz54Qi/uxZy2Ni0gUUKlgYzZo94cnzkaPHMHvufJz9h/fGE6E2REAEREAEREAEREAEROAqEbAhWAmJSSjzQFlUr1bDk2zdOo98NzaBAAALRklEQVSYQrAgZSG/c+cScMFzFvCeXerQoUNweu0ZjDNRbdy0GRUrVcUnEz/FyZMnXVcjlaXj7NmziNz+B9Z+vx6nz5wB92Nj/wwfFxeHRHfiKTHR8tH13R62dzz6BGrWrGN5XrV6Taq0Joz/CEWK3I7du/ekOk4Fhuk5ciImJpXCxfNpyfnziTh+PDqtU6mOnTx1KlW4+PizoILhSFyKlcPZP30m3hTA2nXq4dDhIzgTH2+Wo58iNmDzlq0WjMc6deqCbt3eA/MbF5dsKeHJxMREXLhwAdHRJ3DmTLztO3HrVwREQAREQAREQAREQAR8gYApILt278X99z0AKh2OtGz5nDXmF3+9BImJSWhQvxE6tO+EuJRGcom77wGVDsqHIeNRqVJVPPpoE1M6eGznrt2oXecR3Jj/elSuVBUDggY7UaPbu++hbNny6NW7nx2j8vDKK6+h3cv
tcepUHKKjo9GoYWPUqlkLc+aGolfvvqhSpRpCFyz0xMGhSWvW/oB1636wY4MGDbH8jgkZ6wnj3tgWuQP7Dxz0HFr/UwRq1aprloWDh4/Y8ZGjx6BUqdLo+X4vzA9bhDq166Jz51dTKUPRMbF48aV2qFixChYu+toTn/cGh7A1btwMtWs/grXfJ+dx9dp1qFSxCpo2aYrwRUvw8svt0bp1G5w8FWeXT546A7feXAB3Fi2Kho0aY8OvGzF16nQ0adoMofMXWJhBg4ORO2dOlCtTFv36D8TY8R+jZvWalldampKSLqBhg0fxzDMtEashWt63RfsiIAIiIAIiIAIiIAKZTCBZAdmzD5UrVsF9pe/FgrCF6NWrN7JnyYaOHTsj7nSyheCZp1ugYIGC6Nt/ICZNmYY333wbO3fuxsSJE1GyRCmsX/8jgoIGolSp+6yhTwvIq6+8hiwBAZg560scOHDQlIs2bf6Dtm1fwt69+1CzZm0MGDDQEHTs0NEUiB8jImx/0KDBtt+gQSNEbPgVBW68CXVq1fXgOh4djS6vvoFz55KHiDVt0gzZrsuCTSmWAk/ANDZmzpqNEiVK4aeInzFp0hTUq9fQykm/iruLFbd0Q8PCMWjgINtem6Lk7N6zD2XKVMCwYSNs2FflKtXx7dLlF6Xw84ZfkDswL+bMnQeuyn7//WWxadNm0CJStkw5i7NvvyB8MXmqbffu3dfioNXj/lL3okjBQghbuAgJCecxKSVMi2eetTDTps9A1uuy2PAy5nf3nr24t9S9Fs++FAUrLGwhJn76uVlDLsqcDoiACIiACIiACIiACIhAJhIwBWRH1E5Uq1od1apUxRtvdcVTTz2DkLHjU2Wrf/+BCMyVGxMmfOw5TstJ1oAseLL5k3aMw6hyZs9p1gweGDd2vDWMf/tts53/5ttltt/2hbYIC1+MvIF5UL5sBTv3wegPzO8kYsMvtr9gfpiF/eSTT23/3ntKo0qlKriQMhSLw6OGjxiNhIQEO9agfkPkzpELv21KTosXbfxtEwYOGoK5c0Px1VdzMG7cBMSejMMbr7+JbNdlRej8MFOaAgICMG3aDEunTq3auL3IbbY9d85cy8OixUtsf/bsObbfp3cfcGgar6tbp56dc/8LCRmPG/LfgCFDhhovhmvz3PMWhIoc87lv/wHs3XfA4ujUsZPn8mqVq+L2orfDscqEL16CfLnzokP7jhZm2bIVyJktR6r7s2rNWuTLkw9jU+4ZLUoSERABERABERABERABEfBFAqaAbN8RhdKl7kX37sn+FGlltPN/u6BcuQo4cPCw5/T2HX8ge9Zs6NAhuXG8ZWukDbmq+ODDFsZRQL77bpXtL1wYblaKti+8iKHBwzFgQBAWpzTuB/QbYMoLrQeUSZOnWON86rTptn9PiZKmgJxNOG/7n30+yePcTqWkdavkIWNbft9m5/lvwy+/4s23u6JwwcLInTMXevfpj5jYU3jrjbdw8/U3oHuPnhgaPAzDR4zE1q3J11V86GGULlnK4vh04qeWh9lz5tn+9OkzkTN7DrRr1x6DBwdj8KAhCEvxkfEkCti5G/LmR4tnWthwrqCgQaBfysm40za0LG+u3Dh27Lj5dVA5eeuttz2XUwEpdGshkCXlq7mhyJMrN8ifMn9+mFmnhg0fafvOv3//6ynkCcyDyB1R2Ltvv3NYvyIgAiIgAiIgAiIgAiLgUwRMATkVdxrlylZA9/fev2TmXn/9DTz0UEWccDmH79m3H1kCrkPrVq3tOvo6sEH9zjvdbH9Y8DDb/3Xjb7YfHr7I9idPmXpROkEDBiIwRyAid+ywc/PmhVrYWV/Otn0qIHVr/zkEq3/QELNwOBFxtq1sWbOBlhBvKV+mLPLnzec53PLZlsiRNbtn371Ro1oN3HN3STs0Y/oMy8OKFAXqyy+/sv2ly1NPUeztHz923AQLx2FUbjl89Dgef7w5ArPnNAdyWkHIixYVR+jbUeyOYtj+x04bQrXk22XImysP3nwjWUmhInPTjTchJGScc4n9zp033+Jq/uS/Ux3XjgiIgAiIgAiIgAiIgAj4EoGA06fPIDx8MfLlyYvmjzcHrRp08HbLuYQEPPHEkzZkauWqNZ7ZsNjwHjI0GGXKlMfq1WvQ7uV2oHP6sZQhQB988KE1ikePHoNDh48iJjYWLZ9tbT31HGLFtH759TdraDs+IMwL4x3QP8iu7dunH/YfPIT8ufOgaKHCiD5xAht+2QgOTaKfiVs45S4b9LQGbN68BX/8EWW+F1WrVLMZvpwZqD6Z+JmFa/H0s2b5mDptBlavWYvDR46g4C0FkCNbduzasxfBQ4Mt3MhRyWuJbN223awpJe4ugXXr1mHNuu8x/qOJF/lacG2VPHnyWn5Xrlxl5eRMVjt370Hpe0pbnNwPX/S1bTd/7AmbtYplqV61hlmRevUZYMO0QhckD0Vr1vQxJCYlmc/HrQUKglMl79v/p1M9r61fvxG6v/ueG4m2RUAEREAEREAEREAERMCnCAT8sP4nPPZYc7CBy9mT2nXodNHsSVG79qBFi1aoVLEy3uvRC7SYuCVk3ARUqlTFZsHa4nICP3X6NDp26mzH6e9AiT+XYDM/0Rm7c5fXbTG96BMxaN/hv3j44YqgQ/aevfvQsmVr1KtbD61btUHI+I/xwn9exIPlHzTfi48/+QwLw1NbF5z8RG7fgfr1GuLJfz2FNs+3tbJNmz7L40zvhBsxYhQqlH/QGu1Dg0eAU+KO+XCc+cI0bdwUffoPxOAhw0xxadnqOXMg57WrVq9F9eo1wCl/23fsjF1eU/s68X+95Fub2rhu3fro0bOPMY3YsAE1qtdC8yeeRMi4j9Cly2toVL8hatSoheXLv7NLqZhUqVwNPXv2Bqc+pvJTrnwFtGrVxpNWv779UblyNfz8y0YnOfsdMWoM/ojameqYdkRABERABERABERABETAlwgEcHXwEydiLE8J58/j6LFjNu2uO5N09OZUvBRbkyONVc25ON65FP8M97Xc9lZYeIyO0pxul0JLBtfWoHCdC+aJvxQuyOesIZJ0AbZuB2e+4poXl5MjR46aRYO/lxKWiLNzORYf97ofMTExnjJzTQ0ycAt9ONxrlbjPube5JocTL1eDZxkoZ86csVnBuM01VTj9sCPuIV3OQpCcXpfrozjC406+nWNr1q3HkaOXLq8TTr8iIAIiIAIiIAIiIAIikFkEzAcksxJXuv+cQETEz5gx80tzaOewsaSkyytm/zxFxSACIiACIiACIiACIiAC/zsBKSD/OzufuPKLLyabH8mQoSMu8kXxiQwqEyIgAiIgAiIgAiIgAiLgIiAFxAXjWtyMPXkS80IXIEq+H9fi7VOeRUAEREAEREAEROD/HYH/A/TSTRP4kbr6AAAAAElFTkSuQmCC) ChemProt RE works well with `ner_chemprot_clinical` find relationships between the following entities`CHEMICAL`: Chemical entity 
###Code
# NER model trained on the ChemProt corpus
ner_tagger = NerDLModel()\
    .pretrained("ner_chemprot_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

ner_converter = NerConverterInternal() \
    .setInputCols(["sentences", "tokens", "ner_tags"]) \
    .setOutputCol("ner_chunk")

chemprot_re_model = RelationExtractionModel()\
    .pretrained("re_chemprot_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

chemprot_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    ner_tagger,
    ner_converter,
    dependency_parser,
    chemprot_re_model
])

empty_data = spark.createDataFrame([[""]]).toDF("text")

chemprot_model = chemprot_pipeline.fit(empty_data)

loaded_lmodel = LightPipeline(chemprot_model)

text='''
In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels (Kir6.2/SUR1, Kir6.2/SUR2A, and Kir6.2/SUR2B) reconstituted in COS-1 cells, and compared them to another meglitinide-related compound, nateglinide. Patch-clamp analysis using inside-out recording configuration showed that mitiglinide inhibits the Kir6.2/SUR1 channel currents in a dose-dependent manner (IC50 value, 100 nM) but does not significantly inhibit either Kir6.2/SUR2A or Kir6.2/SUR2B channel currents even at high doses (more than 10 microM). Nateglinide inhibits Kir6.2/SUR1 and Kir6.2/SUR2B channels at 100 nM, and inhibits Kir6.2/SUR2A channels at high concentrations (1 microM). Binding experiments on mitiglinide, nateglinide, and repaglinide to SUR1 expressed in COS-1 cells revealed that they inhibit the binding of [3H]glibenclamide to SUR1 (IC50 values: mitiglinide, 280 nM; nateglinide, 8 microM; repaglinide, 1.6 microM), suggesting that they all share a glibenclamide binding site. The insulin responses to glucose, mitiglinide, tolbutamide, and glibenclamide in MIN6 cells after chronic mitiglinide, nateglinide, or repaglinide treatment were comparable to those after chronic tolbutamide and glibenclamide treatment. These results indicate that, similar to the sulfonylureas, mitiglinide is highly specific to the Kir6.2/SUR1 complex, i.e., the pancreatic beta-cell K(ATP) channel, and suggest that mitiglinide may be a clinically useful anti-diabetic drug.
'''

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df(annotations)

# keep only relations whose two arguments have different entity types
rel_df[rel_df.entity1 != rel_df.entity2]
###Output
_____no_output_____
###Markdown
Train a Relation Extraction Model
###Code
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/i2b2_clinical_rel_dataset.csv

# if you need to customize the DL architecture (more layers, more features etc.)
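# The tf_graph helper below generates a custom TensorFlow graph file (.pb) for the
# relation extraction network; the resulting file can later be handed to
# RelationExtractionApproach().setModelFile(). The build_params (input dimension,
# hidden layers, activations, batch norm) are the knobs to change if you want a
# deeper or wider network than the default.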
from sparknlp_jsl.training import tf_graph

%tensorflow_version 1.x

tf_graph.build("relation_extraction",
               build_params={"input_dim": 6000,
                             "output_dim": 3,
                             'batch_norm': 1,
                             "hidden_layers": [300, 200],
                             "hidden_act": "relu",
                             'hidden_act_l2': 1},
               model_location=".",
               model_filename="re_with_BN")

tf_graph.print_model_params("relation_extraction")

data = spark.read.option("header","true").format("csv").load("i2b2_clinical_rel_dataset.csv")

data = data.select('sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2", 'rel', 'dataset')

data.show(10)

# you only need these columns >> 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2", 'rel'
# (the 'dataset' column is optional)

data.groupby('dataset').count().show()

# Annotation structure
annotationType = T.StructType([
    T.StructField('annotatorType', T.StringType(), False),
    T.StructField('begin', T.IntegerType(), False),
    T.StructField('end', T.IntegerType(), False),
    T.StructField('result', T.StringType(), False),
    T.StructField('metadata', T.MapType(T.StringType(), T.StringType()), False),
    T.StructField('embeddings', T.ArrayType(T.FloatType()), False)
])

# UDF function to convert train data to named entities
@F.udf(T.ArrayType(annotationType))
def createTrainAnnotations(begin1, end1, begin2, end2, chunk1, chunk2, label1, label2):

    entity1 = sparknlp.annotation.Annotation("chunk", begin1, end1, chunk1, {'entity': label1.upper(), 'sentence': '0'}, [])
    entity2 = sparknlp.annotation.Annotation("chunk", begin2, end2, chunk2, {'entity': label2.upper(), 'sentence': '0'}, [])

    entity1.annotatorType = "chunk"
    entity2.annotatorType = "chunk"

    return [entity1, entity2]

# list of valid relations
rels = ["TrIP", "TrAP", "TeCP", "TrNAP", "TrCP", "PIP", "TrWP", "TeRP"]

# a query to select the rows with a valid relation label
valid_rel_query = "(" + " OR ".join(["rel = '{}'".format(rel) for rel in rels]) + ")"

data = data\
    .withColumn("begin1i", F.expr("cast(firstCharEnt1 AS Int)"))\
    .withColumn("end1i", F.expr("cast(lastCharEnt1 AS Int)"))\
    .withColumn("begin2i", F.expr("cast(firstCharEnt2 AS Int)"))\
    .withColumn("end2i", F.expr("cast(lastCharEnt2 AS Int)"))\
    .where("begin1i IS NOT NULL")\
    .where("end1i IS NOT NULL")\
    .where("begin2i IS NOT NULL")\
    .where("end2i IS NOT NULL")\
    .where(valid_rel_query)\
    .withColumn(
        "train_ner_chunks",
        createTrainAnnotations(
            "begin1i", "end1i", "begin2i", "end2i", "chunk1", "chunk2", "label1", "label2"
        ).alias("train_ner_chunks", metadata={'annotatorType': "chunk"}))

train_data = data.where("dataset='train'")
test_data = data.where("dataset='test'")

!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/generic_classifier_graph/RE_in1200D_out20.pb

documenter = sparknlp.DocumentAssembler()\
    .setInputCol("sentence")\
    .setOutputCol("sentences")

tokenizer = sparknlp.annotators.Tokenizer()\
    .setInputCols(["sentences"])\
    .setOutputCol("tokens")

words_embedder = WordEmbeddingsModel()\
    .pretrained("embeddings_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("embeddings")

pos_tagger = PerceptronModel()\
    .pretrained("pos_clinical", "en", "clinical/models")\
    .setInputCols(["sentences", "tokens"])\
    .setOutputCol("pos_tags")

dependency_parser = sparknlp.annotators.DependencyParserModel()\
    .pretrained("dependency_conllu", "en")\
    .setInputCols(["sentences", "pos_tags", "tokens"])\
    .setOutputCol("dependencies")

# set training params and upload model graph (see ../Healthcare/8.Generic_Classifier.ipynb)
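# The approach below trains on the i2b2 relation labels: setFromEntity/setToEntity
# point at the character-offset and label columns prepared above, setFixImbalance(True)
# reweights under-represented relation classes during training, and setModelFile points
# at the graph file downloaded (or built with tf_graph) earlier.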
reApproach = sparknlp_jsl.annotator.RelationExtractionApproach()\
    .setInputCols(["embeddings", "pos_tags", "train_ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setLabelColumn("rel")\
    .setEpochsNumber(50)\
    .setBatchSize(200)\
    .setDropout(0.5)\
    .setLearningRate(0.001)\
    .setModelFile("/content/RE_in1200D_out20.pb")\
    .setFixImbalance(True)\
    .setFromEntity("begin1i", "end1i", "label1")\
    .setToEntity("begin2i", "end2i", "label2")\
    .setOutputLogsPath('/content')

finisher = sparknlp.Finisher()\
    .setInputCols(["relations"])\
    .setOutputCols(["relations_out"])\
    .setCleanAnnotations(False)\
    .setValueSplitSymbol(",")\
    .setAnnotationSplitSymbol(",")\
    .setOutputAsArray(False)

train_pipeline = Pipeline(stages=[
    documenter, tokenizer, words_embedder, pos_tagger,
    dependency_parser, reApproach, finisher
])

%time rel_model = train_pipeline.fit(train_data)

rel_model.stages[-2]

rel_model.stages[-2].write().overwrite().save('custom_RE_model')

result = rel_model.transform(test_data)

recall = result\
    .groupBy("rel")\
    .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("recall"))\
    .select(
        F.col("rel").alias("relation"),
        F.format_number("recall", 2).alias("recall"))\
    .show()

performance = result\
    .where("relations_out <> ''")\
    .groupBy("relations_out")\
    .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("precision"))\
    .select(
        F.col("relations_out").alias("relation"),
        F.format_number("precision", 2).alias("precision"))\
    .show()

result_df = result.select(F.explode(F.arrays_zip('relations.result', 'relations.metadata')).alias("cols")) \
    .select(F.expr("cols['0']").alias("relation"),
            F.expr("cols['1']['entity1']").alias("entity1"),
            F.expr("cols['1']['entity1_begin']").alias("entity1_begin"),
            F.expr("cols['1']['entity1_end']").alias("entity1_end"),
            F.expr("cols['1']['chunk1']").alias("chunk1"),
            F.expr("cols['1']['entity2']").alias("entity2"),
            F.expr("cols['1']['entity2_begin']").alias("entity2_begin"),
            F.expr("cols['1']['entity2_end']").alias("entity2_end"),
            F.expr("cols['1']['chunk2']").alias("chunk2"),
            F.expr("cols['1']['confidence']").alias("confidence")
            )

result_df.show(50, truncate=100)
###Output
_____no_output_____
###Markdown
Load trained model from disk
###Code
import pandas as pd

def get_relations_df(results, col='relations'):
    rel_pairs = []
    for rel in results[0][col]:
        rel_pairs.append((
            rel.result,
            rel.metadata['entity1'],
            rel.metadata['entity1_begin'],
            rel.metadata['entity1_end'],
            rel.metadata['chunk1'],
            rel.metadata['entity2'],
            rel.metadata['entity2_begin'],
            rel.metadata['entity2_end'],
            rel.metadata['chunk2'],
            rel.metadata['confidence']
        ))

    rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2','confidence'])

    return rel_df

documenter = DocumentAssembler()\
    .setInputCol("text")\
    .setOutputCol("document")

sentencer = SentenceDetector()\
    .setInputCols(["document"])\
    .setOutputCol("sentences")

clinical_ner_tagger = sparknlp.annotators.NerDLModel()\
    .load("ner_clinical")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

loaded_re_Model = RelationExtractionModel()\
    .load("custom_RE_model")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"]) \
    .setOutputCol("relations")\
    .setRelationPairs(["problem-test", "problem-treatment"])\
    .setPredictionThreshold(0.9)\
    .setMaxSyntacticDistance(4)

trained_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
clinical_ner_tagger, ner_chunker, dependency_parser, loaded_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") loaded_re_model = trained_pipeline.fit(empty_data) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ loaded_re_model_light = LightPipeline(loaded_re_model) annotations = loaded_re_model_light.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output re_clinical download started this may take some time. Approximate size to download 6 MB [OK!] 
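###Markdown
A small post-processing sketch (not part of the original pipeline): once `get_relations_df` has produced `rel_df`, the predictions can be reduced to plain (chunk1, relation, chunk2) triples ranked by the model's confidence score. The column names below are the ones the helper above creates.
###Code
# Keep only predicted relations (drop the "O" / no-relation rows) and rank them
# by confidence; `confidence` arrives as a string in the annotation metadata,
# so cast it to float before sorting.
triples = (rel_df[rel_df.relation != "O"]
           .astype({"confidence": float})
           .sort_values("confidence", ascending=False)
           [["chunk1", "relation", "chunk2", "confidence"]])

triples.head(10)
###Output
_____no_output_____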
###Markdown
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/10.Clinical_Relation_Extraction.ipynb)

Clinical Relation Extraction Model

Colab Setup
###Code
import json

from google.colab import files

license_keys = files.upload()

with open(list(license_keys.keys())[0]) as f:
    license_keys = json.load(f)

license_keys.keys()

import os

# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null

os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]

! java -version

secret = license_keys['SECRET']

os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID'] = license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']

version = license_keys['PUBLIC_VERSION']
jsl_version = license_keys['JSL_VERSION']

! pip install --ignore-installed -q pyspark==2.4.4

! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret

! pip install --ignore-installed -q spark-nlp==$version

import sparknlp

print(sparknlp.version())

import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl

spark = sparknlp_jsl.start(secret)
###Output
_____no_output_____
###Markdown
1. Posology Relation Extraction

This is a demonstration of using SparkNLP for extracting posology relations. The following relations are supported:

DRUG-DOSAGE
DRUG-FREQUENCY
DRUG-ADE (Adverse Drug Events)
DRUG-FORM
DRUG-ROUTE
DRUG-DURATION
DRUG-REASON
DRUG-STRENGTH

The model has been validated against the posology dataset described in (Magge, Scotch, & Gonzalez-Hernandez, 2018).

| Relation | Recall | Precision | F1 | F1 (Magge, Scotch, & Gonzalez-Hernandez, 2018) |
| --- | --- | --- | --- | --- |
| DRUG-ADE | 0.66 | 1.00 | **0.80** | 0.76 |
| DRUG-DOSAGE | 0.89 | 1.00 | **0.94** | 0.91 |
| DRUG-DURATION | 0.75 | 1.00 | **0.85** | 0.92 |
| DRUG-FORM | 0.88 | 1.00 | **0.94** | 0.95* |
| DRUG-FREQUENCY | 0.79 | 1.00 | **0.88** | 0.90 |
| DRUG-REASON | 0.60 | 1.00 | **0.75** | 0.70 |
| DRUG-ROUTE | 0.79 | 1.00 | **0.88** | 0.95* |
| DRUG-STRENGTH | 0.95 | 1.00 | **0.98** | 0.97 |

*Magge, Scotch, Gonzalez-Hernandez (2018) collapsed DRUG-FORM and DRUG-ROUTE into a single relation.
###Code
import os
import re
import pyspark
import sparknlp
import sparknlp_jsl
import functools
import json
import numpy as np
from scipy import spatial

import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline

from sparknlp_jsl.annotator import *
from sparknlp.annotator import *
from sparknlp.base import *
###Output
_____no_output_____
###Markdown
**Build a pipeline using Spark NLP pretrained models and the relation extraction model optimized for posology.** The precision of the RE model is controlled by `setMaxSyntacticDistance(4)`, which sets the maximum syntactic distance between named entities to 4. A larger value will improve recall at the expense of lower precision. A value of 4 gives perfect precision on this validation set (the model produces no false positives) and reasonably good recall.
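For reference, trading precision for recall only requires changing that one parameter; the sketch below (illustrative, not part of the original benchmark) instantiates the same pretrained `posology_re` model with a looser distance threshold of 10.
###Code
# Illustrative higher-recall configuration: a larger max syntactic distance admits
# entity pairs that sit farther apart in the dependency tree, so more candidate
# pairs get scored (higher recall, potentially more false positives).
reModel_high_recall = RelationExtractionModel()\
    .pretrained("posology_re")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(10)
###Output
_____no_output_____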
###Code documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") tokenizer = sparknlp.annotators.Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens") words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") ner_tagger = NerDLModel()\ .pretrained("ner_posology", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_event = NerDLModel()\ .pretrained("ner_events_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_chunker = NerConverter()\ .setInputCols(["sentences", "tokens", "ner_tags"])\ .setOutputCol("ner_chunks") dependency_parser = DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["sentences", "pos_tags", "tokens"])\ .setOutputCol("dependencies") reModel = RelationExtractionModel()\ .pretrained("posology_re")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4) pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_chunker, dependency_parser, reModel ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) ###Output embeddings_clinical download started this may take some time. Approximate size to download 1.6 GB [OK!] pos_clinical download started this may take some time. Approximate size to download 1.7 MB [OK!] ner_posology download started this may take some time. Approximate size to download 13.7 MB [OK!] ner_events_clinical download started this may take some time. Approximate size to download 13.7 MB [OK!] dependency_conllu download started this may take some time. Approximate size to download 16.6 MB [OK!] ###Markdown **Create empty dataframe** ###Code empty_data = spark.createDataFrame([[""]]).toDF("text") ###Output _____no_output_____ ###Markdown **Create a light pipeline for annotating free text** ###Code model = pipeline.fit(empty_data) lmodel = sparknlp.base.LightPipeline(model) text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day. """ results = lmodel.fullAnnotate(text) results[0]['ner_chunks'] ###Output _____no_output_____ ###Markdown **Sample free text** ###Code text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day. 
""" results = lmodel.fullAnnotate(text) ###Output _____no_output_____ ###Markdown **Show extracted relations** ###Code for rel in results[0]["relations"]: print("{}({}={} - {}={})".format( rel.result, rel.metadata['entity1'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['chunk2'] )) import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df rel_df = get_relations_df (results) rel_df text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . 
""" annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown 2. Clinical RE **The set of relations defined in the 2010 i2b2 relation challenge**TrIP: A certain treatment has improved or cured a medical problem (eg, ‘infection resolved with antibiotic course’)TrWP: A patient's medical problem has deteriorated or worsened because of or in spite of a treatment being administered (eg, ‘the tumor was growing despite the drain’)TrCP: A treatment caused a medical problem (eg, ‘penicillin causes a rash’)TrAP: A treatment administered for a medical problem (eg, ‘Dexamphetamine for narcolepsy’)TrNAP: The administration of a treatment was avoided because of a medical problem (eg, ‘Ralafen which is contra-indicated because of ulcers’)TeRP: A test has revealed some medical problem (eg, ‘an echocardiogram revealed a pericardial effusion’)TeCP: A test was performed to investigate a medical problem (eg, ‘chest x-ray done to rule out pneumonia’)PIP: Two problems are related to each other (eg, ‘Azotemia presumed secondary to sepsis’) ###Code clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_clinical", "en", "clinical/models")\ .setInputCols("sentence", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . 
However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output _____no_output_____ ###Markdown 3. Clinical Temporal Events RE Temporal relations, or temporal links (denoted by the TLINK tag), indicate whether and how two EVENTs, two TIMEs, or an EVENT and a TIME are related to each other in the clinical timeline. There are 3 types of relations; below are some examples, with square brackets indicating the EVENT and TIME spans connected by a temporal link:
**`BEFORE`**
The patient was given stress dose steroids prior to his surgery. ([stress dose steroids] `BEFORE` [his surgery])
The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `BEFORE` [admission])
His nasogastric tube was discontinued on 05-26-98. ([His nasogastric] `BEFORE` [05-26-98])
**`AFTER`**
Before admission, he had another serious concussion. ([admission] `AFTER` [another serious concussion])
On postoperative day No 1, he was started on Percocet. ([Percocet] `AFTER` [postoperative day No 1])
**`OVERLAP`**
She denies any fevers or chills. ([fevers] `OVERLAP` [chills])
The patient's serum creatinine on discharge date, 2012-05-06, was 1.9. ([discharge date] `OVERLAP` [2012-05-06])
His preoperative workup was completed and included a normal white count ([a normal white count] `OVERLAP` [His preoperative workup])
The patient had an undocumented history of possible atrial fibrillation prior to admission.
([possible atrial fibrillation] `OVERLAP` [admission])
| Relation | Recall | Precision | F1 |
| --- | --- | --- | --- |
| OVERLAP | 0.81 | 0.73 | **0.77** |
| BEFORE | 0.85 | 0.88 | **0.86** |
| AFTER | 0.38 | 0.46 | **0.43** |
This RE model works with the `ner_events_clinical` NER model and expects the following entities as inputs: [`OCCURRENCE`, `DATE`, `DURATION`, `EVIDENTIAL`, `TEST`, `PROBLEM`, `TREATMENT`, `CLINICAL_DEPT`, `FREQUENCY`, `TIME`] ###Code events_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_events_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_temporal_events_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setPredictionThreshold(0.9) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, events_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) events_ner_tagger.getClasses() loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text ="She is diagnosed as cancer in 1991. Then she was admitted to Mayo Clinic in May 2000 and discharged in October 2001" annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!="O")] rel_df text ="On 9–28-92, the patient will return for chemotherapy and she will follow up with her primary doctor, for PT and Coumadin dosing on Monday." annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df = rel_df[(rel_df.relation!="O")] rel_df[(rel_df.relation!="O")&(rel_df.entity1!=rel_df.entity2)] text ="""She is admitted to The John Hopkins Hospital 2 days ago with a history of gestational diabetes mellitus diagnosed. She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine, 12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge. """ annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df[(rel_df.relation!="O")] ###Output _____no_output_____ ###Markdown 4. Human Phenotype - Gene RE https://github.com/lasigeBioTM/PGR
Human phenotype-gene relations are fundamental to fully understand the origin of some phenotypic abnormalities and their associated diseases. Biomedical literature is the most comprehensive source of these relations; however, we need Relation Extraction tools to automatically recognize them. We present the Phenotype-Gene Relations (PGR) model, trained on a silver standard corpus of human phenotype and gene annotations and their relations.
It extracts two labels: `True` or `False`. ###Code pgr_ner_tagger = sparknlp.annotators.NerDLModel()\ .pretrained("ner_human_phenotype_gene_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") pgr_re_Model = RelationExtractionModel()\ .pretrained("re_human_phenotype_gene_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setRelationPairs(["hp-gene", "gene-hp"])\ .setMaxSyntacticDistance(4) loaded_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, pgr_ner_tagger, ner_chunker, dependency_parser, pgr_re_Model ]) loaded_model = loaded_pipeline.fit(empty_data) loaded_lmodel = LightPipeline(loaded_model) text = "She has a retinal degeneration, hearing loss and renal failure, short stature, \ Mutations in the SH3PXD2B gene coding for the Tks4 protein are responsible for the autosomal recessive." annotations = loaded_lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!=0)] rel_df ###Output _____no_output_____ ###Markdown 5. Drug-Drug Interaction RE In clinical practice, two or more drugs are often used in combination to achieve beneficial results, such as a synergistic effect, an increased therapeutic effect, or reducing or delaying the occurrence of drug resistance. However, harmful drug-drug interactions (DDIs) can occur when two or more drugs are taken at the same time or at certain intervals, which can reduce or invalidate the efficacy of the drugs, increase toxicity, or even cause death. Therefore, in order to prevent harmful DDIs, medical staff often spend much time reviewing the relevant drug alert literature and drug knowledge bases.
**ref**: *Drug-drug interaction extraction via hybrid neural networks on biomedical literature*, https://www-sciencedirect-com.ezproxy.leidenuniv.nl:2443/science/article/pii/S1532046420300605
*(figure: image.png, embedded image data omitted)*
JhOgJSsIwr0Lb11lv30KJ/4Kgp9BPjIB8SLrzwwup9vsC/+eab+14fdUdACM6Flx5T6xYbKIsjpAXz1VZbrcAArkMOR8CUPq1p8bn2qTP5aJ6+8847a7NucgQksS5nmPrKZZddVqCvSZvh8PzGG2+U9KfS5GNMT3EEHBX+1P1x1VVXrW17udk27nA0srQD8b333iuvZY19Fw50vWLH4zZHwGHICgGua56RS0b6yIGmhX2h1TfW7dPW73z4sM0RMAef+dLqU/cceommJ7Zva1rb5GjsHFby9+VX0nf1c8nTp5/yAZ3IIz5AZE5PSB0XhAaJ27CTNF1xDlnig0kq76bykqZxItptcvNlXb/JJetD+LKOjqZ7vvmm8GGsTMqFne7HdfryeHAEpP1S2oD3u3AgTZvM0e/HyoMcslPj0DQ3pC5t+i/Pc/QvH31nLBwBc8m2n//85wUfyjI2TzHFFMVcc81VfoiPXtRko8nRxr5yKYd+Rl8gdPUX0mgeqJMlpGnjobKgoihicJV3u+LUcb8rf3musYiVB+SV4gj4qU99SsjpiY877rhKn1x99dV7nukfg2wHyvHRkXPJkC65mEMm+PJligxItU104SDt39U2OcYHytJ8kiIzhG6LDQFDwBAwBAwBQ8AQMAQGg4A5AhZFwa5MTP754q4r7LPPPtWki3ceeeSRnlfEkMAzFoKbju71dQTka51pppmmKlO2POeIz7bQpfgzYYRG/vgC8je/+U1bdtUzvcDMV5KUY8EQGDYCxrP+PKuN//D717/+9YE1V5uB8dBDDy1wnr7tttt6ysepaa211qrkETIuJmgnv6bdSnUaOWqdskJpS8FULwTTHsccc0xtdQflCNjkuIdDuIwJs846a4Hje13g2GBJx0L273//+7pkjfd23HHH6n2MbXVhPDgC5sJLG/Pq2ubZZ5+t8AJ3jMluCHUErCsnpU+79HT97qqzvO9jWGxzBBy0vvKHP/yhlGnbbLONkFzFv/jFL3rajSPPCKk0+RjTdRkYm93ArnjCw8RCG+lGhT+1MX+ZZZZxq9Dzu23cQc7rurIb6SCC78JBDj5rcgQclqwQ/LrmGblkpI8c8FmUFLrbYt9+58OHbY6AOfjMl9a2+sqzHHpJbN+OkaOxc1ipry+/kr6rn0uePv104403ruQRTv0StMwOmVvHYCdltsW5ZIkPJqm8m8pLE91uE8uXbf3DfZZL1ofwpUtD22/ffGP5kLJjZVIu7Lp4TfcDnBbrwgEHHFDJp7q5uM+4pzEM1T+hSb8fIgulPl04kK5N5qTKg1yy03ee1Kb/Utcc/cun3cfCEVD3afT8WPvW7373u2LGGWcs+/4DDzwgXakxztXGvnIph34mlenqL6RL5SEpKxRXec8nTh33fcogTao8kHJCHQG1kx801AWdps0RcJDtAF0+OnIuGdIlF3PIBB++TJUBqbaJLhykv3S1TY7xgbJyyQz5aFbot9gQMAQMAUPAEDAEDAFDIC8Ck7wjIDtHySJh0y5SGnK94M57X/nKV/TjQk/KeM5xvu7uPbywwQYbVOVieGsLW2yxRZVWaD3zzDPbXumclHF8kORF/IlPfKLxq8eXX365qgO7DOj3cJ6sqx/E4SDSVbfWSthDQ6AGAePZfzvw+vIsEC666KIV3/L1X9OuSPDyeeedV9xwww01yHffajMwbrrppiUNLF67AUc8kSsHH3yw+9jr9yKLLFLl0bSjXZMjYAxtsZhilNG7hXHNuOIGjNGCCXHKkZbaYFS3MEPZN954Y095V155pUtSScPiiy9epdtyyy370nTdYEyUemF456hhN3zyk5+s0mA8dEMOg2JqHrnw6mqb559/vsDhUjCrw+OOO+6onmOMqwtd5fBObJ+uK6/tng8tvO9jWGxzBBy0vvLoo4+WuFMfN2DMnGqqqcrn7I4l/TyVJp9+qxdS6/i9zRFwVPhTOzkhI9vkX9u4Q7vgSCj8wy6kV199tdtc1W+OposJPgsHkm8qnzU5Ag5TVlCXrgWGXDLSRw60LewL7j6xb7/z4cM2R8AcfOZLq0+9c+klMX07Ro7GzmEFixB+7ernkmdXP/3xj39cyaEpp5yyoN4SYseFGOykzLY4lyzpwgQaUnk3lZcmut0GjGP4sq1/uM9yyfoQvnRpaPvtm28sH1J2rEzKhV0Xr2kHiVF2BExpA9qhCwfStMmcVHmQS3b6zpO69N8c/ctH3xkLR0DaMods22uvvaqx+YILLiiwM+Lo0xRytbGvXMqln1Gfrv5CmlQeEtxCcZX3fOLUcd+nDNKkygMpJ9QR8NJLL636JPNWnPncoGlrcwQcZDtAk4+OnEuG+MjFVJngw5c5ZECKbcIHB5+2yTE+UE6qzHjooYeKlVZaqZh88smLz3zmMwVrjxYMAUPAEDAEDAFDwBAwBPIjMMk7Am6//fblRGvuuef2RneppZaqJmdzzDFHdeQbGaC4TjfddNVzFh/XWWedcuGRXfdwrlh//fV7FvaZoLUFnHJkEZMY54mmnQYlnxdffLHnHb3gQBocflC4db5MnK699triL3/5S7kbFLsd4pSD88xPf/rTMmsWY5deeume9zjaE0cEFrr5w4CCoYFJAQvhFgyBnAgYz/7bERDe9eFZsEfuaGcijkE/99xzi6effrpsGnYVwfi57LLLlrx91FFHRTXZrrvuWskGnOt0EGe7L3zhC/p26eAhOzxSJxZLYwJyVeQZRjNkF4YFjlCnrhytIc+Jb7311qqYGNpSMD3wwAN7aEGms6j/q1/9qjxK5eijjy5mm222njRNx9NUlWi5mG+++aq8Fl544VLGS3LkvQTGKsGInRjcHW932mmn6jnjwp/+9Cd51Tt2v2zXX/CzUKV31YCWI444oi9v2lboXHnllfuecwMHKEmzwgor9KXJkUcOvHzaRn+RDq8I7hgi4VVtEMTBoM4536eclD7dB3DLDR9aMCxTF2lD+kZdmHfeeas0GM51GLS+Ik4Y0CiyVMpH5gjt6FoSUmny6bfauMyRUm5AlghtxBz3LWFU+POcc87pofHUU08VEguc9XQfbxt3eOn6668vGBOkzuyCfdBBBxU4W9MeyMDrrruuQJbgYB8TNI+ecsoprVmk8hljqNSFXTF00HQMUlZQZtc8gzSpMtKVAyxa1AV9LGPshwzk69vvNB/ieFsXzj777Kqd+FBBhxx85kurLrftOodeEtO3Y+Ro7BxW6q/5pItfffo5+TYthPH+fvvtVzmGw7ssiuoQOy7EYKfLbbvWGMXIkmHxbiovTXS7DW0cw5dtfaPuWaqsJ0/d57r4so6Gpnu++cbyIeWmyKRU7Hx4TTsqTj/99D22S8FNO4xsvvnmcruK9bjXNPdK0T8pKKUNfHCgjDZ9IYc80P0tRnZCo888iXRd+i9pUvuXT7t3zbmhwzdo/LrkQA7Zht4vOrWOsYNgW+KjDveUBE1jbBvrPLrqmUM/A/+u/pKDh6SdY3CVd7vi1HG/K395nkMekJeml7Uj5FxbwHap+yL2XuY2jz/+eGknRafUDs0f//jHG
7MbZDtQqI+OnEuG+MjFVJngy5c6XYwMSLFN+ODg2zap40MOmeFuevLVr361sT/bA0PAEDAEDAFDwBAwBAyBeAQmWUfAs846q8fIwiLhcsstV2y11VaNaHIkIgs++qheJmnsVsJRvxJQ7GUnGD2Jk2t2h5FrYn6zsHfzzTdLFj0xk8U555yzemfnnXfuea5/4BjAV3JLLrlklZ4yPvKRjxQ4cegjD1j81F9NCU04DE0xxRQ97+vdEp977rniAx/4QM9z3qXObr3XXHNNTZ5dGwLRCBjPFqXDQgzPAvoJJ5zQx7PwrRyHIvyP4ROH4JBw/vnnF+zCp+UGcpKjheToW3G2o5wVV1yx3C0UZwztWLzddtuFFNuTVu8UKXXRsd4xkPvgiEx86qmniljaYjHFaLLGGmvUtofQPO200/Y85wvVfffdt6fOvj/WXnvtnrzYFZI2mH322XscX3AwZxwUGnAwow3Z2ZUxRO7jpPiTn/zEt/iedDjx6EUY8sQoKveI9fjCMcX0Cwx7GFK5nnrqqStaeJ8ddi+88MKyHMY4dszSYyZfmNLGOBPlyEMqlAMvn7Y57LDDeurLODv//PNXzk0syEnbENO3dtttt/IrbaHVpxzSxvZpKccnbqNl1VVXLWnX/ZA6YfRE9xHHVRadtPMvaTDE0vb33XdfRcYg9RXthIEcZYdkjN0stIguxIchfASiQwxNPv0W58N11123py8gR6BLMNl///17ZC64ffSjHy2xRRaONX8KTnfffXdPPaCTj3DQbXEQ5eMUn3FH8jvttNP68iNPVx/HYTwk0N58DINzIfnxh9xETrV95BPDZ4cffng5R9FzEPoXY6847w9DVoTMM2JlJO/BR64cmGeeeXrkAE7zehED/BnrZWwPaUvSdvU7HGvrxqCNNtqoGoMee+yxUg5pJ2XmVaQRx9wUPpM6ddEqH3BJ+q44l14S2rdj5WjMHDaEX0P6OdhqR8APfvCDpaxCnxa5QLzgggs26k4x40Isdl19geexsoR2GSbv5uCliW63oT1D+dKnj+g0sbKePEL4UpfZdR2TbwwfCh0xMol3Y7HzGSfZ2QzdWc+JkEV8oCLjEfNn7JBaH0J3ZP7nO2/KoX8KjqFt4IMDefvqC6nyIFZ2MgZLaJsn4dgTov/G9i+feUfXnFvq4xPH8Cv5psg2xnnsIXqcrrtm93m9K1tKG8fUM1U/6+ovuXkoFleffkKaHOO+b1kp8oCNEpC/Cy20UE8fg4e53/bh0mqrrdbzju6X2LWwecg97K+bbbZZKa91vQbZDj46cm4Z0iUXpe4xMiGUL1NkgNAZa5vowsGnbYSG2PEhp8zADiV9mfjII48U8iw2BAwBQ8AQMAQMAUPAEMiIwCTrCKi3xteKJ04HTUHvwqHf4ZrFKR2Y2C2xxBI9u3AxSWORkWMgOVLRzePEE0/UWfRcf/nLX67Sy4JuT4L//OA4Xjdf/ZtdT3RgZxiO5eTrYJ2Oa5wtWOD8+c9/rl8pr3kPJx591Ka8j/GQxei77rqr7z27YQjEImA8+2/kYnmWt1kYZnc0vTsgfMtvDErszvbaa68FN9E222zTJz9EHuBARsAAL85e8kxivmo9/vjjq+Mzgwn4zwvIUL0YS/7kjUGBozrr5BXHJKfQFospRt1tt922zxETJzt2i2E3PsFHYhz4YsI999xTMLZJPhK/733v6zMaghMGNH18saRnnNhkk00KDEwpgbppAyb5k/c+++xTHsdz5pln9tG6xx57lM+FFjeW46BxYnWfye/11lsvSx667ql4+bYNBkqph8TLL798wXEzBHY1lvsS8yW7BN9ySB/bp6WsrriNFr2rhNRDxzirEdwdjXWaSy65pIeEQekrf/7zn0vnNO0EpunAUNukr4XSBG/ovPW19H0+ZNH39bXslljH15KOdiGMJX/qhsOBUWiTGCdA+JgdGH3GHZ3fbbfdVu76J3npGAcddmWlXUICztQ6H33NB0ZtIZTP9A4AuhyuWWSSMGhZETrPiJGR7g4Tbn1FDsguxu5zfjO2x4S2fqePZXbLFD7ESd59Jr/ZkVdCLJ/J+8RttLq7lOr3mq5z6SUhfTtVjobMYUP4NbSf6515aW/0apxvPvnJT5Yf6H3nO9/pcc6va4PQcSEFu7ry3XsxsuSJJ55o7P/gMgjezcFLE91uQ9uG8KXbF3x+x8h68g3hSx86JE1svqF8KOUR826ITJJ3Y7DzGSfb+FF2qj3ppJMaeRZ73jD1T8EjpA18cCDfEH0hVR7EyM6QOVuo/hvTv3zavWvOLe3pE8fyK3nHyDZ2mpdxmznAeeedVzpYYt/meErXNk4aHWLbOLaeKfpZV3/JyUOpuGqM265zjPtt+etnsfKgy6aBM1lTePLJJ/ucVNErWUtifqOPBpY5BvNjCYNuBx8dObcMabMf6ZMDwCBUJsTwZawMkDYijrFNdOHg0zaahpjxIafMOOOMMyodhA9r6fsWDAFDwBAwBAwBQ8AQMATyIzDJOgLmh7I+RxZi2KmI46z4cmZUw9/+9rfyCM3LLrus3HmJHS26tq2nLhyFwrFyV111VbnTEu/pI9tGtb5GlyHQhMBE51nq/corr5THeXPcCQYIfg8jIFPYHYvjf/k6maOKHn744WQHQE37Cy+8UMpb6saXuBg3fEIqbbGYvvnmm+VusIwT+ohOH5pD0kAfuGPoBveuspDjHEFCWhyrMLjmlO0YtO+8887yyB2czRmDxnNIwcu3bTDssTPi1VdfXXAdGnzLkXxj+7S83xaH0tKWl+8z2mgQ+grGUD58uOKKK8oj1pE77MLiEwZFk0/ZbWlGhT/5cAYnxosvvrgcq3Lo0c8880w1RrATjl58bcNkUM8GwWfDlhU+2KTISJ/8c6YZRL+roy8Hnw2C1lx6SUjfTpGjsXPYujYZhXuh40IKdl31TZUlXfnnep6Dl6DF5oDpLTKeZH1bbUP5UOcVK5MmCnYai5TrlDZIKVfeTZEHqbJzEPOkid6/fHUObE+yAyYfjNZ94Mi46n784e7wntrG0s9C4lz6WUiZvmlz4epbXq5x37e8FHngW4ZOh22SXTeZB7PWQn/zCcNuBx+acqUJlYu+MiGWvlwyINQ2EYqDT/3Gcnygz15++eXF66+/7kOqpTEEDAFDwBAwBAwBQ8AQiEDAHAEjQLNXDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUNgtBHgFArZRY3TGZrC3//+92K66aar0v76179uSmr3i6I83cNwHfuuYP177NvAKDAEDAFDwBAwBAwBQ8AQMARGDQFzBBy1FjF6DAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgGYGtt966cu7bbrvtGvNjh6w55pijTDvPPPM0prMH/0bAcB2NnmDtMBrtYFQYAoaAIWAIGAKGgCFgCBgCo4SAOQKOUmsYLYaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhkAWBk08+uXIEnGaaaYrHH3+8Nt9v
fetbVTp2WbPQjoDh2o7PsJ5aOwwLaSvHEDAEDAFDwBAwBAwBQ8AQGD8ImCPg+Gkro9QQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBDwRwPHvHe94R+XkN9dccxXXX3998eabb5Y5PPvss8UXv/jFYrLJJivT7Lnnnp45T9rJDNfRaH9rh9FoB6PCEDAEDAFDwBAwBAwBQ8AQGCUEzBFwlFrDaDEEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMgWwI3HLLLcV0001XOQO+5S1vKaaaaqpilllmqe7NPffcxemnn15wRLAFPwQMVz+cBp3K2mHQCFv+hoAhYAgYAoaAIWAIGAKGwPhCwBwBx1d7GbWGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCAwFNPPVUcdNBBxUorrVTMN998xWyzzVasvPLKxQ477FCcccYZxd///veA3CypIGC4ChJjG1s7jC3+VrohYAgYAoaAIWAIGAKGgCEwSgiYI+AotYbRYggYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgEImCOgIGAWXJDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDYJQQMEfAUWoNo8UQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQCETAHAEDAbPkhoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChsAoIWCOgKPUGkaLIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIRCIgDkCBgJmyQ0BQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAyBUULAHAFHqTWMFkPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUMgEAFzBAwEzJIbAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAqOEgDkCjlJrGC2GgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGgCFgCBgChoAhYAgYAoaAIWAIGAKGQCAC5ggYCJglNwQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMARGCQFzBBxQa7z++uvFj370o2L//fcvdtppp+KSSy4puOeGv/zlL8VVV11V7LXXXsUee+xRXH755cWbb77pJrPfhoAhMAQEHn30i8dgkQAAIABJREFU0eK0004rtt122+JrX/tacf/99/eV+q9//au49957i29+85vFNttsUxx33HHFI4880pfObhgCEw0B+v4vfvGL4qCDDio233zz4tJLLx3JKv7tb38rfvCDHxS77LJLSedvf/vbkaTTiDIEDAFDYCIgYDJ3IrRiWB3Giz4QVitLPREReOyxx8q52hZbbFEcc8wxE7GKVqchIvCrX/2qOOSQQ8r5xbnnnjvEknuLevHFF4vvfve7pc2CORky2cLYIjAqfWNsUWgv/aWXXiouvPDCqt/W2cfbc2h++tprr5V2vF133bXYcccdS/549dVXm18YgyfGt2MAuhVpCBgChoAhYAhkQsDG8UxAWjZjioDZR8YUfivcEBgzBMwRcADQX3TRRcXb3/724i1veUvP3+67795T2lFHHVW87W1v60nDOzgFWjAEDIHhIfDyyy8XK620Uh8vTjPNNMXTTz9dEfL73/++WHDBBfvSTTfddMUzzzxTpRvPF9///veLtdZaq9h+++2LF154YTxXJTvtkzI2OLTPNttsPX3/gAMOyI5xSobw57rrrlvAt3r8vfPOO1OytXcNAUPAEGhF4Lnnniu+9KUvFauvvnr54U9r4gn00GTuBGrMgKqMB30goDojkXRS1i8H1QAnnnhi8YEPfKBHH1xjjTUGVdxQ8p0U+smo1nGfffYp5pxzzp7+hMPRsMMtt9xSrLzyysXkk0/eQ8s///nPPlImVd2kD4gB3wjpG8Nqk2GV4wvtXXfdVay66qrFFFNM0dNvWVDPEa688spipplm6skbW8Caa65ZZn/22WcXq622WrHvvvsWfLwy7GB8mxfxUR0n8tbScjMEDIHxjsCojcXjHc+xpD9kHB9LOgdR9qD7sY3pg2i1+jwnon2kvqaT7t1B8+uki+zEqPkk7QjIV+Gbbrpp1N/Pfvaz2h5wzjnnFJNNNlnx1re+tdh7772L2WefvTJIzDrrrNU7Bx98cHl/6qmnLhcPZ5hhhirdJz/5ySqdXRgChsB/ERgEz7Ir5zLLLFPy31JLLVV+4a+diL797W+XBDz++OPFvPPOW6bDkPjpT3+64lnS33bbbf8ldJxePfvss8WUU05Z1YudES38G4FJHZuFFlqobwFs1BwB77777pJGd6HOHAGNiw0BQ2CQCOy5557VuMkHPjjITQrBZO6k0Mr9dRwP+kA/1aN7Z1LXLwfVMuyS5uqD49kRcFLoJ6Ncx6WXXrqvP+22226D6r6N+Z566qklHXyEqO0VdY6Ak6pu0gjegB6E9I1htcmwyvGFlJ36kcfvete7evotdrjU8L3vfa/6uJ56zzHHHFUZs8wyS3mSh+aVQw89NLXI4PeNb4Mha3xhlMeJRqLtgSFgCEySCIzaWDxJNkKmSoeM45mKHJlsBtmPbUwfbjNPNPvIcNEbH6UNkl/HBwJGZRsCk7QjIF8IaqNAyDVHGriBLxpx7COfz3/+8+Vj7m2wwQalYyDHBBPuu+++qtwjjjiivPfkk0+WO5Lxde/pp59e3rN/hoAh0ItAbp4ld3bxgWdx4JUjRDEoYqjEkRfeJHz2s58t080888wFOwgSTjrppJLnF1tssYLjSMZ7+MlPflLJJjDZcMMNx3uVstFv2Pwbyv/5n/+p+sioOQLqxn7HO95R0WmOgBoZuzYEDIHcCLg7Cj/44IO5ixj5/EzmjnwTZSdwvOgD2SueOUPTLzMD6mT3v//7v5U+OJ4dASeFfjIe6vj//t//q/rTWDgCSvf+9a9/XdHBnL3OEdB0E0FrOLFP3xhWmwyrnFBk//znP/f021RHQHa9wDYHD2yyySYlOdjf0U/4MH+//fYrzjzzzJ4yWSAbq2B8m478eBgn0mtpORgChsBEQGBUx+KJgO1Y1cFnHB8r2gZV7iD7sY3
pg2q19nwnin2kvZaT5tNB8uukiejEqvUk7Qj497//vTz2E0c9jAf8sRvWww8/XPvH14OSrs4R8Nhjj62eM5jp8Mc//rH6yZGbks9TTz1V3ceAh3FkLMPrr79e7LjjjqXBZCzpsLINgToEcvPs//3f/xXvfve7S35ksNQBw6QcHcLuPnIEz9Zbb62TFXzB8o9//KPn3jB/5ORZ8pJjtDje/Oqrrx5mVUa6LMPm380zXhb+zSllpNlpaMRdfPHFpU4z1rrV0CpsBfUhMIw+cMYZZ1R6/XLLLddHw6Rww2TupNDKvXUcL/pAL9Wj98v0y8G2yUQxdE8K/WQ81NHH2WuwPfrfufssRJpuMoyW+G8ZPn1jWG0yrHL+W3u/q9yOgFq+X3PNNT1EiP39mWeeKd7znveUevq0005b/PKXv+xJN8wfxrfpaI+HcSK9lpbDRERgGDaJiYjbeK7TqI7F4xnTsabdZxwPoXE8yIVB9mPfMZ105qvg17N8sNL683j+UNIPkYmTykdeDJJfJw6Sk25NJmlHQGn2I488slrAm2qqqeR2X/zoo49W6eocAdkVTBz8+DqxLrz55pvFNNNMU6abfvrp65KM6b1TTjmlpG3VVVcdUzqscEOgDYFcPHv55ZdXPLvTTjs1FqnLw+F3lEJunsXZEkfmP/3pT6NUzZGgxbApyi/sZZyzHQFHolsaEQ0I4KAtxwLecsstDans9kRGYJh94PHHHy/oZ//6178mMqSNdTNHwEZoJuwDcwTM17SmX+bD0s1pIhm6J4V+Mup19HH2cvvgIH77LkRO6rrJILBvytO3bwyrTYZVThMedfdzOwLKB6zYJvQH9m7Zr776avHDH/6wOtXDfT6s38a3eZAe9XEiTy0tl4mEwDBtEhMJt4lQl1EciycCrmNVB99x3Ie+8SQXBtmPfcb03OuePu0zXtP4YDWR7CPjtZ1C6Q6RF4Pk11C6Lf1oIWCOgEVRaCefNkdAFvf4ihBDQ50jIO/y7G1ve1vt8Rw0PceMihPF/PPPP1q9oSiKRRddtKTPHAFHrmmMIIVALp7V+Rx00EGqhN5LjvoWvj333HN7H47xL+PZMW6ASaz48bLwb04pk1jHrKnuZZddVsltcwSsAWgSuGV9YHiNbDJ3eFiPSknjRR8YFbyMjrFBwAzdY4P7RC3V19lr0PXPuRA5aFonlfxHpW+MMt45HQHfeOON8vhfsdFx0seoB+PbUW8ho88QGAwCZpMYDK6WqyEwbARyjuMmF/xbz9Y982Jl9hF/PEclpcmLUWmJ8U2HOQIGOALS1Pfee29x5513Fi+88EJPy3M8qBghpphiip5n+sett95apVtooYX0o9ZrDBs///nPix/84AcFx5SGBHb2+vGPf1zuVIJXcFM4++yzK9rMEbAJJbs/CghoB742511obeJZnrG1tPDtEUcc0Vi1j3/841W6OifguhdTeJYvYqD7uuuuK+65557ilVdeqSuiGATPYlS9+eabi+9///vFX//619py5SZHrtx2223F9ddfX9x9993F888/L4+SYpyun3jiieJnP/tZce211xb33XdfJy1SIMc5P/TQQyV2yNtHHnmkYCfWphCSPgQbynvxxRfL8QJ8HnjggaQjpJHjfMn+m9/8ptHRvKmOf/jDH8o2oq2oQ0hgbLvhhhuKm266qWBX3A022KDihfG4IyBjN05hYEndQkIKT+tyUtpS51N3ncI7dfnV3Yvl+5R+KHTAy3fddVfZhhzf3hRYaFp22WWrvhriCDjW7ewr/92656LbzVf/jqWNdkOOw3ePPfZYkiwcdB94+eWXS3ktco9+67PLHx/6MG4yZocE3/roPGN5UOfhcw0Wt99+ezmHePrpp1tfaXIEjJW5se1QRyRj14033ui9EwzHyDFuN8mYl156qfjpT39a6j/oEL4hR51853QuTexUz1wQvaqpXu477u9R0gdixtEc+Asm6FJg+Ytf/EJu9cTIDPQ1+hH6bFcI1S/JL7YvdNHiPs9dV51/yrjlOx41Gbr/+c9/Fg8++GA5z8DG4iPnoT2nnhUq/337ScjcRreHvh6UHNRl1F371rHu3Ri5QD6+fYm0Ps5efLHPnAudoEnekoZxpC2NriN9RWwDyJ077rij0nGxZdCf68IwdRPKh070PPQ9MAida9XVwb2XkwfdvN3fufsG+Q+rTULLiRlTQsfVnI6A2MTFjvfWt77Vbbqe3+wIiH3rRz/6kbeslwxi5Qrvjxe+zWmzEtxC45BxK3ScyFm/0HEbHAY9b8uh40MnbcDJMDi6+OpEde0c0pZ17+e4NwwdBvnnM09OsUvR33LZUAahz49lW+fWBVJkfVufDR2LJa8YWSPvxozn8i6xr+0kxr6b0m6xsjSGTsEjdhyX95viUZELTfS590P7cYj+3DWmD2Ld061f6u9RGYd9scptHxH8UniNPHz0kNC5h9DmxsydmbPKejZ9NiSE1BXZdc0115RrWU1lYG/HLoDPkRtC5cUg+dWlzX6PLwTMEdDDERBFhQXB+++/v691ERTnnXdesfHGG/cYItZZZ52i7m+ZZZap0rG7oE5z8skn9+V/xRVXFIsvvngx+eSTV+9h8MCJ8Pzzz+9LLzcwduy+++7FLLPM0vMe73KEwkUXXSRJS6Pihz70oZ50lPfOd76z56/JwFhlZBeGwJAQ6HIEbONZSISXDz/88GKuueaq+j18oflRX3OMtxgaP/zhD/ekcw38sTwLXSxUL7300gXOxFIe8ZRTTllsvvnmlQMyCwE5eZbJzcUXX1xssskmxQwzzFCVXec4DLZnnnlmMe+881bpNK0LL7xwufgZ0xVQjnbeeedi9tln78sbAy/0NS0o/O53vysxmmyyyfrenW666Yp99923hyTf9CHYUAD4XHDBBcX73//+Pjpmnnnmsn5nnHFGof+QrRizTzjhhOITn/hE8a53vavYaKONSqeFww47rC+vueeeu1xc6amQ8wOD51ZbbdU3BnA0/YYbbtjqtInxb/311+/hD93Gch3jCEi/dccW+Y3BQgfGPp4tsMACPQtcKP70EXlP4gMPPLB6XTulsFiPE/1qq61W6P7BOLfNNtuUC4DVizUXoTyduy1rSOq7lcI7fZnV3Ijl+9B+CL+hbyEH5ptvvrKNf/nLX5aOf2uuuWa1K7P0QdLhSKDDl770pWLqqafu4T9kuPQTYsYQN4x1O/vK/1S63fd9fsfSBt/hkOm2B2Majvj0WzeMVR9AbuP07+rb9LX3vve9xaWXXuqSWjqaH3roocWSSy5Z9bctt9yyJ12O+pBhLA/2EFPzA4MD84/VV1+9YIxi7MHJYN111y13OBdeYwxmBzjGzrqQS+bGtANjKA4HX/ziF8s5DjyOTsNcSM/P2LEdp3yMNieeeGLPeIvD4pe//OWe8fbtb397sd5661UfY+BQDC5aR+N67733bl0wi6mTxjhkTqffY6zcfvvta8fyRRZZpLj88st18trrGH2gbZzn4wwJ9Dktl7n+4Ac/WD7OOY7G4A/f4ji5yy679IxFGAqhG11KeA
N9WD4SAnM+LHJ1ZE4BwOlMB8rw1b3lvdi+IO/XxcOoq1tu6Hir3w8dj1xDN4bUr3zlK31zDeaEbR8NxOhZOeR/SD/xnduA57DloG5D99q3jjnlAjSE9iXeaXIEZBzCeM+84t3vfnclH7QxX9JsvfXW5XgrMoR5Sl0gPf0XPUpOHpF33Ji0EpCzw9JNpEzahrrr8REa0R2WW2654lvf+lbP3LfJiVryq4tjeLAuH597OfsG5Q2rTXzK0fWPHVNixlXKRf7qvuva0TRtTdfY37FFrbXWWj15adudvv7IRz7So9sjb3TILVfGC9/G2qw0djmufcct33FCaIqtH+Wk2iKgYVDzNqlfrI4PD55++umlPZuxaqmlliqz5ONr5oHCn8wHQ4NvW+aYJwxLh0mdJ8fapWJtKIOeu0if8G1rSZ8zjtUFcsv6tjr5jMW5ZI3QETKeM06F2E6kDOJQ+668G9tusbI0lk7ojR3Hpa5d8bDkAh8lIuddewu/WRshYOOse47PgE8/duvqqz/7jOm+656c2lZXB+4xN5GAo3tdOpznYsIojcO+WMl8MZd9BNxCeS1WD4mZe/DhHX0S/xjWFWl/ZB/1n2OOOSqdB92H39/5zndau0JIXXHGO/roo8t5MPNhylhjjTV68n/qqafKNB/96EerXc6xNergKy8Gya+aHrse3wiYI6CHIyAKIww755xz9rW2PJMJU0q8ww47VPmzE9cXvvCFSiixCMlk7FOf+lSPIwNOIm5g5ybt4LTiiisWpEPwifAhZrJHYICfaaaZCr2Ix0Io9/SfDBhuefbbEBg2Al2OgMKXdTwLrSiDKbyq3xXnpRSe5QsYTROOYNtuu22pnOAMJeWhHBBy86zgJeVIXOcIiCyR5yxKfPrTny4222yz0lFC7rPDQWhgQWDWWWct82aBFXm36667FixWS77EK620Ul/WKJKzzTZblY4FV45zXmWVVSqZh4OdhJD0IdiQP4s7Qi+OTMcee2y5KKwXheS5xDjUybWOkb/6t77GYdPdmVbqh6IrSi2yHsxwosSBUPKANhRTN5xzzjk9C9ykw5Fiu+22K1ZeeeXSKVXyiHEExHFLO8STF9u8b7rppgW7LElg1zcphxhlXQITLtLjMMAz6sjY+N3vfleS9IxnON3rvNxr+lldiOHpQw45pLas2Laso8u9l8I7bl5Nv2P4PrQf0q4zzjhjH35tvENbuv0QpyzwFn2HNDgDa33mq1/9alXVsW7nUPkvhMfQLe/6xrG04ZwJX0kbwIM4VO22224V39Iu3GdXGwlj1Qe0vs2Yi5MiY5B29oFejEc66HGb5/xpR8Bc9aHMGB7UtNZdf+1rX6vaSOgnxmFO/9bXOGFjsHCDnkPEytzYdmCOpGnk+qSTTirQmdz7OHZJv9TP9Acf+j7XH/vYx0rnL/e+/n3ccce5kJS/Y+skmYXO6eQ9dqjQzmjoUvAfjoy6X9PPm+Z3sfpA3TjPRyLoISwCSMCopXfcZmGQuXDOcTQGf75Exkio25frunuSBqdR5tltY/173vOe8itnqX+ofhnbF6S8unhYdZWyU8at2PFIG7ppLy2rpP0kxmmdxWU3xOhZueS/bz8JmdscddRRQ5WDLp7ub5865pQLsX0JupscAZmTST/SsXYExOlJP5PrOkdA2pO5q6Sh3zKfo3yc8vWcjjRajg9TNwETnJz1uE/53/72t8vxRuh3Y+R/SIjhwZD8Je0g+gZ5D6tNusqRehLHjikx46qUS7/WfSHGEbBJXuh8265ZfJaQU66Q53ji2xiblfvxneAYG4eMW03tXmevhJ6Y+vHBcQ5bBOUPYt4mOMfq+Hz45c7xsJtiW9Yfy8I/a6+9thTnFYe0Zeo8YVg6TI55cqhdKtaGMkx9PqStvTpPQKJYXSC3rO8iuWsszjVHEDpCx/MQ2wknP0kIte/Ke7HtxvsxsjSWTspLGcelvl3xsOTCb3/729LxSK+VId9ZJ91///1LMnF8x/FIxgbsRNi+WNPr6se6nqH6s8+Y7rvuSf/n4xDWUUX/wzaD3QuboATWmJhDSf9njeAzn/lM6SgmaXzjURuHfbGS+WIO+whYhfJarB4SM/doGheljzTF7vqS9InQurIe4pbhOgJin3XTuI6AvvJikPwqGFg8/hEwR0APR0AGRhgTxwo38EUigwm78wnz4kTHvbo/dpuQdPPMM09PGn10ELtXSbrPfvaz1W4UlM/XxvKMCRvbNktgO1q9aO4a2RgM5V2EhA7f/OY3q2d2NLBGxq5HDYEuR8A2nqUufMEGf6LgCj9gWK/jWe7pryPZ6UOnE2NiLM9Cj56Y4sTLlzsScFgRGolRmCXk4lkMDhzhw1bFuizXsEbZMkFA9mhHMpQs2YE0xhEQOSdl49SlwyWXXFI9I427YIKTmryLA6UOX//618tn2hEwJL0vNpTJl0hCB3JYO+pxpIM8I2YCAm20IV9hoQCusMIKPWlwdDv44IPL8YUvd3V/JQ+MYG7AMVV2dWRRhuOlJbz++uvlIpLQoR1WSHPqqadW5bOjAzsmuQG65f0mBdl9x/3NQrDeIQxnAzfsueeeVTmUx05PbpD+j2OhG/RCL2XhOMiOi3xFtN9++/UsBLM7GcYYN8Tw9GWXXZatLV16mn6n8E5Tnvp+DN/H9EP6J3zAn96VTdqP/sgEl36nFxy55ksvN2injbZdfsa6nWPlfwzdLkZdv3PQhmMMu7BJ4KtedrgVOYLBhq/XCGPRB1zZjFFHAvRox2V3Us7iN2OnlotaruaqTwwPSh3a4iuvvLJPXoljNcYhdiNgnMJZW9qLGGOEG1Jlbko77LHHHmU9RAfRtGLg0x8U4JxfN96SBtnCLu/HH398325h6DzsSsozdn9jPqfnVOwC7IaUOpFX7JwO+asdG9F5dMCgDy6CE86RbkjVBzAG6/agP9UFdnyFDvqPOATkGkdj8Yf2prGID1/Y2Yp+ouWYYIlhfZ999in4spwvivUR9aTRHyyE6JexfaEOc31vWHWVMlPGrdjxyDV045R60EEHlbr1KaecUvDBpLQfMbzthhg9K5f89+0nIXObOtlP3QchB10s63771DGXXKD82L7Eu02OgNgD0Af4Y14hfUo7Auo02iHbndcyp+ZDLMkDOeI6qLJbqzwnloUdaByWbkJZyBBOVRBa2IlQhw022KB6hpxHp+CP47hDQgwPhuQvaQfRN8h7WG3SVY7UM3ZMiR1XpVwW2KWvEMu4L8994lD7O+OuLlNsd5SVU66MJ76NtVkxh8sZQsYtn3FCaIut32uvvdao/2FL8rVFDGreRv1SdHzGJ/RbnPw0T8hOrpw6JR+0hDoChrQl9UiZJwxLh6krJ3aeHGOXCrGhDFOfD21r4csccawukFPW+9SjayzONUeAlpjxPMR2Io6AMfZdwSq23WJkaQqdqeO41Nc3HrRcEDrQvfSaJqeQaV0I2xbrfNi72FVUQlc/lnTEofpzyJgu6z6MW22+Ct/4xjeqsQ35ib5YF7DpkVfdnL8uvXtvVMdh6PTFKod9JIbXYvSQ2LlH0
7jIx7k46eHHw1ozc3v6vtaLOL1Ch5i6Pvzww6VdgDVqydt1BJQ02r7grjkIHV3yYpD8KjRYPP4RMEdAxxEQ5md3BP0nRzzWOQJKF9CCiYlUU9AOLUy06gKCQBaX8FR/5ZVX+pLx5ZYIEgSGBH0EFrthuUHy5V19jCLpfAcMN0/7bQgMGwHtCBjLs9CsJyQ4+DUFFo2E3+T4L502hWeRHZovb775Zp11oR0BcXjRDku5eRZlXOpJ7DoCcoycPGdRwg1MKHge4wioHZhdR0AUOHFuI/+zzjqrp+jFFlusokt2OpUE4tyoHQFD05NXFzakkQkFNNY5yelyXUds3tfKOLvkuIFFHo3TFlts4SYpuCdtdMwxx/Q914ZReIfjLgiMM3rRniN36oJ2eKmrY907dffYSVLo5AhjHTAyy46GkmbBBRfUScprFGSe1x3zqp1Sbr311r53cXCRvIn5SlGHFJ4mnxxtqelpu9Z9IpR32vKVZzF8H9sPpcyu9sMRQ7cfzt1u6JookX6s2zlW/qfS7WJV9zuWNhzZ9ZhWd/woMl0vHKO7umFYfYBFHd2XtCMgNLEbgDznA566wEc3kkY7Auq0KfWJ4UFddte1lld1hgf0DtdZBiOODl3165K5Odrhwx/+cNUOtAcOWRh0CSwAMGZxnANB17luvEWPkTYlrtvxD0d9nUbKKgsoinLBUD8P7Vuxczr6oJS7xBJL1DpK48gmaVjUwqlEQi59APyljKY5L0cXk4adW9zQ1UZdOlGOPtXVr/WOXRictTMO9aFP6A8fcIRzg49+GdsX3LLafg+6rinjVux4RH27+hE6J0dHSl9Fl3dDqp7Vha2PTtPVT/Qcw2cu5INNDjnoYtn2u6uOPjR3yYWUvkT52lCPQ0Vd0O2tHQF1Wj2vdR0B+dBL+iN2yLqFrDZHQCln0LoJ5TB/Elo5KYDFbR20/kI6FnVjQioP+pQ5jL4xjDahrl3lxI4pqeNqDkdAaUvaS/pem/39scceq9KRXi9+S15d40SXXCGf8cS3qTYrwS01jhm3fMaJHPXTcrzOltQ2bmu5l9tmmqrj02Z6Ywl4gg/RxdkHpyLmN8ybQkJMWw56npBLh9HyIXae7GOXGpYNJXXuEtPWIX2pLW2qLqDbsm7+7yPr2+hzn3WNxaRPkTW8Hzue826I7STFvhvbbjGyNIXOXOO42w+afg9TLlx88cU9utBXvvKVkiych+Qje+yedaGrH6fozz5juu+6Jxtx6I+x6tZumYewDobjo96Mqa7eTfdGdRyGXl+sumShj30khddC9JDUuQe4aDnrzrt5Dn/ozSiWW265nuZPqavG2nUElEKwNcucpk7PIJ2PvCDdIPlV6LV4/CJgjoCOI6AwXl08LEdAnCKk/L322qu2d7GduqSR40I5qkvuEV911VV97+60007l4ivHM+HRrIPvgKHfsWtDYCwQ0I6Aus+71208C925HAFjeRYa9LEVSy21VB+cHInKUUDswMMOIzrk5tkuJfy0006rZAzGftd5CmMLRqPnn39ek+l1jbzC6MQuHTiruUEb9FCkdNBOYziq6YVYJgPQpA14oekpqwsb0sjRxvRDvi5xg148YtHRDT4KIkceSz93v4bCGUqOPCTWTqNSFkeeSBryuf7668tHOEhIvjjoaAzlXeJcjoBamXf5lN3boIVjr+SrZH6ziCyBL7ZlkqcdGOS5VvR9FuHc8TKFp6EhtS2lHj5xCu/45B/K9yn9UOjpaj8m7dJfiet2/POZKI11O8fK/1S6Bee2OJY2vUsWX9vVLbhRruYRJt3ubjfD6gPIRHQKxh7+3EVqvcgNTXWha7LNOyn1CeXBOhrb7um2aDJOsPu47AgMz7HLjw5d9SOtdnxwZW6OdtDG7DqHK01vV53ZZVTBDacfAAAgAElEQVTLEBb83OAuMLlHJqfUKXZOx4K3HuPr6KYe6DR6fNUfleXSBxiztZzWehg0vPrqq9XOhHW7Q3W1EXm06UQp+Etbd/VrbbRjd9O6oBfc6hweu/TL2L5QR0vbvUHXNWXcih2PqK9PP9LtWLfjaaqe1YWtj07T1U9i5jZd2OSQg219zn3WVUff9myTCyl9ifL1XG4QjoDMW/VYqXcR1XgNwhGwbr7U1Tc5Aljk/AILLKBJLK+Zi8pzYr1TfV/ilhupPNiSdfVoGH1j0PqiVKatnJQxJXVcHQ+OgE16cJtcGW98m2qzkn6WGseMWz7jRI76pYzbg5q35dDxaTO9AM9cRz4MTmnPmLYc9Dwhlw7TpSuBW9c8Wc8p6+xW5DEsG4rWeWPmLjFtndK39LupuoBPW7bJek2Lz3XbWCzvp8ialPGc8n1tJ6n23dh2C5WlKXTmHMelbbviYcoFaNFrOTjD8aEuH2uin6+//vqN5Hb14xT92WdMD1n31I6xdc6+8iEvRwLHhFEeh6mPL1Y+slCPFa59JIXXoDNED0mde1Bel5wljXb2gyfkVKPUuoZiPWhHwBR+BScL4xsBcwR0HAFxLOBrKP3HkYQIARTepuD7RaLPjoCLL754ZTDjCzJ2jnL/EMLQxB8LrQQtSFkEYkANCb4DRkieltYQGAQC2hEwlmehK5cjYCzPQoOefIVuTZ2bZ7uUcBQgkTvE7HJCW7hf/+dsc46MPOOMMwp2hJOy3S+VXIVt+eWXL49vbqIjND35dGFDmtlnn72ikQmGG7Rxh2N+3eCjIGoHOjcPtq8WjPgy3h035LekIT755JNLMvTxl4cddphLWvVbTx5TdgRkt0lNh3bm23HHHctn5K/bSre71LVplyEfRZ8FK6GBoz90SOFp8kltS01L7LUP7/jkHcr30jZgG9oPhR6f9tNflt50003yahX7GFbGup1j5X8q3RVILRextOkddDGkNgW+ehf+I+bYbh2G1Qd0mfoaZ+MbbrihPDZA6GTMqwtdxjHeSalPKA/W0dh2z0de8b7eyRWHcR186tcmc3Ve+jqkHXSfrfugQOfrU+eVV1656qN1DnXs0iR9g/jJJ5/URTRe+9Qpdk6n55ldNH3uc5+r6Ne77ubUB1ZYYYWqDMZzHTiWFRrZTbou+LRRm05Ulyf3fPCXd7v69Y033ljVr2kxDd1e+kmdTOzSL2P7gtTBNx50XVPGLc3boXMln37ExzvSRuyi6Rt89awubCmvS6fp6idaX6YuXXMhyvTBZlBysA7jrjr60twmF1L6EuUP2hFQ757L7so4MtSFYTkCUnZb39THyPORoLszrrsbW52+Xlc/33u+POiT3zD6xqD1RalnWzmDGFN8x9Xx7AjYJlfGG9+m2qykn6XGMeOWzziRo34p4/ag5m05dHzaTMsATprKEWLaknIHPU/IocP46ErUpW2e7GOXGpYNJXXuEtvWOfpZUx6+uoBPW7bJ+qbym+63jcXyToqs0bwcswas9Z4220kO+67UV8dd7RYqS1PozDmO6zq2XQ9TLkAHG3bgPyBzXolZU6k7iVBo7+rHuh+FztN9xvSQdU/st1IveOK3v/2tVKOcU4kjM7wTE0Z5HKY+vlj5yMI2+0gKr0Gn
ll2xeojv3IPyfOTsXXfdVfUd+tBFF11UdpHUuvpgrZ0uB+0ImMKvJSD2b1wjYI6AjiMgBiw34BSIEBiGIyC7MOkjhGQAa4unmWaakmR9hChfwYUG3wEjNF9LbwjkRkA7AsbyLDTlcARM4Vn3Xb6qDwm5edZHCd911117lCNkEwsEKC51RxeF1EfSsmvMDjvsUB5TUSf7tEMY7/ClBseou2n5ygcnbTeEpud9H2w23XTTigZ3xz+21taTrjonOh8FEcc9qafrCKjHAEnTFR999NElPNNPP32Vb90W6oJhLkdA8tPGLnGyYMFLjihmket73/teRZfenvsLX/hCeZ9JaV3wUfQ5bkvw0Y6ALl9KmrZYxmGhJbUtJZ/QOJR3fPMP4fuUfij0+LSf5qe6hcUuw8pYt7Nbvq/8d99r65fyzO2fgnNT7JbhSxu7/+ljgdlhrymwWCz0EdNvdBhGH9Dlcc2xsYyrfD2q6yF0DtoRsK1Ph/CgW6+u3z7yijwOPPDAqs1oHx182qtJ5up8uI5tB23UaDNmU4ZPnRljpe1ljNK0wifynLjNETC0TlqOhszpDjnkkIomdnBsciKhHuyaKPTzUQ16CiGnPnDWWWdVZcA/L730UgWhfNR2+umnV/f0hU8btelEOq9Q/OXdrn7N2CMYNjkC6h0WYxwBY/uC1ME3HmRd3TFFMGuLZdxy3/Udj6TePv2IXdeEli5HwBg9qwtbaG2T/zzvmofEzG18sMkpB6VNmuKuOvKeD81NciG1L1H+oB0Bjz322KovNn3sBB3DdARs65vsgiS8Q+zu+HfSSSdVz9Gr/vrXvzY1v/f9GB7synxYfaNrMRU6c8iLtnJyjSkx4+p4dgRskiu02Xjj21SbVRc/+T6PGbd8xokc9Uvlw0HM23Lo+LRNjgV4t41j2pI8Bj1PyKHD+Ogd1KVtntxllxqmDSV17hLb1m6fSf0dowv4tGWbrA+luW0slrxSZE3qeO5rO9HlaJ2v7VrWGaSeEoe2W4gsTaEz5zgude2KhykXhBbt1ET7cTKL3pRB0um4rR+n6s8+Y3rIuif2rHnnnbeae2jHRHampM7zzDNP4wlYut5116M8DkOvL1Y+srDNPpLCa9AZq4fEzD0oz0fOMg6zU6bINVl3Tq2rD9bDcgRM5dc6nrB74wsBcwT0cATk6CJ2vlhllVUaWzfXjoAsIonQIWaQfuaZZ1r/5IhffeTOfPPN10hr0wPfAaPpfbtvCAwLgS5HQB+ehdYcjoApPOset3POOecEQZibZ32UcAg8/vjjexaKRWahcLvHBYdUiIX7RRZZpJKBLDzsvvvuBV8trrPOOtV9Uch03uwwxxHKQovELITvsssufYp+aHofbFBm5Ug+FEh+S9AGBSZ87JDgBh8FUefjOgLqYww4Mr5r7OA5X9H88Y9/7MENvJtCTkdA2W2XtqJ9CT/84Q9LWmQhlgUjUdppSxYQmNzNNttsZbqf/exntaTKO+Rdd9QVLzU5paTwtBCT2paSj2+cwju+ZfjyfWw/1HT4tF/bwiR5dRlWxrqdY+V/Dro11nXXsbS5O31+4xvfqMu+uqcn2nvvvXd1n4th9AEpEFmIDi0f4iDHcQZkpxsWtWU8GUtHQGj15UGpl2/sI6/IS+sc7Papncx82qtJ5gqdqe3ga8ymPJ8651g8iq1T7Jxuyy23rPorR/+2BXb/lb5Nn0d3zq0PsGO0dixklwUCRwFT9owzzti4q7RPG7XpRJQTi7/g1tWvUxfTKKdLv4ztC1IH33iQdU0Zt2LHI6m3Tz9qM3RLPil6Vhe2lNGl03T1E/IIndv4YJNDDgqGXbFPHX1obpILqX0J+gftCLj99ttXcpmdjJrCqDgCQp/eRXbVVVct55bc5yinRRddtKpP01HKTXV076fwoJuX+3tYfaNtMVVoyiEv2spJHVNSxtWJ6gg43vg21WYlfTVHHDpu+YwTOeqXgw9zz9tSdXxpL/AR/T92Jx7JS8ehbcm7g54n5NBhfPQO6tI2T+6ySw3ThpJj7hLT1rqvpFyn6AI+bdmkQ8bQ3DYWS34psiZ1PPe1neSw76a0m68sTaEz5zgubdsVD1MuaFrWWmutagzABnTzzTfrx33Xbf04VX/2GdO1bGWe0RW0jYsNQ3DwIrBRCGMfjl2xYdTHYV+sfGRhm30khdfAPlQPSZl7UJ6PnCWd5kmcPgmpdfXBeliOgKn8WgJi/8Y1AuYI6OEI6NPCuRwB2SlBJmXE7IbkG/bcc8/qXb64xdM3JPgOGCF5WlpDYBAIdDkC+paZwxEwhWfZflvze9uRrHV1ys2zPkq40PHcc88VOHJNO+20PXVAcdLbb0v6thjHrs0226zKh0Xjo446qud4oQ033LB6XucIKPmzpTu78WlcucahsC74pvfFRu98gOPaUkstVSy22GIVPTPPPHNxyy231JHi5ZjQZpigjlLvD33oQ7Vl1N3EmVzeI2YL8KaQ0xGQCaeUO8MMM5ROJVtvvXV5T39BuN5661XpTjvttNLZlPdwBpQdjFx6fRT9JqeUFJ4WOnyU/ba2lHy64py801UWz334PrYf6vJ92q9r0VxP4up4bqzbOVb+56BbY113HUsbjsXC08RtOwK6x6rCMzoMow9Q3u23314gl4VuFgzYCVzCgw8+WD0ba0dAaPLhQaHdN/aRV+R1zDHHVFjMOeecPdn7tFeTzCWjHO3ga8ymPJ86py4epdQpdk6HQ630ZRxttbNmT4MVRbHffvtVadl9jfFkEPqANqxzPCxBdvVtcwzxaaO2cTQFf8Gqq1/nWEzr0i9j+4LUwTceZF1Txq3Y8Ujq7dOP2gzdOfSsLmyhtUun6eonUl9i37mNDzapclDT1XXtU0cfmpvkQmpfgv5BOwKyI77I8AUWWKARslFyBEQv+eAHP1jRzUkBODHi6C114Sj6tvGosaJFUY5NuewETeUMq2+0LaYKbTnkRVs5KWNK6rg6UR0BxyPfptispK/mjH3HLZ9xArpS65eDD6Ej57wtVceX9gpdgJf3fGPftpT8BjlPyKHD+Ogd1KVtntxllxqmDSXH3EXaLrSt5b2YOIc+7tOWTTpkDM1tY7HklyJrUsZzyve1naTYd3O0G7T6yNIUOnOO49K2XfEw5YLQ8vTTT1cnMImOzoYefBDaFNr6car+7DOmh657/uEPf+jZ2Y0NlvgokfU5fCXQRWPDqI/Dvlj5yMI2+0gKr4F9iB6SOvegPB85S7rpppuumrvKqSWpdfXBeliOgKn8Gss39t7oIGCOgBGOgNdff31xyimnFL/73e+qlszlCEiGWhlAsfMNHFslAznxvffe6/tqmc53wAjK1BIbAgNAINQRsI5nISuHIyD5xPIs73KUmPAtDk8hITfP+ijhLn0o0Rj3pQ7EtE9I4ChaeZ+F67ovknwdAaXcyy67rGeCg8PiG2+8IY/74q70Idiw0w2OeNSJXZPmmmuucncpHBjbJlg+CmKbYUL3ByY4TJZ9g1aM9fbp7vs5HQH5MksvEmGUYgcjJmhM3iSwU6b0D75eO/jgg8v
fLMY1BV2f0B0ByTOFp3k/tS2b6uXeHwTvuGXU/W7j+5R+KGX5tF/XorluwzpHQMrSaUL0LaEztZ1j5X8q3UJ/WxxLGwvAwq849jYFHMYlHfE111zTk3RYfYCj94SO1VZbrc+5eNQcAQWkNh6UNL6xTz8mrx133LHC6lOf+lRP9j7t1eYImKMdfI3ZEO5T59TFo5Q6xc7p2HFP+jPxU0891dNO+sfmm29epV166aWrR7otc+gDsvuf0PXjH/+4wPGQ321H0fi0UZtOlIK/gKGxqNMlciymdemXsX1B6uAbD7quKeNW7HhE3X36UZuhO4ee1YUtdHbpNF39pK6du+Y2PtikysE6upru+dTRh+Y2uZDSl6A71BGQxYu6wAdQIhP17ubMFeU+c7mmeeMoOQJSP+iURQx2F+GjPhy/OaLzjjvuqIPA+14OHvQpbBh9o20xVWjMIS/aykkZU1LHVXRX6d/Ef/nLX6TawbGv/Z1TGHSZsjOMLjBVroxXvo21WWnscl93jVs+44TQlFK/HHwodBDnmLfl0PGhJWQBXtch9LqrLSW/Qc4TcugwPvKBurTNk7UO3GSXGpYNJcfcRdpOYt+2lvQxcQ5dwKct23TIULrbxmLJK0XWpIznlO9rO0mx7+ZoN8GKuE2WptCZcxzX9LZdD1MuQAf6zworrFDqRGyqoE9n2XbbbRtJ7erHKfqzz5iu29VnR0AqIrv/of9xfcABB5T13njjjRvr6fNg1MdhX6x8ZGGbfUSXE7r2CM4hekjq3IPyfOQsO9nr+YLM4VPrqrF2bejS52QOTfmrr7663O6JfeQFLwySX3sIsh/jEgFzBIxwBMSoBXNecsklVaP7GiJ4RwQLwqwu6MkKu5QwMPoEjIiSNzFfzoYELdw44sOCITCqCIQ6AtbxLHXL5QgYy7PQwCAvfIsD1G9+8xtv2HPzbJcSfuihhxbLL798cdttt/XQyFdeentxtk4OCdpowmS0LjQ5AuIwBk3bbLNN32scUyzYEvMVUGh6ybQLG0lHjJO4OLg98MAD+lHrtVYQ11hjjdq0bYYJbdShvl//+tdr86i7KY6LvDfLLLMUHMlbF3I6ApL/RhttVLURjgiUDz/p8OKLLxaTTz55+YwduWTXCRx8m4KPot/mlJLC09CU2pZN9XLvp/COm1fT71C+T+mHQoNP+3UtmuuJkutkJuWMdTvHyv9UuqX+bXEsbXoHz1lnnbVxBxiODYbf+WPR+Pe//30POcPoA+5kH4OEG0bBETCUB906dP32kVfs4MjRGtJme+21V0+2Pu3VJHNztYOvMRvCfeqs+QxDuxvYeV3wIEbHkJBap9g53T333NNDkxzFK3RJjCGYsV7oZ4c+CYPQB3SeMp6vssoqUmRt7NNGTTpRKv5CUFe/1uMdRvC6IIZnsK7Tj7v0y9i+UEdL271B11XzU4h9A5pjxyPe9elHbYbuHHpWF7bQ2aXTtPWT2LmNDza63ULlYFt/q3vWVkdJ70Nzk1wgj5S+xPs+joDaya/pdA+dRjsCkl7kMjE7DNWFUXMEhMZdd921pH399devIzn6Xg4e9Cl8GH2ja3EGOnPIi7ZyYseUHOPqRHUEHK98G2uzEn5q+8hW0jTFseOWzzghZabUL4UPBzVvy6Hjg03IArxg2RbHtqXOc1DzhBw6jI/e0TVP9rFLDcuGkjJ3SWnrFHlBX8mhC/i0ZZsOqfusz3XbWCzvp8ia2PFcyva1neg+g24ass6Q0m6hsjSFzpzjuODbFQ9TLkCL7CDJWhXjIyd86TlHk828qx+n6M8+Y3rMuqfbnuL0WLfpSFc76eejOg4Ljb5Y+cjCNvtICq9Bq68ekmPuQXk+clY7VtNfONGCkFpX7eSHrlMXdBr4qS74yAveGyS/1tFl98YXAuYIGOgIyMIJ3s4MloNyBORrGj0Ys0UxjjZ1gYVTFhgk4MAn7yK4rr76annUF4tQkwd6wGBxKPRoYcnHYkNg0AiEOAI28Sw05nIETOFZttMXniX+xCc+0fj1/8svv9wjC3LzbJcSzlf90Ei5bmCSJvVgx7aQsMsuu1TvMinBkOKGT37yk1UalFYJjz76aHkfpcgNGBummmqq8jlym3xD00ueXdhIOmIcJASLCy64oDzyEAW2K/go412GiUUXXbQqm69Km3aGZUw577zzihtuuKEkSyue0E6b1I07G2ywQZW/Hnu66tb0/Oyzz67yE8xOPfXUvuQf//jHe9KxgNZmTPJR9JucUig8had5P0db9oFQcyOFd2qyq70Vw/ex/VAI8Gm/rkVzPVGqW7ymrLFu51j5n0q34NwWx9J244039vDqlVde2VcM+qU46MP3W265ZV+aYfSB559/vnRCFNmjxxYhiJ1s5Dk01YWuyTbvpNQnhgfr6Gy65yOvvv3tb1c4sMMuCwE6+NSvSebmagdfYzZ0+9Q5ZfEoR51i53QcySh9dqmllqodyy+//PIqDfqR/ghlEPqA1leFtosvvlh3ob5rnzZq0oly4A9BXf1aGwYH5QgIHbF9oQ/UlhuDrmvKuBU7HlFdn37UZujOoWd1YQudXTpN2zwkdm7jg02KHGzpbrWP2uooL/jQ3CQXyCOlL/G+jyPgIossUsnXpp2RmxwBwUA7aXPNBwlu4EMzkaXEdXa7QesmmibsirLIxseB1113Xbnja92cXr/nc52DB33KGUbfGFabdJUTM6bkGFcnqiPgeOXbWJsV8ucjH/lIefLFpz/96aidHWPHLZ9xQvg9tn68nzJuD3LelqrjUzffBXjBsSuObUud76DmCTl0GB+9o2ue7GOXGpYNJWXuEtvWBx10UIH9gI9E0bljQg5dwKct23TIULq7xmLyS5E1vB8znks9QmwnsfbdlHaLkaWxdOYcxwXfrniYcuHSSy8t5w18gH3VVVeVpFFn2WSBuQQf/b7wwgt9ZHf14xT92WdM1+ODr68C68B6bk39Fl544b66xdwYxXFY6uGLlY8sbLOPUF4sr/Gurx6SY+5BeV1y9m9/+1sxzzzzVHNrjgPWIaWuwnv0QebKOOG6QdtffRwBm9a3yHeQ/OrSbb/HHwLmCFgU1RaxMCVOI21Bb1uuHQHvvvvuSmCw00GdMY58tdMDQqbO0YJ3ZWckaOKPozdZjMSYxh9blMLcCDNNMzskiRGO99hhDKWbyTr5cvwChjkGLhyOdNDHL/KudsbAuFdHq37frg2BYSGgd9fQ/b+u/CaeJe3aa69d8e0hhxxS93rZ7+ebb74q3RlnnNGXLoVn4auVVlqpyh/eQ8m49tprS379xz/+UTzyyCPlcagsGvz0pz+tys/Ns+y8Rvnyx0RfB5mI6Z1jeE79F1hggeo9jn0LCe7XOvoLMwwV+gsjaDviiCOq7MUYwf2nn366us/FueeeW9EExoTQ9JJhFzaSjhjZKhjqmB1Q2CUBhZp2dYMcecs7yOi6oL9SYVt3N+B0w+ROyuWoXXAQbHDewDlx2WWXLdMcddRRZRY4mU433XTVe7y/zjrrlM7kOAiQL7TrvN0doVxafH7jIMk4JfTikADWbj
jhhBOqNKTdZJNN3CTVbyaTU045ZZWePlQX5p133ioNyrkOKTxNPjnaUtPTdJ3CO015uvdj+D62H1K2234YReuCPoJAHFp1Oi23mfTr46fkeqzbOVb+p9KtcWq6jqWN/JAdwtPIPcYwHXbaaafqOePan/70J/14qH1A70DAOCa0YHRAPmoDHXKlTheWXXCoM/zihtQ+HcODLg1tv7sMQTiNy0dQ1NHdocitX4zMzdEOOo9TTjmlrcpeMlob1jkuxg0chSj9nJjjrnXQ9MT0rdg5HXNE+QgCunbbbTdNVumcoeUn80kdBqEPMK5rmlgIanPmh57UcTQVf7df141F2vDNUZh1ddIL0RzH7AYf/TK2L7hlNf0eRl1Txq2U8Uj3IxaE64K2z+DEpUOqnuWDLeVpnqzTadr6SezcRmPTNO9IlYMay67rtjrKuz40t82VUvoSNDAHFrnPLid1gfmSpMEuB80PPfRQcf/995dzMo4FkufEt956a082Bx54YM9zPpJjYedXv/pVgV3j6KOPLjjSS+dRd4TwoHUTTTT2CU2PXFN/HNLZdaRu4UPn0XSdyoNN+br3h9E3htUmXeXEjimp4yp2COkbxD4fSrrtJL997e8PP/xwT5ks9rkhVa6Q33jk21ib1RZbbNGD6bHHHutC2vk7dtzyGSek8Nj6pY7bg5y3per4YKNl6hxzzNG4diU4dsWxbanzpV0HMU/IocOkzpOpp49dinTDsKGkzF1i2pr5sZb72KeZE4QG3W/JL2TNQMrKIeslL5+4ayxOlTXQEDue864e07tsJ7H23ZR2i5GlsXSCR65x3KdvkGZYcgFbLHYK+Ma1+9x11109PgR1R+d29eMU/dlnTI9d93R3PGxznvJtM9KN4jgs9PtipWVhjH2E8lJ4TcuFLj1Ey6kYmyq0tjkC4vwqR2bDI8yxX3nlFYG0jFPqyvxfj4Gs92PrefzxxwvWzvfbb78ep1U2QakLvvJikPxaR5fdG18ITNKOgF/96lcLjiXSDgMwJwbgpj+UVmFgHAH5QpcFTQxccp94tdVWK3beeedCjvq45ZZbyi+IWRDV6TAE8oUECrUOzz33XPGBD3ygJy3vMTnSEyTurbnmmvrV4rTTTut7j3R6EY/f7tnk2pjCc/6o15JLLllipB2Qegq0H4bAkBDIwbOQyqQRA5bmJb4W2XbbbUtjvVQHJRleFn4gZqFm++23L3BM0iGFZ3FK0V8YSHk4XU0xxRQ95aN0SsjFszg8IIfgdSmbmC99kW9yvK1MxHi24oorlk7UOBprx+XttttOyPOOcTTWC2DkjzIo94i1PGQBmXJQxrQxgoUSdlBFqUX5kfbFwU12uwlN74uNVJb0YKNxrLtmZyRZEEEJpj4ce6vTsvvehRdeWGZNG7ALxZxzzlmlwemcNtFO2yR2neYkT/CRa2LGIxxOJWBEEMx0Orl2xxB+syCWur26OCVSDs65dYGFA+2EWLeTEAvwODwst9xyPfVEYWY8FucvJoB6kY5ymVCA73333VcVH8PTuduyIqbhIoV3GrLsux3L96H9kOOo4Vu3/fhoQrcfC6DaSEr7obMhq5566qmKfu3oTRp2yIQ3Z5999p4PIca6nWPlfwzdFTieF7G00Za6HXGm40hMnPgZV2gP/phk/+QnP6moGYs+cNhhh1X0QBMycP75568MYvqLT55zbDtyBsPt+eefX7Dbjx6np5lmmrKuOMvlqk8sD1bAdlzoBQ6MhDgrHX744cXee+9dfPSjH61kL07bfB3JF7aEnDI3pR0Y8/lgSjuV0+cYV12HdR8ZjfP8uuuu29Mv2MUA/ULGiP33379H96FvgBWySuRQSp2kyWLndOy+pj8Mgzb0NXbf1HNZ+i/91A2D0Adw4Acn/sCvKfi0kY9OFIs/cq9rLMJ5gbbWOhn14qMTcRpFX0dH0roT/QhZiN4Wql/G9oUmnLnvI6Ny1FVoSBm3QscjTkWo06032mijSrd+7LHHSt1PfxiCrkkaacdYPQs9tasfgUuXTuPTT0LnNj48lksOStu3xT519KHZRy5AR2hf4h3GxK222qpgjBc5xvwSGSofVUkd4X1JUxejs+r72AC0Dot+ga6h07jXyBJ9j11d9t1335KEYekmUl9i2kfTU3ctTpH6PZ/rWB7EThAaBtU3htUmXeVoPCXbILgAACAASURBVGLGlNhxlQ/a6eN6zk8fYeGNsZSj1nyDr/0duUjebpk4h2H34sOfnHJlvPFtjM1K2kg7V9GOTfYbSV8Xh45bPuOELiemfj46EWV0jduDnrfF6vjwITyx0EIL9chreIL7dR8haEybrkPbsimfnPOEnDpM7DxZ19PXLkUfHJQNJYc+H9PW2vFQdAPWMUJDii6QU9b70N01FueSNUJL6HgeYjuRMohD7bu8k9JusbI0hk5oTR3HNVY+14OWC3feeWeBYx/2b+E9frPrpdSXtU13QwgckZjbdPVjXcdQ/TlkTI9d92QNUOrN/I2+mCuM2jgs9erCCvtGDvuIlBfKazF6SOzcA36WoB0B0WGxEXPSIDYfvRM/64LwTV0IravOw/UrkH5JzLou5co91hY222yzvnXeLnkxSH7VdbHr8Y3AJO0IqI84FIYLiXEEZKGk7R2+MCfwlVxburodtPiyl0UbfXSI5IHxj0UqvPfrwm233VbuKCXpdbzggguWXxXXfTnMIpdOyzWOkuutt161o1RdeXbPEBgGAjl4FjpZEHX7ufxmMUiCfDUjz3SMQ4kbUniWdzlet65MHMRY5GYXADfk4FmOGNd1c6/ZRZSA0Uuc89w0OFIef/zxtcf6ujTX/eYrJa38kD9Y7LPPPuXX4meeeWYfjXvssUfBETM4LGsHAE0bypIs3FNuaHpfbMibXZBk9yjkLLsooYwhxz/zmc/0tS1pCNRR06yv5UgpHPb0fX2NfHYDjtsY17XzHO/wG5xxhn3ttdfc10oD4BJLLNHzHooojuMY3nEi0GVzfeKJJ/blE3JDHyt90UUXNb4qRmfGpLrx6/XXX++jTdMqzhnuDpw6jd7pF0JCeXoQbdkIyH8exPJOV77yPIXvQ/qh+zWgbheupf3cBR2dTh+FzaISTsP6Odfve9/7+iZVY93OsfI/lG5p05A4ljacxJgs64m1tAWyHWM/BiAdxqoP6C8ihcbll1++PLIA+uaee+6+fsSuadtss03ffXkfh8dc9UnhQY1v07Ve4BD6dcwYgMM/cwsdcsvc2HbQxk1NN9c4HujgI6Nx9nDzkd+ye2xdv5Y0ekE7tk6a5tg5HTtQuQYjaEQPQP+Quuiy9DULgjn1gWuuuabElTYRea7Lk2ufNvLViWLwf+KJJxrbH/ygvS0NTj6Ek046qTEf5vAh+qVgE9sX5H039pFROeqqy00Zt0LGI59+hCO68K0b6+ODYvQsfYSsm7f0I3Dp0ml8+kno3MYHm5xyULd/3bVPHX1o9pUL0BDSl0ivv8J32xOnCjcwN9KLDrzDXJlFB/SjOvue1mFZvOADRfcjLj6gYMymT7p08MELYVi6CWVBpzhvM
LbgYIUTLvof9Nd96MjO9KEhhgexE8SEQfSNYbVJVzkuHjFjSsy4yk5Dbn/Vv+XDR5e+ut++9nd0GF2Ge41DS265Ml74NtZmJe3BR5U4H8smB9iWQkPouOUzTggNsfXz0Ykoo2vcHvS8DRpidHy9W67LD/xmsT0mhLZlUxk55wk5dZjYebKuZ4hdalA2lBz6fExb43zD2ok+hjb2eOBYXSC3rNdtW3fdNRbnkjW67JDxPMR2osvgOsS+K+/GtluKLI2hE3pTxnGpr288aLmgj3bVMn+ttdYqScQhVX84qtNgy+zqx249Q/TnkDGdcmLXPWXTEuYkucMojcO6bm1YtdknZO3R1z4iZYbwWqweEjP3wF4vwZ2T677ONXNonAPhibYQUledz5NPPtm3YQzzZtZZmf/ro4GFNnedt0teDJJfdV3senwjMEk7Ao6XpmObXbbTvuqqq8qvFvl6nXs+4Zlnnik4yggFgK+TtSBseh9nDxaH+JIdZbJLEDblY/cNgUkVgRSe5bgStg7mCxO+ZIffu7bOHybPQgtf1uC8jJMbX/hx5ApHlqcGJl18fYG8wumx7uiWpjKYTOMYfcUVV5RH3/KlSdtxM6Hpm8qV+2Agu76w0OM6uJCOMl3FW3YqlHxyx2xpDRZgijx3t7huKo+jhDHKM37YGFCUY27sONyEcc77KbzjQ0cq38f2Qx/a2tJQLrIKp1xklXt0p/tuiux284r5HSP/KWcYdKfQxrb34I+jLYZAXx02BkP3Hd8+gDGKMZcFbK5HLaTyYFt99AIHuxCwyyvzAByzcaRnQWJYYdTbIQaHXHWKmdNBL0ZZdCqcL2hbn7mgrmcufYBdMjFs4Zw4zJAL/2HS3FVWbF/oyneYz1PGrdjxKKV+g9azUmiTd3PPbSTfiRwPsi9x1JDY4f4/e+cBtkdR7m8gVAMo0ns7CBEUDggIalBEypGuoHQOEEqoRinCgYBIsf1REVCEIEUMVUpAURApihQREaQTilISQk+AUOZ//facZ5l3vy2z5S3f+95zXd83++7OzjxzT595dkZjsSptqXbgVb2tMVlRH7Yb6bSrd0RolhLJ73//+2inf1vY0MeNVUyny2A780aV+LfznbJtSj+2q03y7eVy2+SclT52VbnWR5ZVTdPtVpPxqxonvdfOcZsvV90+vu9X3eu6admtcUJRvJsaJ4fOSZg86id3cw7F5Eizq6S16kVTOvJPhEjzP+9ep/sCebL04rOy7XnVOJSd362abnXr0rJyGo9OteP9VC8Yu3b1n8uue0oO+2Chyi6kFp8iu5faYZO1LCt7r45dtayFhlln7OErAmqTGa1NaI1IHw1Jwb2sqRJX1WU6vUDz7NLvUXzKmrL1RYj/7SqvIWHjpvMEUATsPHNChAAEIACBPiOg3RBtgSPvayMpTPpbrz/wwAN9RoLoQAACEIDAcCLgL3DoOEIMBJomoIkv21HLdpluOgz8gwAEIACBzhFQvW474WsMrMXnLKOdEG2c/JWvfCXLGfchAIE2E2hyzmqDDTaIyrUdS95m0YO8bzJ+QQHiqBECvTxOYJzcSBJHH0aoH6DTXXSMKAYCEIBAOwnYqWLa3RAz2AR8RcCso38HmxCxHxQCKAIOSkoTTwhAAAIQaBsBbaNtCxx77713Zjj6snOppZaK3C633HKZ7ngAAQhAAAIQ6AQBFjg6QXmwwzj//POjfo+OiezkjqCDTZ3YQwACEGgfAR2TbmNf2XmKgGeeeWbs9he/+EX7hMJnCEAgl0BTc1Z23LN220k7CSNXiDY+bCp+bRQRr1MI9PI4gXFySoKVvPXcc8+5NdZYI+oHHHfccSXfxjkEIACBcgS0o+Mqq6wS1Tk6lQYz2ARQBBzs9Cf27xNAEfB9FlxBAAIQgAAEKhE444wz4gWOkSNHRsc4pHl06qmnxu70xTIGAhCAAAQg0E0CLHB0k37/hf3666+7Nddc022xxRbulltucVOnTnVLLLFE1PfRURgYCEAAAhDoDwKLLbZYPK4dO3ZsaqR0jNHyyy8fuZOtIwUxEIBAdwjUnbOaNGmS23jjjaPyvOCCC0a7fHUnJumh1o1fuq/cbZLAcBsnME6unvpq7w899FC3yCKLRHXGDjvs4HRCDgYCEIBAkwTOPfdct/TSS7uTTz7Zvfzyy+6EE06I6pzVV1/dacdZzGATQBFwsNOf2L9PAEXA91lwBQEIQAACEKhEYPLkyc7vXC6zzDLuuuuuc/oSSWbKlCnusMMOcyNGjIgGJOPGjasUDi9BAAIQgAAEmiIwbdq0eEFPO/qsvPLK7vHHH3fvvPNOU0Hgz4ARuOCCC6J+ju0UpY8jdL3RRhsNGAmiCwEIQKC/CUj5z+p62fvuu697/vnno0hrDHzjjTe6UaNGRW6kCPDII4/0NxBiB4EeJ1B3zmrbbbd1888/v9MJGPKr10zd+PVafPpRnuE0TmCcXC8H3nvvvVH7v/baazsd04mBAAQg0A4C//Ef/xGPR2zuabbZZnM33XRTO4LDz2FCQGPRa6+91ikv2HhVG7K88sorwyQGiAmBZgmgCNgsT3yDAAQgAIEBJXDzzTe7+eabL+5gqqM599xzx19A6veyyy7rzj77bI7GG9A8QrQhAAEI9AqBI488sqW9sskR2VJaf/DBB3tFVOQYRgQuuuiiIflKfR8dC4WBAAQgAIH+ITBz5ky33XbbDanzF110UTfXXHNF97X4stNOO0UfGfRPzIkJBIYvgTpzVq+99ppTue9lUyd+vRyvfpFtuIwTGCc3k+OkTImBAAQg0E4C9tGRP5950kkntTNI/O5xAjNmzIg3YvHzhV1rbIqBwKARQBFw0FKc+EIAAhCAQNsIPPXUU278+PFu9OjRboUVVoiOw9tggw2iHRImTJjAUQhtI4/HEIAABCBQhsDll1/u9ttvv8y/Z599tox3uIVAREBf3n7jG9+Idpf8yEc+EvV/WAQic0AAAhDoTwLaQfjKK69022+/vVtjjTWclABXXXVV9+Uvf9kdffTR7r777uvPiBMrCAxjAv0+Z9Xv8RvGWS86MWU4jBMYJw/nXIbsEIDAIBG49dZbo1NOFl98cfe5z33Oqf7GDDYBHUOfN9d91llnDTYgYj+QBFAEHMhkJ9IQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0C8EUATsl5QkHhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwkARQBBzIZCfSEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEINAvBFAE7JeUJB4QgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMJAEUAQcyGQn0hCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCDQLwRQBOyXlCQeEIAABCAAAQhAAAIQgAAE
IAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIDCQBFAEHMhkJ9IQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0C8EUATsl5QkHhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwkARQBBzIZCfSEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEINAvBFAE7JeUJB4QgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMJAEUAQcyGQn0hCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCDQLwRQBOyXlCQeEBhgAnfddZc79thj3R577OFOPPFEd/fdd3eExssvv+wmTpzoxowZ43bZZRc3ffr0joRLIBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEPAJoAjo0+AaAhAYVgReffVVt/XWW7tZZpml5W/EiBHu9ttvrx2XSZMmuS222MLts88+btq0abF/d9xxh9twww3dHHPM0RLuiy++GLvhAgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAp0igCJgp0gTDgQg0CiBmTNnunXWWSdSxFtrrbWiHfl8hcAJEybUCm/KlClurrnmihX9tOufmWuvvdYtvfTSbsEFF4yfK+yXXnrJnGBDAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgYwRQBOwYagKCAASaJKCjgKV8p93/HnvsscjrK6+80i288MJuySWXdE888USt4P7whz+0KPltt912Q/x7/vnnW9ygCDgEETcgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABDpAAEXADkAmCAhAoFkCb775pltggQUiJbzRo0e3eC5lvDfeeKPlXpUf06dPd6NGjYrCmHPOOd3VV189xBsUAYcg4QYEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQBcIoAjYBegECQEI1CNw0UUXxTvx7b///vU8y3n7rbfectoZ8Lnnnkt1hSJgKhZuQgACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQh0mACKgB0GTnAQgEB9AkcddVSsCDh+/Pj6Hlb0AUXAiuB4DQIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgUQIoAjaKE88gAIFOENhzzz1jRcATTjihMMj33nvPPfHEE+4vf/mL+81vfuPuvfdeN2PGjML3Zs6c6W666SY3adKkVPehioDPPvusu+aaa9wdd9yRGea///3vKJzbb789003ygY5A1o6FDzzwgFMcs4yOUr7zzjvdtdde655++uksZ9yHAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEBimBFAEHKYJh9gQGEQCUuDTboCLL754rAg4atQot9VWW8V/UoozIwW8Aw44wC255JKx+1lmmSW6nnXWWd2OO+7opkyZYs4j++2333YXX3xx9OyDH/xg/N7kyZNb3OlHniLgk08+6b73ve+59ddf3ykshbvZZpu1+PHUU09FbtZbb73YzaabbtriRmGcffbZUfwWXnhht9Zaa0XPpdC40EILxfJtv/32Le/pxxVXXOHWWGMNN/vss8fuJMcqq6zifvnLXw5xzw0IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgeFJAEXA4ZluSA2BgSTg7wRoCn1J+09/+lPE5q677nKLLbZYpAA3cuRIJ0W5gw46yK222motSnGjR49uYakd85J+6ndZRUCFlfQnqQh48MEHD3HjKwKeccYZbrbZZmtxs+KKK7qLLrrIjRgxouX+lltuGcdDux3utdde8XMpTir+G2+8cct7xx9/fPwOFxCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACw5cAioDDN+2QHAIDR+Dxxx+Pjtf97Gc/Gyu57bffftE9Hburv9deey3isu2228ZupADnm0suuSR+JmU9HRlsRsfo/u53v4uO8vUV+coqAj744IORP2PGjInDSioCmhvFwcLyFQElh5QFpeRnz2XPMccc0W/t7LfAAgtE174i4OGHHx67F4dXX33VohcdP2x+SZnwkUceiZ9xAQEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAwPAmgCDg80w2pITDQBHwlvxNOOCGVhY7RNYW3pCLgzJkznX/s7y9+8Yshfkgh0N6XXVYR0Dz87ne/G/uTVAQ0N6ecckrsxlcEtOeTJk2Kn0uWZZZZxt19993R46lTp7pjjz3WXXbZZdFvKRfOOeeckXvtBOgrAZp/2lXQ4iYlRAwEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAwPAmgCLg8E4/pIfAQBIIUQS86qqrIgW58ePHu/PPP38Ip8033zxWhpOyXtL0qiLghz/8Yffvf/87KW78e/fdd4/j9fWvfz2+718ccMABsZv11lvPf8Q1BCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgMAwJIAi4DBMNESGwKATCFEEzGL08MMPuwkTJriVV145Vob7zne+M8R5ryoCaje/PLPGGmvE8Vp33XXdnnvuOeRvzTXXjN0suuiied7xDAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEBgGBBAEXAYJBIiQgACrQTKKgLecsstbt99942O1LUjcX27XxQB3333XTfPPPPESn5+HLOuR44c2QqXXxCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACw44AioDDLskQGAIQCFUE1JHAq622WqwYp93vDjnkEHfjjTe6rbbaKr7fL4qATz75ZBwnKf5dccUV7plnnsn9e+6558hQEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAALDnACKgMM8AREfAoNIoEgR8L333nM777xzrBQ3//zzu5NPPtm98cYbMa7tttsuft4vioAvv/xyHCcpAv7617+O48sFBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgED/EkARsH/TlphBoG8JFCkCTpw4MVaIGzFihLvpppuGsOiGIuDGG28
8RA7dOOWUU2J5N9100yFuJk2aFD9fccUVhzz3b3z4wx+O3Y4bN85/xDUEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQJ8SQBGwTxOWaEGgnwkUKQKOHTs2Vob7xCc+kYqiU4qAvpLf6quvniqL76auIuBnP/vZOO4LLbSQe/PNN1PDzLv5zjvvOO2qiIEABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQGB4EUAQcHumElBCAgEegSBHwwAMPjJXhPvShD7m33nrLe/t/L7/whS/Ebr773e8OeS4FOh2va3+TJ08e4ub555+Pn8vdSy+9NMTNpZdeGrvR7oSPP/74EDdNKgJedtllcXiSad99981U6nv66afd0Ucf3SLP8ccf77Sr4FJLLeUuuOCClmf8gAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhDoTQIoAvZmuiAVBCCQQ2DLLbeMld2OO+64IS5//etfx8+lDPeDH/wgdvPHP/7Radc9U/CTfcIJJ8TP7eLFF19scfPII4/Yo9j+17/+1eJmypQp8TO7+Mc//tHi5mMf+5i7/vrrnRQLb7jhBnfkkUe6RRddNHbz+c9/3l6NbT8+UtB7991342fJCz1be+21Y/8Uvx122MHddtttkUKklCL//Oc/uyOOOMJ94AMfcHPPPXfsxaOPPupmnXXW+N2RI0e6mTNnxs+5gAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhDoTQIoAvZmuiAVBCCQQkAKfbvuumukvGaKfFKiGzNmjDv22GPjN15++WW34IILxgptcisFOrsne9SoUfHzxRZbzO29997uyiuvdC+88ILTjoJrrrlm/Fzvf/KTn3T777+/u++++9xf//rX6HrddddtcfPpT3/aHXDAAe6ee+6JZdHFJpts0uLOZJc9++yzu5VWWil+Psccc7idd97ZnXnmmZHynvxbZZVV4ud6Z6ONNorCkUJhmpk6dWpL/Cw8Kf3pz37L3nzzzWMvrrvuupZn8847r3v77bfj51xAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQmARQBezNdkAoCEEghsN5667UoqvkKbcsvv3zLGw8//HCLgp3czj///O7QQw912rnvnHPOGeLX1772Nafjcn1/k9e//e1v3XnnnZfrZuLEiS2yPPnkk+4zn/lMyzvaeW/jjTd2f//7351/NLCFt80227hTTz215R17ZraO8c0yr732mhs/frz74Ac/OMQPKfhtvfXW7o477mh5ffr06S0KhOPGjWt5zg8IQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgd4kgCJgb6YLUkEAAg0QePPNN93tt9/uLrzwQnfnnXe6N954owFfq3mhI3u1m+Cll17qrrrqqkjhsJpP5d5677333GOPPRaFqSOGdfyv7mWZGTNmuEmTJkUKilluuA8BCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0FsEUATsrfRAGghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQCkCKAKWwoVjCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBAbxFAEbC30gNpIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACpQigCFgKF44hAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAK9RQBFwN5KD6SBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQiUIoAiYClcOIYABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQWARQBeys9kAYCEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCKAIqApXDhGAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0FsEUATsrfRAGghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQCkCKAKWwoVjCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBAbxFAEbC3NhZYRwAAIABJREFU0gNpIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACpQigCFgKF44hAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAK9RQBFwN5KD6SBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQiUIoAiYClcOIYABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCPQWARQBeys9kAYCEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCKAIqApXDhGAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQg0FsEUATsrfRAGghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQCkCKAKWwjX8HD/66KPulFNOcbvuuqv7/ve/P/wigMQQGDAClNkBS/AuR/fFF190F154oRszZozbZZdd3HvvvddliTob/Ouvv+7OOussd9BBB7mxY8dGLF577bXOCpES2nBJlzfeeMNde+217sADD4zyz2OPPZYSG251mgDtSKeJtze8GTNmuKuvvtodcMABlLP2osZ35xz1B9kAAhCAAAQgkE+AtjKfj/+038eL/R4/Py25rk/gb3/7mzvuuOOiMd35559f38Me8qGf49ZDmBEFAhBwzt11113u2GOPdXvssYc78cQT3d133904F9r3xpHiIQQgAAEIdIkAioBdAt/uYE877TQ3atQoN8sss8R/m222WbuDbav/kyZNcltssYXbZ5993LRp09oaFp5DoNMEKLOdJt5MeN2sl+qEffPNN7sNNtjAzT777HEbofbi3XffbQZMF3yZOnWqO+KII9ymm27qLrnkkkIJrrzySrfAAgu0xF8MNt988+jdc889122yySbu8MMPd5oA6IQZLuny9N
NPu6233tqNHDmyhd/tt9/eCUyEkUGgH9uRjKgOxO3Jkye7Lbfc0n3gAx+oXc7K1o+9DrhO+9frceuWfP1Yf5BPupWbCLcMAfJpGVq4hUB3CfRjW9kuov0+Xuz3+LUrX3Ta314ZAx166KFu6aWXbhnT6WPUfjD9HLd+SB/iMPwIMDbITrNXX301mov217t1PWLECNfUfDTtezb/Jp402S436VcTccMPCEAAAr1KYKAVAY866ii30047pf49/PDDQWmmnYOy/NDuSs8++2yQP007UtjJQeZwVgScMmWKm2uuueJBs3avwgweAcrs8EnzQSiz3Yxj3bDPPPPMqI2Yb7754np1uCsCjhs3Lo7LbLPN5jR4zzK//vWvndwoznpvqaWWit9dZJFF3N///vf4t9x861vfyvKq0fvDJV30taX6GMl+RlMTL41CHSDP+q3vN0BJlxrV2267rbFyVqZ+TBWmh27Wbf96KCo9JUq/1R/kk57KXgiTQYB8mgGG2xDoUQL91la2E3O/jxf7PX7tzBud9LtXxkBrr732kLmTgw8+uJMo2hZWP8etbdDwGAIZBBgbZIBxzs2cOdOts8460Vz9WmutFe2sqvl6+5swYUL2yyWe0L6XgFXBaZPtcpN+VYgKr0AAAhAYNgQGWhFwzTXXjDsL1mkwO/QY3X/84x+ZfsivW2+9tauZ4bvf/W4s33BWBPzDH/4Qx0Nct9tuu65yJfDuEKDMdod7lVAHocx2M45Nhf3AAw+01K3DeUfA0aNHt8Tl/vvvT826+mJsoYUWitzuuOOOkRsdxfuVr3zFzTrrrO7II49055xzTotfGlx20gyndPF3K0MRsJO5JDusfun7Zcdw8J7ULWeh9eNwINtU+zcc4toNGful/iCfdCP3EGZZAuTTssRwD4HeINAvbWWnaNbtx3ZKzqrh9Hv8qnLphfd6bQy03377xfNM/aIIaOncz3GzOGJDoN0EGBtkE9ZRwFoT1u5/jz32WORQJ/0svPDCbskll3RPPPFE9ssVn9C+VwSX81qT7XKTfuWIzCMIQAACw57AQCsCvvnmm+6ZZ55xO++8czwQM0XA1VdfPShxdWSgvWP2HHPMEW1H/PLLL7v33nsvyJ92OeqXCarp06fHRx3POeec7uqrr24XMvztYQKU2R5OnIRog1BmuxnHpsIeLgpnF198sRs7dqx7/vnnEznt/Z/6+s/a4fXXX//9B4krv1285pprWp7aLr7qGyy66KKRf/POO6/761//2uKu3T+GS7qIAxMj7c4N5f338/hw/gikfMz794265Sy0fuw2wZC6vqn2r9tx7dXw+6X+IJ/0ag5DLp8A+dSn0dvXSiuNRfSxEAYC/dJW1k3J0HJRtx9bV852v9/v8Ws3v3b632tjoH5WluvnuLUzj+J3ewiEzCu0J+RsX0PaTMYG6fy0HrjAAgtEc/RS/vLNSy+95N544w3/VtB1SB6hfQ9CGTkKyd9yGNouh6RPqF/hscAlBCAAgf4kMNCKgJak/+///b+oI6GdgEyJQLaOBswz2jlJXxzIrX883oc//OG81zr6rJ8mqN566y2nL2Oee+65jjIksN4jQJntvTRJk2gQymw349hE2MNB4eydd96J29ibb745LavF9yZPnuzkJk8Jf9SoUXFb/9RTT8XvJi9ee+0197vf/c698soryUdt/z0c0sUgMDFiJHrH7qe+X+9Q7a4kTZSzkPqxm7EsU9c30f51M669HHY/1R/kk17OachmBMinRqK37Z/97GfR+GHDDTfsbUGRriME+qmtrAMstFw00Y+tI2e73+33+LWbX7v976UxUD8ry/Vz3NqdR/G/WQJl5hWaDTnft9A2k7HBUI4XXXRRPI+///77D3VQ8k5oHqF9Dwcbmr/lY1G7HJo+IX6FxwCXEIAABPqXAIqAzjlTKtp0002j7YRNGbDoKMDrrrsu6oTMNddc7hvf+EbcIUERsH8LDDHrDQKU2d5IB6QY/gSGg8LZZZddFrevRYqARSkyc+bM6Phfa+f1VWEvmuGQLsaNiREj0Ts2i5O9kxZNSTII5azJur4p7oPoD/XHIKY6cYYABIoIfOxjH4vGIygCFpEajOe0lf+bzqHlot/7sf0ev8Eo1Z2JZT8ry/Vz3DqTOwilKQK9Oq8Q2mY2xaGf/DnqqKPidYHx48fXjlpoHqF9D0fdZP4OTZ9w6XAJAQhAYLAJoAjoKQJuueWW7sADD4w7FosttpiTBnqW2XHHHSO32223nTvhhBPi90IVAaWAcOedd7prr73WPf3001nBpN7X1yHasfC3v/2tu+eee9yrr76a6i5rgkq7Gd5///3uN7/5TSRD3u5Jvsdy98QTT7i//OUv0bv33nuvmzFjhu8k+Prtt992d9xxR7R7k7ZxLjJS4LjpppvcpEmTcsPUdtD//Oc/Iza33HKLe/jhh53CKmN0LKQUPbPk0rHPt956q/vTn/5UafvpMrLgdigBUwSkzA5lk7zTZJmV39qRU7ukPfTQQ071SJ4ZhDLbyTgmWYeG7b+nutDajrvuusvddtttcdsl5biiNJVf//rXv6L6UfWfZKhiQvORjgJed911YxmLFAGffPLJqI1Qu5hm1NaaEqB2Ac4z2hFQbc7vf//73B0GzY86bXoT6WJydNrOmhiZNm1a1L6rvpgyZUopseqw9AMKzWf+O6HX2inyvvvuc9dff7374x//GJWL0L5UaBi+O5XNRx99NCp7d999t1M/MMv0ct9PfTTt7ixl11Be7eiTNdE2hvbFk+mk8q7+s8qG0jRvrGHvZpUzex5iF9WPWX5I3jL9dd+f0LJctq4v2/5VTSs/LqHXkk3jJLWxaUZ5T30ojTM0pipjQnmm+RnKoJfrD8WrTL0emk/6YezYVJskf/785z+7G264wf373/9Oy0qF9+r4UTWPNxX/ojaqibySBBiaT5Pv6Xed+jnNP/9eE+2k71/adVWeVfNJUobQ+uTcc8+Nxw9lFQE7OW5Kxk+/FUeVZ42htPtGqGlC7tCwku6effbZaJ5N7aT6vC+88ELSSfRb/SeNRzU/mDVvJzeat8tz43verbbSlyHvuk65rFresuQpUy6y+rF1xoudyKOh5afp+DXVpintqrYTL774orv99tuj/qrGvCHjlWReKZtGTedRk6cbYyALO2lXUZYryzEZZp3fZcIuE7fQer6q7FOnTo3aPo3JstqHEL+VJ8vOXeT5G1qnJP1oKj7yN7Tvk5TB3u1WnyJU7rLzCmnxfOSRR9yNN96YeTpMlba4TJtZZWzQK3kkjWfyXpX5sD333DPui2sNvo4pk0eabt8ld1PjmDwGTYURWu7K5G/Jndcul0mfIr/SGFVh0+42K01O7kEAAhBokgCKgAlFQC10mZKAbCnppRkNzOeZZ57IrSaVyigCXnHFFW6NNdZws88+e0tYq6yyivvlL3+ZFlx8Tx3Rtdde280xxxwt72pXwl122cVpIsU3ycUcNab6isKONLa4LrPMMtHkoP+uf60G74ADDhjynt6XMoWUIpML/erYaYFc762wwgpOC
pJ//etfo3A233xzN++887bEQe7UGPtGflx88cWR/x/84Adj92mTmI8//njEYMSIEbE7i998883nDj/88NhrDWZPO+00t9FGG7kFF1zQbb/99hG7b37zm+4jH/lI/P6cc87pttlmm1jRUhOoW2+9dQt/pYV2hNRAANMZAr4iIGU2nXmVMiufpCj1k5/8pKVsqL47/vjjW8qGytayyy4bTb77EgxCmW1XHH2OWddlwjY/pECktkAKdXPPPXdcv1n96NtZioCaCN59993dIoss0vL+yJEjnZThkws0dfORZD/iiCPidtZknH/++aO2RO2J/k466aRI2ftb3/qWW3PNNWPZdtttN4t+ZGtR55xzznFbbLFF7EZ+brXVVql/n/zkJ1vaaHHPMlXb9CbSJUsm3V999dVbWBkz2RrM+0Z9Et1faaWVWpRBVfbVX/Df1fUxxxwTv+5PjGiiVf2mTTbZxPltsfo7mrhROuSZsiybyGd58qQ9+9WvfuU+//nPt+QPy5+LL764u/TSS9NeC7qnBRb18w455JAoLcRaC5wqv0sttVRL3tXvn//856n+drPvJ4HU1zz77LOjsrXwwgu7tdZaK5JTH58stNBCcTzU95LpZJ+satsYCfp//8r2xe1dlQ3VwzZ+sHyjPvzYsWOdZMsyfjnTopwZjT+S5VO/995778iJPoYpqh9Vv9Xtr5s8Zpcpy6F1fZX2r2paWTzSbMmhBRB9POaPb6SwoN3l1S5a2i6//PJu4sSJkTeqT5VeumfPZa+44orRx1lpYdm9MjztHbPLMuh2/VG3Xi+TT/ph7FilTZKC3xlnnBHlV9XJqouluKox7myzzRbnT43xv/KVrzhxSpom/PD9rJrHq8S/bBslOcvkFT9eWdeh+VTumq6fs2Sy+020k+ZXll2VZ9l8Urc+0cdT6k/7dbb6tMl2Nzl+6sa4yWetj5nUl0yO2xSPUaNGOR2vlmbKyp3mR9V7mkvTWC3ZRhr7VVddNf4QUfOvGleoj2nP/b6R0kNu9thjj5Z+p8YpWaZbbWWWPMn7dcpl1fKWlMF+VykXfj+2znixE3m0SvlpKn5V2rQm2wmVQ8ngz4lbGVN/QXP2EyZMaPmrW/8pXzWdR+Vnt8ZAVk6y7FBluTJ5XetIyXbJfquuNPO3v/0tWv+wZ2bbWMXclQnb3pFdFLfQet73s8y1xlr77LOP0/qW5VuzV1ttNXf55ZenelelX5jqUcbNKnWKvKoan7p9n2Q0qspfNh81IXfovILqLc2zHXbYYVG/SGVBa4+K61e/+tU4/2hcJEVoM1Xa4tA2U3OloeufJk+v5BGTp8iuMh+mD2i1jq25VivP6sv6c/n6yDjUhOYR86+p9l3+lR3HmAxl7LJh1C13oflbZS6kXQ5NnxC/ktzKsml3m5WUj98QgAAE2kkARcCEIqBgr7zyynHnQpPfaeass86K3Cy66KLRF3YhioDaOW+vvfaK/VYnRhPvG2+8ccuCuRRukkZfg/hfP0h5bcyYMdHisBbwrTO03nrrtbzqL+bIjd+BsXfM1sKkFmOTRgsD2h1R7rS4JZkPOuggp4GUvSt79OjR8avqjH7oQx9qeS43/mSd/65dH3300bEfulCHxJ75dlIRUAO3JZZYInarRbX//u//dp/73OfiYyCl9Cdz8sknx/d8P6Vg4v/2rz/72c9GiyX+veT1Kaec0iI7P9pHwFcEVCiU2VbWVcqsfDjuuONSy8ACCyyQel9lQAq6vgLyIJTZdsSxNQWzf4WGbT6oblTdZ/WV2gDV1ZqkU/smZU57Jjs5kSt/tDBiSkhaFNb7Uv7235UyhL7okmkiH8kfKfYp7ylMk1FK3bpnfyeeeGJL22jukoqAWdzMfZGtCfakqdOmN5EuSXmSvzVRv84668TsFEdt1b/TTjs57WprJqlMrQkxM2rL5f6jH/1o5I/SQn2WCy+80Jy09CuSCv5Jruo7pJkqLJvKZ2nyZN3z+3Dqe0l5S30iX/FHcdZX42WNvjbXBGSSWdHvZL9J4Xar76ewpVziK5JIfvXJtOjsK4fqvnb17WSfrGrbaGlZtS+uj1yU960uUznRRyYHH3xwXLbEQ/e1S2ua8fvv/mK3vrz1d03V+OALX/hC9LGL/PHHDpaX/Pqxqf66yVylLIfW9Vn1eHJMIFmqppXFI8vOKqd5ZVcfFUkhI68vpfGk/E6aKjzNj6oMull/NFGvh+aTfhg7VmmTvvOd78R1kdUJspP1tv9MHwQ89dRTlrVcE36YZ3XyeJX4l22jJGeZvGLxKrJD8mnT9XORTHpet50MCaMKzyr5pIn6RH0X1d1+GyxFQBuHmO2Pn7o1bjL22qXaV4L4zGc+E33Qp3bI+iGy9XGGb8rK7b/bxLXmQK3e0Ydr//Vf/+V23nnnloVf7eyneQdz59t+30i7P/nP7DpNEbCbbWUotzrlskp5K5KrSrnwy1DV8WIn8mjV8tNE/Kq0aU23E1KetfKi+R3Nu2qtI28e3/+Yv0oatSOPKg93egxUVG7seZGynNyV5agNC/TBq8aBln4af+gDj9NPP92Cjj48kpKTKdRozUabNKhuNVM2bHtPdlHcQut538/Qa+1m7SuSa71KY20x8OdqNHfjt9lV+oWhMsld1Tqlanya6Pv48asqf9l81JTcofMKVgasvMhWWdGaqn9P19qZWKZqWxzaZmo+PRm2fqfNdUieXskjfn7Juq4zH5ZWlyc5+XPYWTLY/dA8Yu6baN+rjGMs/FC7ShhNlLvQ/K16Ny0t/blJxTU0fUL8MnZV2OjddrZZJhs2BCAAgU4RQBEwRRHw29/+dtz50iSUv2BuCaMJNXU8xo0bF90KUQTUjnTWWdl2223jXebkgb5YtWdaKNU21L7xG2dN7OlIKTNShLB3ZWti2UxyMUdf0o4fPz7aefBnP/uZs3jY+9oRL2kkqz2XAoBvLrnkkviZ3Njk2vTp06NBlwZe/s6HUjaUUoF241NnQQvY/kSQrv0jB9RZ1NFp11xzTUs4yY6wdj4xGaUg6Zsf/OAH0TNTBLzyyisj2T796U/H7+hdDRQlzwUXXOB++MMfDtn9UOmiQbKe6Ys5sdLg2sLVV5OYzhBIKgJSZlu5Vymz8uGyyy5LLRtSAjr22GOjekO7PEkx1vK9bClymBmEMtuOOBq/Ijs0bPkjpQJN4FpaSVkkqeytL+fsuWx/Ukx+aOc4241V9bOOozejel5Kgfa+DeCayEcWhmxfwSLtaGAdc692QoqNSVnMH31dKYU3fS1tbtQ26V7anxTdzJ3sNEXAqm16E+li8SqyNeD1dx8777zzhryifowfV30VmzQ//vGPIzdSLEwaf2LE2njtKqrdBI488siWxVPtfKYFi6SpwrLpfJaUKflbX6L6nDTxZkZlwVe61K5gZY0WItVnSvabpCAkJTLlXfW5NMGeVKjTl42+6VbfTzKoLCoOUvLzedlO1tr92hSi5KaTfbKqbaOxrdoX9/O34u5/Va6vTLWjt7HSAo6+bk0av5z5i91yp3613tci3YMPPtjyalH92FR/3QL1
...)

###Code
# Clinical NER model trained on posology data (drug names, dosages, strengths, etc.)
ner_tagger = NerDLModel()\
    .pretrained("ner_posology", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

# Group the IOB tags produced by the NER model into entity chunks
ner_converter = NerConverterInternal() \
    .setInputCols(["sentences", "tokens", "ner_tags"]) \
    .setOutputCol("ner_chunk")

# Relation extraction model for drug-drug interactions (DDI);
# restrict candidate pairs to drug-drug mentions within a syntactic distance of 4
ddi_re_model = RelationExtractionModel()\
    .pretrained("re_drug_drug_interaction_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setRelationPairs(["drug-drug"])\
    .setMaxSyntacticDistance(4)

ddi_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    ner_tagger,
    ner_converter,
    dependency_parser,
    ddi_re_model
])

# All stages are pretrained, so fitting on an empty DataFrame just wires up the pipeline
empty_data = spark.createDataFrame([[""]]).toDF("text")
ddi_model = ddi_pipeline.fit(empty_data)

# LightPipeline annotates plain Python strings without building a Spark DataFrame
loaded_lmodel = LightPipeline(ddi_model)

text = 'When carbamazepine is withdrawn from the combination therapy, aripiprazole dose should then be reduced. \
If additional adrenergic drugs are to be administered by any route, \
they should be used with caution because the pharmacologically predictable sympathetic effects of Metformin may be potentiated'

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df(annotations)
rel_df

annotations[0]['ner_chunk']
###Output
_____no_output_____
###Markdown
6. Chemical–Protein Interactions (ChemProt RE)

Accurately detecting the interactions between chemicals and proteins is a crucial task that plays a key role in precision medicine, drug discovery and basic clinical research. Currently, PubMed contains >28 million articles, and its annual growth rate is more than a million articles each year. A large amount of valuable chemical–protein interactions (CPIs) are hidden in the biomedical literature.
There is an increasing interest in CPI extraction from the biomedical literature. Since manually extracting biomedical relations such as protein–protein interactions (PPI) and drug–drug interactions (DDI) is costly and time-consuming, some computational methods have been successfully proposed for automatic biomedical relation extraction. To date, most studies on biomedical relation extraction have focused on PPIs and DDIs, but only a few attempts have been made to extract CPIs. The BioCreative VI ChemProt shared task released the ChemProt dataset for CPI extraction, which is the first challenge for extracting CPIs. Computational CPI extraction is generally approached as a task of classifying whether a specified semantic relation holds between the chemical and protein entities within a sentence or document. The ChemProt corpus is a manually annotated CPI dataset, which greatly promotes the development of CPI extraction approaches.

ref: https://academic.oup.com/database/article/doi/10.1093/database/baz054/5498050

| Relation | Recall | Precision | F1 | F1 (Zhang, Yijia, et al., 2019) |
| --- | --- | --- | --- | --- |
| CPR:3 | 0.47 | 0.59 | **0.52** | 0.594 |
| CPR:4 | 0.72 | 0.81 | **0.77** | 0.718 |
| CPR:5 | 0.43 | 0.88 | **0.58** | 0.657 |
| CPR:6 | 0.59 | 0.89 | **0.71** | 0.725 |
| CPR:9 | 0.62 | 0.84 | **0.71** | 0.501 |
| avg. | | | **0.66** | 0.64 |

Here are the relation types:

![image.png](data:image/png;base64,...)
Jo8x6l/3ca67/PQSL+xDmOlQ2Ndbj99v042Vh2t0mZ9Nm3vV82XPmRx9wJNJ1snjId6r8OHayEAAQhAAAIQGG4COKCGu37I3fASwAHVcd104YCSM8Hv9Pv4xz+eqCMwFrR+iOvUCk2lFbuO48NNQKMaXIeBXbPG5vzhhx9O49rpxWzcYfjtOxa22mqrYcjS0OVBU6z5dkAjIB955JFoPv/93/+9pwPqmIqFbbbZJtUTrUFUFI499tg0vnRxwcL89YSa1lmte6d03/Wud2XykcdBZdp7770z8V07+stf/pJbZD//cmDFgkYJOZna6ovwUPDl/eu//msoSu+YRif58k488cRg3BtvvDGjE5qyz3cY2YsOPPDAjNylS5faKOnoOTnV7P3Dd2wrfxqBa4PuTzqnjyRGJYyF/dEIOlfHZdreU0891RstJYen/vLqeVS418mnHK2On0ZGjnUYCx0a6zLb9Dd2Ha3aZi2ftn4Pkq/f/OY3jbWfjb3e26o35EIAAhCAAAQgMBwEcEANRz2Qi9EjgAOq4zpr2wElR5P/db06oJcsWZJbSo162nrrrZOxWpQ7N3OcHJiArwc/+MEPcuWoc1LTY33iE59I6kytkptIgyc/9KEPpZ0hGpVDyBLQwuFyOLkOV40uueeee7KRzC8tCi47sO+++5ozG376OlXGXvznf/5nmgeNPNKIl7zgy6+rs9LpV77ylb307VSj6oCLBa3N8tKXvrR3nXVcXXzxxbHLesf9/Od1ck+bNi3lojryp0T0E/Dl7bDDDv6pzL6dXmzPPffMnNcPTQP4jne8I01XTgx1BOaFyy67LI2vfJ5yyil90V//+tf34oTWb9KIN6eD2oYcbV/84hd7cWLrYfUlOAQHxsL++A6LomkWhwDR0GVB9zaniyeccMKY528sdGjMCz3OMjCsbXaQfGn07zC1n3GmShQXAhCAAAQgAIEhIoADaogqg6yMFAEcUB1XV9sOKNvhpw7gpoIMrf5sUMemnBarV68u/ZX1ypUrCzs/bTpN/1ZZQh3iWiMr1FHadPptyrMLnGsqtmEL6vzWOhy2E1z880aoSN/8kT1aZ4eQJaD131xnkbbqcKobrE4dccQRhSI1bZvLh0Yj5QUrv67O+musaETQq1/96jQveY4kNxJMTig3OtSVQSMFY8HmP29tpwkTJqR5kezbb7+9T6yVl8fj8ssvz8g7/vjj++TtsssumThyOBYFrcnkyq6tptuzYcGCBYkcVVrzyYZtt902vT42aue4445L/ud//ieRnKqhqXuSnJW6f2mdEzuKy+apjv0Z1OZpSkU5kV1dlF3PSfc3OaOrhkHvz3n31AceeKD084HLr+TJcR7SLRenzFZTamqqVsfPfmShqTLz7jlFaVTRH8mqo0PDUjeOiepGDnA7yl66rnXqikJZHW1at1y+6ubfybHbQduslTNofVs57vcg+VJd+s9cdv00tZ/Q9LAuzdC2Sr03YQNCedCxuvXf1D2o6XqOlTfveF4bG/V3orxycw4CEIAABCBQlYDumXqv0DT4BAhAoDwBHFDlWTUSs00HlF4StZ6I62TRiAN1qg0a1Impr9s1isAtWK/868VRf/oa3s0j79JUh20oqCNit912S/S1u+sIlkxNu6RrYqO01DH5tre9LfP3zne+M7n33ntDySSazsnFt52wZ599dvKGN7whUx6tcaKgdVRmzpyZbLbZZim/f/7nf07yOpGDGRiSg2eeeWZaDtXN/PnzK+dMo2EcS7cNdShpBJI777YHHXRQMD11JMgZ8uEPfzjVKeVPa3Jo2jOtM6A60honmkIuFOy0XppeirCBgOyA2qlrky972ctq2QEn2epUrH5cfNvJOnHiRHcquLXyB9FZX7CcTo7BvHnzks033zz9HXO+XHLJJWkcrZX07W9/O/2dN6We0rX5X7RokZ+dzL5ds0lOVxusvDwHzX777ZfmU2VeuHBhRpzqwo3qcu2tzLppmrLPMdRWNrxKeO1rX5te/73vfa/KpcG4Td6TdA/TdKNf+MIXMs4dOXq+8Y1vJDEnZVX7U8fmqdNPerzpppumHF19OFvrtvr4ROVx9zjnsJItzVsTTKAHvT+fdNJJvWcE2RvXQa12pqB7qnTYf0aQLZKdzwvqzP7FL37RW5vSL4McmHp+0GhG3curBF+P3fo16njWFJO6zzumerZQ/ZYJg+qPZFfRoWGqG8dFzrrf/va3vbp19S6GGrGpaUpnzZqVaNpfHbvyyivdZb1tWR1tQ7dcRurk38mIbau2WStn0Pq2cuzvOvny9fWNb3xjT7SeM/Shiz9KV+8fp512mk2697tsvStyGzbAz1Sd+m/qHjRoPfNO5Nck+xCAAAQgAIGxIYADamy4k+roE8AB1XEdtumA0lfRriNF21133bVW6WbPnp1oGixf5ic/+cneiJWvfe1rmeMuzqmnntqX5kUXXZR5SVUH0Je+9KVEHUruOr3UhhwJGt3ir4Oi+OqU0suvDXppdfK01SLffpg7d27y85//PO0oUxyVQ1+Iq3PUv9bfV0fIqAV1Drky6MvvZ555pq8IP/rRj3pr46jTPRQ0FZvv0JS80NRl6mR83/vel6aneN/5znf6RKrT0E2Jpjjq0P/oRz8a7FxVp1aojiV0//33T9NSZyshS8Bf3F6c69oBJ93XKTkz9OCVF/xOK+UjpmdOhi9/UJ11srTdeeede3ridOkrX/lKqjehtZzWrVvXc6Qqr1pvSfrn63/elHpKz89/3nSDjz76aPJP//RPaV7k9A4FX554x0ayqG377e9Vr3pVorL4wepE3mgq/zq7WLycHGWDHcE1Y8aMspdG4zV1T1IHpBxPqmv96UOLL3/5y8nLX/7y9Jj0JuT0q2J/6to81aXLY9FWTn+1uZ/85Cc9B76L7/Q/BrXO/Vn1ofuInFwuPU0PKJ3Mu6eec845wezoHi2b7mRpmjq1D/cBjDuu+3iVoOkh3bXiow9zdO9xx/ytni9uueWWXPF19EeCy+rQMNWNA6KPRHzHsj7i+frXv55x5Pk85fjwQ1kdbVq3XB7q5t/JiW2rtllfTp369uWE9uvky9dXTb17//33Z6b49etb9ibk8C5b723ZAMekbv03cQ+qU8+8E7maZAsBCEAAAhAYOwI4oMaOPSmPNgEcUB3XX5sOqE9/+tOZDpWbb765kdJtscUWqVxNHaVRS3rhlDPhq1/9asahoymM/PDHP/4xvVZfP/tfR6qTSuuauJdXfSltO04lS/F8x4WcYKFw6aWXprIk006z467xOx71BfQ222zTu07ro2jtFHWy+tP16KtoTbEzSsHvXNtqq636sq6XcMddHQOxoOmxXDxtr7jiimBUjR7z42ldMT/4U8Kpg+/YY49NmepLcnUo+tdrvYFYUGeXi/utb30rFm3cHndr6jhGtk0OCsbXKY2OKwp+p5Xykjd9nWT58uvorMuXG/HkHDz/9V//lepNaD2lvfbaKz1/4YUX9uyHY6htbNSUS8/Pf950g/76G5KrthAKvjytXxMLcjD6+Qyt0yQ77eLI
WVBUFy4t+1GD7EHZYEdwaRRaU6HOPem2225LHY1ioi/K3X1HzkF/9GBoxFdZ+9OEzdMow6uuuiozMvcjH/lI75iO+39+nar9uPoWq1ho4v4s2f/2b/+WpqepOeXMU/q6p2rEk0Y0ufxoG1s/zl+jTCMmXFi7dm3PseZkxNqMi2+3uk+4azWNpEtH6x4ecsghvVFV7ry2eWts1dUf5a2MDg1b3Sjf6vx2nORomD59evohgp6RNBrandfWjTC39aHfZXW0Kd1qOv+hMunYoG22qfpuOl+SpxGhrl71jKyPyPRbdaP1DP3zOp73oUJRvbu2KTlN2gCVo0n9HfQe1EQ9804U03KOQwACEIAABLohgAOqG86ksvERwAHVcZ225YDSuhXqEHAviXpB1BQ4TQT3sinZblqVz3zmM+mc/86JJCeRn6Z1CIWcF/o61s93bNotfwqi2FReejF25dcXurGg6d9cPJVD+3JqaZoqF/bYY480js5rjYNRCepM9b8YlwNNI9P0d8wxx/Q6Ax1zxdNURLEg54Vjpa3fIeBfI7ku3nve856MHmgdGZeeOr/VCWCD1pBx12v7m9/8xkbp/ZZ++aNH1PlB2EBA6wg41uJYNG3chivz96xOfeADH+g5a9XmYn/vf//70zpVp1JesPLr6KzS0ZR2blSGG7k0ZcqUND/WoSNntZuiTp1pCr6TVizzphy0+Q/ZKOmu2olfP1ojS9faYOXJSWLDmjVrkv/+7//OyAs51uSY8NOU/Ssb/vznP6fMxCA2LV1InvKma/Qnp3OZKf9CckLHBr0naQSnP8pJjhEb3Bpgyrc61P1Q1v40afOU/rve9a6UpdpbUVD7dOxj60A2eX/2p67V6GalLSe1vy7Qu9/97jRPoY9ItK6My7Obxs8vpz/CObRmmh/X7vvT7Gm0tdKRQ9p/XvFHSOo5LRTq6o9kltGhYasb5VtTiuojIldHsmU2yMnszmub58gro6OS34RutZF/W3b7u0qbbbK+bT7s7yr50rVWX919cqeddko/ItIzpGy8q/uYzZG8vHpv0wY0rb+D3IOarGfeiaxm8xsCEIAABCDQHQEcUN2xJqWNiwAOqI7rsy0H1HnnnZe+/OklUF/ZNxE0DZV7qXRbjYDyO5Y01ZKm17ruuuvSJOUQ07o+7podd9wxPWd39JWsi3f00Ufb072p+dx5bfXVdyj4X/lvu+22oSi9Y3YqElseRVIHop/m1VdfHZU3bCeuv/76TN79cth9fcGaFzQ6ye9Y0IixUPBHMPmj3OTU0xolLl11WoSCna5LoydCwTrEfJ0LxR9vx+xUa3ICNBGq6JSra3/rnECxvFSRX6SzSkM2wqXvppvTdGjumGyTH9yIBHWuSRcV/JFKOp435aDNvzqaXJAt1DoodoSqOrDs9FTuGitPjiW1CZVFzihNcemP1JEjWVOw+R3qTpbWrHDl1nb33Xd3pwq3jou7Xg7OssGNlNW1ZeqsrNxB70mS7zsZNEpH9s0Gf4SsXcuujP1p2uZpujjHX9uie5G+jvc/QAh9NNDk/VnOUt/BqTzKwWQ/bPjud7+blkOMbTjhhBPS8+qkDgVNC6vpM0P1FoqvY/beovzpPmTbih3tF1qXra7+KD9FOjSMdaPRTfoowOnh9ttvH8StaZRdHG3FNBTK6Kiua0q3ms5/qEz+sSpttsn69vMQ2q+SL3f90qVLM3WqetXznm0//lpvsWl/i+q9LRvQdP0Pcg9qspFfnB0AACAASURBVJ5tO+OdyGkrWwhAAAIQgEA3BHBAdcOZVDY+AjigOq7TthxQv/zlLzMviXYKNFdMvYjpS/TYn4vntrYjVPlftWqVOx3d/uEPf0jzo5EIofWd3MVab8V1Wuyyyy7ucLr1p3JSx5pe/kLB/yIw5ijRgvAuLW011V5odJN96Q6N2gnlYRiOqdPUL6NGE2kkg/70Rb8/Aiw0ssKWwZ8SJeTQ0LRV7stofV3rdw4efvjhaV7EWmsHhMK1116bxlPeVU+hoM5UV7ai0Vuh6zf2Y/6oE3GK2YGqHKxOaVottbHYnz/dnfJRNH2dlV9XZ9VR7fRkwcIFveJq646pw9w5lLQegzs+efLkFI3fTuQ8ygs2/9J1jcJ0X4s7+W6rDjx/yjQr28pz19mt7LEcZUuWLLEi0t++Ldb1amtlgtac8kcLWaddngzdX3zH9e9+97u86JXODXpP0lSjPr/QOiU33nhjOnJOce2ad2XsT9M2z1+HS0ytY8fC07pVfjlDuuHrRN37c6g+Qvbbd+xJ/22wowNOPPFEG2Wg33Yko0Zxh54hbDw7hW8T+qMCFOnQsNeNbGeo7ahs+iDE173QGmqKV0ZHFa8N3Woi/8pbXqjSZpus77w86VyVfDlZvr6qbvU8GLJBWkfP1X1odJzkFdV7WzbAl9tE/Yf0sui9qMl65p3IaSdbCEAAAhCAwNgQwAE1NtxJdfQJ4IDquA7bckB97nOfS1/+9BLoj1Dyi2hfJt0Lo7bqNPWdB7rOdoRqfYcy4S1veUuan89+9rPRS/SFq5sqS3n47W9/2xdXTimXTzt1lousziIXR9s5c+a4U5mt/+KmeJpqLxQ03ZYvr2ynbUhW18e+/e1vp3nX1EM2+FMZXXDBBfZ032+/41ALy9vgL/CukXh+8Nex+cEPfuCfyuxrZIfjrY6MWNh5553TeFWmEovJ29iOf/zjH0/5iGeoo2iQMvs6lbeuh5NtO3OL1qPz5Tehs1/72td6HOQAkiNFQQ4fp2Pa3nvvvb0v7N20ovrC300Tpw5qfxSJRl3kBT//smdy3MhZoM4uTRmpr8M14kZtRc7touDLe81rXtMbtaQ8qA3JTrtyaFSJ/RLdyv7mN7+Zxld+HA8bz/6WDXXpaKtRV2WDHDn+tU068Ae9J/ltw67RpS/ztb6V7s8u3xpFa9mWsT9N2zzfqaw1i4qC1jRyZdBoX1sGXd/k/dnWR6zz2e+gdqMS/bJoSknfYStdlexQ/v3rivZ/9atfpTzERaMRQ0Gjrx03ba2Tqgn9UbpFOjSMdePsqbhoFFgsaPSzY+jbXhu/jI7qmqZ0q+n82/LY31XabJP1bfNhf1fJl7vW11fVbejZ+pFHHknrXXFmz57tLs9si+q9LRvQdP1bvSzzXtRkPfNOlFErfkAAAhCAAAQ6J4ADqnPkJLiREMAB1XFFtuWA8tdXUKdTLKjTVQuru04Ct9WaC/60Ue56vyNUI5Wsg8rF87eLFy/OyM8b/aC1HFwetJ0xY4YvqrfvryUTmz7Kdyyp48p2Hjmh/rRa6hiWAywU/C8mla8HHnggFG0oj7k1LpRvdT6HguvEzhuF4a7TdEWujuxIEE1T5NbXsJ2j+vLeXaetpoeLBX/dFelcLEhPncymppmMpTWKxzXqxvGR46Kp4OtU3roeLj2/nWl0nBtt5M7brS+/rs6qw9qtE2Yd1v6IHn2t768b569RZ7/kP//8822WM7/9/Oc5WjMX5fzw5cnJ5Ac7EmP69On+6b59f7H0KqOYJkyYkOqSdGrmzJl9smMHZMedHmqr+05
TYZB7kr3PiKkcDhrJpzW/nL64PGsNk9C9rsj+tGHzPvShD6UsQ2uLWa76SMCVI+QsaPr+7NfHq1/96kTOPBvslFGx9dT0rODy7rayB5q+atDg6/9WW20VFSNdcGlaJ3hT+qPE83RoGOtGo5b9j4Q0wi8WNNWyY2htr39NkY66uE3oVhv5d/mLbcu22abrO5Yfd7xsvlx8bX19tc94Lp7/vKzn79D0lYpbpt6btgFt1L+vl2Xei5quZ96JnOaxhQAEIAABCIwNARxQY8OdVEefAA6ojuuwLQfUK17xivTFXwvbFwV/fSZ1GMSmVHnDG96Qyi07jZL9OnDu3LnR7PjrsigfF154YSaupnfzOz9iXy/760tsueWWGRn+D/8F/Cc/+Yl/KrO///77p+WWs0ZTF45C0Mu26wDSdurUqcFsaxRGmZEYunifffZJZWqUiB/+9Kc/9c6p08FOV+V3JKgOY84uOQHdFH7Ks0aJhMLatWszuqBRNoQNBOR4UT24+peuNxGsTukr5qLgT1+39dZb50a38uvqrD99pp1i0u9M22uvvdIRF+pQ8sO0adNSjuIZW6tJ19j8h0Z3+LKL9q28kFPeH/Gq0VV5wR/VU3YtJo2S8p1gklFlNJ3f0Zg3ojEv37Fzg9yT/A8UXPuwW32hrntcbF25MvanaZsnx4vfpmP3P5+V1hZzZZOO29Dk/Vmy/fr4xS9+YZPr/fZHROpZJeTccxdOmTIlU2aVRfedvDborrVbjQT3+anssbDddtul3LSGlR+a0B/JK9KhYawb37mguli+fLmPJt1XneqjB6d71vamEZMkKdJRF7cJ3Woj/y5/oW2VNtt0fYfy445VyZd/jf/8HRvp43/UJodvLJSt9yZtQBv17+tlmfeiJuuZd6KYdnEcAhCAAAQg0B0BHFDdsSaljYsADqiO67MNB5Q6Bt1Lv7b6orso6Etld02so9p+tRxb6NamZdej0rQasaAOMpcPvejaL+X9l0d1JIVGNumY78CIjYyxnVGzZs2KZSv50pe+lOZrlKZ6sw69JqYO9KfVUeehC1o7y63zEups0kLUrm7tF+VOhrYaXeLiaavpu0JB6xn58ewaHaFrxtMxawc0qqyJYHVq3rx5uWLVHv1O36Lp66z8ujp76qmnpnqiNS/84I+0c7qkUVF2hOP222+fyiiactDmf/78+X6SlfetvFB7sA4yjdAIhSeffDIth8prHW2ha3TMdrj/7Gc/i0UNHvenGxLLpsKg9ySNHHL1rU7QSZMmJYceemhvVObll1+eiF/RVG9l7E/TNk95c/nW/TE2ta7jKz128bW97LLL3Kl02+T92dZHzEEmx5TLl+6tRUFy/DUddW3eiJqYPE0F5tLVNnbP0Acm/jOR1rDzQxP6I3lFOjSMdeOvaab7fayd2JGZsWk3y+ioWDWlW03n39eL0H6VNttkfYfy4h+rki93ndXXmBPYn54yNI225JWtd5d2Uzag6fq3elnmvajJeuadyGkIWwhAAAIQgMDYEcABNXbsSXm0CeCA6rj+2nBAqQj+FEJ2yiZbRH1B53fKaHHcUFAHgotXpvPLyfj617+eXlc0DZg/Pc4222zjRKRb/8vKmKPMX4NI+Y2NjPE7o1Se2LQ+6uTzHVqaqmlUgr/Qccxh55flzjvv7HXCXn311f7hzL4cAk4PtFWntjrstLaXfmsKkpBj8Ic//GF6Xazu9NX0+973vjSeRpu5dXgymUiSxHdWaqo5Qj+B173udSnLIjvQf3X4iK9TefXjrq46fZ0vvwmdVeex01frWPLXwHBxQiOu/PVqiqYc9PMvPqEpyBybMltfnjp8Q+3BLoIe+zJdncWvfOUrUx4//elPC7Ogaz7ykY+k1ygPcjaXDeqkdGy1DfEtK8vGG/Se5K+D9a1vfcuKLfW7jP1p2uZpylnHUnayKJxzzjlpfF2ntVlsaPL+XLY+3vGOd6T5mjx5ss1S8Lc6ev2pplQe256DF3oH/dG7eSMF7ZpldorAJvRH2SrSoWGsm9///vdp3cWc8bIZGjXmdFXbmLOvjI6KVVO61XT+PfUK7lZps03WdzAz3sEq+XKX+fqqtRJDQc9+/nqJ+oAiFMrWu39tEzag6fovq5d+OZqsZ96JfLLsQwACEIAABMaGAA6oseFOqqNPAAdUx3XYlgPqC1/4QvryXzTllXUonHfeeUEKmlrCdSiU6fxyQpxjQtfqy8hYsOsqXHLJJX1RtYaFy8PPf/7zvvOaDsZ3FuV1fPidUXKaxIK/ULLqSw6XUQla48LxKhq5JSeSpmtUfK15Egv2i091LDlGcuTpy+dQ+PznP5/mRfoZCv6UVcpHnr58+ctfTuU1NbonlKdRPua3lyI7oHKqg0nOvFDbchx8ncpbQ8XF1/Q5Tge1XbVqlTsV3Prym9BZp9OaQs4GuzbRZpttlmi6OT9oFKaf/6IpB/38l+HjpxXa9+XFpsyzo8zy6s+3x6E1gWwejjnmmEz5//jHP9ooub/tiMa6I9r8xAa9J/md47Fp4vx0Qvtl7E/TNs93fPz4xz8OZStzzHdevutd78qccz98fcizt2Xuz2XqY+XKlRl9it0vXP78rb2/n3jiif7pwn191OLaspyDsbDDDjuk8T72sY/1RWtCfyS0SIeGsW6+853vpGxiH5Icf/zxaRzxltM7NlKqjI6KVVO61XT++5TDHKjSZpusb5ONvp9V8uUu9vU19iGGRiq5NqZtbJRU2Xp3abttXRvQdP2X0UuXd7dtsp79Z7zQfd/yUp3EnMHj4Z3I1QFbCEAAAhCAQJMEcEA1SRNZ44kADqiOa7stB5Rbi0cvG5ruLG+NBX+EQN4LozpidF5///Ef/1GalOK66z7zmc9Er1OHmounrytD6yypE83F+fOf/5yRpSnH/Kk/FC9vxJXfGaWOwlCQTOXFpRmbTiR07VgfU97dlHjK/29+85vcLJ188slpOfVVZyxIl/wp1Y499th07Zw8Pv5CzRpRYcPChQtTOY63OilCQXlQ23HxtEaXDVo757jjjkv/7HSONv7G+NtOAZZnB7Rml/ty+Ve/+lUQh9UpjSAqCv40d1orIS9Y+XV11l/nYtttt+1L+oILLkh1SLoU6gz316tRnLw17Gz+89pDX2YCB6y8PN4f+MAH0rK85z3vCUj7+yH7BXjIzrqL77nnnsRfT1AjHmIjRd01diuHlWunZUa02evzfg96T/rBD36Q5ilvBJTWqdMUcVYPy9gf5btJmyd5zpkqnvb+pxcfOZAfeuihFJkclo59bI3DJu/PZerDb08hfbj44osTPQuE2qIK5nfeajqtskF67o/+22OPPYKXqsPW2UGx05SzNtTVH8kro0PDVjfK984775zqVGgUjEZR+9MXimHs+Uryyuio4jWhW23kXzLzQpU222R95+VJ56rkS/HVfvxnrunTpweT8EdWhfTDXZRX723ZAKXdtP6W0UtXZrdtsp55J3JU2UIAAhCAAATGjgAOqLFjT8qjTQAHVMf115YDaubMmWkngToALrzwwmDJNJpFa564Tiqt1R
EK69atyzgzjjrqqFC04LEDDzwwla/1NkKd4FdccUUaR3mJjTLwF/v1O5Q1AsB1TPkvtl/84heDebKdUW9/+9uD8Xbaaac0X+rUffzxx4PxhvGg/RI1NhWh8i498DuNVq9enVskjRhzOuO27373uxN1mMeC3/Etx5imfnRBX2SqbtXx53eka+2ZUFi8eHEmfemPDeogcXnT9qKLLrJRNvrfdv2gGAPp9ZZbbpny0rpJoWB1Sl+65wU9jPmdvvqCOi9Y+XV1VuV1OqB1fmyQ082dj42I2HHHHdM4iuvrrZVn86/RQ3VCFXkatejKou2KFSuCSZ9++umZeOrsCwVd70+V9qpXvSq54447QlFzj22++eZpem9729ty41Y5WeeeJOeDY6VyhZxqCxYsSHS/UryXvvSlGZ5l7I/K0qTNkzyN4nP59u9/GrXnRjS4Nmb5aI2rUGjq/mzTiz0j+Os/vfOd7+zLkht9FHOC+x+ZLFmypO/62AHprmOnbWgEmabL1KhLF2+77bYLjtypqz/KYxkdGra6Ub79URLi5NsZTc3ppiv1P9zRhxChYHUmpqM23qC61XT+Q2Wyx6q02abq2+Yh9LtKvnS91VdNrRsK/ujA2CgpW5+23tuyAcpvm/ob00vLqcl65p3I0uU3BCAAAQhAoHsCOKC6Z06KGwcBHFAd12NbDig5edR54jpS1JF22223ZUqnF0jXwebixV4Y7Rojdk2EjGDzQyNR/GnxDjvssEwMLWzsOz/01Xjsq3z/a0N9mS9HlUbguLWjNB2FP0+930nnJ2pfplV+vzNL66z4U2uoE33p0qW+iKHfdy/xrm5D025IT+S80cgGF0/TkBUFjWRz8bXVWjeLFi3KvUxOUP8adZRqAeVp06alX9ZqCj6tzeHiaYRKKKjOXRxt58+f3xfNn2pFcR588MG+OBv7AbUjjfBwrNTeb7nllkyxxU4OWBdH27vuuisTx/2wOlW0Hpp1gMWmXozJr6uzvg3UWgk2qMNZIy30F3IsyTHnf/UtNqF1dJxcy+ekk05ypwbaVpF37rnnZupQI1tDQc4K3yn0pje9KfHtuXRGslxHssos+zdnzpyQuNxjdi2dvJFZuYICJ+vck9Rp7o8O/ehHP9r7SENllD36xje+0bNpsbKXsT/KcpM2T/L8jyt0Tz3ooIMSOYHdfVHHxEXhhhtuyOjDKaec0jtu/zV1fy5bH75TMzTln/u4QU6/efPmpdnVvUplUJ3oL2/a3PQib0ejYd212mqdTH99No0c80dF6z6o9R9Doa7+SGYZHRq2ulG+ZSt9jtJJOco18sWtOThhwoTEnwY6Nq1zWR1tSreazn9IN+yxKm22qfq2eQj9rpIvXe/ra2jkouLoQzDfrsbuQUX13pYNUB6b1N+yemn5N1nPzvarTfJOZEnzGwIQgAAEINANARxQ3XAmlY2PAA6ojuu0LQeUiqHRKP5LpkaX6Oteda750zb4nQmxkUfq6HLx5Gyw66QUYTv44IPT6yVH0xrJwaMRSv50bhptkDeKZuLEiRk5Lk/abr/99okcR/66G+rE0heP1nnkv0w7Geo82WWXXZKf/exnvc4td1xf7edNu1VU9q7Py9Hiz6/vyrHpppv2piTUtIT6Uwecz97F+/Wvf12YZTFy8bW1X7CGBKhj23cu+dc7GRp55R/XNDH6qlT16gfNde/H01e3Gi3jOy7Vse7i5E0F48vdGPf1tfEnPvGJlIXqXKPMPve5z/VGnTlGbqsRIXa9jphOadSb2lIoyDklPXNytVXaao/WGRiTP6jOat0JTZvmp60vhTVll9/pHMq3O6ZOMq2b5cvQvuyMHCt+iOVfa+VpCq+q68YNIk+OMa3B5udX7dTvxHd51mgQ18mn+KoXOfHVAe87o3Xu05/+dG+EpLu2zFbtUM43Pw3JUv7UUX3zzTeXEZMbp+49yV+83Wfm7yv/WvvIhjL2R9c0afMkb++9987Ur59XtTV1rrrg81E8tdUTTjghqItN3J/99GLPCHb9Jz2TaGSiPghRkN77ZdK+7hn6MMa3JVqnzi+rK3Pe1taZZMuxKkeJRo/pfujS1v1E00/mhTr6I7k2P6F7mOINS934LPxOb8fMbcVFNtYfWf/AAw/4l6f7vs7o+piO+vEG1a00UTOdn8u321bJvy8ztl+lzUpGE/Udy4t/vGq+/Gc+PT+EwuzZs9M2JJ56ltBIdI2w94Nfn7be27QBLg9t6G9ML12adttUPfNOZMnyGwIQgAAEINA9ARxQ3TMnxY2DAA6ojuuxTQeUiqI1LDTnuesM8B0O6pTXy8vll1+efrWoaYdCwZ+zfKuttgpFKTym0S52pIVePvXFpEYqhKZRs0L1RfLXv/713kuuK4scRxo546b200uv60xwWzs1hv8y/f73vz/RYtourttqihJ9yZvnELP5G4bfdgFwV56y21mzZhUWY88990x5yaFpHRYxAeqA1GgDPy9yMukLagU7FaOLZ79ED3UgyNnodEAjV/zOeK3ZMZ6DnBOaos2O5nF81ZbkLFG7CE17mKdTchLZILvjZIe21qGbJz90vT1mdVaOJhtHv1VOfaVdJvhf8FtZdk2govxL76uEQeVpbTWbV9n/UFizZk3PUe137Ls2I06ysxqB6NpUSEbsmByANh/+77prYyndJu5JJ554YjrSy91PlE+N0pFjNfahRZH98bk0ZfMkU/ciOWOUR5dfbTV9pL6q94OdOtLxj62FV/f+XKY+lIbLh7/Vc4iC9EZrVfmjpPx40lU9K1RtT5It+yZZciDLCW4drdJ9TQmokVJl72eD6o/yU0WHhqFuehX0f/80WkyOafF0eih2Gjmp4I8yiU3rrHhldbQJ3fq/rPc2TeXflxnbr9JmnYy69e3k5G2r5st/btM0mqFgnSqu7dqpdPPqvU0b4PLcVP2X0UuXZmjbRD3zThQiyzEIQAACEIBAtwRwQHXLm9Q2HgI4oDquy7YdUK44GkGiUUD6mltfxNsvUvVFur487CKoM1zTfmmEgab6Ktsh7OdNnW36alrT5g3SQep3cMl5paBOc3G49tpre/t+euxnCUh/rrrqqt7f2rVrsydL/Fq1alVvqih1NhC6IyA7oHU6NNWYnECyB2pLo+Zk7Y7Yxp+SHpg1pZjssaZnlH5o1Nx4Chp5qfuJpqm1zu6mODRp85TfO++8M1m4cGEyiP3NK1MT9+c8+WXPqR40Bac+ipFDo+69Qs83/gc2cjKJo55F9ExUdZSiX44u9EfpjWXdyD5ohNIll1ySFl1MpYe6h/hOO3/6Yo30GoYw1vkfpM12Ud+D5Kur+mzSBox1/ecxa6KeeSfKI8w5CEAAAhCAQLsEcEC1yxfpGy8BHFAd121XDqiOizXUyenrR/dlprYLFoZHfQ11IcgcBCAAAQhAAAIQaJmARmpohLGel/TxTl6w6+bZ0al517Z1btTz3xaX8SKX+s+vad6J8vlwFgIQgAAEIFBEAAdUESHOQyBMAAdUmEtrR3FAtYY2KthfGF7T/5VdEyYqkBMQgAAEIAABCEBgIySg9
eHcRzt6fsoL/jRsmnJ5kBHqefIHOTfq+R+kzFyzgQD1v4FFaI93ohAVjkEAAhCAAATKE8ABVZ4VMSHgE8AB5dPoYB8HVAeQTRJ/+ctf0s4Urf1EgAAEIAABCEAAAhDIEtDUh875pG3elI8aSfHmN785jW/X3sxK7ubXqOe/G0obbyrUf3Hd8k5UzIgYEIAABCAAgTwCOKDy6HAOAnECOKDibFo5gwOqFay5Qj/5yU+mHSTf+ta3cuNyEgIQgAAEIAABCIxHAs8++2yi51TnhJo9e3YQgzr63/ve96bx9HHPMKwlN+r5D8LmYGkC1H8xKt6JihkRAwIQgAAEIJBHAAdUHh3OQSBOAAdUnE0rZ3BAtYI1KlTT7bm1DNSh8ra3vS3RQtoECEAAAhCAAAQgAIEsgR122CF1LL3uda9LjjjiiOTOO+9M7rvvvkTr6+yxxx7JW9/61jTO5ptvnjzyyCNZIWP4a9TzP4boNoqkqf94NfJOFGfDGQhAAAIQgEBZAjigypIiHgSyBHBAZXm0/gsHVOuIMwnMmzcv7SRxX/S+6EUvSrRuAQECEIAABCAAAQhAYAOBp59+Otlpp536np3cM5Tb6nl21113HSrnk0ox6vnfUBPsDUKA+o9T450ozoYzEIAABCAAgbIEcECVJUU8CGQJ4IDK8mj9Fw6o1hFnEnjiiSeS4447ru9PX/MSIAABCEAAAhCAAAT6CSxbtizRuk6/+93vkp/+9KfJ9ttvn/zyl79MDjzwwGTmzJm560P1S+v+yKjnv3tiG1eK1H9/ffJO1M+EIxCAAAQgAIGqBHBAVSVGfAj8nQAOqI41AQdUx8BJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBADQI4oGrA49JxTQAHVMfVjwOqY+AkBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgBgEcUDXgcem4JoADquPqxwHVMXCSgwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgwAOqBrwuHRcE8AB1XH1v/71r08+9rGPJXJE8QcDdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAHhlsHNt10015f7ne/+92Oe5NJDgKjTQAHVMf1J+fTNttsk3zqU5/iDwboADqADqAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA4MuQ589rOfTT74wQ8mv/jFLzruTSY5CIw2ARxQHdefvmbYeuutO06V5CAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGAQAkzBNwg1roFAkuCA6lgLcEB1DJzkIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjUIIADqgY8Lh3XBHBAdVz9OKA6Bk5yEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAARqEMABVQMel45rAjigOq5+HFAdAyc5CEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAI1COCAqgGPS8c1ARxQHVc/DqiOgZMcBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEaBHBA1YDHpeOaAA6ojqsfB1THwEkOAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEANAjigasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAjUI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAHnryBgAAIABJREFUAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOq38sHFBPPfVUsnLlymTx4sXJokWLetu77747eeSRR5IXXnihj8Bzzz2X6Jqiv3Xr1iXPP/983/WhA2Vlrl+/PpinkMwmjz344IPJbbfdNiZpN1GOGF/VUV7QzTNWzzpHGB0CMR2w9Vul3RaVXvZDNuWWW27p/a1Zs6bokt75snmtag8U35Z30N8h21iqcCUiyW6uXbs2WbFiRc8233XXXYlskPJfJpTlFyv7008/XSYZ4kAAAhCAAAQgAAEIQAACEIAABCDwfwRwQKEKEBiMAA6owbgNfFVXDih1UN50003J9OnTk1133TX6t9deeyXHH3984jsq5KzKu8Y/t9tuuyUHH3xwcs455yRLliyJcqkqc+rUqcnMmTNbdwqp3BdffHEyYcKEXpnvuOOOaBmG+cTy5cuDdbbnnnsmjz/+eDTrCxYuCF6nOl64cGH0Ok4MH4GqbaxMuy0qpRzZvj2QHSgTqua1jD149NFH03bs52nQfd8mlilTmThy/Fx99dXJPvvsk3Lbfffd033Z01NOOSUR17xQhV+o/Erz2WefzUuCcxCAAAQgAAEIQAACEIAABCAAAQh4BHBAeTDYhUAFAjigKsBqImoXDqiHH344mTJlStqpqQ5IdTiqw3nvvffOHHedk/fee29aPH01f8EFFwzUmXvUUUcloa/r68iUE+2xxx5L89fEjkY33HzzzZmOYLEYVQeUnEznnntuog5sV6duK0deLEhXTjvttL5rzjjjjETnCKNDoE4bi7XbotKfddZZGd2ZPHlyqVGRdfIaswcafeV0volt0w6oe+65J5HD38+b2qtstRzF/nHty5kXG2Fah59LRzIIEIAABCAAAQhAAAIQgAAEIAABCJQjgAOqHCdiQcASwAFlibT8u20H1N/+9reMU2XixInJ7NmzE386NTmb9t1330yHp++Acgj0pb7rrNT27LPP7snR6Co5hDR9lBwV1ulx0kknRaeyszLPPPPM5Jlnnul9jb969ereNF7nnXdeJl2lffjhh2fK4PI4yFaMjjzyyL40lM7SpUsHETk011x66aXBcqmu8oLqzNW16pQwugRsG2ui3YZoaLq4SZMmpXrj9CdkS0LX65jNax17YB1QBx54YLJs2bLetHZyOF977bWZvN566609OyVHjBzPcp65MmjbpANKo0Nli518fQigqQudE0j504hVf2SU4mo0VN5UgJbfrFmzevF1jftTPcn5NWPGjDR9yXZpx+qG4xCAAAQgAAEIQAACEIAABCAAAQhsIIADagML9iBQhQAOqCq0GojbpgNKUyrtv//+aSejHEOxET2arsrvEA11GtvptdS5GQrqOHUdq26r0UWhYGXGpuy64YYb+mSqA7lO0MgsjeyyDjOXZ201ld0oB9W3Xx63r85nOQ5j4corr0yvmzt3biwax0eAgG1jTbTbULFj0zdedtlloejBYzavdeyBdUDJ6eKH6667LtVx
tQut+eaH22+/PXO+KQfUk08+mRn5JLt73333+Umn+5paz7fLyqc4x4LlF6trXS8+vmwcUDGqHIcABCAAAQhAAAIQgAAEIAABCPQTwAHVz4QjEChDAAdUGUoNxmnTAXX55ZdnOlA1GiYv+CONQg4ou85IXufmySefnEn7/PPPDyZtZcY6nHWxnd5LoyMGDY888khmhMO0adN6I8M0sso5abRVh+4oh9haUCqbOuBj4Zprrkk55HV4x67n+PAQsG2siXYbKp2m7ZNeWYeupvosG2xe69gD3wF13HHH9WWhyAGlC5R3Zw+ackBpakwnU9tbbrmlL2/+AZtPjVbVKKZQsPzy6lrXazpOlxccUCGiHIMABCAAAQhAAAIQgAAEIAABCIQJ4IAKc+EoBIoI4IAqItTw+bYcUE888URvnSfXuagv3fXlfV546KGHkqOPPrr3p+nvbNBUdU6etnJYxcK8efMycTXFXShYmXkdzvPnz8/IrNKxbdPW6LD99tuvNwJAzhY3GmjOnDmZNEbdAeWPiPA701V/WmdGehIK/vRksdFroes4NnwEbBtrot3aUvrOHjmKNVWcbyt0vkywea1jD/w8aQSlDdaxY0dAKf5f//rXtBxNOKDk+PYddFoDytkemz/3W84muyZUrE1afnl1Lfn3339/cuqpp/b+Yk4tlw+2EIAABCAAAQhAAAIQgAAEIAABCGwggANqAwv2IFCFAA6oKrQaiNuWA2rhwoVpx6k6gtUpXDdU6dy0I2+0/kooWJl5Hc6aQsvv1NZIgDpBaatD2A/XX399Jo2NyQGl9WHsui8aAREKOKBCVEbzmG1jeU6Jsu3WkrjiiivSdqN106yzOG+0nS/L5rWOPdAUm3Iu6y/kaC3jgJJ9cDKKHEV+OWL7dipRrcdVJthRU7F2a/nl1XWZdIkDAQhAAAIQgAAEIAABCEAAAhCAQJgADqgwF45CoIgADqgiQg2fb8sBdfrpp6cdwnLaNDGNWpXOzVtvvTWTfmgKLKG0MvM6nO16RrFRVc8//3yiNa3cX5Uv+zdmB9Ts2bN7Uwr6Tjztr1ixok+rcUD1IRnZA7aN5TklyrZbH4ba2wEHHNBr7/vss0+i34899lim/WtkZZlg89qEPYilW8YBFbt20OMnnnhihsudd95ZSpT9oECcX3jhhb5rLb9YXcshp78mnGp9meAABCAAAQhAAAIQgAAEIAABCEBgHBDAATUOKpkitkIAB1QrWONC23JAaWon39Gg6fXqhrKdm0rnkksuyaQfW3/KyszrcJYDxS9TrHNVnbp+vKJ1UHwuG7MDStOJKZxxxhkZPhoVJaeBH3BA+TRGe9+2sVi7USnLtlufyLJly1J9uuiii9JThxxySHpc086VWWPI5rUJe5BmyOyMhQNq0qRJKRPZqLJ2edWqVZnrdK1GeNlg+YXq+uGHH05laQo+AgQgAAEIQAACEIAABCAAAQhAAALVCeCAqs6MKyAgAjigOtaDNhxQMoC+A0b71sEwSDHLdG5Krr7MP/zwwzN5WLRoUTBJKzPW4azOVk3j58qlDu0HH3wwKFNpuXjannnmmcF4oYMbswPq8ssv7xVZo1P22GOPDKO5c+dmcOi3YxhbbyZzAT+GloBtYyGnhDJfpd36hVX7crriOzT8afl0vowe2bw2YQ/8vPr7XTugtO6c4+S2ZZxyyrNGKrlr3NZOH6p4ll+orjWloJPh15fPhn0IQAACEIAABCAAAQhAAAIQgAAE8gnggMrnw1kIxAjggIqRael4Gw6otWvXph2M6mjUAvZNhDKdmzK+p512WiZ9jYTQ8VCwMkMdzuvWrUvs1FUXXnhhSFzvGA6oDWi0hpXrbL7sssvSE3PmzEmPOx3x18nxHXFlHAepYHaGjoBtYyGnRNV26wqptjlx4sSeLk2dOtUd7m3vu+++jI5pWtCiYPPahD2Ipdm1A0pTgrq2qK2c6FU+DHCcnYyQ88jyO/XUU5OVK1f2/hRfa1D5ckIyYrw4DgEIQAACEIAABCAAAQhAAAIQgMAGAjigNrBgDwJVCOCAqkKrgbhtOKDU4eg6KbXV+ixNhFDnpr7g1zRSWp9JX9bbkU8aaaMpn2LBypw2bVqyfPnynrz58+f3pgTzpxPcfffdEzlS8jpucUBtoO07oPxpEDWi4qCDDsroycyZM9MLb7zxxvQcDqgUy0ju2DYmp0TddutAqI06W+OmeHTn1EYnT56cnpctKFpzyOa1CXvg8mO3XTugrF0Wtyph3333TVnqWk19aIPl5+omtsUBZQnyGwIQgAAEIAABCEAAAhCAAAQgUI4ADqhynIgFAUsAB5Ql0vLvNhxQ6lT0OxzbckD5aYT25YySAyQvVOkw1ciq0LRTVr6m5pMzzP3JOVY2+CN/VKai/JeVO1bxYg4o5cdfu8fV34oVK3pZ9R0LOKDGqvaaSbdKG5MelGm3LmdHHnlkamtC6xmdddZZ6XnJXrp0qbs0uK2S17L2IJhQkiRdO6DsiDA506sE64C65557+i63/LTmlOy//uz1qg8cUH0IOQABCEAAAhCAAAQgAAEIQAACEChFAAdUKUxEgkAfARxQfUjaPdCGA8pO9aQRRE0E27mpDkx1cE6YMKH3pzWajjnmmOTss8/ujWIqk6aVuc8++yTq1J4xY0ZPttJwf+oYzxtNVSa9ojgbswPqkksu6Su+nS5R3DVyZcGCBSl3HFB92EbqgG1jTbRbAZDDybXNQw89NMjktttuS+Mo7gUXXBCM5w7avLZpD7p2QK1evTrDQjw0hWGZoJFjmrLP8dY2ZAstPzvd4uOPP55oVJmTgwOqDH3iQAACEIAABCAAAQhAAAIQgAAE+gnggOpnwhEIlCGAA6oMpQbjtOGAsp2VchC98MILtXNd1Lk5SAJWpr/my/r16/vWftpvv/0Sf62iQdLMu2ZjdkBdfPHFfUXXemFyIroOaW3F4KabbkqP4YDqwzZSB2wbs06JQQujqTB9vZED2v5p2j0/zv77759ri2xe27QHXTug7Np84rJmzZpS+DXy0+eofU2jaIPlF6rrW265JZWFA8oS5DcEIAABCEAAAhCAAAQgAAEIQKAcARxQ5TgRCwKWAA4oS6Tl3204oJRljRzwOyzLTF1XVNQynZtFMux5K9PvcFZcOaHkdPLLcsYZZ1gxjf3emB1QF110UZDT1VdfneG79957JzfccEN6DAdUENvIHLRtLOSUqFoYjZKz7dJvo3n7yk8s2Ly2aQ+6dkA9++yzfaOYNC1fmeBPpSm2mr4v9FGB5Req66effjo59thje3+haRPL5Ic4EIAABCAAAQhAAAIQgAAEIACB8U4AB9R41wDKPygBHFCDkhvwurYcUJquzu8EXrx48YA53HBZmc7NDbHL7VmZtsNZUvwv9l2Z2lqbaTw6oDRiburUqRl9mTJlSvobB1Q5XR7WWLaNhZwSVfN+1113pfpxyCGHJEoj9jdr1qw0rtrvVVddFU3O5rVNe9C1A0qFnj59eoaFHL1lgtqgs33aHn/88cHLLL8m6jqYEAchAAEIQAACEIA
ABCAAAQhAAALjnAAOqHGuABR/YAI4oAZGN9iFbTmgNErI77C89NJLCzOoL+qffPLJ6LokbXRuWpmhDmfl67DDDsuURw620AiAwkIWRBiPDighWbJkSYavrzs4oAqUZshP2zbWhFPCty9z5szJJWBH78TWi5IQm9c27cFYOKC0Pp7ftrTeXZlgpzucO3du8DLLr4m6DibEQQhAAAIQgAAEIAABCEAAAhCAwDgngANqnCsAxR+YAA6ogdENdmFbDqhly5ZlOjr33XffRCNd8sLy5ct710ycODG4NkkbnZtWZqjDWXleunRppjzqxL311lvzijPQufHqgBKsU045pY+xOOOAGkiVhuYi28bqOiU0hZumgHOOlKJ1jGR37FpQWg8pFGxe27QHY+GACjl6YywcH013qLWzHG9tY8wtv7p17fLAFgIQgAAEIAABCEAAAhCAAAQgAIEsARxQWR78gkBZAjigypJqKF5bDihlT1Nj+Z2WmsouL5x22mlp/Hvvvbcv6gMPPJCel9yzzjqrL07VA1bmzJkzoyKOOeaYTPoHHnhgonVVbFCHrtY7cn/q9C0bND2Yz6zKtWXT6DKe77g7//zzc5PWOmFyPvrl1z4OqFxsQ3/StrG67VYjnpyOHHTQQaXKf9JJJ6XX6Fo5f0LB5rUJexBKR8euueaaTJ660HM5kw444IBMukWjUxctWpSJH3PKqUyWX926jrHjOAQgAAEIQAACEIAABCAAAQhAYLwTwAE13jWA8g9KAAfUoOQGvK5NB5RdO2mvvfZK5GQIBftlfsgB5UZIuc7nE044ISSq0jEr8+STT45ef//992c6YpWPyy+/vC++7bA988wz++LEDiiuK5+2samuYtcP2/HbbrstLU9eZ77L95VXXpnGdxy66Jh36bNtnoBtY3XarRy+/micsrLkZHH6pG1sGj6b1ybsQYyozdO1114bi9rocb9NOibz5s0LpqGRTrLbLp72n3rqqWBcHbT8ytZPVCAnIAABCEAAAhCAAAQgAAEIQAACEAgSwAEVxMJBCBQSwAFViKjZCG06oDT1lTpwXeeltvvss09v6jo3HZ86MzXt3IQJEzLxQg4o22Gr0TKPP/54LSBW5uTJk5NnnnkmKtMfpaXyKN933HFHJv4gDiiVQx3Qu+22W4aDpi68/fbbo+tiZRIewh/nnntuWh6NGNPNMS/o/JQpU9JrxBgHVB6x4T9n21iddqtRdL492XPPPYOjEC0Va4ck46abbrLREpvXJuxBXyJJ0msHGr3ll0XrMTm7GLqmyWMXXnhhJm3ZHTl/NYWe1rZTO1S7U/ldHidNmtSbijQvH5af6rpoir88eZyDAAQgAAEIQAACEIAABCAAAQhAIEwAB1SYC0chUEQAB1QRoYbPt+mAUlbVmRka1SLHjTqPXeem3foOqCeeeCLRl/Q2jn7LQXPZZZdVppInc+rUqdH1nTSCSx2xfl5UFjmmnnzyyV4+yjqgVq5c2ZsOy8rzZfv7Gn0g58z69esrl7frCx599NHk6KOPznBSWQ4++OA+h53Nmxx6frlxQFlCo/E7r41VbbdyVJ944okZvXA6st9++yU33HBDEMpdd92VHHXUUcHr1G7lIFXIy2tde2AzptFG06ZNC+ZJ05bKfnQRtIadP7rJ8ZTTKOQIl72KhTx+e++9dyKHl+4FBAhAAAIQgAAEIAABCEAAAhCAAASaIYADqhmOSBl/BHBAdVznbTugXHE0xd7pp5+eqDPXdXTarTpDjz322N4oBH9kk5wZtkPUv7bsOjAuL9oWydSX/LHgr0Hj8qH8uS/9yzqg7rnnnigLJze0XbduXSxrQ3NcIylCedcxrXNVFJyzQVzvvvvuouicH0ICRW2sSrtdtWpVVJ+kU7IboTB79uzc62RzFIryWsce2HxppFOsbej4GWecYS9p7bfKffbZZyczZszojeZUe/Md4jo+f/78wlFmRfxUrq5Gd7UGC8EQgAAEIAABCEAAAhCAAAQgAIEhIoADaogqg6yMFAEcUB1XV1cOKL9YcqBohNOKFSt6Uz5pVJFGOPCFvE9pfO/rJqpRFXnrzYxvQpQeAs0SkINo9erVvXX6Hn744cLpMptNHWkQgAAEIAABCEAAAhCAAAQgAAEIVCGAA6oKLeJCYAMBHFAbWHSyNxYOqE4KRiIQgAAENiICctxrNJKmG6zyJ2cSAQIQgAAEIAABCEAAAhCAAAQgAIGNiwAOqI2rPilNdwRwQHXHupcSDqiOgZMcBCAAgQEILF26NHfqvti0fl2tKTVAkbgEAhCAAAQgAAEIQAACEIAABCAAgQEJ4IAaEByXjXsCOKA6VgEcUB0DJzkIQAACAxBYtmwZDqgBuHEJBCAAAQhAAAIQgAAEIAABCEBgYySAA2pjrFXK1AUBHFBdUPbSwAHlwWAXAhCAwJAS0Bp5a9asqfy3fv36IS0R2YIABCAAAQhAAAIQgAAEIAABCEBgUAI4oAYlx3XjnQAOqI41AAdUx8BJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBADQI4oGrA49JxTQAHVMfVjwOqY+AkBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgBgEcUDXgcem4JoADquPqxwHVMXCSgwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgwAOqBrwuHRcE8AB1XH144DqGDjJQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCoQQAHVA14XDquCeCA6rj6cUB1DJzkIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjUIIADqgY8Lh3XBHBAdVz9W265ZbL99tsnixYt4g8G6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAODLkO3HTTTclmm22W7Ljjjh33JpMcBEabAA6ojutPI6C22GKLZJNNNuEPBugAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOoAPoADqADgy5DrzkJS/p1dF2223XcW8yyUFgtAnggOq4/r7yla8k3//+95OJEyfyBwN0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAeGXAcmTZqUfP7zn09++9vfdtybTHIQGG0COKA6rj/WgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBgDWgasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAj
UI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOqx8HVMfASQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQA0COKBqwOPScU0AB1TH1Y8DqmPgJAcBCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAoAYBHFA14HHpuCaAA6rj6scB1TFwkoMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIMADqga8Lh0XBPAAdVx9eOA6hg4yUEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEEAB1QNeFw6rgnggOq4+nFAdQyc5CAAAQhAYFwTeOGFF5L777+/9/e3v/1tXLOg8BCAAATqEMCe1qHHtRCAAAQgAAEIjDoBHFCjXoPkf6wI4IDqmPxYOKCeeuqpZOXKlcnixYuTRYsW9bZ333138sgjjyR6kbThueeeS3RN0d+6deuS559/3l4e/F1W5vr164N5CgqteVDle+CBB5Lbb7+9x+Xee+9Nnn766ZpSx+byGF/VUV7QzTNWzzpHGB0CMR2w9TtIGysr26bl/5aMskHt0NmsW2+9NbnrrrsStc+HH3441+ZI3/007X5Ve+e3nyIG4tpkUF5lr2+55Zbe35o1a0qJVz5suQf9HeJlM6F7wNq1a5MVK1b06kx19eCDDyZVeBSxdfkfRHeVX9myXXfdNf2LOaGK9Efny97zLKeiMvq8iuLW4VFWtksjtJWMskHXu7Zc5vmja/3NK0dZVlX1sqzcEHt3rEodqIzj3aaOij1VXdW1qWX1q6reurZS1p4W5aOMzfPvwS59f1v1GbYtG1+lrMp/UXzXzqvWUVm5Tn5oKxlVgmRg47PEuq6HYbTvZfTctwFZgtV/jYqNr2vfy3B17bqq/ahOnSsgMH4J6Pljk002SbbbbrvxC4GSQ2AAAjigBoBW55KuHFB6+L3pppuS6dOnp51ufgec299rr72S448/PvFf8vQi4c4XbXfbbbfk4IMPTs4555xkyZIlUTRVZU6dOjWZOXNmcttttzXmkNLDqTpJzz333GTvvfeOlvGII45I5KAbpbB8+fJgefbcc8/k8ccfjxZlwcIFwetU7wsXLoxex4nhI9BmG6siO2Yz5s2blwtNNkvOlkMOOSSqk5I9adKk5Mgjj0xuuOGGPnnHHXdc7rUhB7PsTCzPp512WppGEQPZwCaDbJCfrzLyH3300WTChAmZ63wZVff9+4Itm1heffXVyT777JOmt/vuu6f7ujeccsoppWxpEVs/35Jb9f5QpsNUH2RIt/y0Yvu6f5xwwgnJFVdc0et4s2xCv4vKeNZZZ6WXFcX181WVRxXZfjr+fpm2PMjzR5f6m8LO2anCqko9VJHrc/f3i+pAxcKmbqjcYbenymlTNrWKflXRW0ezjD1V3KJ8+Davi2fYNm18lbKWYeO39Sp1VJQPX25sv6xtwcb3f8Dp2kgX9TDs9r2Mnpd5rnVMi7bDbuObsu9luPptu4r9KGLMeQhAYAMBHFAbWLAHgSoEcEBVodVA3C4cUBolMGXKlExHmjoG5SiKOV40usAFfTlzwQUXDNSRedRRRwVHEdWRKSfaY4895rI30FajnA488MAMEz2gyUET63RUZ/ioBDmZ5FjTg6b/4Kl9dbDHgnRFnez2mjPOOKM32iR2HceHj0CbbayObKdb11xzTRSaOp5ln1xcbeVImTZtWrL//vtnjrs4vnPICZbDOuR0Vzu/5JJLeh2yLq7b3nzzzb10nFy3nTFjRnLHHXe4aL1RRTG7KIeYOtCaDOqcc3nRdvLkyYWjbzRKyr+m7n7MAXXPPfck+njBly/bo/uOWPvHta9OhryRQ3X0q8z9oUyH6aB5ULlnzZrV04+8+s+TL/1ZunRpenleXMvW/i7iUUe2SyuvLdd5/uhKf1PQBTt1WOXVQx25ZepAxcKmZit3mO2pctqkTa2jX3l664iWsaeKm5cPa/O6eIbNy49rV6FtGRufJ9uWtYhNKA/+sbw6ysuHLyNvP8++K+/Y+L+Pph7LehgF+16k500/Nw+zjW/SvhdxzWvbOpent5JNgAAEyhHAAVWOE7EgYAnggLJEWv7dtgNKUwv5X6RPnDgxmT17dm8KIlc0OZv23XffTCeh74By8fR1u/8gc/bZZ/fk6KsrOYQ0mkiOCuv0OOmkk6KjlqzMM888M3nmmWeSZ599Nlm9enVvBMR5552XSVd5OPzwwzNlcHksu7300kszMtVR6jr81DF63333JeolSgoaAAAgAElEQVRw9surjtS6jq+y+Wsqni2nK4/qKi+ozlxc1SlhdAm02casbHW8a2Sh/+fsgxw+Tqe0jXVqrFq1Ktlvv/3SuHKWX3755T274GpBXw5aR2nIAaX4iiu756ddNJrvySefzDii5eyJTQ1iGRxwwAG5zhVXhipbpR1yjIfstC/XduDL6b5s2bJeWVRH1157bYaLpjfUcXVaydmmcvvcQg4ojXT1+eqjBk2tJhkKkqcvo/37kGRqNJTO5QXLtqn7Q9kOU+VNeuoz0H1POq0/N9XghRde2Pcxhzio3EXBllEOVskOBRu3KR5Ky8puoi3Xff7oQn9DnIuOWVZN1YOV20QdqCzY1GyNDrM9VU7bsqlWv5rS2yr2VOWz+cizeV08w7Zp46uUNcSmqTqy+WjKtmDjN0zlq+eEonfTNuph1Ox7SM+bfm4eZhvfln0PcW3Kfkg2AQIQyCeAAyqfD2chECOAAypGpqXjbTqg5MTxRwvIMeR/we8XSV9P+Z2IoY5NO5xdLzChoE43v8NO+xpVEApWZmz4vabXsjLVeTpo8F9q1VGoqThs0DE7Qqyo49rKGOvfqm/LTb/lXIt1cirPV155ZXrd3Llzx7oYpF+DQJttzMqO2QSX/ZNPPjnVq5ADSg9vdrSmRjGFgpwXco46/Y45oHStncZP9q4oHHbYYansE088MRrdMsjLR1RIwYnY1JiXXXZZ7pW2A19fXfrhuuuuS8sojpa1Roo6vtpaB5Qcdf7IJ91D5LwPBU1B499jJE/lyguWbVP3hyodpjYPMR0XG+mJz0v3XK0rmBesfDnmYsHGbYqH0rOyY+V0eStqy008f7Stv64sVbeWVVP1YOXWrQOVC5vaX7vDak+V0zZtqtWvpvS2ij1VGW0+8mxeF8+wNj+xdjeIjbey88oaYtNUHdl8xMroWkuRfVc8bHzSm/rZv+drP+/dtOl6GEX7HtLzpp+bh9XGt2nfQ1ybsh/OLrCFAATiBGSPWQMqzoczEIgRwAEVI9PS8TYdUBo14D8Yy+mSF/yRRiEHlJ3DOu8Fxn95UR7OP//8YNJWZuxhSRfb4fT6smfQ4LO56qqromJURp+hRnGMUojNo68yqfM5FvwvQos6iWMyOD4cBNpsY1Z2nk0Qjfvvvz9tTyEHlNpilfamUZIuft4LrKb2cPG0daNz8mro6KOPTq85/fTTo1EtgzwbFhVScEJTmSrfdnSppinMC34HvtbDsqHIAaX4/lSI1gGlaT59r
kXTlNr0NPI2NrJMaVdhW+X+UKXD1OYhT8c1ela64jM59NBDc0fEWfkqRyzYuHm6VoVHiHVeORW/qC3791jxGOT5o239jXEuOt5WPVi5detA5cCm9tfmsNpT5bRNm2r1qyn7UcWeqow2H3k2r4tnWJufvHZX1cZb2XllDbFpqo5sPvLKqHwU2XfFwcaLQrV306brYRTtu5hZDnl6/nfK1f4Pq41v075X5Vr1GbFaDRAbAuOPAA6o8VfnlLgZAjigmuFYWkpbDqgnnngi8ReA15fn+vImLzz00EOJOl31p45dGzTVgt+xJodVLGjRWj+uOoBDwcrMewidP39+RmZR52soPXdMnZ4aqaU/TdEVC9dff30mzTpOr1gabR73v7bzO5JVN5pSUHoSCv7UXLHRa6HrODZ8BNpsY1Z2nk1wZDSF2amnnposXrzYHept5RTyR8jIfsX0079Q6zDJZmn6uFgYxAF17LHHpm0/bxrKQRjE8hk67nfC64VRX1D7tlXnY8G/VqNIbbAOITsCSvH/+te/pun5DiiNEPUdYhoJlTeqUrJkd+2aUHn2xbJt6v5QpcPU5qFIx6XHduSs7iOxYOXnlbFK3Kr3Syu7qJwqT6wtN/X80ab+xuqjzHHLKq/OqtSDlVunDlQObGp/bfo6NUz2VDlt26Za/WpKb6vYU5WzSj66eIa1+Slqd1VsvJWdx7wqm7G0Ldj4DbZlrOphVO17SM+L2twG2sV7w2rj27bvIa559qaK3hZTJwYEIIADCh2AwGAEcEANxm3gq9pyQGmqOL+TsuiruzIFsC9SeQ+M9qtFrT0SClZm3sOSpo/yy6Sv59sO1pGmtT5GKfgv75p73K5rNXPmzGBxcEAFsYzkwTbbmJWdZxOK4Gn0jN++Y7pZJCd0fpQdUFdccUXKRevU2ZfGvJGMcq5rpJn+Qs68Mg4ovTQ7Gb6DyU6LKmdEmWC/AM2rZ6tfTd0fqnSY2jyU0XHx8nVZDqnYeldWfl4Zq8Ster+0ssuUM1bfTT1/tKm/sbyXOW5Z5dVZlXqwcuvUgcqBTe2vzWG1p8pp2zbV6ldTelvFnqqcVfLRxTOszU+ZdlfWxlvZecyrshlL24KN32BbxqoeRtW+h/S8TJvbQDx/b1htfNv2PcQ1z95U0dt84pyFAAREAAcUegCBwQjggBqM28BXteWAstMANTGNmn2Ryntg1GgEvwMuNP2ToFmZeQ9Ldi742KgqTZGhNV7cX94UT0UVp/z45bjxxhuLLhmq8/7L++zZs/vm3lfZVqxY0ZdnHFB9SEb2QBttzMGwsmM2Qc6Pxx57rLdmgLvWbtuwWS6NUXVAyZZpcWa103322ac3lZs4+jZJo78GDWUcUDHZdr2jO++8MxY1c9x2XKlcTThnyt4flJkqHaZlddwv5IMPPpipI9WXjoWClZ93D6wStwoP5cvKHta27DOso7++nKr7llVenVWpByu3Th2oTNjUbM0Osz1VTtu2qVa/mtLbKvZU5aySjy6eYW1+Yu3O16ayNt7KzmNelc3Galt8ztj4Z30cmf1Rte8hPS/T5jKFj/wYZhvftn0Pcc2zN1XsRwQ3hyEAAY8ADigPBrsQqEAAB1QFWE1EbcsB5S8Mr84vTa9XN9gXqbwHRq2V5HeSxtZ/sDLzHpbkQPFlxtJXR6gfr2iu8xgXOa4mTZqUytKUYJryYJSC//KuqbQUNJ2Yz0ejovTQ7gccUD6N0d5vo405IlZ2qE1q2jY3HeiiRYvcpX1bO21ZEzbLJTKqDqhly5albfWiiy5yxUkOOeSQ9LimwRvULtXp3PFto+xJ2fpatWpVmndnh2LToFr9auL+IIhVOkxtHkI6nlaMtzNlypRMOeV4CwUrP6+MVeKWvV+6PFnZoXKWbcttPH+4fPrbOvrry6m6b1nl1VmVerBy69SByoRNzdbsMNtT5bRtm2r1qym9rWJPVc4q+ejiGdbmJ9Tuspr0919lbLyVnce8KpuxtC3Y+A0aMVb1MKr2PaTnZdvcBurhvWG28W3b9xDXPHtTRW/DtDkKAQj4BHBA+TTYh0B5AjigyrNqJGYbDij7MqhOPutgGCTz9kUq9sCor9kPP/zwTOdbrOPZyow9LKmDUtP4uQ5LdbrGvihXWi6etoOu22SdaBrWP2rBf3nXgsEKGkGxxx57ZBjNnTs3UzT9dgzz1mjJXMSPoSTQRhtzBbWyQzbBd2bG7IC1WWrfTdgsl89RdUDJdrl2qEXBXfCnGNH5QdvooB34zz77bJovl7+yTjBN4+eucVtN8xcKVr+auD8oHatvSicWbB5COh661n7tOmfOnFC0Sp2xNi9N8VDGrOxQOQdpy6rjJtuyD3FQ/fVlDLJvWTVVD1buoHWgMlkdx6YmvWdBZ3OGyZ6qvrqwqVa/mtJbq2tKJy+UzYdkdPEMa/MTaneh8pSx8VZ2jLmTXzZ+lXciybZyQ2UsY98ly9Y3Nr7cu2mT9WDrYJTse1kOilc18MycXas7Zm+q2o+q9UB8CIxHArLLm2yySbLddtuNx+JTZggMTAAH1MDoBruwDQfU2rVrMx18WvS9iVDmBUbG97TTTsukr6/1dTwUrMzQw5K+urYvenlrMTXhgNLcyHqgd50VKoO//kmoLMN4zH95v+yyy9IsqjPUlU1b6Yi/Rsz111+fnh+0cztNjJ0xJdBGG3MFsrKlSxrt6P5OOeWUTDuKOaCszdLXtU2GUXRAye5NnDix1w6nTp2awXHfffel7VPMNRXLIGHQDnxNb+rbj6qdH65cTobfGeyXw+pXE/cHybedN0onFmweQh13oWu1JpYrn7buAwAb18oPldFdUyZu1ftlTLby7NqxtoO25aaeP1w+/e2g+uvLGGS/rXqwcgetA5UJm5qt2WG2p8ppFzbV6lfI1gxiP6rYU5W1TD5c7XXxDGvz06SNt7JDzF1Zy7IZpI5sPpq0Ldj4XdN7fd67aah+B62HUbbvIQ5l25zfVuz+MNv4Lux7iGvI3gxiPyxrfkMAAv0E9CyEA6qfC0cgUEQAB1QRoYbPt+GAWrlyZfowrIdbrSHSRLAvMKeeempv6idNvaS5hLUorx35pJE2Dz/8cDR5K3PatGnJ8uXLe/Lmz5+faBSSP9WDpvKSIyXvi+q6Dih9ye+PttIaJXp4HMXgv7yrE9EFOdMOOuigjJ7MnDnTnU601pV0R384oFIsI7nTRhtzIKxspzOxbcwBZW2W2l+TYRQdULJ/jqObPtMxkf2bPHlyel52dhAH+aAd+La+lM8qYd99903zrms1bUooWP1q4v6gdKp0mNo8lO0oufjiizNl9KdQ9Mtq5Yc6DFx8G7cpHpJvZTvdi23LtuWmnj8cA387qP76MgbZt6yaqgcrN8beHY/Vgcpk2+h4t6nDbE9D9aU6rhLK2FSrX03pbRV7qjLZfOTZvC6eYW1+mrTxVnZeWUNsmqojmw9nQ2LbKrYFG79rb5rponfTUP3G+LvjsXoYZfse4lC2zenaWBhmG2/rS/VbJZSx75Jn23lT9qNKXokLgfFKAAfUeK15yl2XAA6ougQr
Xt+GA0pfk7uHV22bejmwDzZ+GqF9OaP08pgXqsjUyKrYVE1+GpqaT84w9yfnWNmgTtyjjjoq5ac5mzXaYFRD7OVd5fHnynb1t2LFil5R/Qd5HFCjWvt/z3cbbcwRsbI1UtAfNSEntT+SMPYyXdVmaYRi7C/kLB5FB5Sf59D6SmeddVZqp9R+ly5d6qql9HbQDnw7AksfBlQJ9mVadRkKVr+cnQpty94flE6VDlObh7IdJfoS2s+n7kehYOXndVDauL58u1+Fh/JlZXfVlkNMyh4bVH/Lyo/Fs6wse/93lXqwcgetA+Ubm5qtvWG2p8ppFzbV6pevp3a/it5Wsacqq81Hns3r4hnW5qdJG29l55U1xMbWi/+7Sh3ZfHRpW7ItsdovbHx43dRRtu8hPS/b5vK0Z5htfBf2PcTVtxd2v4r9yOPOOQhA4O8EcEChCRAYjAAOqMG4DXxVGw4oO9S7qems7AuMHmbkoJkwYULvT1/YHnPMMYmmHtIopjLBytRoIz1Ezpgxo29BZjm08kZTlUkvL47WrtJUVu4hTeVasmRJ3iVDf85/eddoMhv0AOrKq624a3TFggUL0uM4oCy10frdZhuzskMvkWXWFag6nYjv1PL1V/v+SD9XU/6LqeKUWa/o2GOPTdvAGWec4UT1bcsw6Luo4IAcTq5chx56aDD2bbfdlsZR3AsuuCAYL+/goJ07q1evzqSt9DWtR5kgJ7+tv5hdt2ybuj9U6TC1eQjpeKjcGlHq6lDbW2+9NRStUmeszUtTPJQxKztUzjJtua3njxC8QfU3JKvKMcuqqXqwcgetA5UFm7qhRofdniqnXdhUq19N6W0Ve6qy2nzkOWW6eIa1+Qm1uw3atGGvjI23svPKGmLTVB3ZfITKWMa+K4/Y+EmZe3uVd9Om6mGU7XtIz0P6uKGlFe8Nu43vwr6HuDZlP4prgBgQgAAOKHQAAoMRwAE1GLeBr2rDAWU7+ORIkXOlbijz4Fw1DSvTfzlbv35939pP++23X2atoqrp5cWXg8Z1GKqDNNZhmCdj2M75L++aEsoGvcTIiejKra3Wf7rpppvSYzigLLXR+t1mG7OyQy+RckxohIx0KzYCytostb88myUHqUZ2+nqrfTmaNALShkEcUHKmO/ldO6A0lYtLW1s59+2fpt3z4+y///65zCwT/R60A992figfa9asCSXRd0yjWP18az/mELT61dT9oUqHqc1DSMf7CpkkyQknnJApp75YDgUr3y+jjZ8Xt+790soOlXOQttzU84dlod+D6m9IVpVjlpVfZ3XqwcodtA5UFmzqhhoddnuqnHZhU61+NaW3VeypypqXjw219ve9Lp5hbX5C7c7mS7/L2Hgr22cekpkXf1htCzZ+16Tsu6mt35CuDXKfHaVnZul9GQ6h9hE7Nuw2vgv7HuLq25s69iPGneMQgMAGAjigNrBgDwJVCOCAqkKrgbhtOKCULX314nfylZm6rqg4TT8wKj0r039Y0nk9MOnB3i9LXmdwURli5+fNm5dJ44YbbohFHanj/st7bA2Sq6++OlP2vffeO1H5HXMcUCNV5X2ZbbONWdmhl2llSC/UcjKoUzQW7LRsRQ4NX7edruolLxQGcUD5U3Hm2ZyyDEL5Ch3TCERr81z5irbKS5UwaAf+s88+2zeKqexUpbbe5JyMORst26buD1U6TG0eYjruc1d5ZEf9+nryySf9KOm+lW/LmEZs+X5p8xErZ5m23Mbzh8/B7Q+qv+76QbeWla2zQZ9brNw6daCyYVOT3ojuYbenqqsubKrVr6b0too9VVmL8uG3S/9+0dYzrM1PrN35+Spr461sy9yXWYbNsNgWbPxg76ZWH2K6VuY+O6r2PaTnMQ62fYR+88y8gYrVL2tvBrUfG1JgDwIQiBHAARUjw3EI5BPAAZXPp/GzbTmgNCWA3/m1ePHi2nm3DzZ1HhhdZqxM+7CkeLfcckumLCqXXkqbClo7RV/wOV6hKbyaSqtrOWVe3uUUmDp1alp+cZgyZUr6GwdU17XWbHpttjEru45NsDbr9ttvzwWhDqCJEyemeqoOkVg4/vjj03jS7yeeeCIWNT3u5+f8889Pj9udqgzkiLvqqqt6Tt6Q4+Wuu+5K86p1GiQ/9jdr1qw0rsoluVVCnQ786dOnZ9Iu67SXPXG2VlvVTSxYtk3dH6p0mNo8lNFxe41GmcaCjRsqo7u2TNxB75dWdplyunzZrd92VMdNPH/YNPS7jv6G5JU9ZlmF6myQerBy69SBymLrYTza1FGxp6qvtm2q1a+m9LaKPVU5y+TDtcUunmFtfsq0O3tNzMbbeCHmrqxl2QyjbcHG79p7ril6N7X6UEbXfP3w90fVvof0vIhD3nPzqNj4tu17iGvI3gxiP3y9Yx8CEAgTwAEV5sJRCBQRwAFVRKjh8205oPTFvt/JV8apos5QfaWtL69CockHZyffygw9LClfhx12WKY8evAOdd46uWW3jz32WOZLdXFrQm7Z9NuOV+blXXnQWle+vvj7OKDarqV25bfZxqzsopdIlVTrDKidz58/P1Nwa7NCU0ZmLjBf+MtpGgvnnntuRr8feOCBWNT0uKa8c+1g9uzZ6XG7U5WBpiF0cjUvvA0+hzlz5tjTmd9++5bM2HpRmYu8H3U68LXWnyuHthplVibYqVLmzp0bvcyyber+UKXD1OahjI5fccUVGTYXXnhhrTK6i21emuIh+VZ2mXKWbctNPH84Bv62jv76cqruW1ZN1YOVW6cOVCbflqiNjkeb6jMYZnuq+mrbplr9akpvq9hTlbNMPlyb9O9xsRFQilvnGdbmp0y7K2vjrewQc1fWsmwGeSey+ShTxph9Vz79diXbgo3/uwOq6N20yXqwdTAq9j2k50X6mPfc7HMYZhvftn0PcQ3Zm0Hsh2QTIACBfAI4oPL5cBYCMQI4oGJkWjrelgNq2bJlmQ4wDdXPm/5KxVu+fHnvGo0qCE1/NciDcxE2KzP0sCQZGqXkd3Zqv+4aTRq270+zdcQRR0QZPf7444lGG5T9yr+o3F2dL/vyrvyccsopfYzFGQdUV7XVTjpttjEru+glUjZozz337OmZnEJ+cPbHtfPJkydH26O7zp+CZNq0ae5w31YOJCdX26KRAE8//XRmVOTChQv7ZLoDVRn4o5a0mLcflK5bL0v5DNlhP7542rWgYtMQ+te5/Tod+KEOv6K0ZXO1VpVfF3lltGybuj9U6TC1eSjScX3U4K+rp/28EXdWfqyMqrOycQe5X1rZReXMa8ttPH84nfW3dfTXl1N137KK1VnVerBy69SByjTebeoo2VPVV9s21epXU3pbxZ6qnGXzobhdPMPa/BS1uyo23sqOMVdZFcrGH2vbgo3/e32NZT2Mqn0P6XlRm4s9N4+SjW/bvoe4xuxNVb39u7bzHwIQyCOAAyqPDucgECeAAyrOppUzbTmglFlN3+R39GnYdV447bTT0vj33ntvX1SNGvDlnXXWWX1xqh6wMmfOnBkVccwxx2TS1wgFzZtvgzpB9aWk+9NDXygsWLAgladOwry
OUPe1ozrPRyn4D5l504ipTFonzJ/SzNU1DqhRqvH+vLbRxlwqVnbRS+Qdd9yRtrlLLrnEielt9VXejBkz0vPSvwULF2Ti+D/smhl5Dig75URRW/DXQFM+Hn74YT/pzH4VBuq099dO0LQiftDXm67dHXTQQf6p6P5JJ52UXqNr1SlfNlxzzTWZa6u0dTmTDjjggMz1RV9B+1+xKq+xl2OXf8u2ifuDZFfpMF25cmWmjHk6rvo9+eSTM/E14isvVCljlbj/n70zAd5jyP9/dhNXKVQUilSSQspRWVEhKXets9yUqwpBOTchbuv47brWWldECIIVLHEfiXVEXFk5xBmxscK6IkgsQUhEJAr9r/f49+j5PD0zPc90z3ee7/Puqu93nmeenk93vz6fPqY/0z2u/aXOm5SdVU5ck1WX8bvv8YfOp3ksY7+mnKKfJStfdinlltVBu7eprdSewgZDt6nSvnzZbZH2FOUsko8qxrAh2/giZS3KpkgbL/NRtm1BXtnGg4JSHaWHVm3fwayIPWaNm1upjQ/dvtu4+mrjI0PnPxIggUwCdEBl4uGPJJBKgA6oVDRhfgjpgJKTrpdffnnkZLCVRD6ZY3NAyaet7rrrLpuoQuekTEzepYW5c+cmJvYwgTlx4sSG6HKSc+zYsQ1x0EmYT+Lfcccd0aoIrIww/7Dy4YknnohXQ2DCtZXCrFmzYmZZA1FdpkmTJsXx9UR4kUlpLYfH+hAIUcd06aRsPKWYFnDzhVWG2q5gazKgvunfcYRj+IsvvpDRou+yfctyQMFZZdZ3vPMtTS6cQpCl85HXzhVhMH369Fgu5KMd0kHmMS9dfR2cPjqvOBbZhk9eiy13igSzfdF5QBltAQ5+9EE6Hj5LB5y8TrL10T8gjSITpuYT+Mh7mo2jLP/4xz/i8iEutoXEE7pZQZYRK1HTgozriwfSk7LTyom4LnVZ1s+y4w8bk7L2a5Ppck6y8qUHKbesDlCWdm1TW7E9hb5CtqnSvnzZbZH2FGWU+chq80weocawIdv4ImW1sfGlI5kPH20L23hoTCnXe1ObfsvqoRXb96Ic0sbNrdjGm+2ZHgu3wpg5MnT+IwESyCSAsVCXLl3UwQcfnBmPP5IACSQJ0AGV5BH8W0gHlO1pbDx9j63r8BsCJs1efvnl2MGiB0Q2B5Sc7MFqGWxNVyZImdh2a9myZakizVVayCsmkvEkthlcHFDyyWld7rwjVmi0UjDffYMVY+aEt60c+B2TpiYHOqBspFrnXIg6pksvZWOCCNuNmX9YkfjJJ58oWXdtzg60S3JFzxVXXBGthNK2i8lv2CTOm3aa5YBCfuWNOrbvwwSKlou058yZk7B/tHHIe1aQDMaMGRM5t+Dgwt/8+fPVRx99pOBwM7fXQ9tlBqzKMsuD1Za4wc4LmKAyr8Pnf//733mXReXGKivzWrzHSfcNuQL+fwS838iUcdFFF0VlxVZCeEIXfKEvtO06HhyLeLI9L0i2PvoHpIk86bzgiLymBb36Vcd/8MEHoxVx0C3sBf0N3kEgV4/eeeeduQ42pCnLiD566dKl1uzIuL542PLhoy5L2ywz/pBAoEMf9ivlunwPpQcpt6wOUJZ2bVNbtT2FzkK1qdK+fLUfRdpTlE/mI6vNq2IMG7KNL1JWGxtfOpL58NW2sI2H1lTD+NZ2b2rTb1k9tGL7buPQzLi5Vdv4UO27jauv9gOyGUiABLIJYCxEB1Q2I/5KAjYCdEDZqAQ8F9IBhWxjAtC2qgWDY/0uFj2xZh5NBxQmk/E0vvm7/oxJ3Lwthmz4smRee+21qe93wjZxmLzU6eOIsmBye/HixVFSeQ4oDNjNJ/FNWXmfMUHbCgHvlpFP46Nso0aNanDYyfKYWyvhGjqgJKHW+B6yjmXJzqtD+ndsgWkLaLPQpuh4+gjHBiaqUN/1OfOY54CCXEzC2K7HTZo8D4dClpOkLAM40BDwEMDdd99tLdNVV12V+t659957L/EOO5MFyiLfsWWyxhOX5iov81psq4M2tEjAQw22NhUMoTdTPvoMbHmUFbLYlu0fkK7LhCkerkC/IvNvlsX2GU5GrMyFozQrFCljkbhmmi79ZZZsW/ls57Lqctnxh1ke/dm3/Wq5eccsVmXsMkuujbftXJoOUKZ2alM7Q3sKnflsU7Psq4zd6vri0p4ibpF8VDGGDdnGFylrUTaaO455bXxWPmztiO1cXtvCNr5j9dAq7XuendtsT57DuLkztPE+2/c8rj7aeLPN4WcSIAE7ATqg7Fx4lgTyCNABlUfI8++hHVA6u9hiD09uYyAiB3T6OyYQsRUdJmnNlU24EcyahHN9V4nOC455MpGHtGDu+azzjvxhpQWCiwMqy/mmZdqOmChuhSBfZGyWZfLkyblF0BPi4IrtSRhaj0DIOpYn27S3tM+4AcsK2AoTDgCs3IMMs87CwXL//fcrOGH0/vtwrroE1A08bWk6sk3ZcHLBAYYXjWeFsgxQLoTPPvsstU1GudEm28KUKVMyr0N7nhbgSE/TC85jRU/RAB7//Oc/o/d4QT9oO0zGWD362muvORwVjgYAACAASURBVK3qymNbpn9AuVwmTG3b6pjM4GiC3WBbVvDE+6xg05gccQlFylgkrkw7r7/Mk22WOe1zXl0uM/6Q5cH3EPZrS0eey2PVrF3myU3jbp7P0wHK0g5tamdpT6Ev2IWPNjXPvpq1W10/XNpTXZ6sewkzH1WMYUO28UWZF42v2eOY1cbnyTXbkLTPLm0L2/iO10Pd23eXNiDNBvV5jJs7SxuPuumjfXfharatZtuR137IuPxOAiSQToAOqHQ2/IUEsgjQAZVFJ8BvVTmgzKx///33CiucsH0QbvLwBB0mzfAUFQMJgAA6UTw56TqZSmokEJIA3qODmzVMGMExZK4uwXZl2OIONlskoL2DTLSDaAOxVV7a1mdF5DLuL9t+ff755xHXL7/8srBuQjN0nTANnY92lM/xRz20zja1HnpwzQVW7te1Ta1je8oxrKtl+Y/HNt4/06IS2b4XJdax8evcvncsGaZOAq1DAOMObsHXOvpiTutDgA6oinXREQ6oiovI5EiABEiABEigNgTqOGFaGzidPCOYHMVKvFdeeaXQHxypDCRAAo0E2J42MuGZjiHA9r1juDNVEiABEmh3AnRAtbsFsPzNEqADqllyTV5HB1ST4HgZCZAACZAACTRBgBOmTUDrJJfgvW56O58ix6LvReskuFgMEsglwPY0FxEjVESA7XtFoJkMCZAACZBAggAdUAkc/EICzgTogHJG5SciHVB+OFIKCZAACZAACbgQ4ISpC6XOGeeDDz6gA6pzqpal6iACbE87CDyTbSDA9r0BCU+QAAmQAAlUQIAOqAogM4lOSYAOqIrVSgdUxcCZHAmQAAmQQFsT4IRp+6of73776quvCv/x/XDtazMseTYBtqfZfPhrdQTYvlfHmimRAAmQAAn8SoAOqF9Z8BMJFCFAB1QRWh7i0gHlASJFkAAJkAAJkIAjAU6YOoJiNBIgARLIIcD2NAcQfyYBEiABEi
ABEujUBOiA6tTqZeECEqADKiBcm2g6oGxUeI4ESIAESIAEwhDghGkYrpRKAiTQfgTYnrafzlliEiABEiABEiCBXwnQAfUrC34igSIE6IAqQstDXDqgPECkCBIgARIgARJwJPDTTz+pGTNmxH/fffed45WMRgIkQAIkYBJge2rS4GcSIAESIAESIIF2I0AHVLtpnOX1RYAOKF8kHeXQAeUIitFIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAYE6ICqgRKYhZYkQAdUxWqjA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP377ruvOuaYY9Tf//53/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBtoARvYfffd1dlnn13xbDKTI4HWJkAHVMX6wwqogQMHqi5duvCPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1toFu3bpGODj744Ipnk5kcCbQ2ATqgKtbfzjvvrI488kg1adIk/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuouQ1MnDhRbbPNNuq0006reDaZyZFAaxOgA6pi/fEdUBUDZ3IkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkUIIA3wFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiTQQgQWL16s8LdkyZIWyjWzSgJhCbBehOVL6SRAAiRAAiRAAiRAAvkE6IDKZ8QYJGAjQAeUjUrAcx3lgFq2bJmaPXu2euutt6LjggUL1M8//xywpK0hGhN87777rvrf//7XGhlmLkmABGpJAG3shx9+qP773/+qefPmqUWLFrVVG/vOO++oN998M/pD2esevvnmm0hPaPt/+umnWmUX44QuXbqo3/72t2r+/PlN5y2UTjiecFdJKB2458A9Zp3rBErhq164E2HMOhNgO1Rn7dQzbxyncZxWT8tkrkiABFqNAB1QraYx5rcuBOiAqlgTVTqgMBE6dOhQ1atXL/Wb3/wmmtDCpJb+Q1523313deGFF6offvghJoEJuTXXXDP3b7311lObb765OvbYY9Xo0aPVp59+GsuQH4rKPOKII9QNN9wQTehKWWW+w/GGvO6xxx6qZ8+eCS6rrbaa2nXXXdVVV12lfvzxxzLJVHrtwIEDU3W1ww47WPMCBmk6vvXWW63X8GS9CZx++umpOl177bWtmUfdT7ODCy64IHFN0Tqc1y4UlefaJrjKTSs3zk+YMCFR9rQv77//vjr11FNV7969I2eBblv1cYUVVlDbb7+9+uMf/6ief/75BjEhdZbHYfjw4Q35afbE3LlzE20p2lCX8N1330XssnTh+hva87wwefJktf/++6tVV1016ge7desWHbt27ar69u2rRowYob766qtMMXlcdX51/+hqt2aieqIddoT0mgnN6iQtrWbGE6H0m6eDyy+/PC7GbrvtltrGQVePPvpoHFd/wDhh/fXXT73uuuuu01Ezj83oIBSztIz6qBOQnaeTKupFXh5Mu0CeYdM6X+Zxu+22U5hYSAvPPfec9TrImDRpUnwZ7AjtgClbfl533XXVpptuqvbaay/1l7/8RT322GNq4cKFsYysD0XLmxdf561o2+UqV8u3HV373Dq1Q1m6SfutI/t8zV3rN2+MhjK46lbLdO3vXOXqPNuOrjbDcdov1tgK/RFy6qNPcrWvonabVq95ngRIoP0I0AHVfjpnif0QoAPKD0dnKVU4oDCBceSRRyYmBDERCmfR6quvHjug9CQpjh988EFcBlx/7rnnKlxjxsn7vPzyy0c30LanyZuViSfATzrppISDLM5ogQ+zZs1Shx12mNITjros66yzjlpjjTUayglH1Oeff14ghY6LevHFFytMoOoy6SMmNm677TZrxmbOnKl23nnnhms22mgjNeP1GdZreLLeBJ555hm19dZbN+i0e/fuCjZiC+PHj1ebbbZZwzXbbLNNdBNoXtNsHU5rF5qVl9cmNCtX1xsc7777brPoDZ/xFO2QIUMSbexyyy2n+vfvH0042hz+mHiSIaTOsjhgsvPVV1+V2Wn6OxxOJr9tt93WSRZWXJjXlfmMtj0tYBURHIGmfNhRv379YmeU/g32et9996WJUllctQzbMc9uzQR9OKCa1YmZD3xGeZsdT4TSb5YO8FDNyy+/HBfjlltusfbx0Accw1i1KAPk4zdTD1qnaBunT58uL7F+b0YHoZjJDPqsE5CdpRPNznb0WS+y8iDtAnn++uuv1ZlnntkwLkQ+r7/+eoks/o4VroMHD060J2jz8cAXftMB+fnzn/9ceCyNMek999yTu4K2aHmz4tt0o8/l6ahZuVo+jnl9LtKoWzuk9Vzk2FF9vslafk4bo6Fczeq2DjbDcVrSMuvcHyGnPvukUHabJMpvJEAC7UyADqh21j7LXoYAHVBl6DVxbWgHFG6osepF32AgvZtuuinxNCe2SerTp08cB3FNB5Qu1p133pmIc95556mlS5dGsvB0ESYwccONGw2dHo7Dhg3TIhqOUuY555wTvevi+++/j7bCGzdunPrTn/7UIBOrDMoEPLFs5nGfffZJTDzNmTNH7bfffok4mLBslXDvvfcm8o6y3nXXXZnZf++99xLXoLx850gmstr/iPopHaqY8MgKeK+GeQ3aBnNFpLxW1uGy7YKU56tNkHLhhINz3PzDSkc4mtFmme1D1mQYtpfDZKaOv+KKK6qrr746ahs1q2+//Vade
OKJcRzEtTmgED+0ziSHLbfcMndiU5fD5YitXDfccMNEWTEZ+9lnn+VeLifb4RB66aWXoj4BenrggQcScqdOnRrpD9f961//ilZPaT2kOaDQtpv5w4pgrDJAX6kDvuO8loXjqFGj9M/Wo+Tqy26RmOn4QF9bNJTRiZlW2fFEaP1KHcCZbnsABg9VyIc00pzyZvnHjh2bsAk4+DFWcQnN6iA0M+Q9VJ2AbKmTjqgXMg9pdqH1eO211yb0jPqPOphX90444YT4urPPPluLazhiLGa2LVhdjMlxTF7AYQWH6fnnnx+tlDLjYSyPh6fyQtHyyvi+dCTl+upz694O5elH/l51n192jIb8S93W3WY4TktaXZ37I+Q0VJ8Uym6TdPmNBEigHQnQAdWOWmeZfRCgA8oHxQIyQjugzIlRTMi98MIL1tx9+eWXidVQNgcU3l1g3gz/7W9/s8rCBLcZDw6ptFU0Uia2HLEFOKKkYytvIt0mR58zHVBYCWZztOCm0JyoxCQqOLVKwNOhph4uueSSzKxPmzYtjo/tLVqprJkFa/Mf4Vw17QBPAuYF0/mKlT1ZQdbhsu2ClOerTZBy0/Kpy2o6jLIcUAceeGDMF20UtgtJC1hFoXWR5oDCtSF1Jjlk5SOtHFnn4TDSZTSPY8aMybos+s2cbIeDANsrmeHBBx9MyJbbGJptmM0BBQejucJvlVVWaUhDp4d3JOJ3XQa0/6+//rr+ueEoufqyWyRU1gFVRidmQcuOJ0LrV+rglFNOMbOf+HzMMcfEuoWOb7/99sTvti9wDmh7wDFtXGO7tlkdhGYWsk6Ag9RJR9QLmYcsu0Ce4QAy9aw/H3300TbVxuewuk7HhbMyLcj8pPVFGJciTS0TR2wNnbctqJSfV14Z35eOpNy0cmpOrn1u3dshXZ4ixyr7/DQ91OHeLZTNcJyWtMa69kfIZcg+SdqXr7YuSZffSIAE2pEAHVDtqHWW2QcBOqB8UCwgI6QD6pFHHkncuGZtIYIsX3rppXF8mwMK+0WbN8JpNzGQZd5I4hqsCLAFKTNtMIhr8dSemT7eV9NsMB1QDz/8cKoY5NtM86GHHkqNW7cfPvnkE4WtNHT+sd1ilvPBnJCDw4+hcxA4+OCDYxuALbiEww8/PL4GTpOsIOtw2XZByvPVJki5WflEebEVl647aQ6oJ598Mo6DuNdcc00WqugJejhGEDfL8RNSZ5JDmXbUVljd9mM7UziyNcODDjrIFj1xzpxsR3skQ54DCvHxjh+kaXNAmRPEiINVU1lBprfjjjumRpdcfdktEizrgCqjE11gH+OJ0PqVOsDWwWnh6aefjm0TtoCV1nkBDk9tz2nvU0yT0awOQjMLWSfAQuqkI+qFzEOWXSDPWGWk9SyP0ult6hvb5On4sK+0IPOT1xehr9ByccRKq6wg5eeVV8b3pSMpN6+cLn1uK7RDWbpJ+63KPj9LD7qd0vZW9b1bCJvhOK3R6rSei47TQvdHyGnIPknal6+2rpEwz5AACbQbATqg2k3jLK8vAm6zk75So5xoYmmXXXbxTgJPEOGdP/omAk9NZjkfkAFsPYV3HSE/n376aUOesIWSlodj1k0MtjAy49omE5GAlJk1GHz88ccTMvfdd9+GPLqe+PjjjxWcSfjD1idpwZxQQHmwfL+VArZ2MfWQ5oSEw1GvMMPTpQydh8Chhx4a2wB07BLM1XNZWwlBlqzDZdsFKc9XmyDlZuVTM8K2MocccoiaMmWKPhUfsbWXuXXpqquuGm0VF0dI+YB00cZiO7m0EFJnzXBIy6c8jz5GO0uwhRTe16fbHzjD8/ogc3LD9mCAdAjZJoNHjx4dpSkdUNgqDTrS+Vl//fVl9hu+42aiR48e8TW49pVXXmmIhxOSqy+7hWzNFOnnbQMmM1dWJ5DnazwRUr9FdYAVztguU9sDVsblBdi0jp/1XjApp4wOQjILXSeK6qToGM+1XhSpm8gzVl5qPcsVSJtuumliC2tT1/fff398XZZzW+Ynry/Ctri9e/eOZSNvabsZFGVeNH4RHRUtJ/KS1ee2Sjtk2oTr57r0+R197+bbZjhOa7TAuvZHyGnoPknal89xWiNpniEBEmgnAnRAtZO2WVafBOiA8knTQRZuoEM4oHBzqm+gccTqobJBDtyybprNG3ikv91221mTlzKzBoPm05GQOWDAAKtMnydPPvnkBEe5JZTPtELIwl755qRr9+7dFd5HI8OgQYOicsJB0WpllGXh9ySBkBMbSEnW4bLtgpTnq02QcrPymSRo/4Z33pltbN5T5nYp9rMhdeabg1kC8x1Nb7zxhho/fnyC0YQJE8zoDZ/hFBgxYkT0h3c2yODigMKLqyFj5MiRicsnTpyYyAuc8y7hr3/9a+K6yy67zHqZ5OrLbpGY60S7LWNldQKZvsYTIfWLfBbRAeKb9Qx1Ocu5h3dmaCfASiutlOtMNXVRRgchmYWuE0V1UnSM51ovitqFOX7F+zTxYJbZ1qe9Dy6UAwoc5Xujfv/735smlvhctLxF4hfRkZRbts9tlXYooQzHL2Zb5PtBoSJ6MG0fNl/1vVuRvLqg5TitkVJd+yPkNHSfJO3L5zitkTTPkAAJtBMBOqDaSdssq08CdED5pOkgK5QDCoMq84Y5azsQh2xGUeTALetm8rXXXkuk72MFFCY0zTJhZULIgAkn8+l3rCKzvdA8ZB58yJYv1L788ssTYs2bejx9ytC5CISc2AAp3+2ClJd1g1ikTZBys9ovFwuQjgkfbaxON6TOfHPQecZRv5tjk002iU7jyf3lllsubrex7UuZ4OKASpOPlXxm/zF9+vS0qInzcqu2tIlfydWX3SIzrhPtiYz//y8+dBJiPGHLaxn9Ql4RHSC+XOGMSf60YL4zI28LNCnDhw6kTP29DLPQdQJ5LKKTIu05ZLvWiyJ5gFxzEv6OO+5Qb731VrxCHG0I0rU5K0M6oL744guF99DpNgxOCtvDREWZF41fREeSe9k+t1XaITAtGurS53f0vZtvm+E4rdES69ofIaeh+yRpXz7HaY2keYYESKCdCNAB1U7aZll9EqADyidNB1mhHFADBw6Mb1Rxw5r30mKHrDZMJGTdTN58882J9PHOJVsoMhjEljf65hvHtPRxA4WtdfRf2hPrtvyY5/A+F50ebvafeuop8+eW+YwtDXr27BmXBTan7QEOtS233DL6bY011lDY7oehcxEIObEBUrIOp9VLxHVpF6S8rBtE1zbBNZ9wOvft21etvPLKyra9m2kZW2yxRVynfLWxWn5InUm+WfrS+XE5fvTRR/EE6fDhw+NL9t5775gT3glVxolfZrK9X79+cT6grwULFsR5zPqArWl1P4AjthK0BcnVl90iLdeJdpkvXzoJMZ6QecX3MvrF9UV0gPhyUh8rgdMC3oWn7WDatGlp0RrO+9JBg+D/f6IMs9B1AlksopMi7Tlku9aLInmAXNMBhfeRIMiJUdtDVSEdUMgDVqNoG8TRtjUs4hUtb5H4RXQk5dr6miJ9bqu0Q5HBFPxXlz7fZYxW1MY60mY4
TksaYp37I+Q0dJ8k2ySf47QkaX4jARJoNwJ0QLWbxlleXwTogPJF0lFOKAfUKqusEt+orr766o65yY4mB262m0ktATfnPm+UMWm51157xTIxCWh7TxXSnzx5chwPeSi6qmfhwoXqjDPOiGXA+YT3irRykNu36O3CsL2M1tOYMWNauYjMewqBkBMbSNJ3uyDlpd0gFmkTXPOJCT1dH9COZAVza0s4b32GkDqTfLPa8SJluuKKK2J22KZJBzmhhS1xmg1lJtvNyWq06UUcYWZ/CvuwvctKcvVlt2Bl5t228iKNpy+dmOX3NZ6w5bmMfiHPVQdm2ttvv31st+CMG0gZcA7lhu7x7jBMmrsGXzpIS68MM9OuQtQJ5NlVJ0Xbc8g2859VL1zzoBmbDqi///3v0Wk8nAMHuu4fcJQPKeCdovp3n++A0vnCyjstH0dspWULRcvrGr+ojqRcW19TpM9tlXbIppO8c3Xp8zvy3g2MfNsMx2lJy6tzf4Scmm16iD5J2pfPcVqSNL+RAAm0GwE6oNpN4yyvLwJ0QPki6SgHgy3f74D64YcfEjepG2+8sWNusqPJgZvtZhIScMOOgaO+Ud5tt92il5jbpEuZaYNBc9AMuVdeeaVNXHSuqAMK25g8+eSTCmnvtNNO0RPuOu877rijwpYjrR7w8mbzyTI48N577714i8Gtttqq0IRsq/Nop/yHnNgAR1mHy7YLUp6PNsGWT2ylhifW8YcndIcNGxZPMqP+ZzmgQrWx2i5D6kzyTdOXzovLERPyffr0idp8uUWdfGfIRRdd5CLSGqfZyfZly5bF/RF0W9RhCKeD7hNwnDt3bkP+JFdfdouEzEmZrIl2M1O+dBLa1s08N6tfLcNVBzo+jldffXVCt9KpgDjmeynkFramLPnZlw6kXPN7s8yqqBPIp6tOiozxdPld64VrHrRc0wF144036tMNWzb2798/4bAcN25cbEshHFDnn39+LB/tEJz7tlC0vK7xi+pIyq1zn2tybLZOmTKKfq5Dn9/R925g1ko2UwedFbGzOvdHKEcVfZK0L5/jtCK6YFwSIIHOR4AOqM6nU5aoGgJ0QFXDOU4lhAMKE1TmZNm2224bp1fmgxy46YlLPBX58ccfR5M0p5xySiJtPDGK69KClDl06NAo/pw5cxTe0YEnSnHTqsuDbfUwYZQVijqgHn744Vi+TkcfsVKoMzigwGvChAmJcprvE3jllVeykPK3FiYQ8iYZWGQdLtsuSHk+2gRbPnUdTztmOaBCtbHazELqTPLV+tJpN3PEpL3mqLesMuVgW0P9OxzhzYZmJwalvnr37l0oCwMGDIjzj3LMmjWr4XrJ1ZfdIiHXiXYzU750Itn5Gk+YedWfm9Wvvl7qIG1yScfH0XQ2QLcXXnih+XP0eciQIbH+MTZxDb50kJVes8ykXkPUCeRb6qQj6oXMQ55dmDYxatSoGD8mcM0Vc7CXG264If790Ucfje0khAPK3BYaaY8YMSJO2/xQtLwyvi8dSbnIc9ZfR/a5Jr9m65Qpo+jnjujz63bvBmatZDMdobOidmXGr3N/hHxW0SdJ+/LV1pmc+ZkESKA9CdAB1Z56Z6nLE6ADqjzDQhJCOKAwQWLe5GHfeB9BDtyQxlprrZVYMWSmi5Vdr7/+embSNpmmDPPzSSedFN2cZApUKnKG4UZd/2Xd1ELWpEmTFFYAbbLJJtaywFFz3nnnqaVLl+YlXfvfd9hhh4RtgO8f/vCH2uebGWyeQMibZOTKVofLtAs2eWY7YH52bRNs+cRE4gUXXBD94cnyww8/PFH/s9oN7KNv5iOvjcW2ntjCyfZna1dC6kzy9eGAOv744yMeWPmK9+rIgPbT5GVu0SfjZn1vdmJQrsJab731spJp+G3rrbdO5P+DDz5oiCO5muWVn4vYLRJqxgHlSyehxhMNADvgHVA6D+YKt80220yfjo54h6Lmv+uuuyZ+y/viSwdZ6dS5TiDfdagXMg9FHFDy/aUzZ85MrPCHbUA+wvjx4+N2IoQDCg4nsy3B1sq2ULS8Mr6ZhvxcpO2Scsv0ua3UDtl0kneuyj4fOi0zRkNZpG6lnZjfO8pmOE5LWl2d+yPktNXHaUna/EYCJNBuBOiAajeNs7y+CNAB5Yuko5wQDii8n8Ic/Jd54twshu2GY4UVVlBdu3ZV3bp1i7Zg2mOPPdTJJ5+sXFfUSJnrrLNOtOIJT5zrSR9dlrPOOkstWbLEzJL3z3jCFU9hXXLJJQqrrXTaOOJJqVYPL730UqJMK620Ujx50uplY/7tBEJObCBFWYdRV8q0C1KerzZByrU5XvBODV3nsxxQaId0PBzz2ti11147Ed+81nzCXmswpM5cOOh8uBwXL14ct5X77bef9RLzyVuU3Vw1YL0g5WSzk+3z589P8Id9FglSfwsWLGi4XHL1ZbdIyOwL0T/lBZ86CTWesJWhWf1qWVIHeY4GfR3eE2nWyXnz5umflLmqBe9MdA0+dZCVZrPMqqgTyLfUSUfUC5mHPLswV0CNHDmyAf/pp5+esJdjjz02ioOtnLUdhXBAyS34pk6d2pA3G/O88ko+vnQk5Zbpc1upHbIqJedklX0+bLTMGM1mY3W0GY7TfjW6uvdHyGkVfZJsk3zZ7a+k+YkESKBdCdAB1a6aZ7nLEqADqizBgteHcEAhC+aLV3v16lUwV/bocuBmu5m0X5l+Vso0b5Rx84Abe31DjyNWVeF9RlUEPOUOB42Z/jPPPFNF0kHTMCdT991336BpUXjHEwg5sYHSyTpctl2Q8ny1CVKuLZ8YPHbv3j2q81kOKJR7tdVWi9uGnj17Zioa7wYxt6HTxY8QrQAAIABJREFUbcpee+2lbFt6hdSZC4fMwogf77nnnpjDuuuuqw466KCGv3322SeOg7IXXUmik2x2sh2rWDRzfcSEpkvACjVzu1Ks8sKDCjJIrr7sFukUdUD51kmI8YTkh+/N6lfLytKBjmM7TpkyJWEfY8aMiaMNGjQo+g0PpGASzzX41kFaus0yq6JOIM9ZOik7xnOtF1l5sHE1HVC2LZ/hgNb9hG5PXnjhBfXss8/GdhTCAXXiiSfG8pEuVnjYQtHyZsUvoyMpt2yf2yrtkE0needaqc9HWaRuffV3Um5Zm+E47RfLq3t/hFxW0SdJ+/Jlt3n1m7+TAAl0fgJ0QHV+HbOEYQjQARWGa6rUUA6ozTffPL5RxYQZBnZlgxy42W4MiqYhZZqDQcjCC9CxNZ6+yccRW+tVFW6//fZE2thKotUDnvjSPOmAanVt5ue/mYmNww47LLaRs88+OzMRWYfLtgtSnq82QcpNy+eiRYsUJhgxkMwKso3FRF1WePvttxOODKwaRVq2EFJnrhxs+bKdw0MBuj1xPaJPsq0issk3zzU72Q4Z5mQ18on3FrqE2bNnJ8rXo0cP62WSqy+7lXl3WQHlWyfS1n2MJ2wQy+gX8vJ0YEsT5zDOMO0D9Q8BW2cuv/zykf6xfVGR4FsHaWmXYWaWOUSdQJ7zdFJmjGfmP6te5OVBsjUdUFdddZX8OfoOJ6X
Z3m2xxRbRds76XAgHFNLQ8tGGpvVRRcubF79ZHUm5vvvcurZDVoPJOdlKfT6KInXrq7+Tcn3bDMdp2e9h0+0Ljs2M08r0R7Ars01HHlppnJZTxfkzCZBAJydAB1QnVzCLF4wAHVDB0NoFY7CFiQrf4YADDohvVDGIy3sXk0v6rjcGLrJ0HClT3sQg3nPPPZcoC55Ezpps0LJ9HPE+E3NAPnDgQB9iO1QGHVAdir/yxPGOL23DWMlhW70hM4VVLPqayy67TP6c+C7rcNqEQeKijC9Snq82Qcotm0/Zxs6YMSOjVL/81Lt375hr//79U+OH1FlRDph8fOyxx9TEiRMb8mvu2b/bbrup//znP6l/F154YVx22BYmKoqGMpMbyJ+2aRyxWsElyNUxerstea3k6stukY45KZPX94XQibR1H+MJyQ/fy+gX17vowJYuzh111FGxfYA37B5b7mmbwTaSriGEDtLSLsMsdJ1Anl100uwYz7VeuOTB5OvigPrpp5/UNttsE9sH7OSUU06Jv/t2QMFhb67E7NOnj5nlxOei5XWJ34yOpFzffW5d26GEMhy/1KnPd8my1K2v/k7K9W0zHKeFHaeV6Y9gd6H7JGlfvuzWpc4wDgmQQOcmQAdU59YvSxeOAB1Q4dhaJYdyQOF9SXriBMfbbrvNmn6Rk3LgVvbGAGlLmbbBIOJhpY5ZHmxFUlUwt3DwtZ1hVXm3pUMHlI1K5z138cUXJ+pO3gQ2SOy4447xNXfccUcmHFmHy7YLUp6vNkHKdcnnpEmTovcVTZs2rYEBVoaZbdItt9zSEEeeMFeS/P73v5c/x99D6qwoB7zPT5cTDnkzXHrppfFvee/HMSd1Ie+II44wRTl9LjO5ccEFF8R5RfonnHCCU5pyFezYsWOt10muvuwWiblOtCNuCJ2EGE/YIJbRL+S56sCW9kMPPZSwDzic9t577+jceuut5+S413JD6EDLlscyzELXCeTVVSfNjPFc64VrHjRbs61KWwGFuJjMNp1Cup3E0bcDStrnlVdeqbPbcCxaXtf4RXUk5Zbtc1ulHWpQiMOJOvX5DtkNVq992wzHaWHGBGk2UqY/gszQfZK0L5/jtDQmPE8CJNAeBOiAag89s5T+CdAB5Z9ppsRQDig8iW7eDO+www6Z+cCPn3zyiVpjjTUUntL/8ssvG+LLgZvLzWSDEHFCykwbDM6cOTNRHmwNgC2tmg1YBaL/smQgDl7Wq1luv/32WdFb4jc6oFpCTd4yKSfQX3311UzZeLJ7rbXWim0+771nsg6XbRekPF9tgpTrks9NN9004nDRRRc1MHvzzTdjRmgftt1224Y48sSWW24ZX7PTTjvJn+PvIXVWlAPegaLbP/O9SbATvPMJv7lu1WKuAEPft2zZsrjMLh/KTG7MeH1GXA7keZVVVnFK33wiF9smpm0dKLn6sltwcZ1oD6WTEOMJm77L6BfyXHVgSxt6hR1rW8eKBP09bxWoKS+UDsw0zM9lmIWuE8inq06aGeO51gvXPGiurg4oxD/55JNjm9G2g6NPBxTayQ033DBOB+P0rPeRFS2va/yiOpJyy/a5rdIOaTsqcqxTn++Sb6lbX/2dlFvWZjhOq3acVqY/gt2F7pOkffmyW5c6wzgkQAKdmwAdUJ1bvyxdOAJ0QIVja5UcygGFxPCCe/OGeNasWdY86JOYZNHxP/jgA306Ps6bNy/+HfGwpVLZIGWed955qSLxxLzOH454sb0tfPPNNwr78+s/3IDIsN9++0Wy8FJjc1JVxoNTzkzzjDPOkFFa7vvqq68el2mPPfZoufwzw8UIyK1zbr755kwBkydPju1jueWWU3LVi7xY1uGy7YKU56NNQJ6l3LyJjXfeeSfmYHsRPWSi/pjtQ96WQGbdy3JAhdRZUQ7bbbddVEZMxpvbNz799NNx2V23kcWqI5PXU089Jc0p87v5Im3IAaciYcCAAYn001YzaZnmRDTSy7JtydWX3SIvrhPtIXXiezyhGZvHsvqVOjj//PNN8bmfd91114R9aFvFlnquIaQObHkoyyxknUB+pU46ol7IPOTZBcbKWvfDhg2zYY/PffXVVwrjSB1fH7McUDI/eX3RFVdckZB/ww03xOnbPkj5eeWV8X3pSMrNK6dLn9sK7ZBNJ3nnquzzs/qxvHzq36Vu62wzHKf98t6nKsZpZfsj2FfIPimU3ep6wSMJkED7EqADqn11z5KXI0AHVDl+ha8O6YDCDbC+GcYR2z+lvbR37ty5iUkumwNKTsbhyc+yQco89dRTU0UiT127dk2U6dFHH22Ib06go9znnHNOQxztgMLvI0eObPhdnzj99NMT6eVNMOvr6nrE09n6qW6UHS+1ZujcBH788UfVr1+/2I6xuunbb7+1Fhr2ceCBB8ZxBw8ebI1nnpR1uGy7IOX5aBOQXyk3bzLspJNOijnceuutZpHjz1gdhnqk/373u9+lPpn+1ltvxfEQP8sBFVJnRThgtZwuG/oqM5iT9a46HzVqVCwPcvHenSLhxhtvTFw/bty4IpcrbKtmbpm10korRU/c2oTgZeXmlok9e/bMfFhBcvVlt8ibqwMqpE58jydszMvqV+oA/XeRIO0TNuo6aafTCakDnYZ5LMssZJ1APqVOOqJeyDzk2cWLL74YtzPYEiovoH/Q7aQ+Zjmg3n333UT8tL4I/TGcBVomjptttlnqOF7ns2h5ZXxfOpJy08qp8+3S57ZCO6TLU+RYZZ/v2l9n5V/qts42w3HaL2NUV73LfrDIOK1sfwSbC9knhbLbrLrC30iABNqDAB1Q7aFnltI/ATqg/DPNlBjSAYWn1YcMGZK4ecXkyOzZsxN5glOlR48eiXg2B5T5Qm7cCG+wwQYKN01lgpSJlzpnBaxAMm/G1157bSWfTi7qgIJDBvvrm0/3oxMZMWJEwllz2mmnZWWtJX6bMmVKgh/K7vJOoJYoHDOZSkDegGNbMUyCmQFOqYMPPji2j5VXXlnhieS8IOtw2XZByvPRJqAMUm7aZBgcD9dff33MAe3NAw88YMWANuO4445LxMWKITibzADWAwcOTMTLckDh2lA6c+EAW3j22Wej7Vh1ewsHjA7goc/jCAenS5CTqdjSzrZCNU3WQQcdlEi3mXcBYjWB6YTq3r17VFbz4QzYvfkeNLwHMG/rSsnVl92ChYsDKrROfI8nbDouq1+pA9TFIuH9999P2BdsG090u4bQOrDloywzyAxVJyBb6qTqemHLQ55dmJOoe+65pw174hzGwXiYx2wTsxxQcJybcc2+CCv40f5cd911qn///ol4hx9+eOoDDmaGJPO88sr4vnQk5ZrlNPNbtM/1eV9j5kN/9lGntKwix6r6/LJjNJRJ6rbuNsNxWjXjNF91J1SfFMpui9RzxiUBEuicBOiA6px6ZanCE6ADKjzjRAohHVA6oZtuuinhSMEqIixxx7YE0vGkb4pNB9TSpUujF5tjEk7/ro94Onj8+PE6KedjlsxBgwYp7PVuC3g3FSYMdfo4ogxYxQSZCEUdUFoWVi8cdthh0dZ+cG
zp8zhiohMdS6uGhQsXRozMLcB0+TbaaCN1yy23lHYmtiqbdsn38OHDFbbU03qH8xE2j60s0R6YK+PQLr322muZaLLqcDPtQpa8Mm1Cmly82w31wfzDe4E0H/M4YcKETBZof0x++AynzP777x9NTJq/abl5Digk6FNnaRyQHzhk9J/OnzzCVvAuEmxJJVeiIu4BBxygpk2bZuX03nvvqXPPPdd6Hdrvu+++23qdPvnSSy+pI4880qqboUOHqjfeeENHdTpOnDhRrbnmmgl5yy+/fLQqDZO+pr769OmjkP+0kMW1jN2a6WU5oKrWSdnxhFku/bmsfpvVgU7fPPbt2ze2ixVXXNFpwr9qHSC/ZZmZZcZnn3UC8prVicsYT+c9q140kwc43rHiyZSLtu3YY4+NVnLpdG3HV155JbYbXGNzQMHJjb7CNpaGMx5/st3Fd/RV1157beIhKVseijIvGl+nmaejNLk++9w6tkOaT5ljVX1+M2M0lCtNt7DTMv1dmlyfNsNxWrhxmu/+CLbms09Ks6+ydlumrvNaEiCBzkWADqjOpU+WpjoCdEBVxzpKCTe6Rbd4aSaLmBg85phjFCZXzKe/zZtdOHb23ntvhZdywmGhA242067B9VtttZWO6nzMk3nppZemyrrvvvsabtIxYahfDu/igMKqn7vuuityONkmA1AuTEjiiVPIa/Ugt/8y9Y7PmPjAAJ2hcxOAYxlPKJoTbOZkO1a54MlD1M+8kFeHi7YLefKabRPy5Mq6YPs+derUPBzRhCO2KoFDF+2lyRWOP7S/L7/8sjr00EOj9gurUV2CL52V5YAnnD///POGttfkdfTRR1uLZK4oMOPrzxtvvLH1On3ykEMOyUz3zDPP1FGdjygLtg3CygXtmNU6g/5QXqxogXMhK+RxbdZuzTTN+ipXrHaETsqMJ8xy6c9l9VtGBzoP+oj35Wi7xEoLl9AROijLzFYuX3UCssvoJG+Mp/OeVS+ayQNW02vdyyNWJOWF448/Pr4ebb0Mc+bMiX+X8vEdY06sPF533XWjFZhYdf/YY48lxuNSpvm9KPOi8c20snSUJ9dWdnnOpc+tWztk8inzuao+v+gYDWXK022z/V2eXGkftu8uNgPHMMdp/sdpIfoj2JuvPinPvpq12zL1nNeSAAl0LgJ0QHUufbI01RGgA6o61lFKVTmgzGItXrxYTZ8+Xb3wwgvRSiPcFGvnjRmvnT6DCbbfwQ0ttlr66KOP6JBpJwNos7JiOy289w1tAF64ji3ivvvuuzajEK64ixYtipjOmDFDffbZZwrv8tABDg20MXmODR1fH6kzTcL/ETcNeDfA22+/HW3pWlQ3/nOUlJg30Z6MXe23zjaewIQXHjrBn3T2VUu2Y1Ore50AnbrVCzzEg/Z+/vz5Hau8Nky9s7VDUCH7/LCGzHFaWL6+pbdCn+S7zJRHAiTQOgTogGodXTGn9SJAB1TF+ugIB1TFRWRyJEACJEACJEACTRKo20R7k8XgZSTglQDrhVecFNYkAUw6vfjii9EDbHiIzfXv66+/bjJFXkYCJEACJEACJFAnAnRA1UkbzEsrEaADqmJt0QFVMXAmRwIkQAIkQAItRIAT7S2kLGa1MgKsF5WhZkIZBGbOnJm5taJtuzicmzRpUoZU/kQCJEACJEACJNAqBOiAahVNMZ91I0AHVMUaoQOqYuBMjgRIgARIgARaiAAn2ltIWcxqZQRYLypDzYQyCNABlQGHP5EACZAACZBAGxCgA6oNlMwiBiFAB1QQrOlC6YBKZ8NfSIAESIAESKDdCXCivd0tgOW3EWC9sFHhuaoJ4B2Ps2fPLvy3ZMmSqrPK9EiABEiABEiABAIQoAMqAFSKbAsCdEBVrGY6oCoGzuRIgARIgARIoIUIcKK9hZTFrFZGgPWiMtRMiARIgARIgARIgARIIIUAHVApYHiaBHII0AGVA8j3z3RA+SZKeSRAAiRAAiTQeQhwor3z6JIl8UeA9cIfS0oiARIgARIgARIgARJojgAdUM1x41UkQAdUxTZAB1TFwJkcCZAACZAACbQQgXvvvVeNGTMm+vvuu+9aKOfMKgmEI8B6EY4tJZMACZAACZAACZAACbgRoAPKjRNjkYAkQAeUJBL4Ox1QgQFTPAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAl4JEAHlEeYFNVWBOiAqljddEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvUPGDBA7bnnnmrzzTfnHxnQBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtIGa28CWW26pNt54Y3XCCSdUPJvM5EigtQnQAVWx/jbYYAO10047qV69evGPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1tYP3111frrLOOOu644yqeTWZyJNDaBOiAqlh/3IKvYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEOAWfCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRQawKLFy9W+FuyZEmt88nMkUAzBGjfzVDjNSRAAiRAAiRAAiRQPwJ0QNVPJ8xRaxCgA6piPXWUA2rZsmVq9uzZ6q233oqOCxYsUD///HPFpa93cmD0/fffR3/1zilzRwL1JPDNN9+oefPmqf/973/qp59+qmcmmStvBNBmfvjhh
+q///1vpPdFixa1Vb/yzjvvqDfffDP6Q9nrHupcPzE26tKli/rtb3+r5s+fX3eUzF8NCdC+a6gUZslKgPdkViy5J+tcx3MzzwiFCXCMyTFmYaPhBSRQEQE6oCoCzWQ6HQE6oCpWaZUOKEwKDh06VPXq1Uv95je/iSZ3MMGj/5CX3XffXV144YXqhx9+iElg8njNNdfM/VtvvfXU5ptvro499lg1evRo9emnn8Yy5IeiMo844gh1ww03RJObUlaI72+//bbq1q1bzObrr78OkUwQmQMHDkzV1Q477GBNE/pK0/Gtt95qvYYn600ATmXUyTS94vy6666rNt10U7XXXnupv/zlL+qxxx5TCxcuLFWwyZMnq/3331+tuuqqUf3R9ahr166qb9++asSIEeqrr75KTWO33XZLzfP666+feh1+QPujy3vWWWelxr3lllvieDq+Ph599NHRdWgv9TnzuN122ykMMtPCc889Z70OMiZNmpS4LFQ76CrXLJf8PGHChERe0768//776tRTT1W9e/eOnAW6P9HHFVZYQW2//fbqj3/8o3r++ecbxJx++umpvNZee+2G+DiBPkrmV3+/4IIL4mvyOAwfPjyOW/bD3LlzE/3qVVddlSvyu+++i7jpvJc59uzZMzc9RChbPyEjj6suhx4TNNN/awcU7Ajp2UJePi6//PL4sjrUZ5u9oU3QvGzHZ599Ni5DkfLiorz4Znp9+vRRAwYMUIcddpi6/vrrM8daeXI1d9p3rLqGD61o3yHHFK42pUHmxde2XbQNcpWr5duOrn1nM/dkHVGnNHPbMaRN2NLDOR99WLuMM13tuc71hGPMX2pCM2NMXFl1m+Gjfoay27Q2hedJwAcBOqB8UKSMdiRAB1TFWq/CAYXBx5FHHpmYHMOkICZrV1999djJoicMcfzggw9iErj+3HPPVbjGjJP3efnll48mtW0rH5qViaehTzrppISDLM6opw8//vij2nrrrRNlbSUH1MUXX6ww2S/1A2fDbbfdZqU0c+ZMtfPOOzdcs9FGG6kZr8+wXsOT9SaAOvbnP/+5cL1dY4011D333FN45QpWKcDRYNod6mu/fv1iZ5T+DW3DfffdZwUIh6c5OaevwUSPnti0XYjJHB0Xx4033tgWLTr373//W+2yyy6J+LhmtdVWUw899FAUB3X+zDPPTDiitXxM0KYFrPgaPHhwQjYc/nD+4zczhGoHm5Wry4fj3XffbWa14TOeRB0yZEiiX1luueVU//79I8en7SEHOJtkeOaZZxraW6TfvXt3hbbMFsaPH68222yzBGNcs80220STU/qaLA5wur766qs6aukjHE4mv2233TZXJp7eNq8p8xmO3qzgq34ijSyuWWUo0n+bbQAmI2whKx94mObll1+OL+vo+rznnnta7Q2TSnDQgY3JDvUH47aPP/44LkOR8uKirPhmWrbPyA/aP4yHZMiSa3KnfUtyv35vVfsONaZwtSlNMCu+zZ71ubw2qFm5Wj6OeX0n0mj2nqzKOqVZZx1RllA2IdP12Ye1yzizWXuuQz3hGDNZA5oZY0JCVW2Gz/oZym6TRPmNBPwSoAPKL09Kax8CdEBVrOvQDihMumDVi745Qno33XRT4gl+bBmEp191HBxNB5RGcueddybinHfeeWrp0qWRLEwQYTIPk6xyImXYsGFaRMNRyjznnHOi9z5g67t3331XjRs3Tv3pT39qkIkn7kOFq6++OlFO8GglBxS43HvvvQ1luOuuuzKRvffee4lr4Ezg+zcykbXEj9C7WbexQgQ3dhgowSGCSdrzzz8/egrfjId2Y9asWU5lnDNnjtpwww3jdLDKEqupzHqD7zhvpjFq1CirfNR7Mx4+T5kyxRpXn4Qsec2XX36pf7YezTxjwnf69OkN8a699toGuWhH0ybFtYATTjghvu7ss8/Wp63HUO2glAuHDh4IMP8wwfz5558rtNMmv6xJNGwvh4lmHX/FFVdUaDfRH+jw7bffqhNPPDGOg7g2BxTi4zo4PrU8HOGYygp4h4p5Dfowc+Wuea3ksOWWWxZ2sJry5GdsX2vaEvIPe/rss89k1MR3OTEAZ+1LL70U9YHQ0QMPPJBgMnXq1Eh3uO5f//pXtHpKM8tyQPmun7oQkqvP/ttlgj4tH3BO2h58QfyOqM9bbLFFrr2dcsopCV2j/UgLkntWeSFDxv+///u/aFIK9Q59AdqyadOmqeOPP77h4ZWslaRSrswH7TtNgyrxkEVeX5LH2UylCvsOOaYoUlabbftqg2Q+fPWdZe/JqqpTpk25fA5pE0g/RB/WTuNMac91ryccYyZrXbNjTEipos0IUT+R91B2m6TLbyTghwAdUH44Ukr7EaADqmKdh3ZAmZOEmKB64YUXrCXERK25GsrmgML7LfRkF45/+9vfrLIwcWjGg0MqbRWNlIltwGwBNwrSsZU3QWmTk3cOqyjwFL+Zf3w2J9LzZNTldzxhaZbjkksuycwaJqB0fKw2yZu8zxTGH2tDQNaxtHoLZyO2n9M2gCNWBGVtl4dCwoFhrkZZZZVVoncA2QDgvXP4XaeBSfrXX3+9ISpkYks3HQ9HcyVAwwVKKawwMOPjc95WOGYdwdZTtgAHnZSL73qrPts1OIdt/vR1Y8eOTYsWnZc68tUOSrlputeZMx1GWQ6oAw88MC4b2mVsuZEWsPWe5pDmgMK1++yzTxwP8fEEZF7Yb7/94muwGistSA5Z+UiTkXUeTiNdRvM4ZsyYrMsSEwNYtYr+xwwPPvhgQq7cwtBss9McUCHqp86j5OrLbiG/iANK5gPOnLTQEfX5jDPOSMtOfB5thGk7999/f/yb/FCkvLhWxs9qB+REMuwKDmpbkHIld3Pii/adJNiZ7DvNnpoZU+TZVJJio237aoNkPtLKqPPj2neWvSerok7pMhU5uvJqxiZC9WHtNM6U+ql7PeEYM1n7mh1jQkroNiNU/UTeQ9ltki6/kYAfAnRA+eFIKe1HgA6oinUe0gH1yCOPJCY0sraNQrEvvfTSOL7NAYWtYswJkqwbMvNmDNfg6XhbkDLTBsW4FiuuzPTxHhCfAYMobJtkpqE/t6ID6pNPPlHY6kyXAQ7GrEntThTiAAAgAElEQVTdY445Jo4Lhx9D5yAg61hWvUWJMTmvbQbHrCfxEd90tCA+VmZkBTmpvuOOO1qj48l7Mx94t1JagF3bHMfm+4Bs1+J9TjqNiRMn2qJEq8B0HHmUDgFTALYx1PGffvpp86eGz1JHvtpBKTdP9x9++GGc5zQH1JNPPhnHQfmuueaahvKYJ/B0PyaxETfL8XPwwQcn5Joy0j4ffvjh8TVwdKUFycF336H7u3XWWSexkvCggw5Ky1J03pwYQPsrg6wrNnvDuyzANs0BFap+Iq+Sqy+7hewiE/QyH9gyOC1gVaeul/Jo46vlhKrPWj6c5WZ+sGI0LRQpL2TI+HntgBwHpbXpUq7kTvtO02Dnsu88eyoypsizKUlUxvfVBkm5eWV06Tt93JNVUackY5fvRXkVsYmQfVi7jDOlfupcTzjGbKxx
zY4xISl0mxGyfoay20bCPEMC5QnQAVWeISW0JwE6oCrWeygHFJwpeOePntDASoYs5wOKjadcd9111+jdKJ9++mkDCWwnpOXhmHVDhskTM65tcg0JSJlZg+LHH388IXPfffdtyGOZE+bWJRdddFEirVZ0QIEFJuBNPaQ5IeFw1CvM8IQmQ+chIOtYVr1FqbG1mVx9lLZyEltlrrrqqrGNrb/++rngMEDr0aNHfA3s85VXXmm4Dg4h03Yvu+yyhjj6xFNPPZWIq69Lc27huoULF8Y2D8cB2kxbMN8tJVeIbbrppontTM3rsYJB5yNtAlfHlzry1Q5KuXm6R36wNcshhxxi3fIQ25qZ27VC97CXvIB08c4tbCmXFg499NCYF9oil2CuYMva5rAZDi7pIw76Ve0swVaWeEeh1jseAMjqd82JgYcffrghSRcH1OjRo6P0bA6okPUTmZVcfdktZGumYJm3RVmRfHREfXapd3BSa7vB8YknnmiwB32iSHltesrLz2mnnZbIS9pqrLx80L61xhqP7WTfRcYUeTYlSRaJX+QeQsrNqzPIV1bf6eueLHSdknxdvxfl5WoTofuwdhlnSv346qul3LL1hGPMxhpXZowJaSHbjND1U9qXL7ttpMwzJFCeAB1Q5RlSQnsSoAOqYr2HckBhwticzMDqobJBDgSyBprmJA/ygZUGtiBlZg0uzCcMIXPAgAE2kU2dw/uP9GohPIkvt+lpVQcU8m06CLp3767wbhYZBg0aFNkLJn7lNlAyLr+3FgFZx7LqrS6Z3Ibp97//vf4pcZQ373krjvTFf/3rXxPtk825hJsuc1UTVnqkBWxxhTYBjnbzXSq43nwvkXn9+PHj4zxg4igtmG0Z3q0GJ73Ztqa9xyqUA6pIO9iM7tM44Dze82eWXa54yLo277dWdUCZ72l64403lGlXYJW1DSRsc8SIEdEf3nkgg4sDCi9+hoyRI0fKy1XI+onEpH357L+rmKCvqj67tLl1ckCdfPLJiXpuezeei/5p3w1VMj7RbvbtOqYo0qa42GAMXCnVkX2nr3uy0HXK5FXks9SbS5vnYhOh+7B2GWdK/fjqq6VcF71n2RXHmI10yowxIS1kmxG6fkr78mW3jZR5hgTKE6ADqjxDSmhPAnRAVaz3UA4odNLmRGHeFlAuxZYDgayB5muvvZZI38cKKEzumWXCU/o+Ap5M3H777SPZ0Me8efMUtuIx02pVBxT4mCu7UKbLL788gc28Mc6aiE9cxC8tQ6BIvdWF+uKLLxTez6TrAByTNsclVp3oODimTVRqufooJ1vTHFzm+xLgTMLTdraAlVdIHyuUsJLEzJNtdRVkYGtBHS8tDuKZDqg77rhDvfXWW/HKKVyPNsO2QiOUA6pIO9iM7m189TnpOPTRr2jZreqA0ja6ySabREXBk92m4xRbpzQbXBxQWbJD109pX1mTA0XsFmWqYoK+qvqcNVbS+pNtYkeugEJ7rNvGLCd+Ef3rcppH2neXiLOt/zA5FeHcEf2Vi327jimKlBWMisQv0gZJuS5lNHUmP4e4J5Np4HvZOmWT6XKuGV4uNhG6D0PZdB+ONq+zjjOlfnz11VJu2XrCMWZjbdP2GWKMidTKtBmh66e0L19220iZZ0igPAE6oMozpIT2JEAHVMV6D+WAGjhwYDyBgEH1V199VbpkciCQNdC8+eabE+lfd9111vSlzKzBxX333ZeQmZY+nF8rrrhi/GdbXWFmBisY9GQL8o0wefLk+Bx+a2UHFCbte/bsGZcHNqftAdsdbLnlltFva6yxRrRU32TDz61PQNaxtHojS2q+Hwl1YMqUKTKK6tevX2xXiLNgwYKGOLYT2O5T1zkcsfrQFvDuODOe7R0tWL2o48DpI8tra3vgdMY70XBdr1691M8//2xLPjpnTuhhr3MEedNlc7CHckC5toPIp2Rh0z3K3rdvX7XyyitHjveogCn/tthii5g12Ol2JCV6odOt6ID66KOPYkft8OHD4/LuvffeMac111xToZ1tJpSZGEB6oeuntC8f/bfmVIUDqqr6bKt3upz6WBcHlHyiGU79tFBE/zYZtO+wDqg62Tf07zKmKGpTReJ3ZN8Z4p4sRJ2yyXQ5J/Xg0ua52EToPgx5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy/TDoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFug/lgFpllVXiCTBMtPoIciBgG2jqdDAhqyeFcbRNXiOulJk2uMAE3l577RXLxIS17T1VkCmdR1mrevDuI7313jbbbBO/BwbvbDHz38oOKDCRW13orbOwBZEu55gxY7T6eOxEBGQdy6q3ZrHNFUKwEdv7e8xJYqySKjLRbrZRkG97V86M12fE9ok4l156qZnF6PNNN90UxcGKLWxHhrDxxhvH12FLTRlefPHF+Pc///nP8ufEd9MB9fe//z36DXuqw7Gg6w6O0jn20EMPxb/7egdUkXYQGXXRPdpmXQ60nVnB3M4TDmufoRUdUFdccUXMDts76SAfwMC2Ms2EMhMDSC90/ZT25aP/1pzMvIdaIVJVfXZpc+vggEI7hffh6fZg8803t7bLWkeu+tfx5ZH2HdYBVSf7hu5dxhRFbco1fkf3neZ4x9c9maxP+F62TtlkupyTenBp8yA3zybMfiDEGBN5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy7QZoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFukfnjZez+ww//PBDPIGAiQRMxvoIciBgG2giHdz04kZBT2Lg3S1YcWALUmba4MIchEHulVdeaRMXnXN1QOGGdMcdd4zyify++eabscxnnnkmzj/Sa3UHFPibTyrB6YaVIz169IjKudVWWxVyHsSg+KH2BGQdS6u3siDnn39+og7o1YE63rJlyxK/F3VI6G3zdDsxd+5cLTo+wm5Np8dOO+0U/6Y/7LvvvlE+tt56a31KnXrqqXHebJM9eCeeThcrJrOC6YC68cYb46j33HNPLAOy+vfvrzD41GHcuHHx774cUEXaQeRD6h5ba2FlFv7wNPiwYcPilWAoQ5YDKlS/onm1mgMKT/X26dMn0rHcQlK+a+Siiy7SxSx0LDMxUEX9lPblo//WgMyJjVAOqKrqM9otrMTO+ttzzz3j9gJ1MeQWfEhLtwO33nqrghN+5513jtOHM3/w4MEKK1Wzgqv+02TQvsM6oKqyb19jCthJUZtyjd+Z+06zfpWpU6acop+lHnzYRBV9GMrZDuNMqR9ffbWUyzHmL226q/1n1bMqxphIv9k2o4r6Ke3Ll91mcedvJNAsATqgmiXH69qdAB1QFVtACAcUJmv05CqO2267rZdSyYGAHmDBkfPxxx9HLzw/5ZRTEmljlQCuSwtS5tChQ6P4c+bMid4ng1UE5vsIsLUetkvICq4OKL1yAoww2W4G6YDCiodWDxMmTEjoxnzHT9Y7cFq93O2ef1nHdL3N43LNNdck7GXEiBGJS2Q707t378TveV8GDBiQkD9r1izrJYcddlgcr1u
3bokn8rFqSq9gNFdHjR07Nr4G9fv9999PyN5www2j39ddd93M7fdwkemAwnadOuDmUL87Tre3N9xwg/5ZPfroo3EeijqgfLSDyIjUvc5n2jHLASX17atf0cBazQFlvidQb3Wly4IjtjXUnOH8byY0OzGAtKS+QtRPaV++7Bb5r8IBVVV91nZQ5BjSAZWXjyOOOMLpoRup/7TJoTTbp32HdUBVZd++xhSwk6I2JeP7aoOk3Lw605F9p1m/ytQpU07Rz5KXD5uoog/T5ezs40ypn3asJxxjamtPHpttM6qon6HsNkmA30jADwE6oPxwpJT2I0AHVMU6D+GAgvPGvFnCvus+ghwIII211lorngA208RnrOx6/fXXM5O2yZRy9PeTTjop05mlE4IzDJPn+s92Y4gn1FdYYYWIE1ZiyO2/0hxQeOLnyy+/zJ201nmp23GHHXZI2AbY/uEPf6hbNpkfjwRkHXOdGIDDSdc9HLGNoxnkKo/11lvP/Dn3M1YsmfKxHaYt3H777Yl4zz33XBzNrKfTp0+Pz2N7TlM2VivpYDqULrjgAn069WjGl++TmjlzZmK1J9pz8EYYP358nIeiDigz7/KzazuIPEjdw2GGMuMPTvfDDz880X7b2koNBnvRm3nJ61cWLlwYvVMOznv5t3TpUi02Prba5MDxxx8f8cDqWbxMXQZzlR24mVv0ybhp35udGIC8KuqntC/TPuTnInaL/FfhgKqqPuMl4no8knYcMmRIon6FdEDhoR7dDpxxxhnqqKOOUhtssEEifTzsY9t21bRVqf8qHVC0b1MTv37uiP7K15gCpShqUzK+bHfM70XaICm3TN8Z6p7sV63/+qlMn/GrlOKfJC8fNlFFHdcl7ezjTKkfs17Izx1VTzjG1Nb4y7GKMSZSarbNqKJ+hrLbJGl+IwE/BOiA8sORUtqPAB1QFes8hAMKzhRzQNns09cShW0gACdO165dFVYmYDuiPfbYQ5188snKdUWNlIn3D2ByBKsjzAkolOess85SS5Yskdkq/B0rtuAc04zw0m0ZzIltxNMroPDuJHz/5z//KS9pie8vvfRSXG6UY6WVVoonzFuiAMxkYQKyjrlODMgt+KZOnZpIG+9b0nUIR7QFRcLaa6+duH7BggXWy+VNjrmd2dlnnx3JWG211Rq2+TQnVNEm6WA61vIc5LjGnNAbOXKkFhMfTz/99EQ5jj322Oi3J598Mj5f1AHlqx100T0mmbUesxxQaHt1PBzz+hWpX/Na88l8DbKVHFCLFy9WmKBHmfbbbz9dhMTRXCGFeObquETEjC/NTgxAZBX1U9qXL7tF/s3+H0/aZgWZjyxHSEfUZ5c2tw7vgJo0aZJaeeWVE/UcW/SlhSLcbTJo32FXQFXVX7nYN/SfN6ZAnKI2JeP7aoOkXFsZXfvOUPdkvuuUTZ7rORdeNllZNlFFH6bz1NnHmVI/dawnHGNqa1SqqjEmUmy2H66ifoay219J8xMJ+CNAB5Q/lpTUXgTogKpY3yEcUCiC+d6UXr16eSmVHAjYbsiKJiRlmhNHGIxiMtecuITjKO19Uq5pjx49OpaJ97b8+9//bvgzt+dD+phMxgSi3u7rkUcecU2udvHMiWG8P4ehcxOQdcy13p544olxPUEdwNOJZvj+++8TvyOOXEloxjc/YwWMuQUkVpFgS7u0YL4vylx5o7fSO+aYYxouxVOcuu3YZJNN4t+xdRzOw2HuEswJa9v2n3Ccde/ePU4Lsl944QX17LPPxueKOqB8tYMuuseAWec/ywEFVnD0aaY9e/bMxId3YJnb0Onr9tprL4UnwmVoJQeU+f4vbON40EEHNfzts88+MSuUfdddd5VFzv3e7MQABFdRP6V9+bJb5L8KB1RV9dmlza2DAwrc33nnHYX3+en6Cj1goskWsvRviy/P0b7DOqDqZN/Qfd6YAnGK2lRW/DL3EFKurQ4X6TtD3JPJ+oTvZeqUTZ7rORdeNllZNlFFH2bmqTOPM6V+fPXVUm7ZesIx5i8WWdUYE6k122ZUUT+lffmyW7Pe8zMJ+CJAB5QvkpTTbgTogKpY46EcUJtvvnk8gYDJXQwUygY5ELANNIumIWWagwvIwovvMXmsJ0NwxBY2ZYK5MsKUW+SzuQ1Ymbx0xLV48k2XlQ6ojtBAtWnKOuZab7fYYovYTtCGYGAlgzlJDJvC9pcuYfbs2bFsXNejR4/My8x3yyEv3377rTJl4AZKBvPpZKQBRxFWUmjHl7mSSl5rfjcdUFdddZX5U/x5zJgxifKAHVYT6HpWxgGFRJptB111v2jRooiPTcdxIZVSsl/BBF9WePvtt2PeYIGVskjLFppxQJnvbcBquLTgyiHtenneXEGrdZx3hN2mrfKT8vX3ZicG9PWh66fk6rP/NvMeagVUVfXZpc2tiwMKtiO3A8RDO7aQp3/bNeY52ndYB1Sd7Bt6dxlTFLWpvPh17Tt93JOZdUl/LluntJyiR6kHlzYPaeTZhNkPoI8NNcZEXjrzOFPqx1dfLeWm6Z1jzGI1qqoxJnJVps0IXT+lffmy22LaYGwScCNAB5QbJ8YiAUmADihJJPB3dN4YaPgOBxxwQDz5iUG7y1ZTeXmQA4G0gWaeHPN3KVMOLhAXzh5zcg9bH+VNSJlpyM/gDSdU1p+5Sghp4/02Zvw33nhDim2Z73RAtYyqvGRU1jGXeouJcu2ogf2nrRbabbfdEnUTq35cwpQpUxLX6W3r0q7FikOzDcAWmXolI/JpewfPJ598krgGE7z/+Mc/4nOuddjFAYVtPbfZZptYNvJqTmaUdUCBSzPtYDO6T9MBzst+ZcaMGVnRo9969+4dc8GK07SAd9FpHUOnWSvitAysPNLXXHbZZfp0w7EoB0xaPvbYY8q2Pau5VQ/s/z//+U/q34UXXhjnD/nEjX6RUGZiAOmErp+Sq8/+25zYyOvvXfKhuXdEfXZpc+vkgDLbSdgt3hNlC0W4266nfXeMA8p3f+Vi365jiqI25RK/jn2nj3uyEHXKJtPlnNSDL5sI3YeZZevM40ypH199tZTroneTufzMMWby/Z2hx5jgX6YfDl0/pX35sltpd/xOAj4I0AHlgyJltCMBOqAq1nooBxTel6Qn5nC87bbbSpdMDgTKDjSRISnTNrhAPKzUMcuDbRtChrR3QIVMsyrZdEBVRboe6cg65lJvH3rooUR9u/LKK62FwYvszXp5wgknWOPJk/KFz2PHjpVREt/l5BXeG6BvVOH4SQtwHOv8YcUT3teD7xtttFHaJQ3nXSascRGcMabTTqeLow8HFNIo2g42o3us3MJ2o9OmTWtgod+5pct2yy23NMSRJ8xVU3i/X1q4+OKLY11Bfp7TAXJ23HHH+Jo77rgjTXRDP5NXB/AOQ11G6dy89NJL49/uvffe1DTxg2k7kHfEEUdkxpc/lpkYgKzQ9VPal8/+uyMdUGDnsz7n2RvSq5MDasKECbGNw24HDx4sTTP67qp/68UlJ74gk/bdSNZsc9JWQOGqqu3bdUxR1KZc43d03xninqxR++Umk23yXM
9JPbi0eS42EbqOm+XrzONMqR9ffbWU66J3jjFV9J7sOowxYf9lxpmh66e0L192a9Z7fiYBXwTogPJFknLajQAdUBVrPJQDCk9l68ENjjvssENuybBiAHv/44n1L7/8siG+HAi4DDQbhIgTUmba4GLmzJmJ8mA7I2zvFCrQARWKLOVWTUDWsbx6u2zZMqXfrYS2A20CXohrCzNen5Gol6ussorC9XnBfGoO27Lhxj8vbLXVVnFa2JZTv48ta+ULHGK6HRwwYIBabrnlou95DMy8uE7o4ZqTTz45Tk+ni6MvB1TRdrCo7lGGTTfdNCqDbYvCN998M1E+vE8rL2y55ZbxNTvttFNqdOmUfPXVV1Pj4gc8xb/WWmvFstFmp4WiHPDuFK0/871mSBPvfMJvrlvqmSvA0N+71A9djjITA5ARun5Krj777452QIGfr/rs0t7UyQGFBw60/eMI57AtuOrfdi3O0b47ZgWU1kdV9l1kTFHUplzjd3TfGeKeTOvRPJatU6asIp+lHvLaPFebCN2HyTJ21nGm1I+vvlrKzdM7eHOMqVRdxpjQR5k2I3T9lPbly25lved3EvBBgA4oHxQpox0J0AFVsdZDOaBQDLzs3ZxEmDVrVmbpMJGr43/wwQcNcefNmxf/jnjYXqhskDLPO++8VJF4elznD0e85N0WvvnmG4V3sug/TJoWDfIJ4K+++qqoiNrGX3311WOOe+yxR23zyYz5ISDrWN4N4hVXXBHbB+oZVsNkBTh2zHqZt5rJdOjgOtd2BA4RMx39+bXXXkvNHlao6HjmMa8tNAUirr522LBh5k8Nn9FOmC8b19flOaCkjny0g8iclJun+3feeScuq+0F9pCJNkOXC8e8rYTM9ibLASW3Sbr55psb+JonJk+eHOcDjkW5UsmMW5TDdtttF8mGk8ncCtB0ErhunWs6QcHrqaeeMrOW+dl8ETWuBaOiIWT9lFx92S3KWMQBJfOBFZJpoSPqs82ZK/P35JNPxvYMXT/++OMySvy9SHlxkYyf1w6YW1siL1ipYAtSbhZ32/W0bzcHVBHOHWHfefZUZExRpKw22/bVBsl85JXRpe/0fU8Wqk7Z5OadK8qriE2E7MNkuTrrOFPqp871hGPMX/qFKsaYsP+y/XDI+hnKbmW953cS8EGADigfFCmjHQnQAVWx1kM6oDDpickD/YetkNJefDt37tzEhI/NASUnjvH0ZNkgZZ566qmpIpGnrl27xuVBuR599NGG+ObEJOKcc845DXHyTjzwwAOJdLA6rDMEPMGPSVVtE3gBMEPnJvDuu+/G+obe0yZSYBvynTWbbbZZapuhqT3//POJredWWmmlaOWF/t08LlmyRJlbsvXs2VOZK0zMuPKzfG8UytK9e3f1448/yqjx9zlz5iTKjmt+97vfxb+7fHjxxRdjGdhuIi/ceuutcXxdz/IcUCHaQeRTyk3TvS7TSSedFOcd5bAFuToUPNNWyL311luxPLDIckBBj/369YvjY3XTt99+a8tCtPrpwAMPjOOmbRGmLy7CASuvtN7QP5th1113jX9z7f9GjRoVXwO5Rx11lCky8/ONN96YuHbcuHGZ8W0/hqyfkquP/luXoYgDSubj9NNP12Iajh1Rn7F1ZV6A417bHY54KjktFCkvZMj4We0Atm0z84F6uHTpUmtWpNws7jYBtG83B1QRzlXYd8gxRZGy2mzbVxsk85FVZ5APl77T9z1ZqDplk5t3LqRNhOzDZLk66zhT2nOd6wnHmL/0C1WMMWH/ZfvhkPUzlN3Kes/vJOCDAB1QPihSRjsSoAOqYq2HdEDhye0hQ4YkJhMwgTZ79uxEKfEEe48ePRLxbA4ouZpggw02yJz8TSSS8kXKzHqfC0TgZdjm5Mjaa6+t8FJ4M/hwQMl3nTzxxBNmEi37Wd5cwRnl8q6Vli0wM64waW3WGXMiBasF8eTuddddp/r375+Id/jhh6c6FiRWPM1qvv8IjqFnn3024bxCOuY7e1ZbbTWVt82amQ6c53rbPV2eY4891oxi/WxugYbrsrbsswkwb8723HNPW5TEOThS4NjVecQxzwEVoh1EpqRcU/dmpuEYvP766xN5hhPeFtCvHHfccYm4WDEEZ5MZMCE1cODARLwsBxSulRMP2KoRcswAp9TBBx8cy1155ZUjGzbjyM8uHCAXNmvaCxykOsiHEuAscwnSqYstJ11X5cqVKM2++zBU/ZRcffTfmmkRB5TMB+wxLXREfUa7lxfkO2LOPffc1EuKlBdCZHxsY2Ou7EPbOn369MQkOtotjA/SVj/Z5GZxtxWG9u3mgJL6y+JchX2HHFMUKavNBn21QTIfvvpOn/dkIeuUTXbWuZA2gXRD9WGyTJ11nCntue71hGPMLtEDWdI+bd/LjDEhz0c/HKp+hrJbG0eeI4GyBOiAKkuQ17crATqgKtZ8SAeULspNN92UWPWCVURYMo1l7tLxpCdNTQcUnn7Fi9cxYax/10csER8/frxOyvmYJXPQoEEK+6XbAt5NhcltnT6OKMPIkSPjp3TLOKCw3/Bpp52mMElopoFtpDBBi86lFcPChQsjRuZ2WLp8G220kbrllltKOxNbkUtnzjNupFEvbPUW9i1tXNvDCiusoK699trEBKULp4kTJ6o111wzUW/gMILTAc4tc+Vdnz591HvvveciNhFn7733TsjPmhzVF2J1jC4bjniiziXAIYEVT+ZEOK6H0ytPxiuvvJJIM80BFaodTJML3aINMP/w3i6Tj/6MbUizAmzL1Ck+wymz//77Rw448zctM88BhfSGDx8ev6sL10EOVllhy1X0W6Zc6CZrC8Y0DpALh6n+0/mTR6SLd1Vg60W5+hZxDzjgADVt2jQrJtg3nAi269Bn3X333dbrcPKll15SRx55pFUvQ4cOVW+88UbqtWk/+KyfWVzL9t86/2a9S3tIokg+Oro+Ywth29af8+fPj1ZpSztB+4yJpc8//1wjicY4aWMxG/csPrBftAfSqa/rAJzoeG+OLWTJteVDyqB9u20xWYRzFfYdckxRpKywp6LxtQ3m3UOkyfXZd5a9J9NlMY8h6pQpP+1zSJuQafrsw6Rs83tnGmem2TPa+ay2ug71hGPMcGNM2LvvNsNn/Qxlt2Y952cS8E2ADijfRCmvXQjQAVWxpqtwQKFImCQ75phjVN++fRMrFfRkA45w7GDgjadj4bDQAQNRc3WDeQ0+46WtRUOeTEyypIX77ruvYWIOk5ILFiyILinjgMIT8LJ8+jvSwE1+Kwa5FZYukz5isgsDPobOQ8C2/ZzWN46YeMTqkXXXXTdamQTH62OPPZao+0VpYLIUW3tg8hLv5UE62mGANgRPXWIlCSb1mwlYqaXLAHloR/LCXUk6DbQAACAASURBVHfdFV+Dlx+7Bqys1GnJI/KRF44//vj4+pdfftkaPVQ7mCdXlsf2ferUqdY8myfhWMOWcnBiQx9a15AH/aPPQdkPPfTQiAVW4LoEPACBpzJNJ4QpGyuT8MRlnv7LcoC9wqZtfPS5o48+2lokcyWCjmseN
954Y+t1OHnIIYdkpnnmmWemXpv1g6/6mce1TP+t82/qPs0BVSQfdajPV155pS5efMT2NaZdyM+TJk2K4xYpLy7Ki6/TAmuM41BfsUL0n//8Z+YDKXlys/SPfNG+3RxQRThXYd8hxxRFyupi21k2mHUPkZcPXWeyji59Z5l7srhBMD6EqlNGEtaPIW3ClqCvPswmW5/rTOPMPHuuez3hGLOLCjHGhK2HaDN81c9QdqvrOI8kEIIAHVAhqFJmOxCgA6piLVflgDKLhfd1YLuVF154IVpphBsI7bwx4/EzCZAACTRLAAMxrBR6++23o20ym3U6menjaVu8lBZ/aZPSZnx8hmNVX1NlO4d0saISKxzaISxatCjaLm/GjBlRufFOMR2ge2y3WNQGsE0Y3k+IvgqrR7Adn+s7w3TaPNoJhKif9pSaO+vigGpOcnNXtVt9bo5Sfa6ifRfTBe27GC+fsXlP1hzNUHWc48zm9BH6Ko4xQxP2Kz9U/fSbS0ojAX8EYPN4OAXbxDOQAAm4E6ADyp2Vl5gd4YDyknEKIQESIAESIAESIIEABOrmgApQRIpsYwK07zZWfo2KjgmzF198MdolA6uyXP++/vrrGpWCWSEBEiABEiCBjiVAB1TH8mfqrUuADqiKdUcHVMXAmRwJkAAJkAAJkECtCXCCvtbqYeZKEqB9lwTIy70QwHvesrYTTPvN3BrUS0YohARIgARIgARamAAdUC2sPGa9QwnQAVUxfjqgKgbO5EiABEiABEiABGpNgBP0tVYPM1eSAO27JEBe7oUAHVBeMFIICZAACZBAmxOgA6rNDYDFb5oAHVBNo2vuQjqgmuPGq0iABEiABEiABDonAU7Qd069slS/EKB90xLqQADvapw9e3bhvyVLltQh+8wDCZAACZAACdSCAB1QtVADM9GCBOiAqlhpdEBVDJzJkQAJkAAJkAAJ1JoAJ+hrrR5mriQB2ndJgLycBEiABEiABEiABGpCgA6omiiC2Wg5AnRAVawyOqAqBs7kSIAESIAESIAEak2AE/S1Vg8zV5IA7bskQF5OAiRAAiRAAiRAAjUhQAdUTRTBbLQcATqgKlYZHVAVA2dyJEACJEACJEACtSZw7733qjFjxkR/3333Xa3zysyRQFECtO+ixBifBEiABEiABEiABOpJgA6oeuqFuao/ATqgKtYRHVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP29evVS++67r1qwYAH/yIA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdTcBr744gvVvXt3ddRRR1U8m8zkSKC1CdABVbH+sAJq4MCBqkuXLvwjA9oAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2UHMb6NatW6Sjgw8+uOLZZCZHAq1NgA6oivW3xx57qEGDBqk//OEP/CMD2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZQcxsYPHiw2mmnndSZZ55Z8WwykyOB1iZAB1TF+uM7oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBvgOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbO5EiABEiABEiABEiABEiABEiABEiABEiABEiABEigBAE6oErA46VtTYAOqIrVTwdUxcCZHAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmUIEAHVAl4vLStCdABVbH66YCqGDiTIwESIAESIAESIAESIAESIAESIAESIAESIAESIIESBOiAKgGPl7Y1ATqgKlY/HVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbeZHI//PCD+u6775q8mpeRAAmQQGsSWLx4scLfkiVLWrMAzDUJkAAJtCgBtr8tqjhmmwRIgARIgARIoG0I0AHVNqpmQT0ToAPKM9A8cR3lgFq2bJmaPXu2euutt6LjggUL1M8//5yX3bb8Hc6nXXbZRXXp0kW98cYbbcmAhe6cBN555x315ptvRn+LFi3yWki0MR9++KH673//q+bNm6cg30cb880330Ty/ve//6mffvrJa57rIgxtDnTz3nvvKZTz+++/77CsoY9C2/fb3/5WzZ8/v6l80M6awhb8ojrZma/ChrI1jpncNRRKB+45cI9Z9/6E7S/7eXdrZkwS6DgCdW9LO44MUyYBEmgHAnRAtYOWWcYQBOiACkE1Q2aVDihMBA8dOlT16tVL/eY3v4kmFTGxqP+Ql913311deOGFChNTOmACdM0118z9W2+99dTmm2+ujj32WDV69Gj16aefahENx6IyjzjiCHXDDTdEE9oNwpo8cdFFF+WWCeVebrnlYkat4IAaOHBgarl22GEHKy3oK03Ht956q/UanmxtAnPnzk20A1dddVXpAr3//vvq1FNPVb17944cFrpt0ccVVlhBbb/99uqPf/yjev75553Tmzx5stp///3VqquuGtXFbt26RceuXbuqvn37qhEjRqivvvoqUx7aP5uNb7fddgqDxrTw3HPPWa+DrEmTJsWX5bVpl19+eRzX9uHhhx9Wp5xyitpqq60SbQ7Yob3+3e9+p4477jg1ceLE+HKsygRrW7mKnuvZs2cs1/ygJ0CRD5SxaKCd/dJ30s5+4ZBmZ0Xtyhbft601M2YKWSdd25jddtsts0149NFHG/DhIaT1118/9brrrruu4RrbiWZ0EJKZLY8++hPIzdOHboP12LiZcSzb3y6K/Xz4ft5WT+S5PHuXY5yOHnMNHz5cFiEas+l6aTs+++yziWuKlDkvrplenz591IABA9Rhhx2mrr/++sx72zy5JvdWbEvzyqe5lWlHE0rlFxIgARIIQIAOqABQKbItCNABVbGaq3BAYUB65JFHJiabMREMZ9Hqq68eO1f0JDGOH3zwQUwC15977rkK15hx8j4vv/zy
6i9/+Yt1lUKzMvEU/kknnZRwkMUZLfjhzDPPLFQelPejjz4qmEr10S+++OLohl3qZ91111W33XabNUMzZ85UO++8cwOPjTbaSM14fYb1Gp5sbQJwOJk2su222zZdIKwOGDJkSKKNgeO2f//+CjeNNof36aefnpseVtzAYWXmE21Av379YmeU/g3tzX333Zcq8+uvv1ao89p5pa/DERMAaQGrtwYPHpzIA8oDZz5+0yGrTYNj/+WXX9ZRE0esbgI7Mz/du3ePHE62vKIMOuCJU/O6Mp+Rli2UnQClnf36kAftrEtU/2x25uOcL1srM2YKWSdd25hbbrlFrbHGGg1tA9pOOP+xMlUGyMZvZn3X7ck222yjpk+fLi+xfm9GByGZmZn02Z9AbpY+NDvbscg41tQHJmqLhmb0kZYG+/lsnbdyP5+mc/N8lr3byt6RY64999xTvfrqq2b2o89wkMMRjDpo1k2M6XCf/PHHHyeuKVLmrLhmWrbPyA/Gdj/++GMifXzJkiu5t2JbmlU+Gyt9rkg72gCVJ0iABEjAMwE6oDwDpbi2IUAHVMWqDu2Awg0AVr3oARvSu+mmmxJP/GMLLjyNpePgaDqgNJI777wzEee8885TS5cujWThxhiDfUzKyoH9sGHDtIiGo5R5zjnnRO8bwaTsu+++q8aNG6f+9Kc/NcjEKouyoRkHFHi2Qrj33nsTuoJO77rrrsysY7sv0wYw8c/3vmQia9kfsRXehhtumNA3bsA/++yzwmXC1nq4Cda2s+KKK6qrr746ahu0sG+//VadeOKJcRzEzXNAzZkzJ5FHrNx87LHHlFkH8R3nddo4jho1SidrPV577bWJ+LgG7WLe5N4JJ5wQX3f22WdbZeOkbNM222wzqxMecTFZsMUWW8RyscoJTw3rgMkIrPw029QsBxQccy+99FLUhmJ7wgceeCCWjXJOnTo1ygvS/de//hWtntLsQjigaGe/Op9oZ7+wSLMzbfPNHn3ZWtkxk5wADFEnXdoYPDiClSO6fuOIh1PywtixYxPXbL311s5bgDargyqYhepPwFPqw+c4towDqll92GyE/XySitR5q/fzydJlfytSdkiqesyFMRVsPytgtbnZNmJ8lxWKlFnG/b//+79orIf7ZThxMdacNm2aOv744xva6LPOOis1G1KuzeZauS2V5fPZjqZC5Q8kQAIk4IkAHVCeQFJM2xGgA6pilYd2QJkTw5j4eeGFF6wl/PLLLxOroWwOKOzrbw7Y//a3v1llPfPMM4l4mDxNW0UjZWLFlC3AEWVOwiIfSKdMMB1Q2GYOW2Dl/bXSO2fwNJ+pr0suuSQTF26IdHxseQCbYOicBOCk0Lo2j2PGjClc4AMPPDCWhTqK7Y3SAp6w1+llOaDgeMHNtY67yiqrJBwzpny8yw6/67hwpL3++utmlMRnrETScc3j0UcfnYgnv2BVgY6PSdq0INs0THSkBTjqtEysesI2WLYAJ5SOl+aAwmSz6byCnAcffDC+DtfLbQ/NOp/mGCgzAUo7SzqgoAPamX2lnc3ui5zzZWtlx0zmBGCoOunaxhxzzDGJ+n/77bfnIsWqTt3W4Jg2drMJalYHoZmF7E/AQerD5ziW7e8vlsZ+/pe+JFSbYqvPaeekvWeNcSCj6jHXGWeckZb1+Lx0tN9///3xb7YPRcos46bdKyMdPBhotrcYh33++ee2LDS0MzburdyWSm4+21ErUJ4kARIgAY8E6IDyCJOi2ooAHVAVqzukA+qRRx5JDGyztv9BsS+99NI4vs0Bha0LzIFy1qBarnbARKstSJlpA05cixVXZvp4V1WZYDqg8lY/lEmno6795JNPFLYl08yw3SK2OkgL5mQVHH4MnZeArp/rrLNO9M4PbSMHHXRQoUI/+eSTsX1BxjXXXJN5PeoZbrARN8sBZTp7EBerdbKCdLTsuOOOqdFnzZqVyLMuO47SQWMKueeee+Lrnn76afOnxGfZpmH70rSAFU86fWwLkxawgkxPRKY5oFB/ZZBcbOXD+2KQhxAOKNpZowOKdhbGAeXD1nyMmcwJwFB10rWNQTul2xccsZo8L6CN0NekvTMyTUazOgjNLGR/AhZSHz7Hsbrdh06KjlOb1YfUL/t5SaRR563ezzeWMP2MtPesskNK1WOurPqnSzVhwoS4nUPdwmr6rFCkzDJu1r0y0sT217rNxTFtvCvl2ri3clsqy5elR9/zAVm6528kQAIk4EKADigXSoxDAo0E6IBqZBL0TCgHFJ74xDt/9KB2tdVWy3Q+oJB46mrXXXdVu+yyi/r0008byo3tubQ8HLMG1RjMm3FtEzFIQMrMGnA+/vjjCZn77rtvQx6LnOjsDiiwuOCCCxLM0pyQcDjqFWZ4Apyh8xKAE1JPap1//vnRO9V0XYXDMstJaVLBakBz685VV1012vrNjGP7jHYDbQy2h7MFbL8JWTpP66+/vi1a4hwGfT169IivwbWvvPJKIo7+glVCWjZWo+jPOG666aaJ7Un1NTjiCVkdN22CAPFc2zTw0844yM2bHMbqBUwoPvTQQ3G2zMkGrN6UwcUBNXr06Khcvh1QtDPamWmPeXZmxi362Yet+RozVVEnXdsYbPmELVF1u4VVpXkBfYKOn/VOPSmnjA5CMgvdn4CDqz4Qt+g4VvfV0EkRB1QZfZi6ZT9v0vj1s6vOW6Wf/7Vk+Z9cy64lVT3myro31XmSzvknnnhC/2Q9FimzjJuXn9NOOy1uc1HP01ZjSbm2++VWbktdyqeVU7Qd1dfxSAIkQAKhCNABFYos5XZ2AnRAVazhUA4obLWnJxFwxNNCZYMcHGYNqs0bDqS/3XbbWZOXMm0Dan0hXpxtlmnAgAH6p6aO7eCAwvsszMl8bPOF1RQyDBo0KGILJ5TcxkvG5ffWJmC+F+iNN95Q48ePT9QrPBnqEvDON7M+2p7GdJEj40ycODEhF05Ul/DXv/41cd1ll11mvcxsm/CuNDjdzXKkvUPKtwMKE4RmushH0YAJ5hEjRkR/eEeHDC4OqPnz50fXjxw5Ul4efW92ApR29qsDinamVJ6dWY3P8aQPW/M1ZqqiThYZNx166KGJdibLiYH3pvTu3TuKv9JKKzk/jAA1ldFBSGah+xOUvYg+io5j2f4mGwH282H6+STl7G9F7B2Sqh5zZd2b6pLVyQF18sknJ9ro6dOn62wmji7cW7ktdSmfBlK0HdXX8UgCJEACoQjQARWKLOV2dgJ0QFWs4VAOKDhyzMnNrC2jXIssB4dZg/zXXnstkb6PFVCYLDfLdMghh/w/9s4FeK/h/v/8SzG/wdTQaY12qqatUTpKxn1Uq+NSasrP/JT+jKKtS11+qmW0aN0vcYuQuF/ifgmKlJIgQdzjmiARQuIWCRJBYnD+837SPdmzz57bc/ac7/N8n9fOfL/nPOfsfnb3tZ/dPWc/Z3eLJt3rrx8MUMq4uwHwqaeemuBhD7xp01fc4CZg9jhZd911WxldsGBBtOyyy8Z1S7Nsijh3IChEG6N4jzjiiDgtqu9pL+NuGt0BhS233NL10vp
tD4ZceeWV0ZQpU+LZf4pPbbJvkDa0AUqJWW+99eK8yvh7//33e9Pc6cUiBqg82Z0OgKJnSwxQ6FmellW7H0LX6nhm8uUqRJ10n8WyPtyxlw5V+6Y9R9KcvYfTAQcckObNez1EGXgFF9jLLi2crtfdnyiOMuVR9jmW9jdZuvTzSR76FaJNaZeafqWMvktK089cWe+mJleuHg3kDCg9q5p3Wz2Ly4jkc2W5+2RU0ZW629Iy+SvbjvpYcA0CEIBASAIYoELSRFY/EcAA1XBp12WAGjJkSPxAqwfbuXPnVs6Z+3CY9ZB/0UUXJeIfPny4N35XZtZAipaDMQ/pOqbFL+OXlp0xf2kzIfrFAKUlaNZYY42YnXTO6IOWB9l4441b91ZdddVIyzfgBi+B119/PVp66aVb5X3GGWfEGd1xxx1j/VhttdUi6UWe22ijjeIwodoYxWkbZST3/fffz0tK676WELXbBy0n6HP2YIj2BpFzX6x9BvM6DFCHHnpoIs2qm2lLB/ryknetymCDkd3JACh6lhx0Q8+MNoU/htK1Op6ZfLkNUSfLPDe99957cZuv9lGzndPc4YcfHrdHDz/8cJq3tuuhyqBN8H8uVGFWd3+iJJYpj6LPsYYF7a8hsfhIP5/koV9V6ke7tPwrZfRd0pp+5kp7N7Rz1i0GKHeGppaFTnNlufvkVNGVutvSMvkr2476WHANAhCAQEgCGKBC0kRWPxHAANVwaevlUvuhhHYrrrhiPJCwyiqrBBHvPhxmPeRrANceDJ4wYYI3Da7MNAOUBsR32GGHWKYGl337VCmS8ePHx/6UhrRZPa4BSoYaTevXjItJT09qzYIoMhDvzViXXdRXz3Z5mOXStDSUuT5q1KguSzXJCU3gtNNOi8tbum6cazDW8np5zl7aUcbLUM4ecNOsoDJ10G73pNda5s519mDIhRde2Lotw6sMb6Yu6PjQQw8lgmrvJXM/xB5QEq6BW7EzcnVUnrUfVFHDWyKRzo8qgw1GlF0evplhxp99RM+Sg27oma0dYc9D6ZrddoR6ZvLlNESdLPrcZOL/yU9+Ercxqs96SXadrinfaoO0756W4yvqQpVBWnxVmNntVx39idJctDzKPMcaFnb6aX8XU7HrqvSVfv6muH6Lh/vsYnQp1LGovpv4mn7myno3NWnqBgOUniO/+c1vxmW3wQYbeHXZpLksdxPOPnZzW1o0f520ozYDziEAAQjUQQADVB1UkdkPBDBANVzKerkMbYD67LPP4gdavYysvfbaQXLlPhymPeRrsE0v+opbf9tuu22kDb59zpWZZoCyBzgkc+jQoT5xrWudGKC0BIK9DJlJu/ZDOPnkkyMx7WUn/vbXazLgTZs2LVp99dVbZbTJJpuUGujvZRb9mnYNKK611lqt8naXp3PXUz/uuOMyMdXVxixatChuN1QHyxq2NHBq6q6Os2bNasuHPRgycuTI+L67VNX666+fGKi95ZZbYtmhDFCK/LHHHvO2PeobzjzzzMwBiTjxKSdVBhuMyLIDoOjZYnLomdGg+o6hdK2u9syX8xB1suhzk4n/7LPPjtsutYu+AWr7S3x3mV4jx3cMVQY+2eZap8ya6E+UxqLlUeY51uSd9teQWHKkn1/CQmed1o+klOK/iuq7kdh0X/izn/0s0soXWX+/+MUvEm1inUvwKS7NoNffpZdeGv31r3+Ntt566zh+rUrwhz/8IdLsvixXlrtPVqe60kRbWjR/nbSjPhZcgwAEIBCSAAaokDSR1U8EMEA1XNp1GKD0laQ9CLv55psHyZX7cGgMUPoa6Y033og0gHHIIYck4tasAoVLc67MAw88sOV/xowZrZlImnVgr4+tZfU0mJLlihqgNAvI5pR1vuuuu6auy52Vlm66d9dddyXya5ZiU75DLvvVTXkmLUsIaNDR6LhZEmzJ3ShaZ5114vsyVma5utoYV64MwGXchhtuGOdBeZ08eXJbcHsw5Pzzz4/vayDVnimg8CNGjIjv33777bHskAYoRaD6Z4yDpozMUUbiO+64I05HmZNOBxvsOMoOgKJni+mhZ7YW1XMeStfcdifUM5Mv1yHqpPvclPbhjonf1kW1K3//+9/Nrfi43377xe2bnr+KulBlkBVfp8zccq2jP1G63fII8RxreND+GhJLjvTzS1jorNP6kZRS/Jer72XanyaeucyzU5ljnQaovHTsueee0QcffJBbAGW5+wR2qitNtKVu/kK2oz4WXIMABCAQkgAGqJA0kdVPBDBANVzadRigNHhgP/BuscUWQXLlPhwqjq9//euRZtPY8Zlzzex6+umnM+P2yTTh3eNBBx2UacwyEckYds4558R/Mkj5nL561gyIY445JtJyDJoFMmfOnEh7SGkW15prrpnI1y9/+ctIy/T1sttqq60SeRLj3//+972cJdJekMD+++/fKnvNTtS+IK47+uijE7phL9Hn+tXScXb9zGtj5s2b19pfTEvduX/2hsvuTCzVwTJu0003TaRr+vTpbcHtwVh3b7pnn302MXtT7bPaKLkxY8bEskMboCRfywhpZqc94Ggz1j5VZV2ngw12PHZ6NAiR59CzxYTQszxNqX4/lK7V9czky2GIOuk+N+UNACsd9qyRH//4x4mk6bnG1PNtttkmcS/vR6gyyIqnU2ZN9CdKt1sedrvtnhd9jjU8TLlIDu3vYir080Y7Fh87rR9JKcV/ufqe1/403Rdut9128fuf/S5on9sGd9WtOg1Q+ojy2GOPbf396U9/in77299G3/ve9+LnScWvjytvvPHGzEIoy90nrFNdaaItdfPntp3277LtqI8F1yAAAQiEJIABKiRNZPUTAQxQDZe2Xi5DL8GngUz7QS1vNkPRLPseDpdbbrnoK1/5SrTMMsu0vuDffvvto4MPPrjwjBpXptbD1sO6vnC0X7yVn7/85S/RJ598UjS5lf1pUGqNNdZIsLSX7KocwQAIePTRRxP5WWGFFeIB9gFIDlE2RGDBggWtF1zVo1/96lfeWO0v2eXPnv3jBlA9LNPGfOMb30j4t8PaX8TOnj074U/tSxnnxuPbR8keDBk2bFib+MMOOyyRhn333bfl5+67746v12GAMgmRcVD71qlNtTnpPG/2p5Fhjp0ONpjwOtrtcN4AKHq2hBx6toRF2tkDDzwQbbzxxql/+mAi7aOPkLpW1zOTL98h6qT73JQ3AKx0qE2x25M333wzTp49u1P7QhZ1IcsgK85OmTXRnyjdbnmEfI6l/W3XDPr5JJNO60en7a+r73ntT9N9oVmdI0kp+asb9oAS///6r/9KtMtaoi/NleXuk9OprjTRlrr5C9mO+lhwDQIQgEBIAhigQtJEVj8RwADVcGnXYYBSFlZaaaX4ofZb3/pWkFy5D4dFHvLzInZl2i8yGuTW4K89aCJjXdp+UnlxdXLfHZQfMmRIJ2K6Koz98r7TTjt1VdpITD0E7P2NvvOd70RaUtL90ww/u67lfQm/8sorx/5lqM1y2j/JXuLPxLPDDjtE9nJPGmw298zRt8G4Ly7NpLKXldRMLy2r5z
p7MMRn0JHR6mtf+1oiHRMnTozGjh0bX6vTAGXSO3Xq1EgzywwHc3zwwQeNl9xjp4MNtuAyA6Do2RJy6NkSFmlnt912W5t+Gz03x48++sgb9cCsdgAAIABJREFUPLSu1fHM5Et4iDqZ9dzki1PXJkyYkGA9atSo2Osee+zRuqev8GVUKupCl0FavJ0ya6I/UZqzyqPqcyztb1Ir6OeTPPSr0/rRafubpe/tqYuipvvCIu+m3WCAEquXX365tdep6e9U32Xs8bmy3H0yOtWVJtrSrPxVbUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQG2wwQbxYIMGYtO+Ii6TXffhsMhDfp58V6ZtgFJYLZO37rrrxnnRQ7qWUWjKiZtmeJmXA81K8A1qN5WeEPHoqzKTHwxQIYh2vwwZbk2ZFz2q3fDNIDK5ddsYvSBmuRdffDFhIFJdmj9/flsQe8BNadWSmkXcq6++msij9k7yOXsw5Mwzz/R5iTQ4a3PaaKONIn2taq41YYBSwmR822STTeJ4Ff9RRx3lTbPvYqeDDbYsuzzyZkChZ0vIoWdLWKSddToAKnmhdc1tz0I8M/nyHaJO5j03+eLVs5Rdl3ffffeWNy2PapZR1nJ6ZVzoMkiLuwozO8919CdKc155VHmOtdNP+xtF9PPttaTT+tFp+5un724Km+4Li7ybdosBSqzc5QAvvvhiF2Hrd1nuPiGd6opk2W1RHW1pXv6qtKM+FlyDAAQgEJIABqiQNJHVTwQwQDVc2nqg00t8aLfLLrskBi3z9mIqEr/7cFjkIT9PrivTNUAp/P3335/Ii77SzXsRz4u3zH3NejIDzzpqn6hedhigern0yqfdXrt92223jZ5//vnUP21Ob+u6XlbTnNvGTJo0Kc1rfF2bwBv566+/fnzdPlEajR8dNfOoiHO/8DdL57lhiwyGfPHFF9Fmm22WSMchhxwS/27KAKW062tYM0AsHlqyrKirMthg4rAHHbLaXfTMEFt8RM+SPHy/tNykZvSl/T3++OPeDz7q0DW3PQvxzOTLc4g6WeS5yRe39h4xbavqtQb0tOSeuaYZ30VdHWWQFncVZnX3J0pzkfLo9DmW9jepFfTzSR761Wn96LT9LaLvdiqb7guLvJt2kwHq8ssvj9tgtcXaJ8rnynL3yehUVySr7ra0SP46bUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQGm/JDOooONll11WOWfuw2GRh/y8SF2ZPgOUZGimjp2fP/7xj3mig913lybLm+kRLOKaBGGAqglsl4o9+eST47qTt7+HPVCg+rbnnnum5uqII46I5crvJZdckurX3LBnGWivN5/TZs12XT/ggAN83tquXXHFFYlwo0ePbvOjC3Ye02ZAyZ8MavaSfnaamjRAKS3aC8fErxmZRV2VwQYTR9EBUPTMEFt8RM+SPEL+qkPX6nhm8uU5RJ0s+tzkxn/zzTfH7YjaExmcdtxxx9a1Nddc02vsc2WY33WUgZHtHqswq7s/UVqLlkcnz7G0v0ltoJ9P8tCvKvWjXVr+laL6biQ13RcWeTftJgPUXXfdlWiX//CHPxh0iWNZ7onA//lRRVfqbkuL5q+TdtTHgmsQgAAEQhLAABWSJrL6iQAGqIZLuy4DlGY5mAFLHTWAmedmzpzZWotasxR8s3zch8MiD/l5cboy0wxQzz77bCI/Wh5MS3p16rSMnvnLk7HeeuvFcWtvmF53GKB6vQSLp18zebTnk9qAvCX1jFR7lpLap0WLFplbieMLL7wQ1wvJ33zzzRP3fT80e8e0Sz/72c98XqJJT0+K/cjviiuumJoGW4D9daaW90tbPrDoYIhkH3zwwYm0mLSHMEBphoVmv2ofrDTGJn8ywpm4f/jDH5rLuccqgw1GeJEBUPTM0FpyRM+WsAh5Vpeu1fHM5Mt3iDpZ9LnJjV9tovoB05b8/ve/j3+fcsoprvfU33WVQVqEVZjV3Z8ozUXLo5PnWNrfpFbQzyd56FeV+tEuLf9KUX03kpruC4u8m3aTAWro0KFxm6y2+YQTTjDoEsey3BOB//Ojiq7U3ZYWzV8n7aiPBdcgAAEIhCSAASokTWT1EwEMUA2Xdl0GKGVDA5tmoEHHyZMnZ+ZOAxDG//Tp09v8vvnmm/F9+dNyXVWdK/Poo49OFanZGCZ9Ompmks99+OGHrT1ctI+L/jRQ7rqDDjqoJUuzCbKWlZo7d2607LLLxvH++te/dkX13O9VVlklzs/222/fc+knwcUJ2C/ZRZf6tI0dqmf//ve/UyOU/th1Mm/ZKlv30gxQimzDDTdMyE2bzWQSZg9wKD1ZbZPaQZPm008/3YjwHlX/V1pppdi/CZdlgHLbtGOOOcYre/z48bFc7cWQ5cwsBcW/9957Z3lN3Lv22mvjOBRWy5eUdUUGQNGzdqroWTuTEFfq1LXQz0y+/Iaok0XbGF/822yzTaJNMG2altQr6uosA18aqjKrsz9Ret3yCPEcazjQ/hoSydnL0lv6+cVsqtaPJYSLnbn6nvaMY6Q13Rced9xxJurU4913351oB++8885Uv7pRJs+u3zyD2K677ppIi2aq+pwrN4+7T0ZVXamzLXXzF7Id9bHgGgQgAIGQBDBAhaSJrH4igAGq4dKu0wClQVIzuKCjlr9K21h71qxZiQ1GfQYod5BXswOqOlfmoYcemipSaZLByM7T7bff3ubfHtiV3yOPPLLNjzFA6f5f//rXtvvmgh7w7fiK7kdjwnfbUV8u219Ab7TRRt2WRNITkIA92Fi0vp5//vkJnde+IWnu3nvvTfjV7JwFCxZ4vU+ZMiXhN8sApaWh7OXvVlhhhdbMKJ9gLYlpL+23xhprRB9//LHPa+vaI488EqdDS4rkuUsvvTT2b9qCLAOU26Yddthh3ijsdmrrrbeO9ODqc9p03d4DSuVT1I0cOTKR9ltuuaVo0NhfkQFQ9CzGFZ+gZzGKoCd16lroZyZfxkPUyaJtjC9+t31Xm1b04wQjr84yMHHYx6rM6uxPlE63PEI8x5r80/4uJkE/bzSi/Vi1frRLzL7i6nvaM46R0nRfqOWh85w+ajLPczpqZlCWK5Nn12+WAUrLQNvp+PrXvx4tXLjQmxRXbh53n5CqulJnW+rmL2Q76mPBNQhAAAIhCWCACkkTWf1EAANUw6VdpwFKS8ztt99+iYdbDRxoQNN2mrWw+uqrJ/z5DFD2ZtV6YP7e974Xff7557ao0ueuzM022yxThjZntR/Wv/GNb0Tul7v2wK785hmg5OeCCy5I5EX50ouBbfD63e9+l5m2XrjpbuAsY1TWDLBeyBNp9BO48cYbE3VFS0kWcfqq2K5jWs7ON4tQstTGqF7Y/rfYYotIxibbTZ06NRoyZEjCX5YBSmFPO+20hBFKy1/KAGwb0V9++eXopz/9aSx35ZVXjp544gk76rZz+wX8F7/4Rdt994LaAhlq7TxmGaDcNk08fM5tpzT7wl02UBztuHVu598n177mflnbyd55eQOg6JlNfMk5eraERaizunUt9DOTL98h6
mTRNsYX/yuvvJJoy9Su6av4oq7uMvClIwSzuvoTpdctjxDPsYYD7W8U0c8bbfAfQ9QPv2T/VVff055xTOim+0I9E+Y5d8+/v/3tb5lByuTZ9aul5dW3GKdnuCeffDKyP4RUO6z3sbTZTwrrys3jbuKzjyF0pa621M1fyHbUZsA5BCAAgToIYICqgyoy+4EABqiGS7lOA5TJiowr9qwXGVU0jV7LZ7mGJzPIahug9DWWNpzW4K65b476cnbMmDEmqsLHLJl77LFHpP0YfE57U2kg2sSvo/IwbNiw+Ksxd2DXZ4DSF3K2DJ1rE+7dd9+9tbSfDFv2/X333bfUwK8v7QN5bd68eS1G9hJoJn8/+MEPoksuuSRhgBvItBJ3NQLaT0hLy9nGU1PWu+yyS/Twww97I5g2bVqkl3BfONWxa665xhtOF1X/7DZG5zJ47bzzzi0Din3PpCXPACW548aNi1ZbbbVEXdRsIIVdf/31E3GutdZakfKQ5j766KNIM57sAT2lRXVbX15muccffzyRBp8Bqmyb5rZTSotmfak+apNlzeqyuanuzpgxIyuZ8b1HH3002muvvRJpNtwPPPDA6Lnnnov95p3YvGxjNXrmJ4eeLdXSu7J65qe5+GrTulb1mcmXlxB1smwb40uHrq2zzjpx27D88sunzlq1wzddBoo7BDM7DyH7E8nttDyKPMeadNP+0s8bXXCPoeuHK9/9XVbfB7Iv1JLtvmXnZ8+e3foo0X3O1YdW+vjq3XffTWS7TJ6z/Or5a7nllkvMZjfPZDrq4yLtbeRzWXKz3pdtWaF1JWRb2mn+yrSjNgvOIQABCNRBAANUHVSR2Q8EMEA1XMpNGKCUJQ0677PPPq2BB3tpK/sBWIYd7TWir7VksDBOD3lpYRR+k002MV4LH/NkyuCV5q6//vp48MSkXwO1ZvaAO7DrM0Ap/htuuKG1n4rPsCa5GuiWQarXl90TR3f5M8PNHPXypZcAXO8T0Au0KVffMW0PIfsrVV+4tddeOxOOjDJark8GFLUXtvFE+6ip/XnsscdadUryNRuziFN+tBSHXtDNfmxGtuLRV5L6Kl8DpFlOMyV9+dK14cOHZwVt3dt///3j8MqH68q2aTLmnHTSSS1jk90GKU92e/v9738/GjFiRDR//nw3ytTf2qsuLa+6/uc//zk1rHsjbQAUPXNJLf6Nni02QJXVMz/NxVcHQteqPDP58hKiTpZtY3zp0DV7aWHNki/iBqIMQjBz8xaqP5HcKuWR9xxr0k37u/jDDPp5oxFLjnXUjyXS28/K6vtA94VDhw5ty4SWkMt6NnrggQcSYcrkOc+viVd1Wu/NeibW3svaAzRrNZE8uVnvyyYzdehKqLa0Sv6KtqOGA0cIQAACdRHAAFUXWeQOdgIYoBou4aYMUHa2tEeLpv9PnDixNdNIX9Ub443tr5/OZXDT7AkNOmkJLzHBINNPGkBeQxKQsUTL7k2aNCl65513Iu09ZpwMRapjeQYj498+6uFOs5VefPHF1tKbnciw5ZU5V3ugvOgL2jrc3LlzW22Q2mZ9uavBm25og9IGQOtgUFYmelaWWBR1q56Vz0lzIQbbM5MGDvWhjv7sWY3NEe2OmAayPylKgPb3tY6eFYrydf31az/vcuiG33WXRTfkcbCkoRfa0sHCmnxAAALdSQADVHeWC6nqfgIYoBouo4EwQDWcRaKDAAQgAIEeJdDNA6A9ipRkQwACEChEgPa3ECY8QQACEIAABCAAgQEjgAFqwNATcY8TwADVcAFigGoYONFBAAIQgEBhAgyAFkaFRwhAAAJBCdD+BsWJMAhAAAIQgAAEIBCcAAao4EgR2CcEMEA1XNAYoBoGTnQQgAAEIFCYAAOghVHhEQIQgEBQArS/QXEiDAIQgAAEIAABCAQngAEqOFIE9gkBDFANFzQGqIaBEx0EIAABCBQmwABoYVR4hAAEIBCUAO1vUJwIgwAEIAABCEAAAsEJYIAKjhSBfUIAA1TDBY0BqmHgRAcBCEAAAoUJMABaGBUeIQABCAQlQPsbFCfCIAABCEAAAhCAQHACGKCCI0VgnxDAANVwQWOAahg40UEAAhCAQGECDIAWRoVHCEAAAkEJ0P4GxYkwCEAAAhCAAAQgEJwABqjgSBHYJwQwQDVc0BigGgZOdBCAAAQgUJjAddddF40aNar19/HHHxcOh0cIQAACEKhGgPa3Gj9CQwACEIAABCAAgboJYICqmzDyBysBDFANlywGqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/Nttt1209957R3feeSd/MEAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdKDLdeD222+Pttpqq+jwww9veDSZ6CDQ2wQwQDVcfpoBNWTIkGippZbiDwboADqADqAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA50uQ4ss8wyrTLabbfdGh5NJjoI9DYBDFANl9+OO+4Y7bPPPtE111zDHwzQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB3och24+uqro2222Sb6y1/+0vBoMtFBoLcJYIBquPzYA6ph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAfaAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/BigGgZOdBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEKhDAAFUBHkH7mgAGqIaLHwNUw8CJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBABQIYoCrAI2hfE8AA1XDxY4BqGDjRQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCoQAADVAV4BO1rAhigGi5+DFANAyc6CEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIVCGCAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/ANlgFq0aFH06quvRlOmTGkd33///ejLL79sOPe9Ed0X
X3wRvfbaa9HHH3/cGwkmlRAoSODll1+OXnjhhdbf/PnzC4YavN7q5KE2V+3ISy+9FL355puReIdocz/88MOWvLfffjtSWzUY3WeffRapbKZNmxYpn59++ulgzCZ5ggAEIAABCEAAAhCAAAQgAIEeIoABqocKi6R2FQEMUA0XR5MGKA18HnjggdG3vvWtaOmll46WWmqpxJ/Sst1220V///vfIw34GacBv9VWWy33b80114w22GCDaN99940uvvji6K233jIi2o5lZe65557RiBEjWgO4bcJquKCBzmOPPTbaZJNNoq9+9astTr/5zW9qiCm8yCFDhqSW1VZbbeWNUOWVVsaXXnqpNwwXe5vArFmzEu3AmWeeWThDakt8+rLFFltEegBLc/fff783nGQ98MADrWAy9H77299O9eeLN+3aGmuskZaUtutVeLQJ+8+FV155JTr00ENb+fl//+//Jdpbtb/LLbdc9JOf/CQ6/PDDo4ceeihNTNv18ePHRzvvvHO00kortWQus8wyreNXvvKVaJ111onOOuusaO7cuW3h7At1lqHiyWvjTz31VDs5bee33nprdMghh7Ta4GWXXTbBTv3XD3/4w+h3v/tdNG7cuDjsQOlOnABOIAABCEAAAhCAAAQgAAEIQKBvCGCA6puiJqOBCWCACgw0T1wTBigNyu21116JwWYNfMpYtMoqqyQG9oxRavr06XHSFf5vf/tba7DU3C9ylOHmH//4h/er/E5lahD3oIMOShjI4oQGONHMhN13393LpMxgdoCkdCzihBNOiDQQ7ZbRd77zneiyyy7zyn322Wejrbfeui3MD37wg2jS05O8YbjY2wRkcLJ1ZPPNNy+coQ8++CD685//HBnDhy3nvPPOS5Wj+vWHP/whEa+MCTKM656cZvTY8qqcK31FXRUebhya7bTffvsl2lwZUdZff/1IhnrfBwCHHXaYK6bt9+zZs1sGK5uJ2sT11lsvNkaZe2p/r7/++jYZ5kKdZag4stp4fejw2GOPmaQkjprdJHYmHzp+7WtfaxmcfPomPTRuoHTHxM8RAhCAAAQgAAEIQAACEIAABPqHAAao/ilrchqWAAaosDxzpdVtgNIgo2a9mME8xXfBBRckZiloCa611lor9iO/tgHKZOKqq65K+Dn66KOjhQsXtmTpa/cnnniiNZDsful/+umnGxFtR1fmkUceGS1YsKC1xNLUqVOjW265JTrqqKMiV6ZmFYR211xzTbTCCivEeZSRTgOh9957b6S0KF294q677ro4H6bsr7766szka9aX8aujZmZ88sknmWG42ZsEtPTb97///UR5yyjyzjvvlMrQueeem5AhvVEbo/Ygyx1wwAFxuCOOOCLh1TUiyLjy6KOPtuqflpi78cYb47CK78EHH2wZuRXuvvvua802Mnpc1AAViocyoqX1ZGAxaVh++eWjs88+u9VWmox+9NFH0R//+MfYj/zmGaBmzJiRKDPNZL3jjjsitfHG6beum7h1PP/8881t77GOMrQjctv4H//4x96PEhRGZbjRRhvF6dcsJ83UMu7zzz9vzYS1+4MsA1QTumPSxhECEIAABCAAAQhAAAIQgAAE+osABqj+Km9yG44ABqhwLAtJqtsAZQ+EajB24sSJ3nTNmTMnMRvKZ4DSHhz2wOaJJ57olSWDje1Pg4Vps2hcmZox5XMyRNmDjpKveEI5DRDbad57771bg6Gh5A+EHM16s/N00kknZSbj4Ycfjv1rSTPpBG5wEpBBx9YNcz5q1KhSGdYsFhPWPqr+ZLlLLrkkDjd69OiEV9sApZl8tgFCHm+66aY4rOJ0l66z9bioASoUD6Xvv//7v+P0qc3ScnlpTkvvGW5ZBigZXmS4MX5XXHHFNi4mDu3tp/vGrwyLTz/9tLnddqyjDO1I3DZey+qlObsd1qwn7U3oc1qO1eQvzQDVlO740sc1CEAAAhCAAAQgAAEIQAACEBj8BDBADf4yJof1EMAAVQ/XVKl1GqD++c9/xoN0GqzLWhpLCTz55JNj/z4DlPZIMYN+OqYZoCTL/bpfA4s+58pMM0AprGZc2fFrr6oQzh7MlPyLLroohNgBlzFz5sx4/yrlS8stalmsNLfPPvvEfGXwww1eAqZ+fvOb32zttWTq1a677loq05MnT451xsgwR9cwZAu+9tpr43D33HOPfSuxBJ900nV5Bij533bbbVvyixqgQvG4++6743yJwznnnOMmP/FbM8WURvnNMkDZBjv51UyvLOcy+ulPf5rqvY4ytCNz23gt55rmNOPJ6I/2/UtzmkGmvlN+0wxQTelOWhq5DgEIQAACEIAABCAAAQhAAAKDmwAGqMFdvuSuPgIYoOpj65VclwFKX8xrzx8zmLfyyitnGh+UuHfffTfaZpttop///OfRW2+91ZZeLc9l5OmYZYDSUlC2X99goCJwZWYZoO68886EzJ122qktjWUvaGaAGQBWerXc32Byxx57bIJZmhFSBkczw0yz5nCDl4CMkGbw/phjjmntqWbqqvYNyjJSulQ0O8mE1awnc67jj370o8RSn3bYG264IfbrGlPsGVC33nqrHax17hpXfIauiy++uCW/iAEqFA8tD2gvZbrSSisVWrZT7ajaXC0t6HPaE0myDNvvfve7Pm+Ja3oIXn311eMwCvv4448n/JgfdZShka1j0TZe/Mq0xVdccUXrQ4ebb745jq5p3Ykj5gQCEIAABCAAAQhAAAIQgAAE+o4ABqi+K3IyHIgABqhAIIuKqcsApaX2zICljpo9VNW5A4lZBih7UFPxb7HFFt7oXZlZBqjXXnstkacNN9zQK7PoRRnp1llnnVjmpptuGunaYHLaH8YevNayVpo94Lo99tijxUFGKHfJM9cvv3ubgL2H0nPPPReNGTMmrgOqq3fddVfhDNr1XPuOyYBttztp+w9lGaC0r9xZZ53V+tN+Sq4rYoCaPXt2K/ywYcPc4G2/Q/HQHnh23rNm+rQlIuPCuHHjEnJlVC7ijj/++ES4U045xRusjjK0IyraxssQaPOTLpV1TetO2fThHwIQgAAEIAABCEAAAhCAAAQGDwEMUIOnLMlJswQwQDXLuzUTQV+/h3Yy5NiDee4yV53E5w4kZhmgnnrqqUT8IWZAabDcztOvf/3rTrIRh9Eyc7a8EIxi4V10cu655ybyeeqppyZSZxsrjzzyyMQ9fgw+AmZfuHXXXbeVuQULFkTLLrtsrCNajq6os40XV155ZTRlypR4Jp3qlgzsWmbOdVkGKNev+7uIAcoNk/U7FA/X4BOqPTniiCPishHTJ598Mis78T3Fb7dvW265ZXzPPqm7DN1+I+sjg/XWWy9Os4zh999/v53UyuehdadyghAAAQhAAAIQgAAEIAABCEAAAj1LAANUzxYdCR9gAhigGi6AumZADRkyJB7I0yDk3LlzK+fMHUjMMkBpHyV78HP48OHe+F2ZWYOT119/fUJmWvwyfi2//PLxX9qX/zajDTbYIPryyy+9aez1i1rCa4011ojZSeeMPmjZq4033rh1b9VVV23
tv9Pr+SX96QRef/31aOmll26V9xlnnBF73HHHHWP9WG211SLpRRFnGy+0T5GcazDxGZ+7xQAVksdGG20UMwzV5oqnbZSR3Pfff79I0bSWVLXbYC2v6HN1l2GZNv7QQw9NMFRblbZ0oC8vedcwQOUR4j4EIAABCEAAAhCAAAQgAAEIFCWAAaooKfxBIEkAA1SSR+2/6jJArbjiivFA3iqrrBIkH+5AYpoBSJFp0Nke/JwwYYI3Da7MNAOUBsR32GGHWKYGU337VCmS8ePHx/6UBt+snmnTpiX8nHPOOa30KT1jx46NRo4cGR1++OGR9si59NJLvbM4vBnq0otXX311Ir9meTAtm2bKadSoUV2aepIVisBpp50Wl7eWtDQ3zq9uAAAgAElEQVTONRhrObkizjZeXHjhha0g2odHRiyjVzq6+zRp3x5z390DKi/ekEaEkDzspS5lzA3l1EcYVpoVVNQ4qPjtfkAyfPt71V2GRdt4pVcGQbEz+dVRedbefEUNb1ncQ+pOVjzcgwAEIAABCEAAAhCAAAQgAIHBTwAD1OAvY3JYDwEMUPVwTZVahwHqs88+Swzgrb322qnxl7nhDiSmGaA0EK1BQzOIuO2226bureTKTDNA2QPFkjt06NDUpBcxQMmoZNKno/a9+b//+7/oK1/5SuK68aPrWsquV532trJnUsiAJyPc6quv3srvJptsUmpgu1c59HO6NcNvrbXWapW3uxybu7/acccdVwiVbbyQ0da4a6+9NlGP1l9//UgPZsbZy18OlAEqJI+62txFixYlOJY1bH33u99NhJ81a5YpgvhYdxkWbeNNgh577LHEkpCmDVZfeeaZZ3qNaCZs3hEDVB4h7kMAAhCAAAQgAAEIQAACEIBAUQIYoIqSwh8EkgQwQCV51P6rDgOU9lwxg3Y6br755kHy4Q4kGgOUvsh/4403onHjxkWHHHJIIm7NhFC4NOfKPPDAA1v+Z8yY0drrRDMlNFhu8qOl9c4+++w0ca3rRQxQe+65ZyxTspdZZpnWbxlmtCfMzjvvnDDYmPhHjx6dGXc335SRzeRDR7MUm85DLnPVzQz6OW2ahWTK3yyXZ/NYZ5114vsyVhZxtvHi/PPPj4PIuPOTn/wklqd4R4wYEd+//fbb43sDZYAKyaOuNteV++1vfztmWORkww03jDmrDCZPntwWrO4ydNv4tI8M7ISpPTLGUqOz5iij+R133GF7L3yOAaowKjxCAAIQgAAEIAABCEAAAhCAQA4BDFA5gLgNgRQCGKBSwNR1uQ4DlIw3ZrBOxy222CJI8t2BRMn++te/HsloY8dnzn/+859HTz/9dGbcPpkmvHs86KCDMo1ZJiIZw7SknvmTQcp1MsrZ8pUHzXCaP39+wqsGypdbbrnY77LLLhs9//zzCT+99GOrrbaK82Ly//vf/76XskBaOySw//77t8pesxPfe++9NilHH310QjfsJfraPP/ngm28cPd5e/bZZxMzIdXWqb7LjRkzJo5roAxQIXlo6ThTn3TMa3PnzZvX2m9NyxW6fwsXLoxxuzPT1lxzzfhekZNNN900ka7p06e3Bau7DN02vogBSonUcoGa6Sq9sdmac+01VtZhgCpLDP8QgAAEIAABCEAAAhCAAAQgkEYAA1QaGa5DIJsABqhsPsHv1mGA0sCdGaTTsehshrzMuQOJki3jjJan0wwifbG+/fbbRwcffHDhGTWuzG9+85utGU/6ct8dePzLX/4SffLJJ3nJLHTf/rpeA/IvvPBCarjzzjsvwVPLQPWqe/TRRxN5WWGFFWKjQK/miXTnE1iwYEGk2YOqs7/61a+8AewZQfJnz1jyBoiiyDZeDBs2rM3bYYcdltC3fffdt+Xn7rvvjq8PhAEqNA+1S2Jm/vLa3G984xuxXxPGHO2ZZLNnz074U3tbxrnx+PZRqrsM3Ta+qAHK5FPGUu3jZ2apGk465s2GNTLMEQOUIcERAhCAAAQgAAEIQAACEIAABKoSwABVlSDh+5UABqiGS74OA5SysNJKK8UDl9/61reC5ModSDRL8FUR7sq0Byc1qKsBa3vAUbOqtJ9RVWcbtzbeeONMcR999FEkQ41Jx29+85tM/91+0x6U3mmnnbo9uaQvAAF7T6bvfOc70a677tr298tf/jLWcen6NttskxuzbbzwGQNk8Pja176WkDtx4sRo7Nix8bWBMEDVwWPllVeO87TGGmtkstMeWPaSh6Zt2WGHHSLNYDXu008/jWUaP/rAoIjTTCp7mU0Z2rU0ouvqLsOsNt5NS9bvqVOntmaWGQ7m+OCDD2YFS9zDAJXAwQ8IQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBou/roMUBtssEE8cKmBRw1kVnXuQGLdBiil97PPPovWXXfdOC8adNTSelWdbYTZdtttc8VtsskmcRo0e6qXnWaZmcFbDFC9XJLF0y7DrSnzoke1G74ZM3astvEibWbgqFGjEnFvtNFG0QMPPBBfGwgDVB083DY3b7bmiy++mDAQaYaPuwSoWNvGcpWdlhgt4l599dWYscJp7ySfq7sM3X7D/sjAl56sazK+2W2x8nXUUUdlBUncwwCVwMEPCEAAAhCAAAQgAAEIQAACEKhAAANUBXgE7WsCGKAaLv66DFC77LJLYvAxby+mItl2BxKbMEApXffff38iL1pK7O233y6S5FQ/2qPFDMRrQDzP7bbbbrF/zSrwzSTIk9Et9zFAdUtJNJMOex8hGVu1h1na39///vdYz1U/NGCf5YoYL7744otos802S8g95JBD4t9NG6Dq4uG2uZMmTcpC17r37W9/O+aw/vrre/2rzExbpaNmjxVxEyZMSIQzyx+6YesuQ7ffqGKAUtq1LKG972DeDFY7vxigbBqcQwACEIAABCAAAQhAAAIQgEAVAhigqtAjbD8TwADVcOnXZYDSfkn2oOVll11WOWfuQGJTBiglXDN17Pz88Y9/rJSfvfbaK5an2VB5TvvmmPi1pFgvOwxQvVx65dN+8sknx7p73XXXZQqwjRHS9z333LOw/7QZUBIgY4y9HJypSzo2bYCqi8cRRxwRc1a+Lrnkkkx2umnPmtpyyy29/o899tiE3AMOOMDrz714xRVXJMKNHj3a9dL6bZd5HWXo9htVDVBK9FZbbRXnTXsQFnUYoIqSwh8EIAABCEAAAhCAAAQgAAEI5BHAAJVHiPsQ8BPAAOXnUtvVugxQmuFgD/JqwC7PzZw5M1p11VUjfZU/Z86cNu/uQGKTBqhnn302kR8tD6YlrDp19qCuZOUtl/XjH/84jn/IkCGdRtsV4TBAdUUxNJIIzT7Snk9qC4osqadE2bNy1D4tWrQoNa1FjRcScPDBB8d1yG6bmjRA1cnjhRdeSORv8803T+Vmbmj2jmHxs5/9zFxOHCc9PSn2I78rrrhiZpmYwPbMKS3vl7acYt1l6PYbaQYozdLV0ojaBytL55Q/GeEMtx/+8Icmy7lHDFC5iPAAAQhAAAIQgAAEIAABCEAAAgUJYIAqCApvEHAIYIBygNT9sy
4DlNKtgTwzSKfj5MmTM7NzyimnxP6nT5/e5vfNN9+M70ueluuq6lyZRx99dKpIzcaw8/PLX/7S6/fDDz+MtO+M+dPAsOsefvjhhKwbb7zR9RL/fv3111uD9ybu4447Lr7XiyerrLJKnPftt9++F7NAmgsSuOeee+Ky1uB+EWcP7kvn//3vf6cGU5ti6sXpp5+e6k835s6dG6200kqxfxOurAHq2muvTcjQEp1FXd08VJ9MvnTMW/rUrotpBijlbcMNN0zITZvNZDjYRiWlI6utrrsM3Tb+mGOOMclMHMePHx/n8bbbbkvcc3/suOOOsd+9997bvZ36u4rupArlBgQgAAEIQAACEIAABCAAAQj0JQEMUH1Z7GQ6AAEMUAEglhFRpwFKA7v2YKiWe/r000+9yZs1a1Zis3ufAcod1NSMhqrOlXnooYemilSatNySnafbb7+9zb89kCm/Rx55ZJsfXbBnCGj5q88//9zrT+FNnJpF8vLLL3v99cJFzQBRHkx+iux/1Qv5Io1+Attss01c1kXr6/nnnx+HkZ789re/9QuPouiRRx6J/WpWYZ679NJLY/9GB8saoEaOHJmQccstt+RFG9+vm8e9996bSJtm5yxYsCCO3z6ZMmVKwm+WAeqhhx5KLGG4wgorRJoZ5XOazWkv7bfGGmtEH3/8sc9r61rdZei28Ycddpg3LXa7vfXWW0d6kPe5V199NbEHlPS1qKuiO0XjwB8EIAABCEAAAhCAAAQgAAEI9AcBDFD9Uc7kMjwBDFDhmWZKrNMA9eWXX0b77bdfYpBTA7AawLOdvtJfffXVE/58BijtH2MGjXX83ve+l2q0seVnnbsyN9tssyzv0Z/+9KdEGrR/02uvvZYIYw9kKp1pBigN6tr52WOPPRJLP8lYc/zxxyf8nHrqqYm4eu3HhAkTEvmRMertt9/utWyQ3gIENKvP1u/11luvQKioNVvGDqfl23yzCCXMHtD/xS9+kStfRl4ZPW35ZQ1Qu+66ayJ80f3gmuChNvd3v/tdIn1bbLFFJGOT7aZOnRppKU+bQ5YBSmFPO+20hBFKe9GNHTs28VGBjOM//elPY7krr7xy9MQTT9hRt53XXYZuGy8ePue225rB6y4bKI62/ug87aMKXxyd6o5PFtcgAAEIQAACEIAABCAAAQhAoL8JYIDq7/In950TwADVObuOQtZpgDIJuuCCCxKzXjSLSEs6abko1/BkBkRtA9TChQujk08+OdJgprlvjlrWa8yYMSaqwscsmTIEaQ8rn9PeVBp4NfHrqDwMGzYskkw5dyAzzQAlv5pBpT1VjDzNLNhuu+1afyobc33ppZduDcynzZLypbWbrs2bN6/FyF7yy+TtBz/4QXTJJZdUNiZ2U377OS3aP0fL4bmzBVXeu+yyS6TlJ31u2rRp0d/+9jdvONWxa665Jg720UcfRZrxZNcRyd93330jzXjJco8//nhcrxSmqAHq0Ucfjfbaa69EWKPDBx54YPTcc895o22Chxux2iN7pqHOZQDceeedWwYU+57JQ54BSnGMGzcuWm211RIMvvrVr0YKu/766yfiXGuttSKVaZqruwzLtvFuuy0uanfVPu20006tWV02N7VlM2bMSMte4nqnupMQwg8IQAACEIAABCAAAQhAAAIQgIBFAAOUBYNTCJQggAGqBKwQXpswQCmdGnTeZ599onXWWSfxFb0Z/NRRhh3traFN4mWwME5GHw0E2n7t80022cR4LXzMkymDV5q7/vrr29KigUnztbw7kJllgFIcb7zxRiSjl5aqsvOlcw3uipuWqepl5y735eZTs1yMAa+X80nao+jdd99t02O7vNP2zLFnwtj+zfnaa68d49WsQ3PdPQ4fPjz2l3ay//77x+Efe+yxNG+J67/+9a/jMG6c+v3nP/854d/8aIKHics+yrCm5QtlQFH7aRtPll122Va7orzvvvvurXxpdmoRp/xoqVLN/pEc5d3IVjyaRarZXjK8Zbm6y7BsG6+ZmCeddFLL2GR/7KA82f3P97///WjEiBHR/Pnzs7KXuNep7iSE8AMCEIAABCAAAQhAAAIQgAAEIGARwABlweAUAiUIYIAqASuE16YMUHZatSfJk08+GU2cOLE100hfkRvjje2vH88/+OCDSIPCWrZKhqm8Qdx+ZESeIVCVgIyd77zzTjR79uyqonoivIwlWnZv0qRJrXxreU/j1MaovemkrdHDrmacvfjii62lSDuRYdJR9lh3Gc6dO7c1g0t91eTJk1v5w0hetpTwDwEIQAACEIAABCAAAQhAAAJ1EcAAVRdZ5A52AhigGi7hgTBANZxFooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMGgIYIAaNEVJRhomgAGqYeAYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPw77bRTtOeee0ZHHnkkfzBAB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXSgy3XgqKOOirbddtvoiCOOaHg0megg0NsEMEA1XH6aATVkyJBoqaWW4g8G6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOdLkOLLPMMq0y2m233RoeTSY6CPQ2AQxQDZffj370o+h//ud/otdee40/GKAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOtDlOjBt2rRorbXWivbff/+GR5OJDgK9TQADVMPlxx5QDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQiwB1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAA
QhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPwYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAgUJfPnll9GsWbNaf2+//XbBUHiDAAQgAAEIhCFAPxSGI1IgAAEIQAACdRDAAFUHVWT2AwEMUA2X8kAYoD7++OPorbfeil588cXo+eefbx1fe+216P3334/0kuO6zz//PFKYvL9PP/00+uKLL9zg3t9FZS5cuNCbJq/QEhfVSeTlJ+t+iagGxGsaX5VRlsvionu43iGQpgPS688++6xURrL0oowgtQ8ffvhhNGPGjFYbNG3atGj27NmR6nlRJx3Oqptl2iE3zjzZZdtHu75llYfyU4aBm273t9Kptv25555r/c2dO9f14v2tNGSxLXPPx8oXaVWdyONq0txpXyLd/8c//hH/pRmhstIxEPWtKleVVV59oK5FreeTTuqa+DZd36rqRJaOm3qmY6d1zdc+dHqt0zYwLz7lr8zzcx1lnFcOitM4u1x852nPdVl135Zv4vEdOy2DOpj50meuVa0XkpNXJoa98la0bzTp07FIP5SVhl7tg5T3LF0UV/qhzvuhpuuayrNqfcvSc1PPdOy0rtn1Lus8Lx2K33Zp/qW/WS7r/ctuv9PkGyZuehRnlmyFs+XLf5k48vyadJUtqzy5bj71246rynmRtruqfhfhbPKgvBVJU5Z+cS8cAdWXpZZaKtptt93CCUUSBPqAAAaohgu5KQOUOuxnnnkmGjFiRDyQZg+qmfNTTz01uuqqq1oP9AaFXrbN/bzjcccdF51//vnRP//5z2jq1KlGRNuxrMxzzz03uuWWW6LJkycH6Wwff/zxwnly83ziiSe25afbLrz66qve/J1yyinR/PnzU5M76elJ3nBi8PTTT6eG40b3EUjTAZXlzTffXCrB8u/WA/0+4YQTCsn55JNPogcffDAaOnRoLEdhjUy1G9dff30kQ3iWk5H8pJNOisOZ8L7jaaedFl199dXRfffd1xowzJJr7o0aNSpTtvLhOrVLvvh17cYbb4y957V5ajNDOXG001RE9gcffBAdf/zxiXC2jLLneS/VoXQij6udbulZ2b5ELxS2jDQDVLfUt1BcqWvFamMndU2Sm6xvoXSi7rpWjHgxX52Wi096p8/PdZVxXjnceuutcTbOOOOMRPtlt2U6f+SRR2K/5kTpzupnx4wZY7xmHjspg7qY+RIaql5Idl6Z2Nzr6ocGWx8krvRDPs1tv9btdU0pDlXf6q5r7XT9V/LSYbfDkpBWP0O9l+elx/ceoPd6u21yzydNmpTIfF4cdp7z/LpxqV0cNmxYNHr06Myxnjy5dhroTxLFx4+aCWCAqhkw4gctAQxQDRdtEwaoOXPmtDp1u7PX4K8MRRqkta+b8zfeeCMmoS8t/vWvf3U0OHnZZZe1HjpjYf85qSJTRrR58+a5Ikv9njBhgjffJv9Zx+HDh5eKayA8y8h0++23R3qgc/OiAfM0J13RoLkbRgYI3cP1DoEsHZARNc9AYHKqumobi4xuyJgko1Kee/311yMZtk04Hc2Lhl687Os610uSviDzuU7bDcV3xx13tL6C88k112Tg9hnplc577rmn9fWf8WuOzz77bKQ2wc3HyJEjo5deesl4a8Wd1o5eeumlrZfT2HPFE72A2ek5/fTTU5maqDRLyg5T9TxLv7pBJ5S/In1JUQNUN9S3buBKXcuva6pzTdW3btCJonXNtEUhjp20gb54qzw/11XGWf2g+pJXXnklzor6aLf/NW27+tp333039mtOJF/3fEYotZkaSC3iOimDupi56Q1ZLyQ7q0wM77RjqH5osPVBVbjSD+X3Q03VNZVjyPpWd11z24q031npcNthyciqnyHey/PS42u333vvvei6667zPvvro0Ddt11eHHbfk+U3rS20r1944YXRggUL7Ohb51lyXe5N6XhI/VYms/JoM/KdF+lP2qByIQgBDFBBMCKkDwlggGq40Os2QOlrbXvWgQaeZXxRI2mcjE3uV5K2Acr404us3dnddtttLTn6OlQGIS2rJUOFHvxtf9dee23qrCVXpr58WbRoUWuJML0YawmpO++8MyFPsi+++OJEHkwaix7vvffeNpl2mrPOH3vssaLRDLi/tHyqrLKcyswwKDtbJksu95onkKYDRWe0TZw4MdYFoxM65umQcqpZkGpzTDgZvLVclR6u5bR0gGZm2m2U/OrFJ2tZgYceeiiWKf9qi9QO6c8s8XfXXXe1GdgVv+LLcvpK006z5Oex0kuSPVgng4+7DISJ023zzj777FzjkAlb5Kh47bQY9r423Zbnvqidc8450fTp0+NlTFw9eOGFF1plpLKUoU15NnHpmGaAqksnXK6h+pKiBijDcqDqW11cqWumZNuPndY1SWqivtWlE3XVtXbCnV2pUi52jFWfn+suY7cczjrrLO9HEhoUtNtmnesDpTz36KOPJsLpQ4u0dt2V1WkZ1M1M6ayrXki2WyYD0Q8Ntj5IXOmH3Bq25Hc31zWlsq76VlddW0K22JmbjrR22EhLq59571RF38vd9OgdQ+9GWU7vXHYfcc0112R5b2vnsvLspkfGNr1n6dlafxo/0ruGb/xIBqU058r1pYH+JI0e1+sgIH1mCb46yCJzsBPAANVwCddpgNKa3+qQzUOFDEP2F/l2VjVN2R509Q1WulP8NaPA5zTAa+I0R80S8DlXpm+KuML5lszTgGinTi/fJm16wJFRyeyX4h4ffvjh2K9mQuhhv1ecytvk0z5qdkbWA+kDDzwQh9MgBK53CaTpwJVXXpmbKRmBfLN7pEtZ+iPBMsrYX16rfZk5c6Y3Ti2pYLc/kq/lINOc226ktUUaLNOLlK37agfffPPNNNGt6xdccEEijNrHPHfRRRfFYbJe3ty028v05cVR5H7aMppjx47NDO6+qOmLPttpqSabo2aL2W7KlCmJ+76ByiZ1IlRfUtYANRD1rUmu1LUlWt9pXZOEuutbkzoRqq4tIVvtrEq5mJhDPD/XXcZuX6JBxDTnLi97//33p3mNr8+aNSvRpmsguajrtAzqZlZnvRAbt0xC1Y0y/dBg64N8XOmHltTEbq1rSmGd9a2uuraEbLEzNx1Z7bAkptXPUO/lbnpuuumm3Iy4Hxvkja+4cWTl2fWbVneVSNfQrHcO9UM+58r1pYH+xEeOa3URwABVF1nkDnYCGKAaLuE6DVDjxo1LvDzqq5ssZ8808hmg3HV3sx4i3CndaWvGuzLT
XtaUbnc5D31Z2KnTA5kebLQfSN5Auv21kpbh6iWXtua08u5b+9/kzX4I1MsNrncJpOmADDF5S1nqizzb6GDOi+z9ZBt5FU6G3SznGjg0KzPN2Ou2G1ltkZbzM/XdpF9LO6Qt86c0yiht/OpoZmxlpf/yyy+Pw2S98Llpz2rzsuJLu6dlT5VmdyaqllzNcvaLmgYrXeeWj2uAkn/FYbj5DFBN6kQW1zJ9SZmBPzEYiPrWJFfq2pKa0Wldk4S661uTOhGqri0hW+2sSrmYmEM8P9ddxm5fonYtzbmD1L7BOjesHUYfomTNSnbDdloGdTOrs16IgVsmoepGmX5osPVBPq70Q0tqXLfWNaWwzvpWV11bQrbYmZuOrHZYEtPqp56dQ7yXu+nJaoNMDrXXk3l21/HJJ580t7xHN46sPLt+s+quItPqC3Za0sYgXLm+NNCfeIuPizURwABVE1jEDnoCGKAaLuK6DFAfffRRYt8WzS7Ql0hZTmv9ahBVf7514bUcif1QIINVmtPDi+03bRq1KzPrQempp55KyMwbUE1Lm65fddVVLVma6ZPlNE3c7FOjAd0iMyGy5DV9z/5CyB4cVtkoX9ITn7OX20qbveYLx7XuI2DrgDuzR7P7spyWSjD12H4p0BJvWU4bRzhV8/gAACAASURBVNsGEM2EyjP0ythk6pqJM0333HYjqy1SOmVAcve7y1pKsxMDlGaUmXRnLVtZNu1ZnN179suWXsY0yGjSpKPupzk7rGacuq6IAUpf1Zv4XANU0zoRqi8pM/AnZk3Xt6a5UtcW1wy7vpSta5Jghw9d35rWiVB1zW1zOvltc+2kXBRnqOdnOy2hy1jpdPuSrHJQH2j3yepr8/rkG264IW7P854V7LKy8122DOywoZnVXS/KlkmZd5oy/dBg64N8XOmHFtc4u750U11T6uqub2XavzJ1bTHZ4v/LpENS7fpZx3u5m568uqI02R8b6BlevLKcG0dW3+P6zUuP+96S1ve4cn1psOsH/UlWiXIvBAEMUCEoIqMfCWCAarjU6zJAac8SMxCoox5Mqzq3s896iHC/8NHgtc+5Mn0PECacloSy86QZEp067TMl41LeC7i9xnDopbI6TXuZcPaDrvKiKf42w7SNTzFAlaHc3X5tHdBX3fb+QNKHNKcBK7Msnl6S7FlEeQYoPejbeqY9moo492vJNP10242stsjEa8/qU9pkkEr7ortXDVD33XdfzF37frgv3VlfV6o9FCP9+QzTRQxQGnAwMty2tWmdCNWXlBn4k641Xd+a5kpdW9yiVKlrklBnfWtaJ0LVNdNWVzlWLRfFHer5uc4yVjrdfjCrHOTf7dey9hzRBwSa6ay+Uoar+fPnFy6WKmVQJ7O660XZMinzTlOmHxpsfZCPK/3Q4urYrXVNqau7vpVp/8rUtcVki/8vkw5JtetnHe/lbnqK1JVuMkBdffXV8XuM+h8tWehzbj59/R/9iY8c1+oigAGqLrLIHewEMEA1XMJ1GaDswWJ14GlTmMtk1+3ssx5qtEG94jV/viWdFLcr0/cAYdLorpucNqtKS2tpppL5S1vGy8hNO2oPgNNPPz3Og7snSlq4brpuP+hOmDAh8eBrysY3CIEBqptKsVpabB0YP35821KWqoM+Z68JrnP7q7Q8A5S779LLL7/si6LtmjvwN3ToUK+RyG03stoiE8ns2bPjumx0X9d8zh2oK7IE30DPgFK7p82GlTdx028tsWjyqqNmt3bqihigsmQ3rRMh+hLlp8zAn/w3Xd+a5kpdi1p1q866Jj2qUt+a1olQdS2r/ShyL1QbWMfzsy/9VcpY8tx+MKsc5N/ez1T9gT5ISXN2Xyx9KupClUFafFWY1V0vlOYyZVL0nUZyy/RDg60P8nGlH6q/H6pS11Rmdde3uuqa0l7GlUmH5Nr1s473cjc9RepKtxig9OGaPi6231vSVu9x85nX//nKtIqO163fSm+ZPJbpT3wsuFadAAao6gyR0J8EMEA1XO51GaC05JXdgWt5varO7QizHmq0V5Idf9r+U67MrAcIPajZMtPi12C37S9vveE0Llqiy8i5+OKL07x19XX7QddsOq3lwUy+dNQsGA0a2A4DlE2jt89dHXBnJ6bVzREjRrT0RLOgZICxH7bzDFD2LCvpWNH255133knopsLqCzbXue1GWlvghhs2bFhCvgbZfK4XDVDTp0+P83b33XfH2bKXXdSX7EWMaXFg66TKi5rENK0TIfoSpbvMwJ/8N13fmuZKXYuiuuua9KhKfWtaJ0LVNeW7igtVLnU8P/vyVaWMJc/tB7PKQf7djzDUN6Q5s0y1+mDffn9p4UKVQZr8KszqrhdKc5kyKfpOI7ll+qHB1gf5uNIP1d8PValrKrO661tddU1pL+PKpENy3fqpayHfy930FKkr3WKActtEfdiX5tx85vV/PjlVdLxu/VZ6y+TRZVek3H1MuNY5AQxQnbMjZH8TwADVcPnXYYByX1T0AukaGDrJptsRpnVuWtZKBhvFa/6ef/55b5SuzLQHCA1C23vQaCA1bfaC4jLx6jh69Ghv3FkX9RWO+cJZMp577rks7117z37QNV+7albEySefnGCkGS62s2e/pO3DY/vnvHsJuDqgtuCss86Ky//MM89sax/eeOON+L5ZBs8ekNKDd5rTzEG7/um8qNFD9c4Nq2XdXOe2G2ltkRvONqIpnrS1xXvRAKV2zrCbNWtWnHV7iRbd77Q+V3lRGwidCNGXCKLbn0r3slyT9W0guFLXotYzRZ11TfrVaX0bCJ0IVdey6lWReyHaQLe+q5xDPD/70t9pGRtZbj+YVg7Gv47nnntu3E8ob76l9bQEq9kvSkvVusup2vLc8xBl4Mq0f3fKrIl6oXQWLZMy7zSS6+plVj802PogH1f6ofr7oU7rmsqrifpWV11T+su4oukwMt36qesh38vd9BSpKwNtgJK+aE9us+yr+iZ9MJj17ujms0j/Z8rAHDvV8Sb0W2ksmsey/YnJP8ewBNRPL7XUUtFuu+0WVjDSIDDICWCAariA6zBAffjhh4kXTG02HMK5HaHvoUaNr/ZKMoMzOuorS133OVem7wFCa9G7A8d33XWXT1zrWggD1DPPPBPnQYP1ZV7AUxM2ADfsB92xY8fGKXCXYpGO2Pu+2LO/Oh2wjiPjZEAJ2DpgZjvpaNdRzYqynfZsMvfNEo32EnNZBigtfWnC6qiBrDIDeGbfKSPDNqaYNLrthq8tMn7to50vyTdGWduPznvNAKU20nDTAKPtZs6cmSgPLS/Viev0RU1xDYROhOhLlPYyA3/y32R9Gwiu1LX665r0qNP6NhA6EaquKd+dulBtYF3Pz758dVrGRpbbD/rKwfg1R3d1At/y3Pbz37/+9S8TNPcYqgyyIuqUWRP1QukuUiZl32kkt0w/NNj6IB9X+qH6+6FO65rKq4n6VlddU/rLuCLpsOXZ9bOO93I3Pfp4V+MyWX/m/cG8d2n/2CznxpHV97h+tcSe3kH1pxVq9G6pDx1M3DrecMMNLR0KlYY0OZ3
qeBP6rTS77HycO+lP0nhwvRoB9dMYoKoxJHR/EsAA1XC512GAeuuttxIduWbyhHBuR6gHBH2douW1tPasNp93Zz5pps2cOXNSo3dlDh8+PNJguOTpAUgvy/ZyKPo6Rg9sWQPaVQ1QmsF1/vnnxwy1SWivOvtB1xgflBcZ1M4777w4j3rgMzNddP+JJ56I72GA6tXSX5xunw68++67cfmq7GWYMc4eRFI9ME77B5kXhCwDlNv+KEwZ567/rSV9XOe2G0UHI/7973/HeVC67KXq7Dh6zQClttKUjVlq0+RHbaW9l53a5E4M6p2+qCkdA6ETIfoSpb3MwJ/8N1nfBoIrda3+uiY96rS+DYROhKprynenLlQb6PIL9fzsy1enZWxkuf2gb3DK+DVHu31Sn+H7IMHu/958800TNPcYqgyyIuqUmVuudTyXKN1umYSqG2X6IbuMzXN/Xc98A8WVfqj+fqjTuqZ60IRe1FXXstof3z03HXntsK9+Sm6o93I3PebdoMxRbXmWc+PIyrPrNy8dMkYV2XPblZuVhrS8dKrjTei30uzmMVR/ksaD69UIYICqxo/Q/UsAA1TDZV+HAUozBuwOPtQLtNsR2nH4zmWM0oNWlisjU1/v+JbjcuVraT4Zw8yfjFllnL2Ro9n/pkz4bvKb9qCrNNpr9ZvyM7Nd7IEEDFDdVKLl02LrgG1wufDCC+N2QkYJLSkg9/jjj8fX7aUZ7UGpLAOUO+NGRuMyzjVA+V5E3Haj6GCEZk4aXddRbYTP2XmVv6xlIEx4e4aY1nNPc52mPU2ertvp9e23deuttyby/corr2SJ897r9EVNwgZCJ+xyds+L9iVKe5mBP/lvsr4NBFfq2qVxXaqrrkmPOq1vA6ETbv2yf5epa8p3py5UG1jX87MvX52WsZHl9iVFBuA0yGl/1KXZ7/YHCXrGNuVnf4Bi4sw6hiqDrDg6ZdZEvVC63TIxLH3HMnWjTD802PogH1f6ofr7oU7rmsqrifpWV13Lan9899x05LXDdv00BmIjN8R7uZueSy65JB4PMeMi7lFtkd1G1WmA0qo4yrf+xowZ0/oIQuNUdvw6v/322xN9k2Fkjm4+87ibcPaxUx1vQr+VTjePLiP7d5n+xGbAeTgCGKDCsURSfxHAANVweddhgHKnButlM4TzdYQaiD7++ONbf5rmfcUVV7RmU7hLeqXF78ocOnRoazB15MiRbRuYyqCVNZsqLY6y1/WwZjr1oi85ZeNoyr/9oKvZZK5zHzrFXTMmJk2aFDPAAOVS663ftg7YBih7ny/pu9mnTS8H+i3DkW14sQeXsgxQ7pe2kqVZVUWcBsLM3hOmDvrqvNtuFK2nmuVn5Or4wgsveJNl51X+bA7eAFHUWkbCyG7SAKVBcBOvjIo+p03kjR8dyyyrZOR1+qKm8AOhE6H6kjIDf8prk/VtILhS1xbvbVlnXZMedVrfBkInQtQ1zbo2z5K+o55jffsViVXINrCu52el03WdlrGR4/aDRQfg7H2a1B+YD48kVwOTpq9I2yPRxG8fQ5aBLdc975RZE/VCaXXLJETdkNwy/dBg64N8XOmH6u+HOq1rKq8m6lvoutZpH+SmI68dtutnHe/lbnqK1JWB3gNKOjNlypR4KXHTB9krs8iP7dx85nG3w5rzTnW8Cf1WGt08hupPTP45hiWAASosT6T1DwEMUA2XdR0GKHcQVy/zWlauqnM7wiIPNXlxujLtB4iFCxe27f105plnJvYqypNf9r79YKgHIM2m6mVn50fLj7lO+x3ImGAe9nTU+v/2HlgYoFxqvfXb1gHbAKU9v9Q2mLLXPmv219/ug79tlMkyQLl7aEj+3LlzC0Gzv7426fIZf9x2o2hbdPXVV8f5lXzf/lJKqJ1X+fOlwc2QjO8mzU0aoLQkqYlXR30I4P5phpvtR/vale0TOn1RE6eB0IlQfUmZgT/ltcn6NhBcqWuLB/7qrGvSo07r20DoRIi6Zs+8tdsq+zxtBnzINrCu52e3v6hSxkaW2w/a5WD8+I7uMtX2PiQjRoxo9RX6ECTN4OeTGbIMfPLNtW6uF0pjVplUeacp0w8Ntj7Ix5V+qP5+qNO6pvLqxX6o0z4oq86bdss+2vWzjvdyNz1F6ko3GKDESEu+uu8rmm3kc24+i/Z/tqxOdbwJ/VY6s/JYpT+xGXAejgAGqHAskdRfBDBANVzedRiglAV9JVHkxb1Mdt2OsMhDTZ58V6b7AKEOVkYnOy9Zg7t58eXdHzVqVByXBqt73dkPurbxwc6X9riy+Wr9ZftBHAOUTav3zrN0QEYnU/YyRl177bXxb/uraOXaNspkGaC0lJ87iyntBcKlaadV6dIsLJ+hxG03irRFkuNudLtgwQI3Ca3fdl6VjiIGqMsuuyxml9VGdZJ2byKjqDVb0W0fTXnmHZWOMq7TFzXFMRA6EaovKTPwp7zaOuy2uaHr20Bwpa4tGfjLq2Pmftm6Jj3qtL4NhE6EqGv2M4fh5h59BijN2A7dBtbx/OxrazstYyPL7UvccjD+3OMnn3yS+PhEM9/l7K+61VYVdXWUQVrcnTJrol4ozXll0uk7TZl+aLD1QT6u9EP190Od1jWVVxP1LXRd67QPykuH25Zl1U/jt8p7uZueInWlWwxQyr/9nKxnAHeZQsPIzWfR/s+E17FTHW9Cv5W+vDx22p/YDDgPRwADVDiWSOovAhigGi7vugxQWq7Ofnl/8cUXK+fM7QiLPNTkRerK9D1APPfcc4m8KF96gAvt9OWNzayTfVJCp6mqvCIPuvri99xzz03kfdiwYfFvDFBVS2Fgw2fpgJags3XenPv2frCNMlkGKOXWfEVt5OnFroiTrpkwOl511VXeYG67UaQtcsNk5UHx2unQbLE8Z7e5Wtc8zbnpyEu7jF/jx49vGYVdY9y0adPidGrpRMlO+7vjjjtiv8qbZJZxnb6omTia1olQfUmZgT/lten61jTXPH0VA1fHqWvl6poYVqlvTetEiLo2b968Vt1R/fH96fnM5+poA+22XG1liOdnX9qrlLHkufXMVw6+eHXN3rNQeRT/cePGxX2Elm0t6uoog7S4qzCru14ozUXKpJN3mjL90GDrg3xc6Yf+EfHM93bcXqkN87V/Zepap31QkTpvt2dZ9dP4q/Je7qanSF3pJgOU3k1UnubvuuuuM1gSRzefvvJPBPD86Nf+xIOCSwEIYIAKABERfUkAA1TDxV6XAUpf4JvOW8e0L0js7GpwUzMC0vZrcTv7Ig81tnzfuSvT9wChdF100UWJ/GiAwB2M9ckvc83eD0kD8KHll0lLKL9FHnQV19SpUxN8bd3BABWqNAZGTpYO6GFJm5Db5a1zPZS7rowB6rbbbkvIVNgizl3GR/tU+ZzbbhRpi+67775Emu666y6f6NY1bX5rM0kb/LQFaNk7E2bChAn2rcR52bTbyyXpC3Xb2e183n4dth4onWl72Njy7fMqL2qS07ROhOpLygz8KZ82Z3cGVB31rWmu1LV/RHXXNelRlfrWtE6Eqmt2e1P0vI420Japtj
LE87MvP1XKWPLcvsRXDr54dc2NW/t+mj5M+22prSrqbF511w033WUMZXXXC/EqUiadvNOU6YcGWx/k40o/VH8/VKWuqczqrm911bWi7Z7xVyQdxq+OWfXT9tfpe7mbniJ1pZsMUNoGwLxL6Zi2Go2bzzL9n+FcRcfr1m+lsUgeO+lPTP45hiWgfnqppZaKdtttt7CCkQaBQU4AA1TDBVyXAWr69OmJDvyMM86I9EVNlnv11VdbYU488UTvni1uR1jkoSYrPt1zZaY9QGg2kv1AonPN3gjltIGyvWzYU089FUr0gMop+qCrRF5//fVtjMUZA9SAFmHlyPN0QHXOrlta9s635FwZA5TvxUlrZmc5LeOjvYnstKTtHeW2G3ltkb5s1CwMI1vnWbOaZEAyfnXU5rhZzl3S6Omnn071Xjbt9sylDz74IJarOFVWJp1prEwAtf/u2up5ZWLC6ljlRU3hm9aJUH1JmYE/5bPp+tY0V+pa/p52Veta1frWtE6EqmvKdxlXVxtYx/OzL19V21S3L0krB1/c6i9M36Hj6aefHv/OmsHryqqrDNx4zO8qzOquF0pj0TIp+05Tph8abH2Qjyv9UP39UJW6pjKru77VVddMW1P0WDQdRl5e/TT+dOzkvdxNT15dUTzdZIByPwDUb59z81mm/zPyquh43fqtNBbNY9n+xOSfY1gCGKDC8kRa/xDAANVwWddlgFI2ND3ffsHUVPQsZ88AeuONN9q8ukvU3XrrrW1+yl5wZd5yyy2pIq644opEfvS1ptbhdZ0GVfXlufnTQ0Kes79k0Yu4OpHB4OyHkrxBBe2vIOOjrTM6xwDV25qQpwP2y5DKO60O2ssSyfCR5WRMOvvssxO6lPcVuT3TR+nIepl46623ErKzXrA0GKwlHGy9tjdd9+XDXbYjr+64a8fPmTPHJ7Z1zW3z8tJu70diGwb1pbnJ03nnnZcan33D3uNLYfXyVdQ99NBDcXwKW7ZdqFsnXK5peqz8Fu1L5LfMwJ/8N13f6uZKXYtaM56arGvSoyr1rW6dqKuuKd9lXJ1tYOjnZ1++qpSx5JUpB1/8mulv9No+zpo1y+fde63OMvBFWIVZ3fVC6S1TJnX1Q4OtDxJX+qHm+6EqdU1lVnd9q6uu+dqdrGtl0iE5efXTjquT93I3PVnvUiYuzYC1+4Ann3zS3PIe3Tiynrddv3nvPG6/lLYShis3Kw3eTHT5c5bSXCaPZfqTNB5cr0YAA1Q1foTuXwIYoBou+zoNUO4gqpbV8G3irCy7X3L4DFBmhpR5SEmbFl0GoSszba1fydRLsYnbHLVmvevcgezRo0e7XhK/ZbA6/vjjY9k+mYkAPfRDy5MYVkUezh544IHYvwlXdqC5h/D0RVLzdEDT983SOyrzGTNmeLnY+4LJX96MSjteo0tpLzX6Glvtk/Gnc9vY4ibINZpplpDPScbll18ey5V85UNfbWc5Gbbt2VhqHzRL0ucUx/Dhw+M48tpFt81LS7viEi/DREdjGHfTlxenSbeMgLa8MsvwuWEnTpxoxBY+1qkTLtcQfYkyVtYAZefR1+bWUd/sOE35UteiyNWJXqlr0ruq9a1OnXC5hqprhRuS/2xyb7fRodvA0M/PvrxVLWO3HPS1fBnnLnmrtsO3/2OazF7sh+qsF+LklkmoulGmH7LzOBj6IHHlmS/5TBq6vfPV8artk2Tauhj62aSuuuZjkXXNTUdeO2wz8dVPN66y7+Vuem666SZXZNtvPc+b8tExbylVN46sPLt+057D1J9cc801iXRotYq09zVXblYa2jL8nwtVddwuS8Mv1LO3kujmMVR/ksaD69UIYICqxo/Q/UsAA1TDZV+nAcr35b++ptfSdWbwWIOnWm/XNsCoE/UZoNyOWrNl5s+fX4mYK1OzjxYtWpQq056lpXQq3S+99FLCf1kDlGZKmQcHyauap0RiBviHPZVdRgYzgJ2WLN13DQ0YoNJo9cb1Ijowe/bs1gu+XvJ9TjN63DZCG4/nOe2zZOqWjlrmUi9TWlZAA/HSN+mXvfyPXjj0hWCWc/dz0guW0igjkQxoagO0L4U7o++qq67KNGzZcWoZPTvtWsZUg5KmDqkNVVx2fVF8M2fOtMW0nbtt3qhRo1rpVtr1p7J4/fXXW5zsJfbE3zjNyLLTpn289PKW5/TyYofT+TPPPJMXrJVnzbKyw2pJRtOP5AqwPNSlEy7XEH2Jkq3ytvMt3c1yA1Xf6uJKXWu2rhmdC1Hf6tKJuupaVr1y79XdBoZ+fnbTr3alahm75aDn+4ULF7pRpf5WP2O3bTrXrIeiru4ycNMRgplk1lUvJNstk4HohwZbHySu9EPN9kOh6lqd9a2uuqY0l3FuOvLa4SL1045fZWG/Z6idznovd9Nz5pln5r4fXHnllYm+QIagLOfGkZVn1+8NN9wQaT9bfRCtjw81y0erSOiDOLc/0vhUmnPlZqXBJyOUjvdif+LjwbXqBKRT7AFVnSMS+o8ABqiGy7xOA5SyokFe39czGszUoKXb2ZvftgFKe6Xoaytzzz5qUDZvOSsf0iyZ5557bur+TnpgsfdyUVqUFxmmFixY0IqqjAHq008/Tcgr8jWSLz/ddk17xbgzP8RKX7e6Bjs37bpvl3HWg64blt/dQ6CKDti50JdwtoHI6Ib2E9KDd56Twdue3WTCy1hj77um62pPtNRKmpNxWHXdDWdkph1lyNHMRi0JUtSp7dQLjmt4Uxzi4V5XfrIMZ1ltXlq67eunnXZay3jmfiFo/OglUy9xPidj4WWXXZao1yac8qEX4jSnr/nsGV4mnI5apkrtbVkXUieyuFbtS5QvvVDYeU4zQHVDfQvJlbr2cdvXuEYP6qpr0rfQ9S2kTtRd14q0I/poqqk2MMTzsy9PVcu403Jw06L+0O7b1a8W+QCryTIwaa7KzMgxx5D1QjI7LZMi7zSSX6QfGmx9kPJNP9Rce2fqRui6Jrkh61vddc1wyDuWTUeV+lnkvTwrPXqG9+1lq+dZe39f84yjo5Z0c593s+Jwn7ez/Nrx+M71cUbaahxZct00pJVhaB0Pqd9Kc6d5LNqfpHHhejUCGKCq8SN0/xLAANVw2ddtgDLZ0RJ7miWgztnX2euaBon1FYwGXe2XUD00ZQ346kGhrMuTqTSkOXvNeZMXpU9L6cmVMUCpszYydMwa/E5LTzde10OjnS/7fPz48blJNgM84po2KyZXCB4GlEBVHTCJ982aMfqkfZ6KONV37bM2cuTIluFGemUbknX9qaeeyv1Kz7cMp0mLjjI0ybCudOmlSmuf68VAA2adOnHULCU7vbbxXl/dyQg/b968zCjy2jw7H75zzWB85513Uuu1wqj99rkJEyZkhlPbn+bSXk5NGjXTrBMXSifyuFbpS5SvIgN/8tct9S0UV+pa83VNelRHfQulE3XXtSLtyEC0gVWen315qlrGVcrBTY+9/2nRZb0GogyqMnPzrd+h6oWRlfWe1EQ/NNj6IHGlH2q+H6qjrpk6EuI9oEr7lzd+oHQWdWXTUbV+5r2X56XH9
7Gwu/2Cea43R/eD1bw47HYuz6+JQ+9Wmv2kdwnNdNRKE1mrK+TJtdOQVpZ16LjSFUK/leYqeQyp42n8uO4ngAHKz4WrEMgjgAEqj1Dg+00ZoOxka9aPZjjp6xI9EMkIowFafe3Zb05fgGrg2Pz1W/7T8qtOVF/gVBm4T5PN9f4moBcLs/yCls2TrvWCU/uolwK1m2oztVRemeWOeiGPA5XGbtaJogaogWKXFW83c81KN3Uti061e72qE9VyHS70YHt+1odb2mdCf+rf+tV1e73o1X6o27lm6Tv9UBadavd6WS+q5bzz0LyXd86u6ZDod9PEuyM+1VGW4OuOsiAVvUUAA1TD5TUQBqiGs0h0EIAABCAAgZ4k0KsDfz0Jm0RDAAIQgEAbAfqhNiRcgAAEIAABCHQNAQxQXVMUJKTHCGCAarjAMEA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJfPHFF9GkSZPiv48//rhgSLxBAAIQgAAEqhOgH6rOEAkQgAAEIACBughggKqLLHIHOwEMUA2XMAaohoETHQQgAAEIQAACEIAA/xnRBAAAIABJREFUBCAAAQhAAAIQgAAEIACBCgQwQFWAR9C+JoABquHixwDVMHCigwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgQAGqArwCNrXBDBANVz8GKAaBk50EIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQqEMAAVQEeQfuaAAaohosfA1TDwIkOAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEAFAhigKsAjaF8TwADVcPGvuuqq0aabbhotv/zy/MEAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9CBLteBFVdcsVVG//u//9vwaDLRQaC3CWCAarj8ttxyy2iXXXaJttlmG/5ggA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOoAPoQJfrwPbbbx8NGTIkOuSQQxoeTSY6CPQ2AQxQDZcfS/A1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIMASfBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPwYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPwYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGi38gDFAff/xx9NZbb0Uvvvhi9Pzzz7eOr732WvT+++9HX375ZRuBzz//PFKYvL9PP/00+uKLL9rC+y4Ulblw4UJvmnwyq15T3ufOndvi8cILL0RvvvlmpM6kF10aX5VRllN+08q5V1lk5bef75VtB7JYqd5/+OGH0YwZM1pty7Rp06LZs2dHqr9FXZrOpumje/2TTz4pGlWrTVHb99xzz7X+VO9DuE6YipGbl05/+9pvN19Vy6poOTXZdrt55DcEIAABCEAAAhCAAAQgAAEIQKBuAhig6iaM/MFKAANUwyXblAFKg4bPPPNMNGLEiOgf//hH6t+pp54aXXXVVZFtqJCxKiuMfe+4446Lzj///Oif//xnNHXq1FSaZWWee+650S233BJNnjw5qEFKA7aPPvpodOWVV0annHJKWz6PP/74aOTIkdG9994blRngTs14QzdeffXVtryonJTH+fPnp6Zi0tOTvOEU9umnn04Nx43eIFClHfDlUHXiwQcfjIYOHRrrzQknnBCfqz24/vrrIxm481yZNsFuc8y54v3ss8/yomndV3pMOB3VXnXqqjD94IMPIrUxdlqqnNvttpufUGVVppxU/nW13W7++A0BCEAAAhCAAAQgAAEIQAACEGiSAAaoJmkT12AigAGq4dJswgA1Z86caNiwYYlBTg3WylB02mmnJa6bwc833ngjJqGv8f/1r391NFB62WWXeQ03VWTKiDZv3rw4fZ2eaED22muvTeRfPC6++OLorLPOSlwXl9NPPz169913O42u0XAyMt1+++2RBoBNmZqjDHlpTrpy4403toW5+eabI93D9S6Bqu2Am/PXX389ksHa6JWO0je1NT5jrow8WTMkq7QJJg2SUcTdeuutiXSrbmelLU1mVaaaeWXSHuKYZoAKWVZVyilU251WHlyHAAQgAAEIQAACEIAABCAAAQg0RQADVFOkiWewEcAA1XCJ1m2AevvttxOzE0488cRowoQJiaXlZGw644wzEgOhtgHKINFMB3uQ9LbbbmvJ0QwAGYS0/JYMFa7RQ0aetKWhXJmjR4+OFi1a1JrJIGOPlsi68847E/EqDTISVVkSTuk955xzYrknnXRS9NhjjyUGobUk4YUXXhj7Ubxnn312pXgNy6aOmrlll5k5V1llOdswpzLF9TaBkO2ASGh2o9oSo08y3GpJO2MAUn3XjEt7ZpT8ajZUWltgCLttwh133NEKo3DmT8u7yaii2YkmDTqa+I0s31FhVd/tcDr3tXm+8OZaCKauAUpt0vTp01tLFyqvEydOTKRTS4PquvL50ksvtYzidj58Bqi6ysotp6babsOfIwQgAAEIQAACEIAA
BCAAAQhAYCAJYIAaSPrE3csEMEA1XHp1GqC0HJU9k0eGIQ1a+pyWgrIHlH2Dse6yVRoY9jkNPNuDojp/9tlnfV5bS3PZftOWwnr88cfbZGpwtlM3ZsyYhDwZn3xOA73nnXdewm8vLUWn8rb5mnMN3MtwmOYeeOCBOJyWKMT1LoHQ7cCCBQsSM5/UbsycOdMLSMu12e2K9E/LPGa5ou2MZMiAY8svYoBKW2Zy7NixWclK3AvF1DVAyahmu0ceeSSuh2KnJUhtN2XKlMR91wBVZ1m55dRU223nn3MIQAACEIAABCAAAQhAAAIQgMBAEcAANVDkibfXCWCAargE6zRAjRs3LjE4qdkwWc6eaeQzQLl7f6QZoBTHddddl4hbBh+fc2WmDWIqrLtslr6479TZS4RpGbGs5bfcL/2z8t1peuoKl7YXlAazNbid5h566KG4/PIMBmkyuN4dBEK3A1ra0RgyddQsxSznGlE021KzkNKc2ybk1TctKWnSU8QApWVB5d+dqaklSYu6UExtA9SoUaPaonfZuQYoBVC6Tf5dA1SdZeWWU1NtdxskLkAAAhCAAAQgAAEIQAACEIAABAaAAAaoAYBOlIOCAAaohouxLgPURx99FGmfJzMwqVkC+ho+y7333nvR5Zdf3vrz7XWkJaeMPB1lsEpzTz75ZMLvpZde6vXqyswaxHzqqacSMssMGNuRi4Odj+HDh9u32861JJbt/5prrmnz060X7FkK9kC18iMjnPTE5+ylv9Jmr/nCca27CIRuB7QspW24kfE2ayadaMjYZBt8pXtZOuW2CVntjOTPmjUruuGGG1p/WYYt+bUNPjJoa0lAu27rfp4LydROj2Z5uq6IAer++++P82AboOouK7ecmmi7XT78hgAEIAABCEAAAhCAAAQgAAEIDBQBDFADRZ54e50ABqiGS7AuA5SWibMHVjXYWtW5A45ZA8PuzBvtbeJzrsysQUwtT2XnSTMpOnHa/8mWo32espwGuG3/N954Y5b3rrpnG6A0k8vdM0ezR3wOA5SPSu9dC90OuEthah+4Is6diZOmd5LltglZ7UyRuG0/9913X1yXX3nllcg1amfNCjRyQjL95JNPIs021J/PGFzEACVDk5FhGwPrLiu3nJpou00ZcIQABCAAAQhAAAIQgAAEIAABCAw0AQxQA10CxN+rBDBANVxydRmgbrrppnigVcaTEMuouQOOWQPDL7zwQiJ+3/JSQu3KzBrEdPczSptVpeX0tKeV+XNnRej+8ccfH6fvtNNOy5zF4e4Zk7eUYcMqlBmdbYCaMGFC255b0o0ZM2a0ycAA1YakJy+Ebgc0+882xr788suFuLhGm6FDh0ZffvmlN6zbJqS1MzLY6M82ungF/uei6v3ZZ5/dSr/i12/XGK0ZoHkuNNOs+IoYoNLC111WbjmFaLvT8sJ1CEAAAhCAAAQgAAEIQAACEIBAtxHAANVtJUJ6eoUABqiGS6ouA5SWxrIHirW8XlXnDjimDQwrnnvuuScRf5rRxpWZNYgpA4qdp7T4NShu+/PtIXPllVcm/EyaNMmLR4PUV111VcKv9j7pFWcboLRUl9zNN9+cyI9mRSmftsMAZdPo3fPQ7cBJJ52U0J2i7co777yTCKf6qdk/Pue2Cb56PmfOnFieZigWcfZSmnfffXcc5IILLohlaXnBvH2kQjONE+I5qWKAqrus3HIK0XZ7EHAJAhCAAAQgAAEIQAACEIAABCDQlQQwQHVlsZCoHiCAAarhQqrDAKUG0DbA6Nw1MHSSTXfA0TcwLLma2XDxxRcn0vD88897o3Rlpg1iarBay/iZfGmgePbs2V6Zisv403H06NFt/twlAjWo7M7mUD7GjBmTK6tNeBddsA1Q48aNa6VMsz5OPvnkRL4effTRRKr12zDM2q8nEYgfXUUgdDvw2WefxTphdCPPWGOAaJaSCWOOWjrO59w2wdfOaMk5I6eoAUrtgC+MvSyf7mfpe2imvvzb1zo1QDVRVm45hWi77bxzDgEIQAACEIAABCAAAQhAAAIQ6GYCGKC6uXRIWzcTwADVcOnUYYD68MMP44FWDaiecsopQXLlDjj6BobV+GqPJDPQq6NmGOi6z7kyfYOYn376aeQuJ3XXXXf5xLWuFTFAyaNtZDHpveSSSyKl4YYbbohOP/30OB8nnHBC9PDDD6cuG5aamAG+YRugxo4dG6dGeTF51lE6Yu9B89hjj8X3swbkY4GcdB2B0O2AlrS0dUZG4DKG7RNPPDERPs1w5LYJqouadag/hdHeRrasNDl2gagNMWHOPfdc+1Y0c+bMRLq0xF6aC800LR5zvVMDVBNl5ZZTiLbb5JsjBCAAAQhAAAIQgAAEIAABCECg2wlggOr2EiJ93UoAA1TDJVOHAUoDtfZAsfY9CeHcAUcNDGsGhJbh0v5MmpXgznzSTBstl5XmXJnDhw+PNDtJ8p566qnWUn72klcyBMmQkjXwXdQApTRp/yObVdq58qpZBb3mbAOUvQyiZqScd955ibzfcsstcfaeeOKJ+B4GqBhLT52EbgdceaorZdwZZ5wR65TCakk8n3PbhLQ6aa4XMUCpLTH+zVKUJm61JbaxWW1W2r5SLoNQbatJi3vs1AD1/9u7E5g5ijvv4+TlCkKAQIDYiCAIIkGQIAIogSRKYFlxLAli2WjJEhBHljMQ2GxCLkKCOQw+sTE2AQzG4ANjG4NjzGGDjY8YsA0Ym9tgsHEwl8H4RJh+9Zvd6lTXU93TM3083c98W3o08/R0V1d9qrr6mfo/1e3mU2VvZUlTV2495dF3t5JHtkUAAQQQQAABBBBAAAEEEECgOwUIQHWnPseuswABqJJrr4gAlAZkzWCrXvMaJHUHHO1j+N4rGKUASNLSSpqaWRV32y77GLo1n4Jh5kfBLN+iY99yyy2hlWZ0/OUvf2k8I0l51+92uQYNGhRoBkSdlrgAlMpgPxPHlFMBOS32gD0BqDrV+D/ymnc/4M4UUjC4lcUNarz55pve3d0+Qc8yUh+mHzcNtds0Aajhw4eH57LvuVX33Xdf+LnSfO2117x5y9vUexBrZbsBqDLqyq0n04f4XtP23VbReYsAAggggAACCCCAAAIIIIBApQUIQFW6eshchQUIQJVcOUUEoNzbL2kGUR6Lb8BRg8O9evVq/OgZTSNGjAgmTZrUmMWU5phumn379g00WDxs2LBAaduDmQoKJc2mSnM8s82aNWsisx769+/fmA1lPterZnfZA9fKi27fFTc7wt63Ku/tANQjjzzSJVvu7RLlrhkhCxcuDO0JQHVhq8WKvPuBVatWhW3CnJe6tV2aReeMG9CNO5fdPsG91afOXc22MXloFoBSwMlsqwCzb1myZEm4jbZ98MEHfZsFeZt6D2KtbDcAVUZdufVUVt9t8fAWAQQQQAABBBBAAAEEEEAAgW4TIADVbfQcuOYCBKBKrsAiAlDuYK8CRJ9//nnmkrkDju7AcDsHcNO0nyOycePGLs9+UqDIflZRO8fUPnfddVdkwNnM/HHTUxBKQSczgK1XzQ6qy2I
HoB5++OEu2daMLjfQp+c/Pfvss2GZCUB1YavFirz7Aff5RzoXPvjgg1QWmrlon0N6r3PLt7h9gq+fWbRoUZheswCUbtlpH1uBcvdHt92ztxkwYIC3z8zb1Fd+e127Aagy6sqtp7L6btuH9wgggAACCCCAAAIIIIAAAgh0lwABqO6S57h1FyAAVXINFhGAUhH03+j2gGqaW9c1K7o74OgbGG6Whvu5m6Y9iKltFYRS0Mkuy/jx491kWvpdz3KyZ2Pcdtttifvbz0NSPkaNGpW4fZU+tANQDz30kDdrs2bNivhef/31wVNPPRWuIwDlZavFyjz7Afe80bmgW72lWex2qP10+764oLjbJ/j6mfXr1wd33nln48d3Sz2TJ83mc/sPuy9Jeq98+JY8TX3p2+vaDUCVUVduPZXRd9s2vEcAAQQQQAABBBBAAAEEEECgOwUIQHWnPseuswABqJJrr6gAlG5XZw+uvvjii5lL5g44+gaGWz2Im6Y7iKn07NkOpkwa0G53eeuttyI2DzzwQGJS7nNfdPuvuiz2wH9cAEqzOtxZXnrelbEmAFWX2u6az7z7gaFDh4btQu1Dgco0i9qQaU961QzEuMXtE7L0M6+++mp43JtvvjlQ2nE/kydPDrdVHmfOnOnNYt6m3oP838p2A1Davei6cuupjL47yYrPEEAAAQQQQAABBBBAAAEEEChTgABUmdocqycJEIAquTaLCkBplpA94Pvoo482LZlmJKxduzaIe66LO+CYZWDYZMZN0zeIqXzdcsstkfJoEDhuBoVJO+7VDWhNmDAhbtPGes22sC11e666LGkCUCrLK6+8EimjXV4CUHWp7a75zLsf0PPd7LahZ6SlWdzb4M2bNy92N7dPyNLP2OWfM2dO7DH1gX2uqIxxz4uy09R2efStcRnLEoAquq7ceiqj745zYj0CCCCAAAIIIIAAAggggAACZQsQgCpbnOP1FAECUCXXZFEBqKVLl0YGivv16xdopkvS8vrrrzf2ufrqq73PdnEHHLMMDJt8uGn6BjG17WuvvRYpjwZ+Fy9ebJJp6dVNa9y4cYn7r1y5MnLstIPuiYmW9KE9qB43A8pkZezYsZFymkADASgjVL/XvPsBX6BSzxtKWnQbPAVtTXvSa9Kzo9w+od1+RoFj3erPHDfpmMq/+kf3WVC+suVtmmSXJQBVdF259VRG351kxWcIIIAAAggggAACCCCAAAIIlClAAKpMbY7VkwQIQJVcm0UFoFQM3XLKDL7qVTN/khYFYsz2uk2du7z99tvh59ruvvvuczdp+Xc3zYkTJ8amMWLEiMjxb7jhhkDPOnEXDRor2GJ+NBBrL3oelimnXvXMIz1rKm5ZuHBhZPt2B8Tj0i9yvR1smzJlSuKh5KLgo22j9wSgEtkq/2Ge/YCCSQMHDoy0kWYzgJ5//vnI9nGBCgPp9gnt9jOa8WTa8pAhQ0zyia+jR48O99G+CgD5ljxNfembdbNnz47kp5Vzsei6cuspj77blJtXBBBAAAEEEEAAAQQQQAABBKouQACq6jVE/qoqQACq5JopMgDl3mruuuuuCxRk8C3uf8v7AlBmhpQZ1L377rt9SbW0zk1zzJgxsfu7z2JSPqZPn95le3fA23eLvdtvvz0ysDtjxowu6WjFmjVrAgW6TJn1qv/8r8uyZMmSMO9JA8SmPHKwy6r3rQx6m3R4rY5A3v2A3aZMW5k/f763wJp1pH7HbKf369at825rVrp9Qjv9jALT9qyrtGkomGbyqte42/DlbWrK7r66+Zk7d667SeLvRdaVW0959N2JheFDBBBAAAEEEEAAAQQQQAABBCokQACqQpVBVmolQACq5OoqMgClW0ppUNAeUO3bt2/j1nXmdnwaDH7yySeDXr16RbbzBaDcwVDNllGAJsviptmnT59g06ZNsUnas7RULuX7pZdeimyfJgClIJIsbJupU6eGg+PyWb58eZdZZNqmTssDDzwQllGBNF0ckxZ9PmjQoHAf+RCAShKr/md59wMqsc4D+9y58sorAwUvdV7p2WxqR2o3Op/Ndtdcc03jVprNxNw+Qf2M71Z4Selotp85rl579+7tnS3ppuH2l9r32WefdTdr3K7P3TZL39rlAEHQMNTMLbscuv2n6bt9+/jWFVVXbj3l0Xf78s86BBBAAAEEEEAAAQQQQAABBKooQACqirVCnuogQACq5FoqMgClomgw2DerRYEbDcrag5v2ezsA9cknnwSaQWB/bt7r2VLTpk1rWS0pzcGDB8c+30kzuDSQbY6vV5VFgam1a9c28pEmAKUNNajt3kpL6Wkg1b0VnQbYNZArzzosq1evDu64446Ik8p20003dQnYueVRQM/2JQDlCtXv9zz6AbfUegabPbvJtBmdOzpfzO96VT+hZ6klLUl9gm6Tmeb8U0B91KhRkWObfPTv3z946qmnvFl49dVXA3dWpNlP/YsCue5ShKk5hmaU3Xjjjd5yqM9SH9fKkmddJdVT1r67lTKxLQIIIIAAAggggAACCCCAAALdKUAAqjv1OXadBQhAlVx7RQegTHF0i71777030AChGVh1XzWYfOeddwb6z3Z7ZpOCGe6Asr1v2uermLzotVmaykPcYj/bxeRD+TOzJNIGoJS+ZlvpNn4jR45sPAvKpGdeZaJn0Oj2f3VaNBPFlMF9nTlzZtOimEF8ub7xxhtNt2eDeghk6Qd8JdR5PGnSpGDYsGGNQLDaix0g1voFCxakmn3UrE9QO242++edd96JbffaX/2bb3niiScS91M/ELfkbarjaKaTe97av48fPz4uO7Hr86qrZvWUpe+OzTwfIIAAAggggAACCCCAAAIIIFAxAQJQFasQslMbAQJQJVdVWQEou1gbNmwINMNp2bJljVtmaVaRZg7UZXaPXZa83yuIpYDL22+/3Qhobd68Oe9D1CI9XUQ106HZ83pqURgy6RXIux9QcGjVqlWN58y9//77TW/36M1UzVfmbVoUB3VVlCzpIoAAAggggAACCCCAAAIIdIoAAahOqWnKmbcAAai8RZuk1x0BqCZZ4mMEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBGAECUDEwrEagiQABqCZAeX9MACpvUdJDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSKEyAAVZwtKfdsAQJQJdcvAaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggAACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVc/AaiSwTkcAggggA
ACCCCAAAIIIIAAAggggAACCCCQQYAAVAY8du1oAQJQJVf/EUccEZx66qnBs88+yw8GtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2g4m1g4cKFwcEHHxxcdNFFJY8mczgE6i1AAKrk+tMMqMMOOyzYYost+MGANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA1UvA1stdVWjTo65ZRTSh5N5nAI1FuAAFTJ9XfSSScFZ555ZtC7d29+MKAN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoAxVvA9ddd11w/PHHB5dddlnJo8kcDoF6CxCAKrn+eAZUyeAcDgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyCDAM6Ay4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQwCBKAy4LFrRwsQgCq5+glAlQzO4RBAAAEEerzA2rVrA/2sX7++x5eVAiJQBQHOuSrUAnlAAAEEEEAAAQQQKFOAAFSZ2hyrJwkQgCq5NrsrALVp06bg9ddfD1544YXG64cffhh8/vnnJZdh2H03AAAgAElEQVS+eof77LPPgnfffTf4+9//Xr3MkSMEchL49NNPg5dffjl49dVXG219w4YNOaVc/WQ6ueyqHdX74sWLGz9r1qypfoW1mUNdW7fYYovg//2//9fo09tMpja7ffTRR8Hbb7/dOJ83b95cm3wXndGe/LdO1eo86znXqX1zp5bbnPudck0y5e2016r1U1Xx59pUlZogHwgggEB2AQJQ2Q1JoTMFCECVXO9lBqBeeuml4MILLwy+/OUvB1/4whcag3MaoDM/ystxxx0X/OlPfwr0hdgsCsbstttuTX/22Wef4JBDDgl+9rOfBbfeemuwcuVKk0SX11bTPP3004OhQ4cGb7zxRpe0sq5YtGhRcM455wSHHXZYsPXWWwdbbbVVw2T33XcPTj755GDixIlZD1Hq/ipHXH0deeSR3ryovuL2GT58uHcfVtZL4L777gt+8YtfBIcffnijnZvzXq/qDw488MDgv/7rv4Lp06dHCvbf//3fsW1jjz32iGxrflEfEteerrjiCrNZY7A8bjt3/b777hsceuihwU9/+tNgyJAhLfUF7ZR93bp1wV577RVbDjd/Sb/vueeeLZfZ9Kd59n0rVqyI9P39+/cP8xX3prsdklynTp0al+3ADIarffv+oaDVa1Cz61qr6eVRrzNnzgz+7d/+Ldhxxx0b1yxz7dpyyy2DAw44IBgwYEDwwQcfxBrpA/3zidpakvPee+8dHHTQQcEJJ5wQ/PnPfw4mT54cfPzxx4npmg+L7D/MMdzXdv7WKaqdu3nL+nsedZ62rca1CfXFvqXZOefbp52+ueg21YpPu9eldspdVBtNW16uSc2/ByVdk3zt37eu1fpodm3SMVpNM+v1KY9+imvT/34PL+q897W9LOvyqPO07bTVa1OWcrEvAgggkEaAAFQaJbZBoKsAAaiuJoWuKSMApT9ezzjjjMjA47bbbtsIFu2yyy5hAMoekF66dGlYbu3/hz/8IdA+9jbN3m+zzTaNwSrff2O3m6b+m/2iiy6KBMjCjLb4ZuPGjUGvXr0a/yFvyrLddtsFP/jBD4L9998/Utbf/e53gWZH1WG56qqrAg1AmjKZVw0i3n777d4iPPfcc8HRRx/dZZ+vfe1rwcJnFnr3YWU9BDS76bzzzovU7c4779wIOJkBa9NG9PqrX/0qUrBHH300OOKIIyL7azulobbmW6ZMmRJ885vf7LLPd77znUBfUs3Sbj+g46svUF6TzsssZdd/7douWd7L2SztljmPvk8BJ7sc3/3ud022Yl+r5mDnf9SoUbH5bjYY3m49xF3X2k2vnXrVLF1dp2wLpfONb3wjDEaZz5TfsWPHxjop37///e9bvr7vuuuuwejRo5vOnC6y/3ALpbK0+7dOUe3czWO7v+dd5+38TWfalF59S7Nzzt4nS99cdJtq91yWS7PrUpZyF9VG2y1vO32X3Qb0vpOuSW7Z435vtz7irk06TrtptlrHefdTXJuCoKjzPq79tbo+7zov4trUapnYHgEEEGhVgABUq2Jsj8D/Cvi/VaJTmEDRAajVq1cHmvViBg50vJtvvjlQJ2kW3Y5J/8VpttGrHYAy2911112RbS6//PJAQRylpf9aevrppxszrPSFxU6rT58+Jokur26av/nNbxrP7dCX9FdeeaUx+0jBHzfNSy65pEtara74j//4jzCfCjxp0NwOlj300EPBF7/4xXCbc889t9VDdNv2Y8aMCfNt6uLuu+9OzI9ux2a21asGOHl+SiJZ5T/UF9dvfetbYb1qlpNmB5hFwRvNLLTPLzcApW11nmvA2W4fGgRMWvQ8EHsf9TH2zEp7X7cf+O1vf9v40q3j6jYl6l/mzJkTnH/++V2Cq7/+9a/tpML3WcvufunX4P68efMa/ZP6iXHjxkU8Zs2a1eg/tN9jjz3WmD1lvOwAlMmgW+ai+z7dYvWrX/1qJM+a+fbOO++YLHlfy3ZQUFO+9o/a6apVqwJdS4ypXrMEoExh3XrIel1z08u7XpctWxapR81o1owkXevNot+13ra66aabzMfeV10f7O01U1Hnnq7vurXfk08+Gfzxj39szJSyt9PfF0uWLPGmaVYW3X/oOFn/1im6nRuLdl6LqnO3rfrOPQ1cP/PMM8H3v//9SPvwlSNtAErWWa9LZbQp1yfrdSlruYtuo2558+673DbTadckt/zNfnfrI+u1Scdz08yzjovqp7g2Rf8ZKu+/RZu1w6TPi6pzt51mvTYllYHPEEAAgTwECEDloUganShAAKrkWi86AKVb6pnBIg2Czp0711vC999/P7BnQ/kCULpPu0lLr1dffbU3LQ1M29tpcDtuFo2bpm7v4
1t0Gzx7kFzpNxsA96Vj1mmAzs7j/PnzzUeRV/3nuNlOx9dzs+qy6D/BTd71es011yRmXQP8Znvd3kBtgqXeAgMHDgzrVDOWdEsT36IglKl7XwBK+/zoRz8Kt9G2Gphstpx00knhPpqFFbe4/UBc36L93cEI9WsKTrhL1rLbg32aUWgH7nSse++9NyybPGbPnh3Jgn0++QJQbpmL7vsUPDN1bL+OHDkykm/3l7Idkupeefv5z38eliOPAJRbD3HHT3tdc9PLs14ViLNnFu6www5d2qWpP12r9LmpawUbFUiIW9x8xznonxLOOuusMF2lv9NOOzW91V+R/YfKlPVvnaLbeZx7s/VVqHPlUYFquz358p02AJW1bzbHLrpNpT0nlJ8016Ws5S66jbrlzbPvMnVmv3baNckue5r3bn3E9clpr006pptmXnVchX6Ka9P/3lK/1b9F07RF3zZVqHPlK821yZd/1iGAAAJ5ChCAylOTtDpJgABUybVdZADq/vvvjwwS6bkpScu1114bbu8LQOn5IWYwS69xX4Z0DHuQUNvqi7dvcdOM+zKkffXff/bx9ZyZdhb9Z/2XvvSlMK0f/vCHscnoD2w9B8YcV8/QqcuyfPnyQLfkMHlXgDEpaHD22WeH29btuVd1qZOy86kZT6b+dU//uOWTTz4Jn5kTF4A65ZRTwrSUZprltNNOC/f5n//5n9hd3H4gqW9RIrp1nCmXXjXjyF2ylt0e7NO54S7NAlDa/thjj23k0xeAcstcdN9n+uR/+qd/isxi+fGPf+wWLfJ72Q7N6l7PATR1n0cAyq2HpOMbQ3N833XNTS/Per3tttvCsse1e7vy3DZ61FFH2R9H3rv5TnLQjnoOj3HQ6wUXXBBJz/2lyP4jj791im7nrkfa36tU57o1kqlzX/7TBqCy9s3m2EW2KR2j1XOi2XUpa7mLbqNuefPsu0yd2a+mP+2Ua5Jd9jTv3fpI6pONpTk/fdcmHdNNM686rlI/xbUp+s9Qqvekv0XTtEXfNlWq82bXJl/+WYcAAgjkKUAAKk9N0uokgXSjip0kUnBZiwpAKXCiZ/6YLyP6D+Wk4IOKqVkExxxzTPAv//IvwcqVK7uUXP9lZNLTa9KXIXeGkW8AVwdw00z6MvTXv/41cvwTTzyxSx7TrLAHMFUO/edq0nLhhReGx916660btyVK2r5Kn+k2SnadxQUhFXA0M8z0n+Qs9RdQoFWBD1P/upVl0jJixIhG4Hj8+PHezf7zP/8zTEttJc1iz8K77LLLYndx+4GkvkWJXHrppWFeVL577rknknYeZbcH+/TAeHdxB/fd/zrV9rfeemsjn74AlFvmIvs+9f1mcFi3UdNz9Ey7UJA66dpQtkOzuperbhv0k5/8JHjiiSfcagl/N+VVOXULx7jFrYek46e5rrnp5VWvui3tjjvuGNbbV77ylbgihev1Zcj+ZwtZPPXUU+Hn9hs330kO2k+32LT/OUNpx82w1vZF9R95/a1TdDu3rdO+r1qd6za9Ou/041vSnHN59M3m2EW1KZN+q+dE0nUpj3IX3Ubd8ubVdxlP+7UTr0l2+dO8d+sjqU9Oc23SMd0086jjqvVTXJu6BqCS/hZN0xbdbapW582uTW7++R0BBBDIW4AAVN6ipNcpAgSgSq7pogJQGggyA4x61eyhrIv7xSXpy5BuV2Uf/3vf+5738G6aSV+G3MDRoYce6k2z2cqpU6dG8jZjxozEXa6//vrI9rrndV0WPRfDHrTUbdg028VdTj311EYZFVhwbzXmbsvv9RDQAI99Diq4nGUpcrDP7QeS+haV4eKLL46Uzb2FZh5l1zNGBgwY0PhZs2ZNF7o0ASg9nFlpDBo0qMv+bpmL7Pvs51UtWrSo8bw7u22oT4xbynZoVvdx+XTXpxkM1z5uPSQdP811zU0vr3qdPn16pM3rnwvSLL169Yrs17t3b+9ubr6THEwC7m3H9JyguKWo/iOvv3WKbudxLknrq1jnSflNc87l0TebPBTVpkz6rZ4TSdelPMpddBt1y5tX32U87ddOvCbZ5U/z3q2PpD45zbVJx3TTzKOOq9hPcW2KtrCkv0WjW6b7rYp1ni7nbIUAAggUI0AAqhhXUu35AgSgSq7jogJQ+lJhDzA+8sgjmUvmfnFJ+jK0YMGCyPHzmAGlgVO7THH/hdusoJoFZKfjDl67+48ePTqyfdJ/ebv7VuH3wYMHR/J/3XXXRbJlD+BpZgFLzxHQw4pNW1dw8fHHH2+7cEUO9rXSt6gAGug25dKsRA3MuUueZXfT1u9pAlC+/cw6t8xJA0FZ+z7zfJyvf/3rjcPrP4TlZgx1+552l7wdkq4rreQxzWC40nPrIen4aa5rbnp51atmEJr60muz65ax0rXf3i8uSOTmO8nBpP3ee+8FeraUSV99jO8fHLR9Uf1HEX/rmPLZr1nbuZ1W2vdVrPOkvKc95/Lqm4tqU6aMrZ4Tza5LeZXb5M99zdpG3fLm1Xe5+dTvnXhN8jkkrXPrI6lPTnNt0rHcNPOo4yr2U1ybklpW9s+qWOfZS0UKCCCAQPsCBKDat2PPzhYgAFVy/RcVgDrssMPCQSENDn3wwQeZS+Z+cUn6MnTLLbdEjn/jjTd6j++mmfRlaOzYsZE0446vL2Jf/OIXwx/3P74nTJgQSWfKlCnevJmVM2fOjGyvL/l1WnSrhD333DMsg9qcaQ+6Lcy3v/3txme77rproFu8sPQcgUsuuSSsd/UDqvu4W3A1K3WRg31uPxB3biuP7n9ennXWWd6s51l23wHKHOxL2/f58vnmm2+GQYJ+/fqFm+jZdyZwsNtuuwXqC9pZ8nbw1f3nn38eHHDAAcH2228f+G516Mt32sHwVtpemuuam14e1zSVzx68Vr19+OGHvmJ3Wadb65p61qtuuehb3Hz76sG3n2Y32+nH3RaxqP6jiL91fOXM2s59aTZbV4U618wdPaNHs6f1XMmkJe05l1ffXFSbMmVs5ZxIc13Kq9wmf+5r1jbqljevvsvNZ6dek1yHZr+79ZHUJ6e5Nul4bpp51HEV+imfJdcmn0o+66pQ561cm/IpNakggAAC8QIEoOJt+ASBJAECUEk6BXymL+x65lLeyw477BAOCu2yyy65JO9+cUn6MqQZT2kGpdw0474MaXD0hBNOCNPUIJrvOVUqqBswcmf1vPLKK2E6yqMboHKx5syZE9k+Lpjm7lel393bUeiBrVrGjBkTlm3kyJFVyjJ5yUFAAz0KLNrnomYp6HlQaQewTTaKHOxz+4G4vuWxxx5rDIaa8hxyyCGxzy/Ks+zGwH4ta7Cvlb7Pzp95b99CVLcxNYs7YPX000+bj1p6zdvBV/cKapg6V/+eZkk7GJ627emYaa5rbnp5XNN0bLs8OodbCRjafw/IUQM37uLm21cP7j76/YILLgjrRmnr1lq+paj+wy5bXn/r+PKftZ370my2rgp1/uCDD4b12+z2w3Z+k567llffXFSbMvWS9pxIe13Kq9wmf+5r1jbqljevvsvNZ6dek1yHZr+79ZHUJ6e5Nul4
bpp51LF93nNt+ketcm36h4X9znbJ8vdIK9cm+/i8RwABBIoQIABVhCppdoIAAaiSa1l/uOcdgPr000/DAQP9cbf//vvnUir3i0vcl6G//OUvgb6E6Nj6OfbYYwM9KNy3uGnGfRmyv7Aqzb59+/qSa6xrFoBSXrbddtswf/rvf80SilsUwDJl0evw4cPjNq3sepXZ/o81BfD00FbzgPrDDz+8pQHNyhaUjHURePLJJyO3WzNtWX1P//79vYPRXRIp8BZaOpbbD/zrv/5rcM899zR+dL79/ve/D44++ujwPNRtv84999xAszuSlrzK7jtGWYN9rfR9bj41c2jfffdtuLm3XnOfqXfllVe6u6f6PW8H5dPUvWZ+9enTJ1BgwbTb7gpApb2uuW05j2vapk2bwvLLQUHlVpavfOUrkf1XrFjRZXc333HXd3fHP/7xj5G0Fdj0LUUEC4r6W8eX/6zt3Jdm0rruqHO1Vd3OVD8KUi5cuDDsP9Tu8gpAqdx59M1FtCm7TtxzIo/rUh7ltvNov8/aRt3y5tF32fnT+06+JrkWzX536yOuT057bdLx3DSz1nF39FNxDq4n1yZXJJ/fu6POs16b8ik5qSCAAALxAgSg4m34BIEkAQJQSToFfFZEAEr/eWoG6/T63e9+N5ecu19czJcA/Sf2W2+91bg11i9+8YvIsRXc0X5xi5vmhRde2NheAx16xsX48eMjz3vRrfUGDhwYl1xjfbMAlDY69dRTI/nU7+5/heu/Vc8444zIdvK8//77E49f1Q+nTp0aKYv97I52b8tW1bKSr6iA6tcEIuy+Qe8VhJw8eXJ0B89vRQ72uf2Am0f399NPPz1YvXq1J5ddV+VR9q6p5v8MqDz6Pjeful2dsbvtttvcjxu3tTOfK0DdzpL3oKfJT9xrWQGodq9rblvOo17da/pee+3VUlUdeuihYTuQ65IlS7rs7+bbXN+7bOisuOGGGyJpDxgwwNnif38tov9wXfL6W8dXgKzt3Jdm0jq3bGXUedw5Z9bnGYBS2bP2zUW0KbtO3HPCOMS9pr0uZS23nUf7fdY26pY3j77Lzp/ed/I1ybVo9rtbH6ZPbvfapOO5aWat4+7op4xDMz+uTc2E2vu8O+o8rs8165tdm9orKXshgAAC6QUIQKW3YksEbAECULZGCe+LCEDpDzHzR5ledR/sPBb3i4vS3n333RvPlLCPZ95rZtczzzyTeGhfmmZ/9/Wiiy5KDGaZAykYpi8e5sc3YLlmzZrg4IMPjjjpOUkKRF166aWNWWn2LC47L3PnzjWHqt3rkUceGSmzynXOOefUrhxkuHUBBVg1c1B9jt2ezXs9VDhpKXKwz+0HNAvmiiuuaPz88pe/DM4888xgv/32i+Rbwei423255chadjc9/Z73YJ+pB99r2r7Pzef555/fMFNfpodyu8vll18eMbVv0eduG/d73g4/+MEPwrrXfzCfdtppkWuMrz/35c1u5xowiVvctif/LNc1X3q+OtW6tPXqzlbbZ5994orjXX/EEUdE6nnp0qVdtnPznXaQTwEnu3y63atvKaL/KOpvHV/+s7ZzX5pJ67qjznXOfPnLX2786LlPdr3qfbNBvrTnnF3uLH1zEW3Kzpt7TuR5XcpSbjuP9vusbdQtr1v/9u9p+y47f3rfydck16LZ7776yHJt0vF8adr1ar9PU8fd0U9xbYq2nKznfTS15r91R51nvTY1LxVbIIAAAtkECEBl82PvzhUgAFVy3euPqrxvwacvtvaXiHb/s92l8H1x0a3sttxyy2CrrbZqzLA4/vjjg4svvrjxn63u/r7f3TQ16KEv+fqPbXswQ+X59a9/Haxfv96XTFvr9EBtDbbYVvZ7zd669dZbG8e11+sZUnVd5s2bFynvdtttlyqoV9fyku+uAgpE6LaSOmftdq33SbMLixzsc/uBuAGGGTNmBNtvv30k363cErPdsndVzD8AlXfft3bt2kBBOtXrSSed5CtC5L/Rtd3QoUO92yWtzDr4kabuFWg0bbWMAFSW65pbnjzq9d133w3LLwflr5Vljz32iOzve/6bm++4c9A9rnubo1mzZrmbNH4vov8o6m8dXwGytnNfmknrqlDnaid28LKIAJQxaKdvLqJNmfzoNe05keW61E657Tza77O2Ube8efRddv46/ZpkW6R579aH6fur9J2rCv1UnCXXpjiZbOurUOetXpuylZi9EUAAgeYCBKCaG7EFAj4BAlA+lQLXFRGAUnZ33HHHcMBJQZY8FvfLUNoBqqRju2na9yNXsOlnP/tZWA59+VKwLu55UknHiftM97KeMGFC8Nvf/jb46U9/2viP9CFDhgSPPvpooC/LWn70ox+Fedhpp51yPX5cvopcbw9GnnjiiUUeirQrLKBAqmZHmoF981rmALLhcfuBpL7l5ZdfbjwDx+RXfai+ELeytFp2X9p5D/bl3feNHj06rNu99947+PGPf9zlx+7b5HnMMcf4ipq4Lm8HX93rj/qdd965UZ6iA1C+4ycCOB+6bTmPetUzCk17N68KvqRZ9Dwf+3armg2n57C4i5vvtA4///nPI3nTrWt9S1HBgiL+1vHlP2s796WZtK4qda7gimlzRQagjEUrfXNRbcrkpZVzIut1qZVym/y5r1nbqFvePPouO4+dfk2yLdK8d+sjbZ+clLabZtY6rko/5Ssz1yafSvZ1VanzVq5N2UtNCggggECyAAGoZB8+RSBOgABUnExB64sKQB1yyCHhoIEGnPQHY9bF/eJS9Jch5VcPGf/6178elkUDIbq1XlmLgl277LJLePxzzz23rEMXdhz9V6sZUCIAVRhzLRLWIPbhhx8etge1i9/97nfevLcz2KegrmlrSbf4a7VvOe+888J0lb5mKra6tFJ2X9pFDvbpeFn7PgXrjX3aV10rfLNjfOU36/J2iLuu6Lapypv+wE+z2DNoW7kFX9zx0xxT27ht2R7g0+ft1qtdHtWnbjWbZnn99dcj7UDPfPMtbr7TOnzrW98K01f7iaufovqPIv7W8flkbee+NJutq0Kdq70qUH3cccc1nS1t5zfpnGtW7rR9c1FtyuSv1XMi63UpbblN/tzXrG3ULW9efZfJZ6dfk4xD2le3PtL2yUnpu2nmUcf2ec+16R/6XJv+YWHe5fn3SCvXJnN8XhFAAIGiBAhAFSVLuj1dgABUyTWsP9zzvgWfinDyySeHg0L6QtDsWUxpiu1+cSnjy5Dy9fjjj0fKottKZRncSFNWs820adMix37ggQfMR7V9JQBV26orJOOaPbTNNtuE7fzb3/629zh6VpgJZGhGhW8WhbujZt2YfXr37u1+HP7eat9yxx13hOkqfT0nqp0lbdl9aRc92Kdjttv32ffoP/bYY4Pnn38+9udPf/pTxFLlamXJ2yGP64rybw+KJV0vWm17zWzc9NwBPu3fTr2qHs25pFddm9IsTzzxRGQ/zSr2LW6+09SDAoL27Kp9993Xl3RjXVH9RxF/6/gKkbWd+9Jstq6KdZ6U57TnXFIa5rM0fXNRbcrkodVzIo/
rUppym/y5r1nbqFvevPou5ZNrkltbzX936yNNn9wsVTfNPOq4iv0U16ZmLSHb51Ws82wlYm8EEEAgmwABqGx+7N25AgSgSq77ogJQel6SPVh1++23Zy6Z+8WlrC9Dyrhm6tjl0a0VyljOOOOM8Lh6JpT+Q7XuCwGoutdg/vk/8sgjw3au5wv4lquuuircRudi0qC+2f+oo44K97nzzjvN6i6vrfYtU6dODdNVXrLMTExT9i4ZDvJ/BpRvIEjHbafvu/baa0OfMWPG+LIfrnvppZfCbWV5+umnh5+leZP3oGea64puvaLnVc2ZMyc2i2kHw1tte7EH/L8P3PTyqtcrrrgiUk8XXHBBs6w0Ph8xYkRkP91y1re4+U5TD+PHj4+k3bdvX1/SjXVF9R9F/K3jK0TWdu5Ls9m6KtZ5Up7TnnNJadifNeubi2pTJg+tnhN5XZealdvkz33N2kbd8ubVdymfXJPc2mr+u1sfafrkZqm6aeZRx1Xsp7g2NWsJ2T6vYp1nKxF7I4AAAtkECEBl82PvzhUgAFVy3RcVgNJ/vNsBG32hbbYsX7688WyVvfbaK3j//fe7bO5+cSnzy9Bzzz0XKY9u9fPiiy92yWOeK/72t79F/rt77NixeSbfbWkRgOo2+lIPrFmPml15wgknBHrWWdKiwWzTXxx44IHeTd2B7Kefftq7nVm5efPmYPfddw/T1XPV4pZW+xYNdJv86lUDkfaSd9nttM37sgb7Wu375K5nPskl7S311OcbT12TmrUXY6DXvB3SXFcOOuigRn6vvPJKOyuR92kHw1tte5GDeH5x04sb4Gu1Xhc+szCsI9XVDjvskKqe7P9U3mqrrWJvsejmu1k9qI189atfDfO06667hs9N9LAERfUfRfyt48t/1nbuS7PZuqrVebP8pjnn8uybi2pTppytnhNJ16U8y23y575mbaNuefPqu7gmuTWV7ne3Ppr1yWlSddPMo46r1k9xbZqdpilk2qZqdZ6pMOyMAAII5CBAACoHRJLoSAECUCVXe1EBKBVDA89mUFGvS5YsSSydbpFltl+6dGmXbd9+++3wc22nWzdlXdw0L7/88tgk9Z/5Jn961XMJfMtHH30UjBw5MvxZvHixb7PEdbp9w3777RceTzM50txyLDHRinxoP9Pq+OOPr0iuyEbeAjNnzgzb76RJkxKT/+EPfxhue9ZZZ3m3dW8bdsstt3i3Myvt42+99dbBe++9Zz7q8ur2A80GWuxb+6kv0H+72ot97DzKbqdt3tsPVFce5NPK4pY5j75Px3/kkUfCukx7e1c7AKmyPPzww6mLkrdDs7p/+eWXw/INHDgwNp9pBsO1s1sPWa9rbnp51avyeuihh4ZlVz3FzWYyKO7stqSyufluVg/XX399JC+akZa0FNl/5P23jq8cWdu5L80068qs86T2kSavac65PPvmItuUytvqOZF0Xcqz3HF1kbWNulxYpccAACAASURBVOXNq+/imhRXY8nr3frIen762nRedVxmP8W1Kdpusp730dTS/1ZmnefR9tOXjC0RQACB1gUIQLVuxh4ISIAAVMntoMgA1GOPPRYZINIDUTds2OAt4YoVKyLPzPAFoNzBrIsvvtibVisr3TQvueSS2N2VJ90eTANv5sf3TCb7i762+81vfhObpu+DV199Nfja174WHkOBqLQPfPelV6V1+k9UzYowfnqAPEvPFLDPg6OPPjrQH0a+RQ8Ftp8BddNNN/k2Cz777LPgG9/4Rth2NLvpk08+8W6rdvbv//7v4bbNbpHn9gNJAwz9+/cP01U7Vj42btwYyUfeZY8k/n+/DBs2LJKPiRMn+jaLXeeWOY++Twc75phjwnyl7aNV56ZP0OuZZ54Zm2/3g7wdkupex77ooovCvA4fPtzNTvh7msFwbezWQ1qz8EDOGze9vOpVh5k9e3ZkVu52220X6D+Rfcv69esD+yHoe+65Z+ItZF955ZXQVW0grh50bmswyG4v3/zmN2P/tjB5K7L/yPtvHZNn+zVrO7fTauV9kXXutlWdW1mWNOdcnn1zkW1KDq5P3DmhbZtdl/Isd1wdZW2jbnnz6ru4JsXVWPJ6tz6yXpt0NDfNvOq4yH6Ka1NyO8l63ienHv9pkXXuttOs16b4UvAJAgggkI8AAah8HEml8wQIQJVc50UGoDRj57zzzosMFOmLoAac7UW3BvnSl74U2c4XgNKzROxBJwVmNACQZXHT/M53vpOY3C9/+ctIHvbYY4/Gw43tnewv+spvmgCUZjzplnvnn39+ZDD+4IMPDnTLip6yuA+kVzAqzbN8ekr5O6kc7nmgWQJq5/bywgsvBApCmvNa7+OC1NpPt9Ez2+pVt/fS4IC9KCh1yimnhNttv/32gWatJC1uP6DbwtgzDpWn+fPnR4IPOr7arzv7Sccpouxu/t3/dm/1uXRumfPo+8aNGxe6y0cBwzSLG1DQrdrSzhzN2yFukFcBlSFDhkTKp/LGLWkGw7WvWw9Zr2tuennUq11GzTz6whe+EDrsvPPOwbRp0yLnrc43+/lrO+20U9DslpkKoNrntl0PmlWsNG+88cZA10R7u9NOOy3x1nt23ovqP/L+W8fOs3mftZ2bdNp5LarO3ba6zz77ZPqbLs05l3ffXFSbUj25PlmuS3mX29eOsrZRt7x59F1ck3w1lW6dWx9Zr02+Np1HHZvSFNVPcW0ywv7XrOe9P9V0a4uqc7ftZ702pSsNWyGAAALtCxCAat+OPTtbgABUyfVfZADKFOXmm2+OzHrRLCJNndft19zAkxlYsgNQml2gBwhrEMt8bl51i6cpU6aYQ6V+TUrz1FNPDfRcB9+iZ1NpwM0cX68qw6BBg8JZEO4X/WYBqO9973uR9JSmnq/Rq1evYPXq1b5s1G7dxx9/3DCyb79nDDXb67bbbss08FQ7kA7IsHseqL41cK36PvHEExuzI+zZcGoby5YtayrTr1+/QLfUM+1Haei5UbolpvoVO031bwsWLIhNM6kfUPrbbrttJCBsjqlXBcv0HB3fUlTZdax58+YFZ5xxRlh+O08XXnhhsGjRIl+WwnVJZW6371uzZk3Qp0+fLjNElbeTTz45mDNnTnh8+41me/7hD3/w7qd+ddSoUfbmkfdFOajO1RbtH/XHtrN5P3Xq1Eie7F+aDYYn1UM717Wk9NqtV6XpW6ZPnx7stttuERPNYvznf/7nRoDIPgf33XffQPUctyi4q+un7/quQKR+jLf9qnoaPHhwJFAcdwx7fZ79h52u3mf9W8dNT79nbee+NNtZl2edJ7XV73//+96gfpo8NzvnlEYRfXPebSrJR+dAO9elIspt6iRrG00qb7t9F9ckUzutvybVRzvXJuUgKc1261hpukue/RTXpq63wre9s573dlpZ3udZ50ntNMu1KUv52BcBBBBII0AAKo0S2yDQVYAAVFeTQteUEYBSATQAefbZZwcHHHBA5L+n7QElBXb0LBj9l6cCFmZR0Mf+j2t7H70//PDDzaapX5ulqYBX3DJ27NguA2IacDOzO9wv+s0CUO
ecc06Ynh6qrv+yj7u1WFyeqr5eM13cerN/1yCj78tk1ctF/uIFNLPtmmuuaQSb7MFlncv2+aw2r+e3aMAo7aIAtf7r0h5wtAe9dbsv/WekzvOkpVk/YNqojqN+Rn2YnlWn5zolzb4ssuw/+clPEs+lX/3qV0lFbpjY/qaM5rWdvs9+LpJJx36Ne66Xe+sWex+933///WPLUrSDmxff77NmzYrNn902fbM8m7W9Vq9rzdJrp17NNc1XyFWrVgW6fZICsSYgbM5BtS/9Z7tmH+iB7EmLgs4+W7NOgS3NYtx7770bs6ouvfTSYPLkyZG/EZLS932WV//hSzvL3zq+9LK2c1+a7a7Lq86btdUdd9yxrSw2O+eUaFF9c55tqpmPOTdauS4VVW6ZZm2jzcrbTt/FNamtU6ixU7P6aPXapESbpdlOHcddn/Lqp7g2/eN7uK81ZT3vfWm2uy6vOm/WTtu9NrVbLvZDAAEE0goQgEorxXYIRAUIQEU9Cv9NX2DTPig+r8ysXbu2cTuruXPnNmYa6Y/8uC8SeR2zyuko2KTbE9lBtyrnl7wh0I7ABx980JgJoVvZLVmypHHryqxBR936Ss+PU1+iNHU7vnXr1rWTvUL3KaLshWaYxDMLpBkMz3yQiiSgLz16ZsKLL77YOK+bBZ0qku3G7Kki+4+e/LdOFeu8nXMu776Za1JVzm7ygUDQePYo16auLYFrU1cT1iCAAAJ1FiAAVefaI+/dKUAAqmT97ghAlVxEDocAAggggECpAu0MhpeaQQ7WowX0RVTPldSMrFZ+6nzbX865Ht2kKVwLAp14/rfAw6bdLED77OYK4PAIINDjBAhA9bgqpUAlCRCAKgnaHIYAlJHgFQEEEEAAgXwEGAzPx5FU2hPQ8+nMrdpaeZ0xY0Z7B6zAXpxzFagEslAJgU48/ysBTyZSCdA+UzGxEQIIIJBagABUaio2RCAiQAAqwlH8LwSgijfmCAgggAACnSXAYHhn1XfVStuJA3ycc1VrheSnuwQ68fzvLmuO27oA7bN1M/ZAAAEEkgQIQCXp8BkC8QIEoOJtCvmEAFQhrCSKAAIIINDBAgyGd3DlV6DomzdvDl5//fWWf9avX1+B3LeXBc659tzYq+cJdOL53/NqseeWiPbZc+uWkiGAQPcIEIDqHneOWn8BAlAl1yEBqJLBORwCCCCAQI8XYDC8x1cxBayYAOdcxSqE7CCAAAIIIIAAAggULkAAqnBiDtBDBQhAlVyxBKBKBudwCCCAAAI9XoDB8B5fxRSwYgKccxWrELKDAAIIIIAAAgggULgAAajCiTlADxUgAFVyxRKAKhmcwyGAAAII9HiBMWPGBCNHjmz8rFu3rseXlwIi0N0CnHPdXQMcHwEEEEAAAQQQQKBsAQJQZYtzvJ4iQACq5JokAFUyOIdDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQyCBCAyoDHrh0tQACq5OonAFUyOIdDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQyCBCAyoDHrh0tQACq5Oo/8cQTg7PPPjsYNmwYPxjQBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtIGKt4GhQ4cGxx13XHDZZZeVPJrM4RCotwABqJLrTzOgDjvssGCLLbbgBwPaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANlDxNrDVVls16uiUU04peTSZwyFQbwECUCXX31FHHRWcfvrpwaxZs/jBgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6ANVLwNzJgxIzjiiCOCSy+9tOTRZA6HQL0FCECVXH88A6pkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkEOAZUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz9BKBKBudwCCCAAAIIIIAAAggggAACCCCAAAIIIIBABgECUBnw2LWjBQhAlVz93RGAWrduXbBy5crgxRdfDJ5//vnG6xtvvBF8+OGHweeff95F4LPPPgu0T7OfDRs2BJs3b+6yv29F2jQ3btzozZMvzTzXvfvuu8GSJUsyH/vjjz8O/v73vwevv/56sH79+jyzmJhWnK/qKGnRxTOunvUZS30EursNqC/46KOPgmXLljX6m1dffTXQeaVzOs2i7eLaYqvrff1amjywDQIIIIAAAggggAACCCCAAAIIIOATIADlU2EdAs0FCEA1N8p1i7ICUBqMfvbZZ4OhQ4cGf/7zn2N/rrvuuuCuu+4K7ECFglVJ+9ifXXnllcFNN90U3H///cErr7wSa9VqmoMHDw4mTpyYS1AoNlNB0Cj3ww8/HPTq1atR5pdeeilpc+9nslYaN9xwQyONq666KvSTzYwZMzIHtrwHtlYq4GXXi3nfu3fvYM2aNdaW0bcLn1no3U/7P/PMM9GN+a3SAt3VBhRonTVrVtC3b9+wLdnngPqIsWPHBgp6xy2rV68Oz0HTdrO82v1Z3DFZjwACCCCAAAIIIIAAAggggAACCKQVIACVVortEIgKEICKehT+WxkBqPfffz8YNGhQOBisgVwNCCsYcv3110fWm0Het956Kyy7Zhs8+OCDbQ0I33777d6ZP1nSVBBNM4vyXDRD4rnnnosMmsui1QDUp59+GowaNSpi2r9//6Bfv36RdZMmTUo9W6ydcirI9MADDwQa7Dd1al4VyItb1FbGjRvXZZ/x48cH+oylPgLd0QbefPPNQEFs09b0qjao/kfBT3u93itQ7Zs1+cEHH3TZ1t23ld8JQNWn3ZJTBBBAAAEEEEAAAQQQQAABBOogQACqDrVEHqsoQACq5FopOgCl27/ZMxGuvvrq4Iknngjs26kp2OQGSOwAlCHRrAZ70FdBFKWjGT8KCOlWWwpUu
EGP0aNHx874cdOcMGFCsGnTpkCBnFWrVgWLFi0K/vrXv0aOqzzceuutkTKYPLbzKqPhw4d3OYaO89prr6VOUg52OnJfvnx5o+z6zC3Hfffdlzrtdjd89NFHveVSXSUtqjNT16pTlvoKlNUGNONR/YtpNwpu6xafCjZrUZBXszDt/kjbajaUe4s8NwCl2YRLly5t3L5P286dOzc8jtJYvHhxIw0dS0HjPn36RD4nAFXf9kvOEUAAAQQQQAABBBBAAAEEEKiiAAGoKtYKeaqDAAGokmupyACUgjgDBgwIB2IVGIqb0aNbXtmDx74AlG6ZZQaX9Tp58mSvlgaZ7e30XrOLfIubpmZE+JannnqqS5oahM6y6FZhmtnlBszsvOs2ZmmX+fPnR/KowXd7URDKvgWijqtn5BS5qL7t8pj3w4YNawQO446t2wSabefNmxe3GetrIFBGG1i7dm1k5pP6EgVffYtuv2n3NWpnuvWjvbgBKM2sspe//e1vYfvU/npem7288MILkc8JQNk6vEcAAQQQQAABBBBAAAEEEEAAgawCBKCyCrJ/pwoQgCq55osMQE2fPj0yCKuZEEmLPUPHF4Byn9sUF4DSMcaMGRM59pQpU7yHdtOMC0BpZ80YMkERvWq2VLvLhx9+GJklceONNzZmhmlmlX2MpOfU2MfWrIyBAweG+9522232x+F7BeLs9DUbrcgl7jlAyoMG8eOW2bNnh/l0gwNx+7C+mgJltAHd7tFu15q5mLS4ASTNwNy4cWO4ix2AGjlyZLjevHH3dwNQ2k63GDV5IgBl5HhFAAEEEEAAAQQQQAABBBBAAIE8BAhA5aFIGp0oQACq5FovKgD1ySefNJ7zZAZgNeNAsxSSlvfeey+44447Gj+6/Z276FZ1Jj29KmAVt7izgXRrOt/ippkUgFqwYEHk+BpgbnfR7DA9m0kuCrZodpKWOXPmRI6RNgC1YsWKyH7Tpk3zZk23CLNnXOnZOO7tx7w7trnSnmFmD8ir/vRMHrUT32Lf4ixu9ppvP9ZVT6DoNqBgrt2m9Qwocz7FaSjY5D4Tym5ndgBKsx/dJU0A6vHHHw/PSQJQriC/I4AAAggggAACCCCAAAIIIIBAFgECUFn02LeTBQhAlVz7RQWgnnnmmXDwVcGGPJ435AaLkgJQ7qwLPcPFt7hpJgWgdBsuOwCmWRNZFh1bg+f28uSTT0aOkTYAZd+yTnmMu9WhjqXb39nlUOCvqMUOPuh5W+6xJ06c6D00ASgvSy1XFt0G3Ntj6tlwaRZ31pTdFnV7TAWG9eMLkqYJQOncNmk0C4ilyS/bIIAAAggggAACCCCAAAIIIIAAAkaAAJSR4BWB1gQIQLXmlXnrogJQ9957byTIkcdt1NxgUVIAavHixZHj+26jJTw3zaQAlPssm7hZVZs3bw70TCvzY9/aq1mFtRuAuueeeyLlTXq207hx4yLbpg1yNcu773M7+KDb/dm/myDYsmXLuuxKAKoLSW1X2HVeRBsYNWpUpD2//PLLqazcIHnfvn1TzwZME4BKlQk2QgABBBBAAAEEEEAAAQQQQAABBNoQIADVBhq7IBAEAQGokptBUQEo3QbLBBj0mscsGzdYlBSAeuSRRyLHj3v+lJtmUgBKg+d2meKOrwFwe7ukZ1W51d1uAErPkLKPqYtQ3CILe1vf82vi9m11vR180C3JtIwfPz5yfM2KUtDOXghA2Rr1fl90G7jmmmsi7SltX/POO+9E9tM5oZlPaRYCUGmU2AYBBBBAAAEEEEAAAQQQQAABBIoSIABVlCzp9nQBAlAl13ARASh1gHaAQ+/dAEM7xXSDRXEBID3T6NZbb43k4fnnn/ce0k0zLgClgWndxs+US8+ceffdd71p6lhmO71OmDDBu51vZbsBKHsQXnlLWtzBcx2zqMUOPkyfPr1xmI8//ji49tprI0bz5s2LZEG/G0P72TyRjfilFgJFtgE9S820E/Oq55ylWXRbPLOPeXVviRmXjnsOFRnEjcsD6xFAAAEEEEAAAQQQQAABBBBAoHMFCEB1bt1T8mwCBKCy+bW8dxEBKN3+zQzo6rV3794t58u3gxss8gWg1Pm6t5i7+eabA633LW6avgDUhg0bAvc2X1OnTvUl11hXdgBKATfbW7PPkpb58+dHtn/ssceSNs/0mR18mDZtWpjWnDlzInlQG7GftWMH4ghAhWy1fFNkG9BtLu22r+BrK8Huq6++OrL/ihUrUhkTgErFxEYIIIAAAggggAACCCCAAAIIIFCQAAGogmBJtscLEIAquYqLCECtXLkyMqg7cODAXErlBov03CPNdtAtt/R8ptmzZ3eZ+aSZNu+//37s8d00dSu7119/vZHeggULAt3Kz76d4FVXXRUokJI0yF12AGrTpk0R72YzoJ599tnI9lOmTIn1yfqBHXywb4Oo2SdDhgyJ5GPixInh4Z5++unwMwJQIUst3xTZBty+RsGoVpZ+/fqF7Uz7Ll26NNXuBKBSMbERAggggAACCCCAAAIIIIAAAggUJEAAqiBYku3xAgSgSq7iIgJQmkVgz0ooKgBlH8P3Xrfh0+B30uIGoHzpmHWaWZXmFl26NZ+CYeZHwbG0iz3zR8dtln+lu3Hjxoj39ddfn3i4RYsWRba3A0OJO7bxYVzwQUlpsN/Ymtdly5Y1jqLgn1lHAKoN+ArtUmQbWL58edhO1F4UIG5lcQNQb775ZqrdCUClYmIjBBBAAAEEEEAAAQQQQAABBBAoSIAAVEGwJNvjBQhAlVzFRQSg3NtiNbslXNoi+4JFevZRr169Gj96RtOIESOCSZMmNWYxpUnXTbNv377B8OHDg2HDhgX2c5U0uK2AVtJsqjTHa7ZNOwEopamBdxOwkUfS4t6CT4PpRS128EGzydzFvV2i3DW7bOHChWF5CEC5avX6vcg2sGrVqrCdmPavW2amWTQLT7MFzX56TXt+E4BKI8w2CCCAAAIIIIAAAggggAACCCBQlAABqKJkSbenCxCAKrmGiwhAuQO7CojoOUVZFzdY5HsGVKvHcNO0nwGlmUXus5/69+8feVZRq8drtn27ASjNMrMH0nURilvc5y/ploFFLXbw4eGHH+5yGD0vzA30ycC+TSABqC5stVpRZBtwnzenc+CDDz5I5aPZjPY5o/e6pWeahQBUGiW2QQABBBBAAAEEEEAAAQQQQACBogQIQBUlS7o9XYAAVMk1XEQASkXQTCJ7cDfNreuaFd0NFhUdgFJ+FIRS0Mkuy/jx45tlte3P2w1AaXaWnce1a9fG5mH69OmRbfXMq6IWO/jw0EMPeQ8za9asSH50C8GnnnoqXEcAystWm5VFtoFPP/20yywm3ZYvzWLnS+eOZhGmDZQTgEojzDYIIIAAAggggAACCCCAAAIIIFCUAAGoomRJt6cLEIAquYaLCkC5AZEXX3wxc8m6IwClTLvPTNJgtQavi1jaDUCNHj06DNgof7KKW6ZMmRLZVrcxK2qxB/njAlCaMTd48OBIngYNGhT+TgCqqNopJ92i28DQoUPDtqK2r+BlmkXtStub
n7vuuivNbo1tCEClpmJDBBBAAAEEEEAAAQQQQAABBBAoQIAAVAGoJNkRAgSgSq7mogJQmiVkBnb1+uijjzYtmWYfaOZO3DNcuisApXzdcsstkfIowJZ2tkTTglsbtBuAmjx5ciR/Tz/9tJVq9K0dHNTt7zSLpKglTfBBx37llVci+bfbDgGoomqnnHSLbgN65pvdXvQMtzTLtGnTIvvNmzcvzW6NbQhApaZiQwQQQAABBBBAAAEEEEAAAQQQKECAAFQBqCTZEQIEoEqu5qICUEuXLo0M7vbr1y/QTJekRbeC00Dy1Vdf7X2OS3cFoJTn1157LVIe5XPx4sVJxWnrs3YDUG4AZ8KECd7j65aCV155ZViW++67z7tdXivTBh90vLFjx4b5sgMKBKDyqo3uSafoNuC2fbUdPRsqadm8eXMwYMCASHtL++wopUsAKkmXzxBAAAEEEEAAAQQQQAABBBBAoGgBAlBFC5N+TxUgAFVyzRYVgFIxbr755sgAr25ll7SMGzcu3P6tt97qsunbb78dfq5B5jyCJ26aEydO7HJcs2LEiBGR499www3e2UMa/Nbt5syPBsjTLjNnzowcI+2+Cu7Zz93Sc6t8AT93sF7BgSIXO3CnW/8lLXpOmIKPdvBJ7wlAJalV/7Oi24CCSQMHDoy0m2YzLp9//vnI9vfff39LkLNnz47sTxttiY+NEUAAAQQQQAABBBBAAAEEEEAgowABqIyA7N6xAgSgSq76IgNQ7rOTrrvuukBBBt/iBkZ8ASgzQ8oEKO6++25fUi2tc9McM2ZM7P4rVqyIDDorH9OnT++yvTu4HTcbqcuOQRBoW1M+vbZyW7BHHnkksu/jjz8eOYQCUsOGDQu30XOWiriNoH3QJUuWhMdLCu6ZfWbMmBFubxwY3Dc69Xwtow3YxzDtZv78+V4wzXRSX2S20/t169Z5t41bqQCX2V+vc+fOjduU9QgggAACCCCAAAIIIIAAAggggEDuAgSgciclwQ4RIABVckUXGYBSwEMBHXugVrN0dOs6MztHA7+67VyvXr0i2/kCUO6gr2bLrFmzJpOYm2afPn2CTZs2xaZpz9JSuZTvl156KbJ9OwEolUOD2Pbt8ZS+bl34wgsvxD4Xyz6wgnva3ngrbwre6IL0ySefBA888ED4mew0M6XoxT6mZowpL0mLPldgzJRBrwSgksSq/1lZbWDq1KmRdqNzSQFN3bpTgVa1LbUlneOmfekZaK2eB0pnyJAhYRpKS8+dMn1a9WuEHCKAAAIIIIAAAggggAACCCCAQN0FND6xxRZbBKecckrdi0L+EShVgABUqdxBUGQASkXRwK9vVouCI717944M4ppBYb3aASgFTzTbyf7cvFfAZdq0aS2rJaU5ePDg2Oc7KcijQWtzfL2qLApMrV27tpGPtAGolStXNm4d5qZnp22/10wNBWf0HKe45f333+9yOzINxNuBLR2v6FvvrV69OrjjjjsiTirLTTfd1CVg55ZFAT273ASgXKF6/N4dbUDBbXt2k2lHCrja54DWq+/QOdjKollVN954Y6R9mmPolqM691kQQAABBBBAAAEEEEAAAQQQQACBogUIQBUtTPo9VYAAVMk1W3QAyhRHt9i79957AwV3zICt+6qB4zvvvDPQrCR7ZpMGst3BY3tfzUZodWmWpvIQt8yZM6dLGZQ/PftJS9oA1JtvvtklHbtcce83bNgQl7XGepVNz8dyZxLpOTm6xd/y5csTUea3hwAADT9JREFU98/jQ806icu/nnPVbBk1alRjf7kWHSxrlhc+b0+gu9qA2v+kSZMat5tUcFhtyA7y6jaUCxYs8D6/rVlJNdMprl1r/fjx45slwecIIIAAAggggAACCCCAAAIIIIBAZgECUJkJSaBDBQhAlVzxZQWg7GIpgKIZTsuWLWvcHkuzinQrvqKfR2TnoVPea6bXu+++GwbH6lJuXUSV91afzVOX8pHP5gJ5tAHdFm/VqlWNZ89pdqDSZEEAAQQQQAABBBBAAAEEEEAAAQTqLkAAqu41SP67S4AAVMny3RGAKrmIHA4BBBBAAAEEEEAAAQQQQAABBBBAAAEEEOgxAgSgekxVUpCSBQhAlQxOAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydVPAKpkcA6HAAIIIIAAAggggAACCCCAAAIIIIAAAghkECAAlQGPXTtagABUydW/zTbbBHvssUdw0EEH8YMBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuoeBs45JBDgq233jo48MADSx5N5nAI1FuAAFTJ9aeOarfddgv22WcffjCgDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAMVbwP77bdfsOWWWxKAKnksncPVX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIB
A/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRfgABU/euQEiCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAClRIgAFWp6iAzCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggED9BQhA1b8OKQECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCkBAlCVqg4ygwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUX4AAVP3rkBIggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAApUSIABVqeogMwgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA/QUIQNW/DikBAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFApAQJQlaoOMoMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1F+AAFT965ASIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKVEiAAVanqIDMIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQP0FCEDVvw4pAQIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQKQECUJWqDjKDAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCNRf4P8DkWz/LdtuXjMAAAAASUVORK5CYII=) 
![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAyAAAALtCAYAAADQcQC2AAAgAElEQVR4AexdBVgVSxvG7u7uwri/3d3YjaJYtN2tmNiBrdgtdnd3N3Z3d3vf/3m/c2Y5IGIrcOd7Hji7szOzs+/uzny9VtCkEdAIBFkEateyRb169YPs+PTAgj4Ci5csQ7q06XHw4KGgP1g9Qo2ARkAjoBH4TyBg9Z+4Sn2R/ykE3rx9i1evX+P9hw/Gdf+Lf/Hq1Wtj/9Onf/H27Vtjnxvv37/Hhw8fjTLWsWxjHPhDG1u2bEOWLP9g774DcsYPHz7g7dt3xtlN4zVdI499+vdfueaPHz8ZdYLixsePvhgHxfGFtDG9ePkKGdJnRMeOnUPapenr0QhoBDQCGoFgioAWQILpjdPD/hyBp8+eo1bNOqhSpRpq1KyDseMn4vqNm3j58hXu3ruP9h06o2aN2mjSqCmaNHZA4yaOyJc3P+bNmy+dXbx0GXZ29qhdszaaNG6Khg2bwNmlGYoULopOnbp8fsIASlauWoOC+Qsif9782L5jVwA1vr2oefMWiBMrDl6aBactW7ehbDkbNLRvBNvatqhfvyGOHD0uHY4dNwG5c+VFUwdnnLtw6dtP8hdqrl67HmVKl0WXLt2++ey8f0eOma71mxsFUPHff//FSM8xOHv2nJ+j16/fgHvvfnj16pWf8m/Z8Tl3HufOX5CqJ06cRPr0GTF+/IRvafrVOsdPnsLDh4++Wu9rFfr06YeKFavg46egLZx+7Tr0cY2ARkAjoBEIGQhoASRk3Mf//FVs2rwV8ePGR+GCRbBjx04cOHAQ5y9chIOjM3LmyIm7d+8Jk1jXti6srKwwefIUHDh4CNn/+R+srEKBbiq0IqxevRZhrKykzrQZM3Hq9Bk0b95S9h0dXQxhwD/gZGw3b92B0qXLI1uWbEicICGSJE6KEydP+a/6zfu1atVGfTt7o/7Dhw+xectWpEqeAmGsQmHDxs14/vyFHPfxOQuPgYOxbv2Gv2q1MQYbyMbNW7cRLUpUZEiXIZBavodo0Rk9diJu3LjlW/iDWx8+fsSo0eNw+cpVPz20bNEKkSNFwSV/5X4qBbDDaxk30Qvv3pssUXzO8uUrgIkTJwdQ+/uKKBS1bN0eb9+9/76GAdResngpEiVMjD1ma1oAVXSRRkAjoBHQCGgE/hgCWgD5Y1DrE/0uBHzOXUDixEmQOmVqfPDnfnT8+EmUL18R12/ckNOPHDFKhImjx07IPrXXFEgyW2cxhpcmZWqkSJYCl69dN8ooUESLHBWnfc5LGd27Nm3agiNHjsk+hRdaPJ4+fSr73Xv0EuvFyZOnjT6+Z4PuVGXKlMOQIcM+a1a8WAmkS5Pus/InT58ZZTt37cb2HTvx9PkLLF6yFOPHT8TzFy+N45YbZPC3bNmKtWvXS/HLly8xc+Zs3L5926g2Y8YszJ49x9hXGwsXemPGjJnYt28/uH3o8FGxOnl7LxLhje4/a9asxarVa3Dl6jXVDHlz50HRwkWNfW7QwjN82Ajwnii6dfsOOnbsgqxZ/sGwYSNw7pzJ0sDjK1euxpAhQ8W6perfvHkLCxYsFOGSZXv27sPGjZvUYUyc5IUtW7cb+9x48+4dypW1keegRw93P8csd7ymTBXBlUIH6cLFS6hevSYKFiiEZctX4v6Dh9i1cxcOHDiMd+/eiwC8dNlyERqPHDU9J7t278GYMWMNq8brN2/g6TkGc+bO84PPsWPHUSBfAaRKkQrz5i/E69dv5JyPnzzByJGemDV7jrjcqfGtX78R8xcsxI2bJiFt587dOH3mjDqMFStWyvV5ey82yvSGRkAjoBHQCGgE/hYCWgD5W8jr8/4yBOgeRSFi7fqNAfZ56/ZdvHtv0iL37t1X6irXpfETJsp+4yYO0vbR4ydImSw5UqVMjSvXTULLiVNnpA7du16/McWN0CWI58yUMbMft5aHjx+jT9/+SBg/IX6G2Xv/4SPKlbMRZtXyol6/eSeMO4UkS4GDcRVnfM5KHAvrd+3aU8Zna2uH/h6DRWApVqwEbpiZZ8s+P336hIYNGyNWjNhYuXodZsyai7BhImDc2PFSbfCQ4bCxqYQSJUphgMdgo2mbNu2QPXsOcQWjq1jp0uWwddsOXLh4GUkSJYF1Rms8fPwUCvP27TsabbP/kx1FChcx9hcsXISGjZqgatXqSJc2A7Zs2yHHjh0/idQp0yBCmLAoUaI0Dh46LOVjxoxD1izZxN2unp097ty7L+WXLl9BlszZxBq2bsMmdO3WAwkTJsHzFyZLUfeevbFp8xbjvNw4edoHQ0eMRpyYsT4TilTFTp27wsamoowhY6YsePz4CbZt34XIESIiVrQYaNeuI27fuYcmTRwEdwpjp874iNWBz4kSbGbMnIVKlari3r37uH79Ojp27gpXt+bImycf8uYtgDt37sopZ86eiwhhwiFNylTo6d5XrFoUrugOWKpkadSuUw9t2nUwLHKjPMfIecuWKYeXr96gaNESKFK0uBo+tm3fiRjRYohwZhTqDY2ARkAjoBHQCPwlBLQA8peA/x2nvXz1Go6dOCUxAGfPXwT/aB1Q2z/9G1BfAZWZzy3n+9pxc10y+efOX8T3BiiTUa9ZoxbChwln+OEHhu2gQUMQNlRoYRQ7dOyCsKHCoEKFSgYzf+PWLWTOaI14sWKjTm1btGzREvXr24sWmgygotevX2PipMmYP38h3lsEVV+5el2yVpHprF2rDu7df6CafNcvr8umQiXMnTvPT7s3b99/UQA5feas4ZK1ecs2YUiLm5nQo8eOyz6vPyBatWYdEiVIiDJlyktcCTXwFy5cxNAhwxAzRmxpsn37DumDWnZaV3iNtGyQUqZIhfp2DYyuXV1cETN6THz8F3j1+o3UbdWylXE8x/+yo1iRYsZ+xgzW6GQOko4cPiIaNWpsHKterQYihY+Ig4ePStn8Bd7S3ySzmxMFtTy580qsDysMHz5Sjo8eMw7Pnr8QtzTeL+KzfsNmP88YLQtXr5kEzXq29RAmTBh4L1pqnJsbgwYPQZrUvhanESM9RQB58uw5UqdMhSyZs4qliXUXei+Rc/fvN0D6WLd+o+y3a9te9okFkxuQKHSFDhVWhAi61hFPxvKQXr5+g1w5c6N8ORvZ5z9agliHmFKgjhQhMgZbWMhy/JMdSRMnxb0Hj3D48BEsW7bCaHvj1h3ky1cQ06ZNN8p+ZoPJEE77nMOpM2dF4DTNNef9zD2BzjdfmBd8LOcOy+0v1P+eOcbPeNhfQH2eu4AvjsFyPBbbp3zO4ez5Czh/8RKOHDv5w+/8z9wP3VYjoBHQCAQ3BLQAEtzuWCDjpRaWbkjUpmbKkAnWmTLDOqPpjy5GmTNl/vyP5fIXwDF/9dmX/z54Dinz37+538/aWH/eB9snip8QuXPnEzeWQC7xs0P0j69cqYpoi0+f8fnsuP+C/v09pC6ZygULveG9aLGfmIkbN28jQ9r0yJA2HTp36YYE8eIL03fY7Grlv78v7ffq
1UfaTfaa8qUqgZYzVqFM6XKYPMnLTz1aQAoVKCSacUsLCLN30T1JBVGvXLUakSNEQjuz1YEB9hHCRvhiJqQlS1cgdszY8PAY5Od8RQoVQYwoUdGiRStUrlQViRIkwsJFS2Fn10Cu79yFi1I/beq0EtdBwYnk5OiMZImT4f3HT7h//wEihotgCBg8ztibokV8XbAYO7Fz916xpoQLEwatW7eVfvjPpnwFRI0UBbv37JMytgsXOiwOme9JC3OMDhlAUr/+AxApQkQcOnxE9tU/WiGGDR+pduX38OGj2LBxE968eQOPgUPkmpqarWGsQIsXmfrKFSv7accdni9NqtSwzmQNPjckuktRSBg8yNdSlDF9BmRMnxGPnjzFmzdvQbcrEl3dTpw4hTXrNooQHC1yFHER47F7Dx7in2z/M4S0t+/eIX++AsiaOSvM8gvix4mHKpWrSl/8l9U6C0oUKwHGI/knun7FihU7QDc6/3W/ZZ9B93y3iU0W66zmecbaNJeoOSGQX2Ne8DdvGPOJv7ZGfYvyLOa2xjHzvir3M+f5P4+aF9mfOmadRa7D/xxnuR/Q+BjLxDoF8hdAmlRpQVc9TRoBjYBGQCMQOAJaAAkcn2B1lBpoMj+J4sWXBZFMdLo0aeUvbao04F+61GmRzrwtZalZZionE/lZHXMZj0lb7qc29WXZp29fpj6kjupXnU/ObWpv9MUxpUqDODFjI/v/cvyQ9pDpRXndU6YGrN0lQ0Y3I1Kvnu4IbRUajC0IiMhIJk+SDOnSpsP7T//ixMnT0jcDyh88MsV3BNSOqX8tBQLWoTtMBwu3o4DafamMWvLy5SuI9t2yzrsPH1C4YGGQqfVPZGafPjPFgaxYuUqYcCWAnD13HrQsfCkV65w585AieUps3OTXPalUiVKIFT2GaOv37zsgrkOMJZkzdwEypM8AWjUYwM/MT2PNLlscV+NGTSR+gazwo8ePxYLRsUMnY8gUQCjcKPI5ew4DBw2B19RpIvQxKFwRBUzG3+zZu1+K8uXJi+hRoolljwVubs3kHqlMVD179kL8uAlw4pRvAgAKaIsWL8XBgyYXLrbjM+E5eixKlCwNB0cXNGnqhNQpUgpTTbcsEi0RSRImRuFCheGfrb989boh6N8yu07xGeSzOHTIUGnPf3QXowDVo2dvo0xtLF+xEmPGjkfvPv1EoJg02SSwPnz0GP9k9RXSaDnJlvUfFMhXUJoy6D1B3PioWKGS6kqeifLlyhv7lhtLly6XcS1Zssyy+Ie3eb/SpkmHhHHj+84fX5pjWM45w+KP77/a5/zDbTUnyDF/bSzLVD3fOYftOT/5n5d8+7Xs37Ttez41Dt86fsdqeR4Zh8U8yWPWGTOJ0iJWtOiC8ZDBvvf+hwHWDTUCGgGNQAhHQAsgIegGuzi7yQLYqFETPH/5CmRi6JdO/3jGQfCP+7fN21J2567ss+z2Hb91uO9bZq7H9uZ6Rp9G36Y6ql8yZZbnkv7M5zGd755pTLfvSmwCjytB4Xtuy+GjxxArZmy5dhXbodozsJnuSIpGjDC55yiGUZWrXzKZzDLFjFp0sSG1b9dB+q5RvZaqhnfv3olG/uw5UzpXxp9kts6KTZs2G3XsGzbBggXexv73btjVs0NlCw23al+4UBGEsrLCdQuXMMau0I3qhTnQnNmwIoQNj8qVTJp7piMmY9yr1+dMMPtdsXI1kiVN7gcrljM2hO3WrdugTo8P5oxPGzdvlQxhbs2aQ1lCVCUKw3FjxZHdW7duSx8DLawrFEDKlSknxxnQnymjNSqWr4BXb94iSoSI6Natu+pK3OVonVHCkceAQdIfA8xJDMpn0gAKOqSRI0dJvMO1Gzdln//oKuY1xa+Ayuf08NHjePz4MS5evIRnz55j3QaTy1SzZi2Mtr3cTdYsZh0j8Rl98fIlHj1+ity58ohVgu6DJMZaEK8J4yfKPv/dvHVH7hdd8iyJgeKsu379BqxatUa25803PS+0chTMX0jiPVQbh6ZOiBsnnuzS0sS2lumhs1hnRsUKFVV1P79MNZ0jey5c/wWZxNjx+/cfRIi/efsO7ty9J4kAjPngjum9NuYB9c5z3jDPKZyHuK3mCJkb5Jh5fjEfU3Xk16IfmYP8z0/+5yV/55K5z2L+Mp3TNA7Vn//z+Tmvxfl5beyP18954sr1m8ibO6/ckzFjTLFTfm6A3tEIaAQ0AhoBPwhoAcQPHMF7x8XFJIA0c2sevC/kB0a/afM2lChREokTJkafvv0wYuQouPfui2YtWuOMj498mNBr6nRxXyLj5uzsCgoulkQmvWu3nghtZYUwVqElKPq22VLSqFFTYS4KFiwCujMxdoD9ZMmSTbpgRqRIESIJo9mzV2+MGTcRm7b4zbZkea5v2abgkzhREiPQWLVh7ABjEhhsPWToMLne6TNnGxmQWG/16nWiIaf1YMq0GeIeQgvHseOff0uDFqJWrdvK9TRu7IA7d00B3ezn1p17KFSwCKJEjIwuXbth9tz5kk2LMQZVq1THmrXrwdiQtWvXYcMG32xTY8eZgvsZgD5p8lQTVtZZRHhlIHz0qNHFjWrXrj2SZpauK3RBGuE5VlyPqF1XLnWM5SDWrm6+QoEbA7fz5kez5i3Be6IET1q26KrE+j179QFdl0jEYPYc33iates2yDdfWrVqg2vXfbOdMWidbfnHgHKmOaabH4U+Cjn9BwzE8hWrJYsVcaOwz7rzzIJmN3P70qXLCmPOc9NaUbhwMfBclrR48TJpS/euXu59xU2tWrWa4qrFemVKlUX4sOGxcctWaXb06DGxUtG65OjkgkqVq+GqObPY3HkLpK/oUaKDcSf+qXOXrmjQwDels//jev/nEeD3f/gs0OKlSSOgEdAIaAQCR0ALIIHjE6yOurqaXFEcmjoGq3H/ysEyxSmZaWYtatO2PR48eCjdUzver/9AiXFgIG6Xrt2xycIywkpnfM6hZ093cSWaPNkL7Tt0Ar+vQSITyXSpdA06eMgUW8CUvpYxHsyu5O7eGxQE+UHCnyWmbs2Y0Vq+TeK/L37HonfvPujQsbMw4YePmAK0Vb2lS5dJJq5Zs+dhwsTJcHF2letTxy1/mYaXLliMnWAA9+UrvulyWY/xCIwNadrUEYwlIF27fgNJEyURhitq5KjyGztWbOzavVeO02Vo6tTpcHfvI+mJ586dj65du0tq2ivXrmP8+Emgq5QKYmf2rB49eknAOIODWVd9AZ4BzxS0GK9jSQMGDBQBgAH2iigI9uvXX6wgI0eNxosXpg8L3rx52wj+Zl3eH7qj0dKhLCmPHvEZ8cCkSV6SfYzCBDXjpGfPX6J7956C9f4Dh9TpJHVuL/feOHXalG556NDhcm7GmjAjlyK6wKlUvKqMwi3d1ugWyMxhtIT06zcAt26Z3AO3bt2Gnr3c/VilKGhRMGI5rV6KpkydJimbGeO00F+qXV47Ewnw2db0exBgsgN+fJQCyOjRY3/PSXSvGgGNgEYgBCGgBZAQdDOp1ecC6OjoHIKu6r99KdWq1kCpUmUDDCwODJm169bLh/X
Utz0Cq/sjxygcDBw0VL4fQgZ+7979qFS5qnxt/kf6021+HwIUTtKmzQAKQZp+DwKWAsgYLYD8HpB1rxoBjUCIQkALICHodlLLTQHEwcEpBF3Vf/tSrl27Lh/544cJv4fGjZ8gzwItCSo24nvaf61u+/adULNWHTx7/lwyb/G7KD3d+0lGqa+11cf/LAL8ojrjFTT9PgT8CCBjtAXk9yGte9YIaARCCgJaAAkpdxKQuAYRQP7DLlgh6Hb+8KXQpWrg4KGS5pYfz1PxCT/cYQANr169DsbF1KhRG7a29cDYBf8xNQE000UagRCJADPDFchncsHSMSAh8hbri9IIaAR+MQJaAPnFgP7N7pQL1n85BuRv4h9Uzs3gaPVBRwZhvzRnxvod42PcxMOHj/DkyZdTFP+O8+o+NQJBCQFtAQlKd0OPRSOgEQgOCGgBJDjcpW8co+GCpS0g34iYrqYR0AhoBH4eAZMAkk/cHpkIQ5NGQCOgEdAIBI6AFkACxydYHdUCSLC6XXqwGgGNQAhBQAsgIeRG6svQCGgE/hgCWgD5Y1D//hNpAeT3Y6zPoBHQCGgE/COgXbD8I6L3NQIaAY1A4AhoASRwfILVURcXcxYs7YIVrO6bHqxGQCMQvBHQAkjwvn969BoBjcCfR0ALIH8e8992Rm0B+W3Q6o41AhoBjcAXEfDrgqW/hP5FoPQBjYBGQCNgRkALICHoUdACSAi6mfpSNAIagWCDgBZAgs2t0gPVCGgEgggCWgAJIjfiVwxDCyC/AkXdh0ZAI6AR+D4EtADyfXjp2hoBjYBGQAsgIegZMAQQ/SX0EHRX9aVoBDQCQR0BLYAE9Tukx6cR0AgENQS0ABLU7shPjMfVtZnkoXdydP6JXnRTjYBGQCOgEfgeBLQA8j1o6boaAY2ARgDQAkgIegqUBcRRW0BC0F3Vl6IR0AgEdQS0ABLU75Aen0ZAIxDUENACSFC7Iz8xntq16ogFpHr1Gj/Ri26qEdAIaAQ0At+DgBZAvgctXVcjoBHQCGgLSIh6BqZMnY7qNWph8uSpIeq69MVoBDQCGoGgjMDTZ8+RKUNGUQANHjIsKA9Vj00joBHQCAQJBLQFJEjcBj0IjYBGQCOgEQiuCDx7/hw2NpUQK3YcTPbSCqDgeh/1uDUCGoE/h4AWQP4c1vpMGgGNgEZAIxACEfj48SPu3LuPq9eug+5YmjQCGgGNgEYgcAS0ABI4PvqoRkAjoBHQCGgENAIaAY2ARkAj8AsR0ALILwRTd6UR0AhoBDQCGgGNgEZAI6AR0AgEjoAWQALHRx/VCGgENAIaAY2ARkAjoBHQCGgEfiECWgD5hWDqrjQCGgGNgEZAI6AR0AhoBDQCGoHAEdACSOD46KMaAY2ARkAjoBHQCGgENAIaAY3AL0RACyC/EEzdlUZAI6AR0AhoBDQCGgGNgEZAIxA4AloACRwffVQjoBHQCGgENAIaAY2ARkAjoBH4hQhoAeQXgqm70ghoBDQCGgGNgEZAI6AR0AhoBAJHIMQLILdu38HsOfPw8OGjwJHQR4MMApO8pmLJshVBZjy/cyCr1qzDlq3bfucpglTfo8eMw6rVa//4mK5dv4FatWzRuHFT3Lx1+4vnv3v3HubN98a9e/e/WEcf+HYE7ty9hyHDRuL0mbPf3ug/UPPff//97Cr37T/4V+e9+QsXYcasuZ+N628UDB/hifUbNv2NU3/xnMuWrcTuPXu/ePxHDizwJuZzfqTpT7WZPGU6Fnov+ak+gnPjT//+iwXei3H06DG5jKdPn2Hm7Lm4cvV6cL6sYDf2HxZA3r17j9Zt2qNOnbpo3rwlnJxd4eDgjDp16sHZpRkuXLz818F4+eo1atW2RYYMmXD/wcPvGs+nT/+CzEiPnu7Yv//AN7ft2q0nqlWriU4dO8PevhF69+mPC5euoHPnrqhn1wB79u775r5+pKL34qWoUaMWOnTohKvXb/xIF7+sza3bd3H9xs3v7m/d+o2/HafvHtQvavDk6VPwuSTNW+CNOHHiY+y4Cb+o96DfzcpVq3Hg4KE/OtBTp06hfYfOGDVqNGrVqoMmDs54+uy5MYZLl6+ACxCpSpXq+N//cuHGzVvG8f/6xvMXL3H2/IVvguHN27d48uSpUZfbi5cux/Ubf3cuMgYUyMabt+8wfORo2Nk1QPt27eHq2gxOzm5wdWuBqlWro5d7HxAL/zR27AScPnPGf7Hss89GjZuiRInSmDN3gZTduXMPb9689VP/6LETSJ06HRydnP2U/8mdHTt3Y8uWv6MMOe1z1s87uWz5ChwxM4d/EoMvnWvZ8pWwsrLC7Dm/VkDbuWs3Nv8FzCnc7dq150uXG2j5iZNnPnt+A20QBA8OGz4K0aLFxMZNW2R0TR2ckCJFGpw64xMERxtyh/TDAgglyE2bt6BMmfKwta2HjRs3Y82adVi9eg0mTPTC/IWL/zpq799/wISJk1Crdl3c/U6N5vkLl1C9Rm2ZdNauXffN17J333507dZD2lWsUAkHDx2RRcvNrbmUTZ485Zv7+pGKDx89RoliJeRc+w/8WUbP/3i79eiNI0eP+y/+T+/PnDMfe/eZBNpTp8+geo06mDJ1+n8ak9998QsWeqN+g4bGafbvP4iXr14Z+8NGeOLkqdOyTwtN7Tr1cPHS31egGAP8yxu79+zDmLETv2kUe/buh9fUGd9UN6hV+vjpE46fOIV6de1k/mzQoJFo4bdt246ZM2fBtp49Dh0+6mfYS5cuh5VVaKxcvcZPOXfu3LmLuvXqo3fvvli+fAW8pkzDP//kwMxZc+DfAEJll6tbc3Tp2v2zfkJ6AXmJvv0H4e3bd0H2Us9fuIgC+Qti6rT/9lx9/eYtbNm2M8jep28dGPk0mwqVscFsZZs3f4HweydPB6xI+NZ+db3vQ+CHBRB1mlGjx2LS5Klq1/h98fIlTp0+jSVLl+HUqTP48OEDnj9/gZUrV/nRgF69eh2zZ8/Bjh1+H+rHZi3akSNHsWqV7+TOB2b9+o3GebjBhYN09uxZWSjUPsso0To6ueHy5StSh/8uXbqMCeMn4tgxk/nNOGCx8erVK5w7dx7VqtaAt/ciiyPA8eMnMW/eArwIQBvGij7nzssCNnz4SKPdjJmzET1qdCxctESuf+/e/cYxtXHx4iVMnz4D+/YfwPsPH1Uxdu3aLabfj5/+xb1790TwMw6aNz58/ATvRYvlOulyljpVWsPtYfv2HaCm5dVrk+ad2jZLDS8xnTNnLh5YuKmdP38B6zdsNOpxEb53/4H/02LVqlVYsmQpLl+5ahyju1u7dh0QJ1YcjPQcjQcW1qd16zaA2q3AiC4yjx8/MaqohWnevPnwOXvOKOfGx4+fMG36DLBfS3pmvjcnTpzEsWMmIejDx4+Cr//nh/dx9uy5WLFipTANqp+Hjx6BDMYrs8VClfN31arVWLp0mWURiNmKlav8lFnuHDx0GHnz5IeTkzOOHDE9e+59+mPtug24eesW5s9fiFev31g2wYmTJzF+/ARcvXrNT7n/nTNnfDBhwkT4+JhcXS5cuIjFS5
bixIlTePv2rfzt2r0Hhw4dMZqqZ2zRoiXYtm2HUc4Nda9PnjyN27fvyLFHjx7Dy2sqtvt7V2nR2LFjl5/2V65cxYwZM42yT+Z39MmTZ2JZVAeoMSdzx2v3r11+/MRklVi/YQO2bfc7PtVe/XJuWbx4KWbMmIXHT3yfne3bd8LFxU0sH/MXLMR9i2f42fPnGDN2PHLlzosBAwZKO1pDWrftIN0eP34ca9Z87i52+MhRTJwwCbzGL9EH8/Xu379f7oNlPVpf3r9/j6vXrvsRdKjQmeI19ZvcRfft2y+47T9w0M9coTDknLEmAMXJseMnMGnSZPB5sSRlEeJzyPlD0e7de1GmbDv8PO0AACAASURBVHnUqF4TXLQV8drnzp0HPjuKrl2/KcqosmXKgVbM1+b5hlZQS40/3bL4zh74ioKE8++4cRM+GyvPx/lh5szZ4DtlSc+ev5Bd4kM8/dPWrdswd958cC74EvG6qO2eMs2vIOVz9jz4XllSh/adpC6tJv6J1m/2c+War1tH587dMH3mbKj3wbLNtBmzMWXaTLHG8drUO0hBZsuWrdi0aQtumi1zZ8+dx7r1G/y8S5Z9cZwTJkzC8eMnLIuN7YULvWV9MwoA3H/wCI8eP7YswrJlKwJ8B1iJ7+zixb73n2Vq3b569SrmzJ2H1/7ms1OnTpvnMxMmdI1s0bI1ihUrgUWLl+LFS5Ni4NHjJ7j/wHe94bvq5TUF69at9zM+yx06tak5jXzHocOmue7Zs+eYOWv2Z14QL1++lPeNa5t/2r17D6ZPn+kHD2cXN6w2zwcLFy7Cw4d+vSo2b94ivMzRY8dBoUrRs+cmqxl5HK7FlsTrVfeU5Zy/16z1e40fPnyE6m3BgoWg4op06/ZteQdemjFj2cePH+WeBTY3sd7tO/cCXGPnz19g9C8nsfh38OAh5MyZG1SmHj58FJwH+T5R0Us6cfKUrMNcN0l8x8iXcPwzps80vCGI25w588D1xD+Rr+I6r65XHeecQeJ6/sAf7qoO12Ou4/ResSTye3x2rl33fQ95nPP8IvPzy3fNwcnNz3zCtXz/AZOykPyspl+PwE8LIN179sYUfxovTo4kvkixY8dFkkRJZf/w4SOIESMOPMeMk32fs2fRsLED6tatjzSp0mLBAm8pHzRoKIoWLYGp02egX/+ByJ0rryxYtEoM8BiMRAmTiADAykePn0DFipWFyRg5agxy5cqH1m3bSz/8t3PXHtg3bIrLV0wCCBm5Tp27oXSpMqhYqSqOHT9p1A1oo379hn4YKdbhAl2tWi3cvns3oCbYvGWrLDzu7r2N4+PGT0LcWHHQqXNX0L81YfyEMnmoClxIW7Vph5o1aiNF8lTo1Kmr8RI2a97StMjVq49xE72QKEFi9OzpLpMN23MxrmBTEdaZMmPYyDHiQpD9fzlxzrxYtu9gWiRbtGiJt+8/IlWqtGjWrKWcun9/D2lXu05d1K1nj4uXTDht2rIN8eMnRPKkyTFuwmSUK1cBObLnxIWLl6QdmQv6rtK9oGTJ0siYwdrQ7B87cQpJEiVB9MhRwH7ZJyfkXr16o0iR4nJvv+R2NGjQEOTLVxBcmN69fy+uM+xjzNgJqFmzDgoXKY6Tp0wT8IsXLzB8xChUrlINWbL8g1GeY2Rs1NjyPGzDSaZFyzZ48OABxk+YhBo1aiNnrjwYN96k0eXi1NO9D2xsKkkbWsvon00GgS4Jbdt1RI+evUEBUtHR4yfRslUbqU//dhKZfO9FS9GmXQfQDc//wsw6U2fMQopkyZEzey5MmzFLMOFz0a17LwwbPhI5cuRGx05d1WlkMmzdph2KFimO8jaVAmTEWJn3hC5GxYuXFIyuX78BaqKTJEmOGNFjSX+cfMkMde/RS/aHDxuBggUKYaTnGLi4NkfaNOlFeKJA17p1W7EY7Nl3EBUqVRUBiYzE4CHDUKliFRQrXtIwXZNx7e8xGM2at8KIkSZhk8zm1KkzYGtrhz59+st7TEwp/KRPlxFLlpgENy4kfft5iNtT7dp1Ua16LZw8eUrGx3vF+0qXmLZt2ws2S5YGLLjSmjF+0hQ0bNQUxYqWQPUateR+syPOF7lz50WRwkVRs5Yt+Gwoooti/QaNkNk6CypUqIxLl6+CSoAG9o0wbfpM9OvvgcyZs2Kyl6+C5YzPObRp2wFFChdD3XoNRIhQ/anfLVu3g0x4u/Yd4TFwCLLnyI2hZmUEhbdMmayFQec8VKlSVWnG+2XfsAkKFSqCcuUr4PYdk9Cn+rT83bl7j7h5VqtWA9bWWbHWzJR5eAxC9uw54DVlOjw8hohLDwUsRRy7s0tzlCldFrlz5wOFFxKFT84d3br1wJx53siRMw8cHJzw+s0bTJ02E9mz55R3n8omEhdrCs716jVAntz5QKaa78vho8eRL29+pEiSDG3bd8Ljp88wcuQoeU/UAs65YNgIusLZCuarAxCSeI4TJ0+L+xPxaNioicz/nHffvjNpyRctWYY6tnYoWaqsMe906tQZeXLnEZcvPlcZM1obzyn75P3t3KU7ihcvhR69+nxmhZCLA+DpOVrelZnmeIjzFy+B99Q/EYcJk6fBOkNGJIgTD/csFDisu27DJoSxCo2s1plB4fftu/fSBZl0S4FM9UvPAc6nnBNKlSqD0mXKg/MTlTuJEyVFxgyZjHVsstc0lChV1rDcqT74e+3adXTp2gOlSpaRd4vKJp7TrVkLcQueNWce7OzsUaRoCWM+nzV7LnLnyY/NFtc5d95Cad/AvrFYbyZOnGQoI/iM0aqYK2ce9OnbX05/+rSP3BNacYYOG4FcufKifcfOxtAoELRq3Q5FixZHufIV5V0jE8v7kSZVapDBp+DBtTBrlmziTcHGd+7ckTm1YoXKMg9y3SGjbUkUvOvVq49iRYtj8bIVgiOVPYzZWL12PQoXLgbbuvUNge3Wrdvo22+AKBjz5i0gVinVH+f3Vm3aI2/e/KIlV8K5k5OLuJvTAsD3p0kTB9UEa9ZtQOUq1c0unDmNeYzMev68+bFi5Wq5Br6ve80u3VQGZMqURZ5tdkQlyohRY+DarKXwCFyz6OpevXpNVLCpgJWr16F69VpyjRQAGLPD98/RyRVKCDl/4bKcp3PX7jJfB8Tk894UKFgEl69ew/sPH4Qf4lw9dtxEcaEvULAwKET5pwkTJws/QEvQ+AmTMXf+QiRMkAgUxkiM24wVOy6mTpkm4+b95LvrOXosbGwqomq1mqJ8PXHqDPLkyY96dvaGuybnj6FDR6BK1RriVdOydVtZH7luNGrUROb2A4ePomz5ilhsXj8sx0flE5/pHDlzw8HRxRCunjx9Bs/R4+T8terUMxSlXHuomKZLMImCsV2DRgZvQYVtw4ZNRNgaNXrcN7ugWo5Jb38dgZ8WQOhLV66sDXr06IU2bdqhbbsOOHbCl6k/e/Y8smTOKtqYO/fu+7F+dO7aQ2IkOEyPgYOEYeH2kKHDETliZFSsVAXv3n8QTU/YMOENH9q+/frLREc5d9eefUiRPCWyZs4KpfVOkzodhgwZKldPrV2jJo64f
SZnq+U6dPi5uTWUkAM2fORnOHFjh2/ITtZh/LZ86eFxevdes2oE1rR1y+fMXcf/DwEWFy9NgxrFjxfipA8m7XroOpZWP9Bg0ajPkLFpp1nDhpMgYNspjlX716JcIYXdKMdOrUaXTs2Ena19h28uRpDBg4GFet34U4cPAw+vcfiMNHjhpZ4OY2AnPnusv6o8cWrc/ipcvQrr0T3rx9i5Wr1mDkyNFYZB2M07WvV+++2LjpQ83V27fvcO78RTx+/BjTps8w75EdO3bCoVlz0LXANl27cVNc3kaMGIUxY8fb7pLloUOHo1OnLh9oyU6fPQfey7t27cbw4a6oVasOsmXLLq5E56zuD7xf+vbthx49eoEvFdu0avUa4XDp8hU0aeKAMzYaS+bjfVu3bn2kT5ceAwYMEusDr7tRo6Z4/foN5rkvwPBhLuCzYiRqmPnMDB3qYrbZxk1bkDZNOhQvVgILFi2RY438HNC0bdcBiRIkQtduPXDu/AXZRfeN+/fvY/LkqaAmlGnP3v24e/eucShOnz6DNm3aYsrUabKNFpZhw1wwZsxYnDlzVrbx2O7de2L7jp3mceTctm17H4oI3ocVbAQQz6dP5XwURI20Zu06ODi0wMZPaCuvX7+Orl27C1db10C6apDnq9ev0aNnbxyyug3u2rMXefLkF+vL5CnTxAWS5+N2uj4aidfTs1cfHD9+ErPmuPtwF/Py8pbzubmN9DGzjfF8UgnD9rd9Tmhx4r2zefMWzJ+/UE7z7NkzdOvWQ54vW6fDy5evoqNTZ/C54LNAaxSfPTe3Udi1e49RRfm9evUqunTpJgKSrVvJqdNnxYLK+7FHj944ZHVHYbvkypUHOXLkwtRpM0BrGNOYMeNNlx26sPiVqIhh+8+cNfuD3XyO2A60vDEtXbYCCeInRPXqNbF0mc9+2Dj4/IVLcGLf4eomswQZ20+cOIlOnbqKqyrv+xcvPrT+kRf77p49e4PPk22iO18bx3bSj9lu57K44AwYhA4dnMx7ltvZpsazQCXVqNFjYMuB76FDh9+77cyeMw/8M9L8BYuQNm16eeZ47UxkT8H1/IWLss429CutWr0WrVq1wfLlFvfga9dvokLFykiePAUGDBiMGzf8tmSwXN6jfA4bN26GTTZ9oyjNrBZQWqzbt+9oumg9fuyJenXrI0G8BOjdx9m87ivXrmO/jXvt0WOW9w3r17dv/w8s3zNmzpF+kspGpsNHjiFL5mzImzuv3FsfU9Txexi83t17fLp3Xr95W8rZunWb3EsUwox0+85d7Nq914dyilae9u06YMKESUY283fnrj3wuPveUs93Ul/nfnKv0LV0zdr1Zl7bBY5j6HmwZs06c9rc+/cfWN5xrm62WWWZ7c53KC2otmn1mrVo1rQ5tmzZZm6mZ/Edj3vSDzg79xOXY+7k892sWXOkTpkazv0G4LjNe97VdYS0HfteI7FP4DuVv+vWrQefFyb2JexfmH6GJVlOpP+UwHcm8FUCCB/4Ro0dcPOWpRNhnfgSpHWEA32+UIMFCyYvbu4rU7Y8ihYtAXYsHMh3cOqCRo2boVrV6siRPaccw04/adIUKFumHCZPmY7adeqjVKmymDtvPrbv3I3ixUsie/acoEsFH8bWrdsiYYJE4g9Ls3TGTFkx193y9VG+sNq07WAOOF+9foMZs+bI4KRSxcpo3aYd7j948FGUjx89xtoNm8wBCzM+9nyCVq3bYo7Ni+ijBQCiDcySLSeWWDUdHAiTSaOGjeQwaoYPHT7q44XKDq5zl+6oW6eeaMIPHnpvMjfOtXbdBkSMEBHly1XErNnzUKhgEdGwM9aE++LGiQ/3+YvQrUdvce3icctWrJIBfY3qNWXbw0ePMX78RAQL9hsaNW5quqdQwOJgigPdEiVKiSuat9V1g1adtu07ysske45cwoJlczAbJXI06RC5zmuKHy8hmjRpJlV2d18gA4CuXXugfYfOOHP2nGyfMnUGnDp3leWx4yYIm85dusk6X5o1a9XFxMk+P+bEdmeexImTYsy4iahRqy7mzluA4ydOoku3HmjW1EG0g3yhMPXrPxCxY8WBY9v2IgBnzZpdfIK5jwMOxiVwwM9Beq5ceTF4sEX4osk+Tux46NajF/oPGCz3LgWQBHHjo3qNWqAA8ubNG9H+devWUwbXvKc56GTau/8g2jh2QIeOndCwYWMULVoMfLHZJsYY1aldFxHChRdXRg7mOKjNlCET+vTtj7HjJsqgjgNHprNnz6FsuYqoU7c+SpYsjSZNm8t2Ct5RIkZGoQKFMHX6TBmEG+ehhaVDx86IFSMmmjZtDg4yaHlKkTwlRo+dIM/wuAmTJV4qcZJkYi3gsRzcN2rigMZNmiJHjtyYO89dBPFECRMjUcIk8hwzH/3Oc+XOb7oqDhvuJqzLli2P8hUq4cJFy2Bl7tx58oEuHsOBSokSpVGocFEZtHEbBc/GTRxQs2Zt5M1XwE+N5fnzF1C/YRMR5qpWrY6ixUryUBlQRY0SDSWKlcDSFatRoUJl4fPw4SMZxJBVimQp5RppiZw0aTKYn/cM0/kLFyQehfcA3SPz5y+EUaPHyj7yW7dhswzIypWrIC6m3DF69FiECxMObdt2ALXAWbJkl7pzH+//BPETiXDt3H+QWPk4aB4zdpwID6VLlUHrNm2l/PPnz4u7F/tL9kl8JpjOnDuPkaPHokfPPmjWvKUMMDw8PIRR48ZNZaBPhkznzl2Q6ylRvCSWr1qL8uUronjxUuAzThdSusqmTJ4Sg4cMl36TyotBg4ehfYdOcl1+uczSzZb9LwdKRYoWl2vkud68fYely9mXOKNggUJyHvbnCxctQbw4cVGsaHHMnD1X6mX7b+/e/ahTtwFatXKUfs2IPeDgq0vX7nCyXv/0GbMlhs72WLrhNG7SDJUrV5PrLla8lKnI4mCsVu16qFOnHjp26ioDZCoLmBjDRgVSp87dULVKdRQoUAgXL16SfXzmI0eMjKHDXDHcdQR4Xy9ctFj2HT9xCuHDRcQQqxWMz+WUaTNEuOAzt2fvPixdvgK5cuVGxgyZhA3fedt37JK+wqFFaxHw/LLsTp8+E7Xr1APbsHiJ0uB7lIIA++DMmbLA0ZGC+4eWUgoffF86tu0gfVXuXHlw7bolH90FEyZKKs8a+x/2hxTCu3TrKUo4vqOpYIkfN764vFLwOn3mHNKkSS8Df140lVMhgodA+w5OmOu+EEmTJhelHvdRcKZQzn6waVMHcbfmfXvk6HFkz5odGdKlh+uIUX66E02aPFWut0GDRtLuHKgzMc4qVao0aNGitbzrCxUuhuYtWss+urimS5cRtWrVA5VNTLTitXFsj+bNWyJjxixyfbIDkP4mefJUoCDKREVP+QqV0aZNO/TrPwiFChURzkZ+45fvWr6nevbqK+9Kumvx/hg2zFXGL8WKlpDnz8i/bv0m9O7TD1WqVJP6cbzCtHr1Wjh16ir9fKFCRbFrt8Utj30Dxye8vymsZ86SzYxJrVevPkKHCIly5Svi2LHjUg7vRbqz831RslQZ7NxlKYfHs6
3Wb9yCpg4t5Tk+dPiYKK34vm7YuJkPJa5RX/1VAgGBwFcJIBQ2Spcuh9Kly8pLuHSZcnBxHenj5TFuwiRkSJ9RHqS7Hu/djg4fPobmLdsIGwoTmTJnw87de3D9+g3kz18A0aPFMH1Vc+bMLR0IM1NDkjtvQdFUc/DOlzZ96xctXiJlcdAQLWoMU8NPAWS8VVsyb8EiOPcfLPkeP3mKaNFiYuq06bLu+x8H3+zAYkSP6UPL5Dvfl6w3bNhIXo7Ma9S5cJHiePrMEuA+buIUU/NGf9C69RqBAy0mxo/ky5v/g9PwZR47ZmxULF/R3Jc8RWpMnTpdLE7p02cUIYMvMmrl6VbGID0jxYuX0LRgjBg5Bp07Wwb93P/Eqm2nsFWrdl3pELmdg0YO+GbOmiPFsP5sJ2phmJz79kOLFi1lmf9oAWvd2lHWKTRu3bZdlqnV5b3DNHXaTFOI4Xq5chWxwKotpkbnuY12XA6QAdBbGRxGjRxVNJKcb511paDgbj22d+++iBcnPqiBokWHAw1jULJr1x5EiBBZtE5Xrl5FufLv3YJ4P0SMEBnXrt8Qq0qypMnlhWsw2blrFyh48ZxM/QYMkj8uX71+A9Gjx8LmLVvkRVyuQmVTOKfWN0miJKArlO+0f/8BZMqUFU+tVjIyiRwximmFcnEbIUIfj5s0aQrKWmNmDh8+ghjRY5nasLx582PePIuFyfc5GNdE4d/QULZs2Rr//f2v6bLF6x0xcjQKFiqCO3fuSv3r1m9kCgEcxGbOkl2Kpd97jZp1cOrkKVmnNvbiZYsFZdKUaTJAMs7PQbDhyjRv3nxTAKFFx8GhpQhyzHvi5CnUrF3P1KZTaGTf4jtNmTpdhCJu5/3BAczatevkuORJkiFenHimcFuyVDnMmTdfiqCGmoNpI1ELHC1KNNMljgM1txEWVxoO6ig80IrD1KlzV0ybbnExoUXv33/+kwHH5i1bESpEKNSv31Dy8dmOEye+WDc5YE2bOq0oV3gPU/lB11QOhJnoyhc6VFiw7RctWowiRYrLdgp9VLZw4Dpy9DgYAxwOMDlpAy0LzRxaSV4qepInTwkO6Nh+KZIlFwHA0OqXLlsB7gsWSV7b62d9+BxTmcDEQGujL5IN1n8NGzXBCGv8HDX/JUqWkT0cOLbv2NnMynths1XrS8H4Y25BAwcORosWlrofPX5CYgh5TaNHj0F56zPIATyVRIaVxjgJNeQUgplevX6LhAmTYNUqi5WzZcs2cGhu6XdoBYsQLiLGjBkneanI4aDZSIyvqlixsqy6DHeV9jMsorSudO/ZW/aRTdo06THSev20RBrW9SFDh2PrVktfNnDQUBlc8qCTp86ItZvLN2/ewrRpM8x+XQq1/qNgvtj6vmJZ1arVlD1Xr91Am7YdYSh7bI/hM8cB+mjrdS1fvgJJEicz23D8+EkoUKAwaGHnO6R1mw5i8aUShy6utAqy78lfsIgIxyz72PGTUsagQRZXw8VLliJ82HBigeR+9wULUa9+Y6kG+zSj/bmBbc5+lYnW8j59/XZvZHvmL1BEJqph3rFjxyNypKjyfuOHTPl+pRLNSClTpgat8k+ePkWNmrXNvoQ88xcoDFpmmBigzvc+rVZMfKdQmUHLD60LterUN+vE906qVGlBK7HvROGPfTytrmRE6xsVn32c+0tWxgCFCB4Sx44dk/u6XgMLD+5s7dhehCguUym5YeMmOaZZMwekS5telvv1H4BIESOZ/TItyZEiRgHdwyk4U8iikoBpyJBhyJO3gCzzH4WO6NFiitJn+YqVSJIkOYa5jJD4QyozHNs54dYdi7W6VWtHifEyD9YFJRCACHyVAHLk6DHky1cAw4YOl0E6O/AhQ10+MJ9XKFceoUOF8YGDmmMGptLFhSZ7xkUYQWLO/Qaivs2DXqZ0WTFJsgC6+BQuUsIcaLKDq1K1pjmjFDXrHDxstwaOt2zdVlxMeGy1ajVQsGBh0aJUqVIdSZMkl4GEj4pZV/hyZ3Chk1Mn/O/Pv8zBs195P7eNQgHdrm7cvG36R1epUsOcYYWdIIUwpvUbNongU616TbBToZaLGh+/Uu3adX3M1EMXnw4dOkpWaslsY1/YKWbPlkPcFKh9o7Zv+gzLoIruRaVKWQZ77LzZuTM4jon1adLMcn5qKjNkzOwj4LBdu45ImiSZ5O3du49YOWQFEFcJQwBZvGQZEiZMjNq16hi75de3ALJ6zTpUtb6Q7969JxpFHwdYV27dvgsOkA03qBu3bokWlJpvatGKFSshGnbqztjZUxjwsM5ARXe4bFlzYPas2aJ1N7Ruxnk4GDQG8rRozJj53v1k+bLlSJMmnZjVmZ8CYs7sOdGgYWOUKllGLBez57gLZ2rojURtMoUwv4JEOYBOmSK1OXAeN24CMmXMIs8Hj+cMLnXqNZQXJJ8bDjBOnDqNWrXqImb0mKKFZL6cOfNg3LgP3cu4j25zeXLnM039ixcvRb78hWQmO6OOHBQ3dWiFOx53sW3bdlStWsPYJb98tnh+JmqbDYHuytVr4jrD7RTm6CJgJGpeOXhgotWQmkMj8dnl88nEwSndw4xEjboxkDa28ZeB+RQ2ec527TsiXJiwoIaVifc4taxMHPDnzVcQg62uhHS9qlSpqqkc2bNvPzJnyioCCIWh6jXqmAMUPouVqlQ3r4/XSs00NfB0IYseNboI+jwPlSO2Lo4ctNNtkImWD9tZlAYNGSZCVYMGjVGnTn1xXeFAlm47dGVNkjgptmzZKsfyH90VQ4YMgz5938dusF9i+xvXHzb0++tnP9GwYRM5ntdPTeyQIZbBJYWGylUs9yOf8TZtHPHP3/+IVtw8oa8FCqscgFEgoHtf5cpVJEfjRk1MrtxAjTuVHBTAKBzbupfaFklXO7YpLbF8xsiUCiUqeypWrioWOU54wWTcZ8bxPJauhBTyO3bsLPe94WJWv34jcZdhXgrP2bO/n+iA98Dcue+tMbTEZs6aAyyProa0IBmTP1CBwYlAjMT7kW69TLTwxo4dF+XLVYCnzcyIvAdp0WEiBwo4fNf55SIkmaxWmaPHjmPCpCmilHBwsFgx6ULFAa5h1TDy85euVjly5BEXQ67TQsABN5UpTI8fP0Gz5q1lEgmus88KGzYCxlkFFm7j+zpHzjxgH2ukmrXroo9NbBS9DSgUM02ZMh0VK1UVC8TadevF6mIcx77CUGbQHZAuSX4lavM5GYuRqLhJnz4T5sy2KLHKV6wibqbGflpoDWF0zbr1cGxneZ9t3LgR2bLl9DH5R506dZExfSY5lPdGk2YtxMJ85MhRZMmSw3T/5GQXVGyxD/YrUUk5y1of7u/Zu69YyjguqVy5qggTtP5QYdKubXuzCPYTfBZPnj4jyo1KlauKIJI/f0FwfMF05tw5GZ8YE4pcvnoNsWLEFqUJ3fs4MYfhDcB4UcMqymNv3Loj1pmt1j6hRs26Pqw4W7fvQJQo0dHSRulnVk4XlEAAIvBVAgj9OalJo8+6kU5ZTZKGLzQHsj179kGK5KnEncDIxxcMYwaaObQUF47KVaph9WpLx
+jUqYsPAYTuE4bm7NKlyyhYqJjZmVCzR0uF4fZCjX/mzNmx2+o33ba9k+m/zJfe0qXLJFieg2lqh1gP3y87o47Gb/XqtcA6fW2iRqtI4aLo6NQFGzZZBhiDBg4WN6GVq9eZLigsn64LdCuiXz07UgpcfEFz4OE78WVHc76RGjVqjNatLS9QamdWrFpj7AJdR6ZMmSZxIsePHwddUzhIoaaVL+PuvfqKBoezA9nOwsKJBuiGw3T69GnpEG2nRaT7Uu7c+WQ/B5G2mrDhLm7iGmdU4uDBQyL4cCBknIOuRa0d24mlwshHFyPGg9Av92Ozq/Blype1oSGmBpZ+vhzgkNvZc+fkZUVf8r37DqBo8VK4csXiN86po3nfchDOwStncbNNFFLdrZpzarbmzrVo0Zln2bLlYq1gmzBR0zV48FA5H+8parOoaeQAs1mzFmaxHHxTQ+nXbMMUQOhSwMBSpkmTp0i5fMkzUbCvVqO2uM5QY8yBEbXknEGKPuj7D1gGDLQGjB1r0fzKgTb/6NZFAYgBmUwckBcvWdoUiLmN7ki16jQQt8QdO3eJ+5RNEbJocYaw+H7ThYa+62wrIzHeiZpUvxIH1Ib2mfs5BTPdCZgYg0MXOd/J95TeXKeSg5ZWDgo5iKVViIkCH4UFJg5sM2fOhgH9B8o6Xdk4mDISNakZ0meSASsHz8VKlDFjLTiwKl+hirgUMT+FWQp29Mk+evSY9BvcTitltmw5sGjR++ulkMI4NyZaVQwBhBYGBsEznunUqVNSDt2D+GckDuxjxowtA35jwMLBfPJkKUXRQysHE58rTjhw5sw5ZMqYWQaK3M7rNwQwWjlYtwEDLNdPZQQHSLaJmutIESKJYEYLme9ENyQOULds3S4xR+XKWmara9K4CUaN+jDwmv0oB3q8n/1KvNYGDZuAbi9sMwrZtgHrtWrWRrx4CbDMGhdhWwaFG1ro6CbEZ55W4fXrN0iWtWvWifvptGnTxY2rZs06Ej/DnRS6Z9sMLhmIXLZ8ZZml8fiJE/JMGjFPtDLVtwqwPJauZ/37DzCrQdccDi4TJ0pqnpvuSi1bvRdavJ6/ELfTsKHDyaDcL7/8zVu3ibKE8Qx0s6XQysRYn3oNmuCGNabGPLFVAClYsIj5ruBzTOGJs1oxUVCkBZFTeBuJ92ysmLFFKKWQS/fNbNlzmbEQfEeXLlsezlZtP98JFFppbWJi30wrIp/5NWvWyrTVRtm2v06dOkuckO02Y7l4sRIoVLCwsSq/dG+cM8ciFNKqYqvg6djJ4pbNjDNmzkKrNpY+gQJYsqQpzFhM7qdlkgIT06lTZ+R94OXlhT179iJlqrSi8OM+TllMBdRxq8VWDrD+e/rUC81bOpoxWlSKNmjUFCNHjZb3HZ93jm8ePvKU56d9uw/7KL5n6bbJsQnz0w2MAiLfq3K/lask71tLXW5Lv3To8GFxl8uYKYvEMnJfmlSpTWsq1xl/SmGN7cE4Wyrc+F5j4nuAadPGTcKFLomGO6ns0H9KIAAR+CoBhK4XRYqV/KDDpIaFPrJMY8dPBANxmeiHbrjI8OWXIEFi2c5/9G80pjOk7yq1hEbi4NnQsHDgV6p0eQle5X5qNXisoQmnibp0mQoSnMz93Xv2wSLrgGjocFd5eRvlcjB4+cq1TwogFFJ6O/c3fTGNY/37y5frX3/+hTt3LS96aj/ix08Eh+YWlwSjPPr51m/QRDQrxja6KxlBjcY2/lIwM7RnXC9QqKgIWFym5t5wi+D6uPGTTLMy15nYUdLkzEThhLOq0JWGwZlGolWqbYdOssrBPF2FGAdhJAp/Y8ZNkFUKibbWGmoLs2XNLu4x02fMMmMBKLDQx5WJ7cc4FdvEIGreGxR+PpauX7+Jxk1byACSeajlcmjRBlusL09u46CKg3gOWNKny4gNGywmcr4UcubKJ9p/urxEihQFe/ZY7lfGJxUvUQbnzlvm1+/QqasPjSFdJ5IlS4kHDy1mc2rsqOUzEjVcFIapRaR7lKHFZ7wANaPk4DutXr0aadNmEBcODn4ZC0N3KrohMNGVrZn1PuFA3dCu8YVDVxQjWDFPnveDLb78bBM1rvTjp5aVgc50qSlZqqyP+CYOBsmQwi7PTcGMfv1GunDhkg9BmC5SjRo1AeMJjMR7oU69BsaqaK0vXbIIPRzE1KhRy9zHgW3HThaXJAq1HOzYCrdcNl60xkF0m6GwZqT48eKbGtTateqaFgDu533GYHmmUaPHSdwMBwa0ytCVKnPmncJuxgAAIABJREFUrDBmGuPA2fa+TpcuA4oWKSbWQLrztWhp8U1nWRxI0wWCbkKpUqSSQa+cBBDB1rAAdevZB5xim4lWB2q86fplm65evQ66ctHqxcRBaezY8UTwZiC+4Z5RqXI1sUbOnTMXWbPmMIuIGzsu6NrGRI2wYQHheu7cecXlhcvsF+vVbSBKh3UbNokV2FBq0J2GfvK2ic8OB+pG383gWvJgovBKC62R2CcbioBixUpip3VaWtu4EsZt0apNhQUTrcLJkqWQZQ6aec8z8VsxFMJ8T1fNvoADVyYqEOgeZXzzgTEpy1euFvdPKrVsE63IjM0xkourm2nl4KQDRYoUM58zCvqMwTESg7gNFx8GMBvn4/1guMExrqJHz15yL0yZOlNi94zj6XpEIds2kVOJUmXNSTM4QG/QwNJ/sPzqNetIn8X3Du8ZI9EqTUGU/TQThYk4seOKlZnrZM1nl4NlHjXcZYRxKFKnTgdO3ELLYYZMWcXVmTtpCWbbu7hZpqlnG7H9jOBqvqtpCWSiUoeKPVqYjGQErNNLwBBibNuc+WhxChk8lOmedPDQEeTImVfqz/0ZMmRC+/YW6wnbNWeuvFhtFWAXLVlmztzH2DkKIIzRMhIH3Ub/xGeafSQFd/a/7N8Z3M3EY+lhQQUH+3ffie9K2wD1vn0HiPuXbT5y5QQZFJgMYZ3WHLqxcV/TZi19TBhDZQHvYY5pqCSicoqJlkjG/dB6cujQIXGh5BT9TJxcIVyY8Kb7NRU72XPkFmUd24YWrutWazLfxXyn875gKlK0GDp0dJJl/acEAhoBfwsg1NaULVtBgnsZTMcOyHhR0+xNf1ZqKFKnTi+aWwLhOgOwOfihT3m6tBlkAE2XBU7nS0tDv34DRKtHLRMH3gw8CxUitAT63bx9RwLKQ4cMI4FbfPFxEMYAs/4DhoiQwhf1iZOWwNL5CxeLiZODCXb81NA5deomrhQMpKNw5DsomPWkaxL3t23bDq1bt/PhBkRTL82khiD1pQ3Nj5rlzp3fh7WIAyTDRcS2HGorGZhGjSi1vR8TkujORHM1NaI1a9XBpCmWeBZq0MOHjSAuF9QSG4kfdWrYqKlo6PjSYadtJL5Y0mXIIkyMbezcGLTJID0jEJxa+oKFi8pAsnnzVhKgx86UibN/cJDL+2HqjFky6xA1cBcvXsbIUWPQqk07cWmg69C2rdtEAKLGnMHrvCeMxJcv/b2pKfUrsd05c1akiJHF1cnwF+fAj9YTaoHZmR86ckwOZ9sz/oEBhrTGMVaJLgVG
4iw/dNuju9jAwcPAufiZGAybIEEimUHJmHWHvuHUWtWt1xCPPD3Fx5uBkdQu04+YAcSGSwcDgTlI4LNBSx8FEN7fvhOZ8sVLy9CmrTskLoLPCS0GFFjpUkJfaQ4cORBjXTnooQUiYviIEkDPMt1GjBJBhi59Rh2Mc1HLzwB7PrPrNm4Vl7H//g0us8oxD610nbv2EP9kw3JDIZkuVU2bNkP79k7YvGW7D4Fg4sRJ4lvNZ8I2derSXV7gDs2ai6aeL2x+ZJOKhP/++U+EeQ6s6WoXMUIkjJ8wUQ7nTFFly1WQZ49tddIaJ2RbNgfEdGekq6HbyDEyKOPEFLQ6JEqYSAYaVIxQ0AoVMjRSpUwtgyZaK+h2yOl1L166IsHoZMyZl5gYBEqf+urVaoB+/blz5UXiREnMgSJd4Ki9Zxv3dR4glkkKR0ULFxVXOE7awP0jRo2V2fsYL5A0WQoRFuj+xnTnzh00bdZCBFbec/MXWAan7EurVq8lzwj7HVoSWTb7L7oEtWvXHn37DRLFwI4du+T6OTimPzhjQBiDx4E3A4zpynf9xi3s3L1XBjOpU6WR66eCJlGCxBJ0zu+hcBKCVm3aSkB8rz7OMpCSSlr/cSBJl9WsWbKCcQoURqNEjmq6fDZp1lyeNcYSMcbHsGRw9i1amink2w5GOZhm/8/26NnLMjFG5EhRJJCf74K69RuLhaNRwybSloYLqFEnWo7ixo2Pli1aY8TosdI2lSpVkXcL73fGgNDKxWeAA0/jWxfs4/g9J/andBVi7AqVA7RMkDXvAbbVjVu35R0TL24CLFm2QnjwfuEAmRYJxtxRG88ZmPjxTPJlmjVrjkz4MXrseHHhowWYlm7O3siAYrrK2Sb2XRSkCuYviJGjxqJEydLiTkbFCetEixZjQmhZtBVAWAbzx4ubUMruN2CwTDrAIGdaXXj/M7aCFhmPew/g4jZKJrGggMQJO27ftvT1dFfjJBz0VNi0eRvixY0vrm98XmjpJQ/O6Mfnk0J5uLDhwUklmDiZBgO9GXc5UGb6sgR8My6LzyQtV77bjceNGjVG+lEqnTjpAS3STGRBqx0VOGwzfnPLmICE762MGTKLm7QRx8T3eq7c+eTdQ3dj3pdMrCs9HVhXQ0Bh8DatILw/h7m4IVGiJOJSSgWRkWhNnTx1OhImSIx8VL5ZY6Lu3L0rrpK8jzt06ISly1eaFjWZeKFQUXlXUFFKV1gmelaUKVMB9ELgtRjbWQ9eFycUIT+6tbJfZaJlhi5xdA01LKG8Rzn2oTKvc9fuMhEF83IGMcY2cZIF4+PIw91GSVwnZ0SkoM1y2e+z/zYEVTmR/lMCdk7A3wII3T/69Okrrio0x1Mzyw6hTx9nMDiWacaMWeISYmizqA2iS4AxzSq1srRssBPnC4zBiJyKlA8PXYsYH0LTMrXHzMMBMQMuJ0ywvGg4sKEbGF8qnImHU4Ea02Ly/CtXr8HYMeNAdydOrclEpdKAgYPQ1rG9OU2l7LD5t2jREnlZdencFet9zR1OrSG1mNTU+TfxJWfbQbMjZtC9X4kxCK1btQE7d2OA7zsfZ7OiKw5dDzjAN5K7+3yZyYlaVVvtNPdzij92ZsbsGsYx/OWsKLYCC6f1ZbuOGjlKNHnGjC7U7nAaXr8G07R8tW7liPUbN4kW17AA0NJy+ux5dO7U1ZyKkYIBgxIHDhzkY9rOJ0+eYN78ReasXLZ15DI7awYKurq4yixetoNtug61atVapn/2sLpJ8eVKlzRahDi4M2bHsi2XPtucptN2yl9q10bLvT3MnF2Fxxw+fFSC4KlNZ+IsLbynOCjkF89tE+MkOJ0lp5BmkOrHEjXy06fPkMEDBalhwyyBrpw1bSKnCB05WjTg5Dh16jT07tNX7vUNGzZi3HiL9piWDbqD0Q3Ar7Rq1RoJeCY/uknxuxzGsbR60eXCxcUVY8eNN1+4nIHN0bGdBEj6FajM2XLu3X/vRmScl9Nvc2ronTstU2YziJaMOaUwg5SpNeTkEEMGDwHvVyMtXbpcXr6chvpjiVPe9uzRS3hwFh4GHPO6Z8yYaeknrt/AkSPH5Lkgf1oZmOj2Y8RrrFi5SqaFZqCwMdBjUCj7I05LfP7iJR9aeLpmsCwKqYZ7J90jOUkD+56hQ4eJUGPUmdpL9mesFwPsjUTLEoVjTtzAgbORWD4D3Rm0apt4HRRW+JE1I1mm/O0ps90dOXZcniGeh/fP7NlzpZ/kYITrluu3DLp4XQsWvhfq+TywL1hndWUyyjd+OUUzn3EGx1LzSjfCeda2oqaZg33WzWDK43gfcTuFCt+JQhVnAevRo6cohNi38t5mojsol9nf+JWo1aeA2smpM/iM7z9wUOrFvOxHOBEA30PDh7tg4MAh0iackpmJAzZaXji44zTATBTIOTUs+TAGjoICBWq6MHIWQT6PdGEaPWqMOWsWB7oc0DM420gUGlhn2ymoGUTN+8RQhBl5jd9Lly+ja5duYvVhEDH76U3WAOnVq9dgxEjL5AdGfttfKr54/1DZY0zBTFcqvjPdXN1EmDeUarTuckBuTCHPcjglMRWFdIFlX89ngsyoqWffxym2165bJ1YYvoPJgJyMxOmT+Z6lJt+YYpgKCAq0RmC+kdf2d8GCRWIdt70vOP08hbmNm7dJO9i2PftRzkDGd7/tt41Onz4r/RGVbkZif8b3pMtwF0yYOMl8ninUsa48ntYa62RaxmFi/edYhH0873FbLwNai6hUoKKH09/aJvYZbR3bySQDtts5HT5doNkfGUoZCuK0sDAAnvFERtyScdzJU6cwatRYH23E+5DPleFSxedm/LgJcn28zkeP3it8KByzzzKEGgr97FuMab+N8+ivErBnAv4WQOzlYvjy57SuQTFxlpBNG+3r40rfox04SFuweNkHkxl8bdlnz11AyzY+40y+tiw97j0BCpcMCn5nTgHxfl9QWHr69CkqVKwikxwEheu1x2uk9WjAwA8/GEmB13BhtMd6a504pb0nmrdyNKdTDoxMqHiidUaTElACHycQIAUQainot8spZ40pXj9+iYFrD7VXcePEk4960T80MCR+3IqaeQqVHNwamulvuTb6A9O/nH7Q1MJp+jYC/DgoLRjLlq9Az97O5gcMv63UgHk0Y1o4DTWts9/jXg2YFH5trRk/kzNnXnENoxWCs4hxlq016ywB6r+2dnr2TxFgf5w6dVqMHTtB3M8+lTcg7qO3BF3jJP7QOkVvQLwOrbMS+NEEAqQAcvzEafG7zpMnr4/Zl340LHsonwGtDDpm0KnxVXF7qNe31IFxINlz5sHGze+nIv2W8ngsA/w4zS59rjldpq271reWHRSPpzslXR45BejuvT6/ahzUeHC6cE6TzTiSZ9aPTwY1BvZwvZzOmcHTnF6cM50xbs42/sQe6qh18EmA7rycwjh/vvzyIULGOQa2xBhWxnxxxjbGi2hSAkrAbwIBUgAxYhL8vqTAvdX22m2XA/JVU4vMGW2+Z/LNRjXV306XTDktqqb3BJTHexa/aolBxcaEFL+qDnreLyPgux/
23U9/WSn2nSswXpN9E9faBVQCAVIACaiwtd5KQAkoASWgBJSAElACSiCoE1ABJKjfAXr9SkAJKAEloASUgBJQAkrgJxJQAeQnwtZTKQEloASUgBJQAkpACSiBoE5ABZCgfgfo9SsBJaAElIASUAJKQAkogZ9IQAWQnwhbT6UEAgsBfjTy1OmPf2AxsFynXocSUAKBm8CZs+dgfLwycF+pXp0SsC8CKoDYV3tobZRAgCCwd98BlCpdHvwmjyYloASUQEAlUL1GbSxbvjKgVl/rrQQCLAEVQAJs02nFlcCvI+Dk1Bm///YHps+Y9esqoWdWAkpACXwDgS1bt+Pff/9D5cpVv6EUPVQJKIGvIaACyNdQ02OUQBAmcP/hI6RLmx7BggVDmTLl8PL16yBMQy9dCSiBgEqgefNW0o/Fih4Th48eD6iXofVWAgGSgAogAbLZtNJK4NcR6NXbGcH//gfRI0XGv3/9A/f5i35dZfTMSkAJKIGvILBz1x5EjhgZUSNEwF+//Y6GDRt/RSl6iBJQAl9LQAWQryWnxymBIEjg6vUbyJg+I0L9+x8SxUuA4H/9jRIlSsHL+3kQpKGXrASUQEAk8O4d0KxZc/zzx59IGDcewocKjXhx4uHAwcMB8XK0zkogQBJQASRANptWWgn8GgL9BwzGb8GCIV7MWIgfOw5iRokqVpCly1b8mgrpWZWAElAC/iRAd6tI4SMicvjwSBAnHhLFTyD9moNDC3+WpNmVgBL4WgIqgHwtOT1OCQQxApevXEWaVGlEW0itIQWQRPHiI0zwEChSpBhev30bxIjo5SoBJRAQCTRs2AQh//0PcWNYFCnsx6JHioRYMWJi/4FDAfGStM5KIMARUAEkwDWZVlgJ/HwCb9++Q4cOThKwGfq/4IgbKxaSJEiI2NGi4+/ffpPtEydN+fkV0zMqASWgBPxBYN2GTfgj2O/SZ0WPHFn6sbixYiNcyFCyrWrV6vDy9vZHiZpVCSiBryGgAsjXUNNjlEAQI3Dy1BlkyJAJqVKkRNYs2ZA4fgLEiBwFqZKnQJ7c+RA3dlwULVYS3hoLEsTuDL1cJRBwCLx5+xZ16tZHnFhxkDtXHqROkVLcSJMlToJsWbIhXZq0iB8/Ibbt2BVwLkprqgQCKAEVQAJow2m1lcDPJHDnrgd279mH23c9cPX6LRQqWEi0hfweyOMnz3D2/EUcOnxUBZCf2Sh6LiWgBPxF4M2bNzh85ChOnj4Lz6de6NGzt/RjeXPnxaWr13Hrzl3s238Q7O80KQEl8GMJqADyY/lq6UogUBIoU6qMvLgHDRoSKK9PL0oJKIHAT2DkqNHSjxUuWDjwX6xeoRKwMwJfJYCMGDkavfv2B6ey06QElEDQI1C8WAl5cffu3TfoXbxesRJQAoGCwJAhQ6UfK5CvAF6/eRMorkkvQgkEFAJfJYCsWbsOCxctgcofAaWZtZ5K4PsSUAHk+/LU0pSAEvj5BAYPfi+AvHr9+udXQM+oBIIwga8SQIIwL710JaAEAJRQC4jeB0pACQRwAiqABPAG1OoHaAIqgATo5tPKK4FfQ6BEcXXB+jXk9axKQAl8LwJD1ALyvVBqOUrA3wRUAPE3Mj1ACSgBdcHSe0AJKIGATsBWAHmpLlgBvTm1/gGMgAogAazBtLpKwB4IlCxe0hqE7mwP1dE6KAEloAT8TWDwkGHSjxXMXxCvXmsQur8B6gFK4BsIqADyDfD0UCUQVAlw1phgwYKha9duQRWBXrcSUAIBnEA/5/7Sj+XImh0ahB7AG1OrH+AIqAAS4JpMK6wEfi2Bt++AAvkLWgWQ7r+2Mnp2JaAElMBXEujTt5/0Yzlz5MKLly+/shQ9TAkoga8hoALI11DTY5RAECbw7t07LF6yHIOHuMhXg4MwCr10JaAEAjCBvfsOYNDg4ViybAX4lXRNSkAJ/DwCKoD8PNZ6JiXwUQJe3s8xZco0jB07DqNHj8WNmzfNvA8ePsKChYuwavUaH9vNDN9x4d69e9iydRseez756lKPHD2GRYuX4tz5C19dxqcO9HzyBN7e3pLlypWrWLp0Oe7ff2Aewg+kqjbTxKELSkAJKAEloATsjoAKIHbXJFqhoEbg/IWLyJQxM/LmK4C27TqgjWM79B8wGD169MK1a9fx6NFjFLcGfY8ZM+6H4lm7dr24JEyfMeurzzNv/kL8FuwPNGrU9KvL+NiBz1+8xMrV6+BpFZBcXUdIfZctW2EesmvPPixbscpc1wUloAT8T+DOXQ8MGDAI48aNx9y57rLcpXNXzJ4zz/+FfeIIWlQ/le56eMDFdQQOHTr8qWwf7Hv2zAs3bt7ysX358pVo27Y9unXrARfXkXjxIuC5Xb2lD6w1LVu+AitXrTFW5fflq1fyzvCxUVeUgB0SUAHEDhtFqxR0CJy/cAkRI0RCwQIFwReHkY4eO45o0WJh0qSpsmnx4qUy0KaV5FPp/avpU7n83kcXhP+zdxVQUiNblGVhkYXFHZZd3HVxd4fBncF9cBnc3d3d3d1dBnd3d3e4/9zXXU1mGMGh+e+dM9NJpapSuUmqnufe/QdYuGgJTnuxXnyKe8LNW3eQIH4iVK1SzfsTfUFpx05d0btPf+mBjMvVa9exYNES3Lx5S8quXb+OrNlyYtSosR+c5e3btx+UaYEioAh4jwAtr1279USIYH8hRPAQ6NKtB5o1a4GA/gOgRXN3UBnwJbRh42bMnbfAzy62bN0mc1/fvrb33s8GAF6/eYP+AwZjx87dUv36jZuoX98NcePGR8OGjdCzV2+MHTsBvfr0R6VKlXHx4qWP6faH1xk5aqynuTlfvgJImy4j3tqFuPsPHmLV2vWe1pIfPmgdgCLgAwIqgPgAjBYrAt8aAWoYkydLIYvrsROnPjjdpctXsf/AISmfMXM2Avj/Xdy0Ro8ZizJlyuPajZue2owcNQbp0mVEly7dHeVdunRDtmw5MH36DGzavBV58+TD2LHj8fLVa4wcPRbZsmZ3aNAeP3mCRo2aoHXrNjh0+Iijj7XrNqB0mfLImD4TJk58LwCdOn1WxsFFcPaceY76HHfSJMlQs0ZtR5nXjc1bdyBrthxo1tzdcahFy1bIlj0H5s9fiEWLlyJXrjyYMHGy4zg1liGD/YVY/8bEiJGjcfLUGdSv5wZeI61EXINz5coteBYuXAQLFi1Fmzbt4FLERSxLd+7eA8ecO08+MPhUSRFQBPxGIEP6jIgZIxaMcqN2nbryjnXr3lMaP3v2HK/fvBfuHz1+Iu+i6ZlKAf4ZoiLg5KnTSJwoKWrUqIk7d+/iteUbHLT6vranxGVmqrPnzmHTlq24/+CBdEEXTCP8vHr1ylt30Tdv3qJH737gnPbo8WOkT5dBxrx02QozDPndsWMnIkWKglWrVnsqp0un1a2T/Tx+8tRRx3rMUQhIJq3Ll69Yi7zdfvDgIS5feV/vxYsXoBuuocePn8D6XRK6nI6fMBERI0TG6jXrQOsOsTl+4hSOHD0uAsiTp8/QpGlzVKlaHbdu3ca9e/cktTDvzcuXNuUWFTU89kaVMQZq/f
2BCKgA8gPB11P/fyOwc9duWRTLlavgZw76adNmIHCAgKhcuSomT5uJv/4MhlIlSzsAdG/ZWoSZbdu2o2zZ8ujcuascGzd+opyDWsxNW7ajbp16st+8uTt279mHiOEiIFnS5GL5IIPRvn1HOT56zDhpT6Ema9YcEhfSuFETpPwvjSxmFy5cQMSIkTFt2nR06dwFAX4PiH37D0qbK1evS581qtd0jM+6MXr0WGTMmAUrVq5CfbeG4FhIq9eslXP/GTgIVq1Zj8IFCiFEsBCgJpY0fMQo+PfnD+nSpsfBg4eFIciXN7+02bRpi9SpVbsu/goWHH369sOly1ckFoXpgrNlySbHyYxQ6GE8jZIioAj4jsCDR4+RInlKJEmSzFGxXTvbHNG+XQcpq+RaBbly5sajJ0+xdt1GhAoZBvPnzZdj02bMQu7c+ZAmdTpMsCsvyCgXKlwUAX/zj9T/pUaHTl1FUGCDYSNGI0XK1GjcpLm0p4tUw0ZNUbxESezaZbNm1KhZWywZQ4cMw9hxE/FfilSYMm2G1Df/Vq1ei527PGR32vQZMkd079HLHPb0e+LUGVywWEA2bdmGfPkLieLm2ImTUpeWmnhx46NcufJYtnwVXFyKo0qVajJvms4439Su6yZz5OTJPruw7tt/ADlz5kGKFKkwafJUaX7pylXkzJEHWTJlwYxZc9G6TTtkypQFR4+fkOMbN29BuDDhESFMWOTJkx8cIxVDOXPmwuQptj4mTpqKv4IGQ/w4cdGgYRPBMm+e/EiQMJG40LGjFi1aiYX4+g2bxdiMXX8VgR+BgAogPwJ1PaciAIAuCL/584dWrdr4iQdjMshIT5hgc8mKHSMWUiZPIe3oE0ymnIv5nr37ROPPumfOnsOt23cR6q+QyJ83v9SdYBdIOnXqLPtVKldF8KDBHcLD+g0bEfzP4KCvNClp4qTgV88N0ZWBrk9nz55D9Wo1cPjIMXTsYGNI5i1YKNXoFpU0SXLUrOm9BSRzxsyIFiUqjp84iXp1bQIRx0r6J9rfYE5+UudOnfHH7wFx4qTNOkS8AgcMhIoVXeU4/3Xs2Flw2bp1m5QNGz4SQQMFxaZNWx116GKRKGEi0OJEevb8y1xHHB3rhiLwiyPw8PETZM2SDVEjRcHkqdMxbNgIBAkUBMWLlcSt23fk6ps0bibvYKvW7bBh01bUqFEL69ZtwLp16/F3tH+wYeMmzJu3AH8FCyFzHhUdc+bOE2VC+/YdcPPWbemH1tf8+Qri+vUbqFatJqpWrSHlM2fNlv7p/kUaPGSY7MeNHRebt+1E9qzZEDliZNDyYqha9VrYvn2H7FaqWFnqz7fPT6aOd78rVqxC6FBhsW79BlG60KJ88eJFmTtixYgl/QwcPBRjxoyTbSNUMWkH3U4ZX0JBhMLDjBkzPzjF1avXECNGLAwfNgIXLlxCjBixsWrlKtAqREs15+369Rtg8bKVCB40GLJnyy59PH36DK6VKiOAf/9iFX7+/AU2b7G5ptFSTNq7b5+sJ3Xq1BOLE3EtXaqM9Dl8xEipc+rUacyZO18sKFKg/xSBH4iACiA/EHw99f83AnSJ4oLDBeTho/eLp3eoTJ02XequsGvuo0SKjJzZc0jV+w8fQ9wk/o2B2nXqgZaKZk2b49q16yJYkGmvXq261KW1guecOGmK7JcpXRbhw4bHgUOHZX/WrDn4M8ifWLN6jez/G/1fuBQpKtte/9HrYsSosUiZIiXChAyF5StWSZVr128iWZLkqGY/p9d2tEbEiP4vWrq3QYsW7uL2RaGFi2z4MOFQvFgJadKkUWMEC/In9u4/IPuMTfnd328oWaKUo8vGjZvCv7/fQMsPqVu3nvgzcFCJYzGVjh0/KddMBsZj7wHBxRzTX0VAEfAZATLWFEASJ0yEJk2aw9W1igj91hYDBg4W91Cvbo0pkqVA9GjRpeqbt+/wd9S/Ubiwi+yTEeY8NMLOGDP2LEzI0EifNi3mzZsv5+Nx0vnzF6QuA8dJS5atQIDf/KNhg4ayX7pUaYQNFcYhEHHMs+cuwMOHD8VtzMWlmLSfM2eu1Oc/KlCovJgydRrmzV8g1tU7d+9j8OChovQYMGAgBg4cLO1aNG8p7fLkzovwYcLi0eOnkn6c4xs0aLAc271nL4IHCYaqVaqK1ZXCQ6r/0oiyxnFSQCweoUKERCv31hg3foL0nyN7TqlSr56b7G/cuAmv3wFRIkZGqpT/OZpzHDznzt17pOzg4SOIHDEKSpcuK/ucQwP+HlDmVNPowcPHIjxmzpQFL16+wq1bdzy5y5l6+qsI/AgEVAD5EajrORUBAAwYzG7/ovhGuwuRFRj6+dI3mMQYCy4+SxYvkX0u5rlz5ZZtxnPEjxsfSRMlkX3rvzVrN4gmrVKFilLcp3cf6We8PZi9QoWKiBAuAo4eOy7HmU0qWNBgMIJOgngJkDKZzdJi7ZcMQ9o06TF1yjTRdHJs6zdskio3b98Rt40GbjYGwdqO2ymTp0SKZMm9FuP23XuIHCESihSyMSnuzVvK2Bm3QdrDHu+4AAAgAElEQVS8dTtChwyNkhbXs9at24oAsmfPXqkzZOgwEaDo3mWI/s9ly5ST6x46bJRoG80x/VUEFAGfEeB7Hi9OPFSoUMnHSu3atke4MOEkrsNaie3S/Jdaim7duSfW1OhR/5b9I0ePyfvYp3df2WeMiDDcKVKia7ce6NatO2bMtGXbWrZ8hdRlVkAS4+E43xjBwKWwC6JEiIRr12/I8f4DBmG6vS2tLS1btpb6CxculuP8R4tr85atkChBIjnWtGkL3Ll3H/37D0SwwEFQvXpNiS3r3bsP9u/fL3EmGdJnkPmJlhZaSDiGIUOGSp9r161H5IiRxP21R8/e6D9gIGhN8UpMoBEmVGgULVIUtP707dMXbPv85Sux7NIivmXLVtx78EgUQ9myZHV0wfmU52QMCGnvvgMIHzYc6MJLOnP2LP7wHwBuXubdnj1tc/606TPx8NFjPHniu7JLOtN/isB3QEAFkO8Asp5CEfAJgeMnT4lvMRl9ppl8+vQpHjx4gC3btmPkmPGOAMshdreDyZMmS0wEM9Ekip/Q8T0MujtxcRo0eAieP38Ojz37JEidixXL8+fJJ0No2cJd9nv17iMpKKnd5KK3Z68txSUDHVmf2jlSndq2gFPGglAgGjR4qDAaHB/r9es3AEOGDJftGfb0nOcvXEL4sBHEcmINLjUY1K1TX+ozpTDHOnfeQjAV8fUbN8QtI22qNHj+4gWqVq4i9bZutVk3KIiEDxceSRIlwYVLl6U7Mj8cx8KFi2SfWcK4P2XqdAm0ZJAqadmyFQgZIpTDL1wK9Z8ioAj4iUDKFKlAS6lP1KVzV2GWL1y0vZOmHlOLx4kZW3aZACJY0OAoX96mCNm5c5e8p+Pt8wxdRUMGDyFJMUx780uXIb7Tve3CCi0gNtfV1lKleNES+DtKNGHiWTB67HiJJzPt+bHBQAEDIV269Hj81Pb9IHOsUsVK0jfHR2LaYZ5r9x7PKX8pQHGujBguvFhVdu32kHom/mLvvv2yz
3naSmb+MWWMiWH/ffsOMEXyywD0smUryDHGujCkP3KEyOJeZirWqF5DrnvLth1iWbFZQCJLLArr0D028B+BHS69JnMhLcCB/wiE1KnT4tlzm0LL9Km/isCPREAFkB+Jvp5bEQBEK0VfXQYl1qpdDwUKFkav3v1w7tx5wef2nbsoVNgFsWLGRqfO3dCmXUexIsSPl0BcB1iJmi0Xl6KIFiUaypWvhHkLFoHZadp36IzIkSKjQvlKWLlmA9zcGiHmvzHFT3vhkuWy4MeJHRezZs+VrCqMl4gSOQrc3dtIZhVq+0qVKoPYseKicJGi6NO3P5iF5vyFi6CLRY4cuTBh8jSULFkKhQq5SPpLBoDSX5ouCPsP2rJ4WW80LT9lSpdDrBix5bpGjR6LBw8fwr1VG8SPEw8ZM2RE/4FD0KBhY0SLGk3GzPaMdaF2j2M5c+68+GXnzpUXHH+tOvVlUT5+4gTixoknPuQ233Jb7p4jR49KVizrOHRbEVAEfEaAygMGcjOBRaoU/4HB08zOZCVmU6K1gEw15xCTDpZ1Zs2eg/jxE2HlytWgJYFCwG4Pm/vQ3r02hr12zVq4eu2GJJTo2MnmHtq0SVMcO3Zc0muzv/H2uLXKrlVkfuE3QXi+UiVL4eLlKzI27vPDp0yEMXP2e1crM9aRI0cjaJA/kT9/ITBejC5g+/buF2tq/LgJZD5j3TVr1+E3f78hdsxY2L5jp1gbaEXmeSJFiCTn3XfgkGSk4jnr1asv8ybdvhiTQmvy2rXrcOjQYQwcPAw3vGQqvP/wkbinMrZtzpx54IdUz547j+s3byFDuozS//QZM8FzsH/G3lCwINFdlWXurdri6LET2G4X4tKmSYdnz5+LcBE1SjTkyJYDe/cdlOxf5vorVHBFmTLlHNmwTLn+KgI/EgEVQH4k+npuRcCCAN2gmN3JY89esYKYQ0zByIBFCgO379yRBYvbDKr2mr+egsH+/Qccrlv0C757975YUq5duybWk0ePHuPmzdvy7Qymk7x77wHu378vi/vlK1elDpl3KzNx8dJlUOtnpSdPn0vAJXPuk82nXzXTX9L3mgGqdIkwHwy0tjPbDNbcvdsDb9+8kaLTp8+KWxq1kUy9SaHq3r37nq6R52GMCenJ06c4d/4CHj9+KuMwHzSj2whTYb58+T7YnBaebdt3mVPrryKgCPiBAOeOKlVrIF+efJJxr0jREthvz3RnmtLdkrEhTILB+DMqF6xEa0W+fAWRPn0m8FtGhjhP0DUobdr0kh7clLu7t0ba1GlRqnQ5HDp8VJQKTZq2AOMkaIWhlaV79x7InDkratWqiz79BqFWzdqitGD8xsrVazDHnoHL9Gl++X0iZq7KnSe/fCQ1f4FCGDdhkqf0t6w7aeJkUfAwNo8Z8x4/fiwZ+zJmyCRB3cza1cK9NQrmL4i8eQtIMg22o7Ile7YckuGqXLlKIrx59+0hun8xniR9uozo1Lm7xL5du3ZDMM6ZIxd69u6Hdu07In++/MibNz9mzJojl3D56jWUKFEKFCZojaaih+mFCxYoDFpDSMQ4ebKUmG9xN2M5A/fHjrNZtaWi/lMEfgIEVAD5CW6CDkERUAS+DQKLFi+RzGDMpLXLrn39NmfSXhWBXwsBfmfCqkC4e/feBxp0WknMh7lt3wOxKROsSFDhwQQT3hG/5+HVTZMfZKWixRDdNEm0trx8+cLTNyys4+M5jBLCtPXul98Munv3Lng9vhGVH+bcLyzKDFtsnk25wfgyU8f0RQvxvfu21OGmzLtfWpOorPFKvH6ewxAVUFaiwodkXKy4bf2i+6vXbz6Ic+P3QqicUlIEfiYEVAD5me6GjkURUAS+KgJ0zwoeLAQuX33/IbSvegLtTBFQBBSBnxCBS5euyPc/mCqdbl5KisDPhoAKID/bHdHxKAKKwFdDYPPmrRg3boJ8W+CrdaodKQKKgCLwkyNw4uRp/BEwMFxdq3qyqPzkw9bh/R8hoALI/9HN1ktVBBQBRUARUAQUgf8PBDZs2CRxdv8fV6tX6WwIqADibHdMx6sIKAKKgCKgCCgCioAioAg4MQIqgDjxzdOhKwKKgCKgCCgCioAioAgoAs6GgAogznbHdLyKgCKgCCgCioAioAgoAoqAEyOgAogT3zwduiKgCCgCioAioAgoAoqAIuBsCKgA4mx3TMerCCgCioAioAgoAoqAIqAIODECKoA48c3ToSsCioAioAgoAoqAIqAIKALOhoAKIM52x3S8ioAioAgoAoqAIqAIKAKKgBMjoAKIE988HboioAgoAoqAIqAIKAKKgCLgbAioAOJsd0zHqwgoAoqAIqAIKAKKgCKgCDgxAiqAOPHN06ErAoqAIqAIKAKKgCKgCCgCzoaACiDOdsd0vIqAIqAIKAKKgCKgCCgCioATI6ACiBPfPB26IqAIKAKKgCKgCCgCioAi4GwIqADibHdMx6sIKAKKgCKgCCgCioAioAg4MQIqgDjxzdOhKwKKgCKgCCgCioAioAgoAs6GgAogznbHdLyKgCKgCCgCioAioAgoAoqAEyOgAogT3zwduiKgCCgCioAioAgoAoqAIuBsCKgA4mx3TMerCCgCioAioAgoAoqAIqAIODECKoA48c3ToSsCioAioAgoAoqAIqAIKALOhoAKIM52x3S8ioAioAgoAoqAIqAIKAKKgBMjoAKIE988HboioAgoAoqAIqAIKAKKgCLgbAioAOJsd0zHqwgoAoqAIqAIKAKKgCKgCDgxAiqAOPHN06ErAoqAIqAIKAKKgCKgCCgCzoaACiDOdsd0vIqAIqAIKAKKgCKgCCgCioATI6ACiBPfPB26IqAIKAKKgCKgCCgCioAi4GwIqADibHdMx6sIKAKKgCKgCCgCioAioAg4MQIqgDjxzdOhKwKKgCKgCCgCioAioAgoAs6GgAogznbHdLyKgCKgCCgCioAioAgoAoqAEyOgAogT3zwduiKgCCgCioAioAgoAoqAIuBsCKgA4mx3TMerCCgCioAioAgoAoqAIqAIODECKoA48c3ToSsCioAioAgoAoqAIqAIKALOhoAKIM52x3S8ioAioAgoAoqAIqAIKAKKgBMjoAKIE988HboioAgoAoqAIqAIKAKKgCLgbAioAOJsd0zHqwgoAoqAIqAIKAKKgCKgCDgxAiqAOPHN06ErAoqAIqAIKAKKgCKgCCgCzoaACiDOdsd0vIqAIqAIKAKKgCKgCCgCioATI6ACiBPfPB26IqAIfH8Enj59iqfPnnk68fPnz/Hy1StH2TMvx3mAdazEOq9fv7EWfdL28xcv8ObNaz/bfMk5/OzciSq8stwfJxq2DlURUAQUgV8SARVAfsnbqhelCCgCXxuBGTNmIVPGLChbrgKqVa+FHTs9cP3GDbx7ByxavBSFCxdFZdfKqFa1OmrVroc8ufPCtVJlvLUPZMiwEXApUhTVq1ZHlSrVULNWXZQqXRZ5cuXFzp27P3q4p06fQeFCReDiUhwHDh32sd2NGzfRtLk7Lly46KnOyVOn0blLd7x+83nCzzsAO3ftwYsX
Lz31+z13jhw9hgTxE2L48JF+nvbJU5ugt2PnbkybMRtv3pg74mdTraAIKAKKgCLwjRBQAeQbAfu9ur11+w6WLl+FlavWYPWadfK3as06+PXHur7V8e741+zft3Nbj1nHYd1mHa/71jIzVu/qWPs326ae+TXlXn/9Ou61vnf7X6MPn/pl3+bPax2fzmvKzS/bWbe99uN1/1Pqem1r9tmH6cf8mmPe/VrrWLdN3dVr7TisXS/9zpm3AKdPn/3s19KtQSMEChgInTt3gYfHHuzbtx8bNm1BzBixMXjwENy9dw/z5i2AP3/+kCJZcuw/cBC9evWR/RLFS+LJ06e4cPESuM06Li7FsG//ASxbvgJRIkVBpIiRsXHTFj/H17BhI0SLGh2FChbG+vUb8PDRY2/bPH78BFWq1sDIkaPx8qVnQYFCUuhQYXDtxk1v2/pVuGjJcqxYtc6vat/0+O07d5E6dVqMHDnK1/NQWOvavZfUefv2LVq0bA2XoiW+iRDy8NEjLF+xCstWrMKateuxcvVarOKfl7nWu+f1c+qwDfvyrT/rMZ+2vZ7bum/amF9zTu/qWMu827b24d1xlnlXx1pm3fapj+9V/r3H4tf5/Dr+sbh47Yf71jJur1y9BouWLMOly1d8ff/0oCLgGwIqgPiGjhMc27BxM8KECoPIESLhrz+DI1CAgAgaKAiCBgrs69+fftTx7jjLvCv37lwfW8+7tp7L3l+L1z697rOdKeOv+fPcn/e4WNv5Vt/U862OX8e+Rh/encNcr/n9sM57LK3HzHjML49Zt611vdv+lLretTfnM/2YX5/q2sqt12LdNveXZbbywAH+EKa/a9fun/VG9+nbX9p369bjg/YtWrijXfuOjvKI4SOidKnSjv1SJUpJ26nTZkrZkCFDZb9jpy6OOoMH28qaNmvpKDtz5hxWrFyFhw8fOsp69OwtAg8tGL4RBY4yZcujQ8fOH1Sj61jWLNnt19PT0/FHjx5jztx5WL1mrZTv3bsP8+YvAC0IpBcvX2LhoiVIljQFateqi81btjna79m7D/369cdBbywy27fvxNix47Bh4yZs3LQZs2fPBd3HSM+fv8CwocMxa9YcR1/c2LxlK9asXYfHT5/h7Lnz9nE8lTrPnj3HkqXLsH3nbty8ddvRbv6ChejffwC22Md19PgJFCtWAokTJsaw4SNx8dIlPH78GPnyF8TwEb4LLo5OP2GDQmeQQEEEWz57fwb2fr78mOf7Y+qY98C3utZjPm1/zHvmW1vrMd/6+ph63tWxllm3fTvX9zj2/cfi3Txn5rtPm7N9w8frdXHflAUO+IdsRwwXXhQy48dP/IQ3RKsqAp4RUAHEMx5Ot7dl6zYECfgHggUKjMKFCqNmjVooVao0SpcuI39lSpd1bJuyn+nXr/F9zHHW8a2e9Zh1++Nw+BC/T+3jU+v7Pi7vrtW7sjJeMPG+ju/nsj1DH1uH12n+vG/z9cZgxdS6bc5bpkw5x1hKlSyNzJmzYdZsz0zux7zst+7cRZhQoZEuTTpvqz999hzXrt+QY9dv3EKEsOFRtkw52X/4+AkSxkuAQAH/wJ69+6Wsc8fOwqB26tzN0V+5suWljBpKQ40bNZWyBQsXS9G5cxcQNXJU5MubH5MnT8XatetN1Q9+58yZi7jxEuK8F9crVtx34BB69umPYEGCIF+efJ7aUoNfunRZOW9l16oYMGgowoUJh/r1G0i9Z89foHRp21gTJkiE8RMnSzktMdVr1Ebx4iURN058LFi4yNHvtOkzECd2XFSuUh2pUqZGmJBh0KBhExFm6MJVrGgJpEubHgUKFsGAQUMc7dq264A/fg+IsmXLo/+gYYgWJRoqlK8osTdsV7Vqdfz2m3+MGGFzwZo+czYowFWtWg2NGjfHw4ePsHrteoQIFhzxYsdBufKVsGnzVumfwlKqVGlB68jXJApKJYqXEve4MmXKolTJMihdyvM75N2zap7Zz/n91P6+Zv1P7evTr8/r3Ot13zO2n9q/dfy2bfZv/fO+f2u7Tz3n59bnOX0676eWezcGn/r31HepMqhSuSo4n0aJGEnmiQkTJn3NV0j7+j9DQAUQJ7/hO3bsQog/gyNU8BDYf9DmD/7m3TvxO6enM/21+fuz/vk1vo85zjq+1bMes25/Liaf2sen1vdrXN719zFl3tXx61yfcpz9mz+f2n2tMVj7sW6b85pxmN+Xr9+ALjifSqvWrsMfvwdAxfIV/Gx648YtxI8bD4niJ0SnLt2QLl0GBP4jsLhmmcZdOnfF77/9hkwZM6F5sxbCMLdp2x7bt+8wVeSX+7R4GCZ50uQpsuA3btQYTZs2F6FmkIVhtzZu2dJd4lSsZdym9eP69Zt49eYtXAq7IHDgIOIyZK1H5pwuYhnTZ8TT5y9QIG8+xI4Ry1Fl6NDhcrx7D5tb07t375A4UVIUdSkmdf79Ozry2gWbJ8+eIWTwkMiXJ68ca+DWEMH/DI77Dx/JfpMmzRE0UFDw3tCSETxYCPTt21+OHTpyDGFDhUHKZMlx+uwFuLpWxu/+/MNjzz45fvTYcRlHt642QS56tOjoZLf4MLaFwhQpXqw4yJ0rt2ybf3fu3kP4sOEFf1P2NX6JxSs+Z+9s78Gbt+9k2zyT/PXuWbUe/9TtT+3va9b/1L4+9dq+dX3r+K3bfp33U+r61dfHHuc5fTrvp5Z7d06f+rf2/YY7tIS+foMyZWyKiIl2JcTXeH+0j/8/BFQAcfJ7vnXbDgQLEhSh/gqBw0eOOvnV6PAVgZ8LgbnzF+IP/wFQodxHCCA3b+Hfv/9Bzmw5MH3GTEyZMhW7d+/xdEGdOnURAaRy5SooX76SMNGuFSt7quPdTvcePaXusWPH5XCcWLFBd6/LV655qk4eoXixkqhRvZancu7s9tgrrk100WrfwWaJcXNr6KkeLaoUQHr37ivlWTNlQcL4CfHkmS2DV//+A+W4cTsj033+/AWsXb8RVapUR5BAgVCpoqu0vXX7rtStWrmq7FNAYN8cM4UhCiMM1DcUNmRo5MiWQ3b37j8glqca1WrKftUq1UQQpMspae++/QjoPwBat2or+00a2yxGdWrXk33+e/3mrVigsmXJ5ijjBscVNnQYuNrH6emg7igCioCfCNSoWVve5fFqAfETK63gMwIqgPiMjVMc2bp9B4IH/VMEEI89e51izDpIRcBZELh46Yq4ITHj0mM7E+517Ca97o2btxA6RCi41XfzWsWx37FjJ1m4R4wcLWUFCxSS/X79BznqeLfRp48tDsXDw0MO582dF9T6Hz1+0lN1CiCFCrmgRnUb424O0vrTu29/ZM+RC7Xr1BOXqL8jR0XMf2Pi7LkLpprEaPj35w9t27STspzZsiN+3AS4e+++7NMC4t+/f7S2H2fhps1b0KBBI3Tp2h3Ro/0trlIsf/v2nWT9SpIoicSVFClSFClTpMKrV69x/cZN8DwlS9riZZ69eIlwocMiV46cch6PvfsRNnRYh5BQrVoNqb92rc1NbbfHHhFAGjVqIvX5b+CgIfjNnz/kyZsftHKQUiZ
PKdYcRyW7AELXMtdKNkHJeky3FQFFwHcEnr98hQoVXGXeUguI71jpUd8RUAHEd3x++qOeBBAPFUB++humA3Q6BJo1bymLLTNYWenp02eYPWeep0DoUH+FRN0677Xw1vrcHjBgkPTVo2cfOXTs+AkEDxYcAfz/jm07djmqM35jy9btkj2Lhcw8Q+vB2DFjpU7+fPkRJ1YcvHj5/tsjpnH9em6oZrccmLJLV65K/MfNmzdx6vRpPHj4ELTusM/WrW3CBuvSisqynj1tLlZZM2dBsiTJTDeYMnU6ggUNBnf3NlL26PFTRIvyN5IkTISnL17hn2jRJfWwaXDz9l20a9cB9eo3wFhLwOqr169RqEBhxPgnplRlRi6et3HjprJ//MQphA0VFlWrVJH92rXrynG6nJLOnb8gwkaXLl1l/+DBQ/JL9y72w8xkpKiRo6BQgUKybQL6KZyECxPe1/skDfSfIqAIfIDAi1ev4epaVd4zFUA+gEcLPgEBFUA+AayfsSpdsIIHUQvIz3hvdEy/BgIMvqbrFDPNMQh8zJhx6NOnL5iad9CQ4XKR23fsBAUVauDjxo4LZr16YI91YIW3795JVqs0qdPJwp0gXkLMnTdf2m7dul3KyDgzbS6pbdv2UmaC0ClotGnbQYKnmzVrgQwZMmHwkKFS1+u/ceMmIHbseKCwQVqydDnKl3dFixatcOv2+6xRzVu4O87LOJT7Dx7C3b21lGXJlAWrVq9DkoSJZX/W7LnS16HDRxA8aDAkTJAYj588lZgHBpEH9P+7BK1nypgZkSJExl570H2XLt3B76ds2LBJsloxC5ahM2fPIWmSZKhY0RUlSpSWtMHP7B9r7NW7n5yXsSjLVqxGksRJZJ+B6ozxoFBDvPLnK4Abt26jddv26NChk6Q+zp4tJ7ZutWXoojsYswSOHD0ON2/eklOfPnMGiRIlFQHPjEV/FQFF4OMQeKkCyMcBpbX8REAFED8h+rkrUAD5K0gwCUKnW4KSIqAIfBsEjh0/iYYNG4MZmujGNHHSFMeJFi9ZhoaNmmDcuPHo0aMn+vQdgDt37jqOv3n7FhMnTUWHjp0wZco0tGndFiNHjXEcpxtTwwaNwJS/JH5rpFXrNjhy5JijDjeYVapSpcrC1Hs6YNnhN0nix0uIoUOHSen8BYtAoYUC0759B6SMLlCdu3TDqFFjJH1u3779cPHyFTAl8JAhw9C3X39MnzFb0vJ26NAR06fb0gizMbOJ9e03QAQQ7lP4Yp258xbg5KkzaN++o8Sa8BjPSUGBaWn9+/tNtt3dWzk+gsiUwtWq1gDTEjOA29DMWbPBtMeTJk3BzNlzMXDgIPlbumw5mHmMAfgcA92u+C2kx0+ewL11W1SuXFW+wWL6ofDYrl1Hwc2UVa9eC5VcbZYVU6a/ioAi8HEIqADycThpLb8RUAHEb4x+6hp0wforqAogP/VN0sEpAt8ZgZMnT6N0mQpgUPmPolu3bqFR42YisGzZvAW7d3ugQ/uOCBc2vHzb43uPi3EwQ4eNQMKESXD8hOfYme89Fj2fIuCsCNAFq1KlKqJMmDBR0/A66338GcatAsjPcBe+YAzqgvUF4GlTReAXRmDnzl2oWr0WLl2+/EOu8tDho0iVOh1WrVoDulbxQ4dbt+9Eh05dPbmCfa/B8cOJPXv1w+kzZ7/XKfU8isAvh4BVANEYkF/u9n7XC1IB5LvC/fVPpgLI18dUe1QEfhUErl69hkf2b2J872uixWHZilUoXLiofDegaLESGDnaFkT/vcfC8926dRtMG6ykCCgCn4+AVQBRC8jn46gtARVAnPwpUAHEyW+gDl8R+MURePT4iQSAMwj81asPs3b94pevl6cI/FIIWGNAVAD5pW7td78YFUC+O+Rf94Tbtu+wfQn9rxDQIPSvi632pggoAoqAIqAIKALvEbAKIOqC9R4X3fp0BFQA+XTMfqoWkgUraDD5AJrHnn0/1dh0MIqAIqAIKAKKgCLw6yCgLli/zr380VeiAsiPvgNfeP6du3Y7LCB77Ln3v7BLba4IKAKKgCKgCCgCisAHCFgtIOqC9QE8WvAJCKgA8glg/YxV12/YJOnwmGt/67Yfl3LzZ8RGx6QIKAKKgCKgCCgCXw8BqwCiLlhfD9f/x55UAHHyu+6xZy9S/ZcGadKk9/QBLie/LB2+IqAIKAKKgCKgCPxkCDx/+QoFCxYRxeeI4SN/stHpcJwJARVAnOlueTPWFy9e4vrN27h5+45mmPEGHy1SBBQBRUARUAQUga+DwMtXr9C2XUckTpIcCxYu/jqdai//lwioAPJ/edv1ohUBRUARUAQUAUVAEfh0BB49eoJLV67hydOnn95YWygCdgRUANFHQRFQBBQBRUARUAQUAUVAEVAEvhsCKoB8N6j1RIqAIqAIKAKKgCKgCCgCioAioAKIPgOKgCKgCCgCioAioAgoAoqAIvDdEFAB5LtBrSdSBBQBRUARUAQUAUVAEVAEFAEVQPQZUAQUAUVAEVAEFAFFQBFQBBSB74aACiDfDWo9kSKgCCgCioAioAgoAoqAIqAI/F8JIE+ePtM7/oMROHnyFG7evPWDR/Hpp3/67AVev37jaPj02XO8ePHCsf8tN548e/ZNv/Hy8NHjLx4+8Xj1+vUX9/OpHTx+/ORTm3xxfd53PsfPnz//4r60A58RePTY83P5NZ5Tn8/2fY9cv34Dp8+c+aSTPn/xEi9evvykNl+j8uMnT/Dq5auv0ZX08fDhI5w8dRpv3779oj5fv3mD599pDv6igX6lxmfOnMWtW36vnRcuXsT58xe+0oWfo4gAACAASURBVFm1G0Xg2yHwxQLIg4cPsXTpMuzYuQs7duzEwoWLsGTJUixatASLlyzFrVu3v93oP7Lnd+/eoUePXqhcuepHtvC52p07d30+6MuRxYuXYu7ceZgxYyZMH+/e2RocOnwYK1asxPkLF3HkyFEsWrRY/i5euuxLj8Dx4ycxe/ZcTJ8+A8ePn3DUfe0LI/j8+QtMmjQF27fvcNS3bpw+ew4nTp6yFjm2Dx46gsNHjuLAwUPYvHmLp8Vw67YdOH7C+3aODgC4t26HhYuWWIt++u2hQ4ejTJnyDiFg6tTpyJM7Hy5fufpNx/7m7VsMGz5Svjp77/6Dr36u+w8eyjvRrVuPz+6beeD79OmHevUb4c69e5/dz8c2fPXKxgi9fPkKDRs0RtOmzT+26SfV8405unrtGtxbtcWly1c+qU+t/HEInDt3HoUKFsHcefOlwZWr11C0aAn07dv/4zr4DrUofHJd+VyaM3ceOnbu+lHN37x5I3N25So1cPrMuY9q8zUq8eqGDhmGIi7F5EO3n9vnq1ev8ezZe+Xfvv0H0Lpthy9S4CxZugwVKlbGbo+9nzusT27HtdPMP5/c+Cs06NKtJ3jdftHgocPRf8Bgv6p99HGuQ1u2bMWmzZtx4eIlrFy5Clu3bvug/YMHD3Dt2vUPyi9duowZM2Zh3779OHXmLJYuWy78zr59Bz6oqwX/Xwh8sQBy9eo1NG3ujkQJEyNPnnxo2rQFat
Wui7p13ZApczYsXuL3C/OtIecLlC1bDmTKmPmLTtWkWUvMnjPvk/qgpqdwYRdZQGvXroty5Spi/ITJGDd+IubMtS2wS5evQKpUaREpQkQkS5ocDRs1Rb58BRE7VlzMmDn7g/NR2zxk6HD8HTU6SpcuhwoVXTFtxixMmTod/foNAL9U6h1Rk1W7dj1kypRV7tXyFasc1ShY9O03ABHDR5TxOg7YNzhWf/78IcBvv8tv/vyFRON9//4D5MtXQMpCBA+BKVNneG3qaf/Jk6dOp7UqWrQ4kiVJ6riOmjVrI/rf/+DGjZuOsm+xQYtCmbLlED9eAjx+8vU/+ESBN2KESGjW7POZ+Nt37qJgoSJwcSmOh48efQsYPPV55uwFkCHjcxQnVlyULVvO0/GP2Tl77gL43HpHjx4/wcnTZ7075CjjfELBi78/E9GyOHnK9C/WLP/oa1qzdj2CBA6KmTNnyVB27vJAqJBh0Llzlx89NDk/GerjJ05+0VhevnyJJ08+znr34uUr1KxVFxkzZgGf3e9FlK+KuhSTNemuD+/Lx4zlwoVLWL1mnaMqFWR8f75EgGvXvhOSJEmB7Tt2Ovr91hubNm/DsWPHv/VpfOz/6bNn4HPjFz179tyTwOdXfb+O09I0bMQo4fEiR4yMZMlSYPCQYR80o1BdsGARvPUimO/evQdlylZAiuQpEeOfGGjYqInwLb/584d27Trg88X4D4agBU6GwBcLIOZ6K7tWQf/+A8yu/NKEfv0bM2meTujLztq161C8eElfavh8iC8UhYeoUf/Bho2bfa7o5ciRo8cROVIUuBQp5sl958jRY8iaLReaNmvpaDFz1hxh4tOmSiNlXHQouJDpp4bZ0MtXr1G+fCUpHzBgkCmWl7h+/Qb4++9/4JO2vH37jkiS2MZI03ISM2Zs3LC7Q23fuRv13Roh0O8BkDd3Xke/3CBjSY3VxImTMWH8BHTu1AXnzl8QRmfZilWoV9cN/fr1R6TwEcBJZeHipZ7aO/uOh4cHKIQYd58zZ8/BpWgJnPxCJuRjcDl48DCKlygNCvrfgnr16g03t4Zf1PX2HbvQqEkLXPnGFqGVq9Zg4uRpjrGOGzcBNWrUcux/zAbf5TZtO/ioSV65eg169nr/vn1Mnz9LnfnzF6JwkaI/y3C+aBy1atXF8GEjHH20at0WHTp2cuz/qA1axoYOH4ljn/nufy7TffjoMTRq2hKnz/guHH9tXGjtLla85Gdb++i22rvPACxfsdrPoX0KNlevXUfjpi0/aT32cwC+VDh58jTadeiCmz+BR4cvw/ymh5o0bip8x7Dhoz44D2WOSpUqI8DvAbB5y4fWETZwKeyCwAECSlsqSQsWKCT9TZnmu9Lyg5NpwS+DwFcTQGrWrIPJk6Y4gOEDRl9P0r4Dh1CpUhVUrlxFJjIy4AULFkaLFq0c9WfPmY88efPDvVUbPHps0/ZS008mg/7WI0eNQbFiJXH02DFp07lLN1SrXhMmroNS9PQZs8QtadGSZciRPRdWrno/6a1atVqsEOaErE+pPm/eApgwcZIp9vb33PmLaNmyNfLmK4iNm7Y46nC7WYtWuHP3Q9eT6zduIXHCxPKCcdsrcQLtN2Awnj61XevM2XOlbvas2R1VT509j7ChwyBooCC4eu2GlE+aPFXq9erVx1HPbBDzdh06e8sIXrt+A9ReuFaqLNUPHT6KUCFDo58XoTFR/ITImT2n6VJ+t27bjnTpMqB5s+a4bGGEaZK2xnNQc0mBqbnlvlo7IuM+esx4GNcyti1cuCjmzV8o9/z2nTvW6o7tjh27oFAhF3Tt1gMP7M+U46B9g+5QDRo2wdJlKxyH7t67h3HjJsozR40N3aiM2wwtdfx7a1G/sI/6bg2RK2cekJkztGvnLhQpUgzGJ51WrSIuxeW5NHWsv2TEZ8+dL2MdPHgYypQtD7o7kXr26osKFSrhrpdnZtTosWKVatO2vbUrbN++UxiAa9ffm7bplpU9ey4MHDTEU13eU9fKVVG3nhse2M/X0r0NWrVqI/XoLkkhs02bdqA2jdS1a3c0bNjI0Q/dA1u0bI38+Qv6ql189OgxXF2riNVs0ZLl6NCpq1gVqNXltd+6fRfTps8QwY3XO3nKVDkHhfJKrlUwcsT7RYy+7dSKVa5cDXfv3Zd6XjV9nDNixYyNRAkSOQT3uXPmiQsWhfUGDRqjeYv3Aj07mTx5KgoUKAy3Bo3A941E/COEDY9sWbNjwsTJUmb+nbtwERkzZELMf2OgarWa4hJJhmPmrNnYtdsDFSq4iqvj3bt30bfvANyzuJzRopk1a3YMst8TazwM3RP5/IwcNVpOZdUQbt68VY7VqlXHYeWiC9L8BYvFxa9b957YuWu3zBOVq1QX90VanR57iY/YtXsv0qfPhNgxY6FPvwGYOn2WKE04dhLdHYoXL4WdO3fL/oEDBzFz5mwQ+46duqJ2rTpSTheLEiVKo0fPPuD7bejCxcuoXbc+ypQphz4WV6inT59h3PgJ2H/gIPjsDhzkvevH2HETUKRwUQwfYcPA9EsrLK2pvP9WzChYjhgx0lRDkybN0bFjZ8e+140ePXsjd668Dvx5/PqNG5g7dz727juA6tVriVXeavniNdWpWx/lyleU59j0yXHQmsx2vM9FihQV11Me79W7L0KHDI08ufPKemPa9OzVR9Y0rktW4ntYvoIrZs2aA3FFev4ctADMnDUX6y3KrE6duqJx42bgvfXOsrpn7z60dG+Lo8dPgs9ElarVHZZkjtGlSFFwDCTGl9SoWQcTJ9veOet4uN20WQvRPHMuNQpCm6DQH3ny5Pdk5d+zZy8KFynmmLPZnhYpWt47dPB8P+iayHe4QIFCOHXaFt9Su059hAsbHnnz5sey5StlKLyWESNtz8HceQuEEW3p3tpx/znPV6tW08E/0LpfqLALJlr4C1pvGzRqhiPHjoHrCueiBg0a4cDBw3IOus2WLFXWcX1SaP/37PlzmSNptbVq8vkuzpu3AFTyNWnaAo0aNZEWFy9eRoIEiRDj3xioXaeew/V24+Ytsi5RWL7kRflCbwK+83z3Sdb5bMDAwfLM1ahRG+cv2tysKXxxHSSfwjmSQp91jWesS//+g7Bnr83lbP3GTeJNQU+TZs1aoE6deg4rE5WZq1avkfPu279feCfuDB02QpSXXq3pfHdcihbH7j37pI1PbmbuLVvJ+u71HWajN2/fyfrK9b9163bSj9d/pUuVQcRwERzFEyZMkv7atusgZTt3e8j6Vax4Kce1OCrrxi+JwFcTQGghaNeuvTB4DIAaOmwkxk2wMfa3bt9BA7dGiBghskzCZAbiJ0jsWJBHjhyNHDnzYMqUqShcqAjq1KkvYPOF4QNdq3Y97N6zVwSYrNlyYMPGLfKiZs+W01F30eKlUjdD+ozYtmOXaOnDhgmPrVu3S1/Ll62QBdjcxcFDhmPh4iUSa8GFmYy9d8SxHjt+UhgjTsRWlzIGhTGegeZkr8QFhmMvUtgFDGD2iyZMnCL1KYBYmeJSJUtL+azZc6WLShVdZf+oD6Zga
unpouKVPPbuk3Y1qteUQ4zzCBwwECqUr+ioyvPGiRUHeXLldpRxYuT9iBQ+IoL/GUxctMiE0C/bK3FBT54iFbZt/9AsTp9918rVkDhJcly7cVPcD2hVYewLGepixcsIw+e1TzLUOXLkwrRp05EjR2707t3XaxXxJV6xao0sjF26dpfFhePmQs17QIZ6x67dKFGilCyO23d5YOHCxciQITNatLQJwVz0c+XJj6HDhmPM6DFwcSkhwixPtnXbNhGU6MJGOnjoMAoXLobT9kXWOqC3b9+B7gHhw0VA/gKFwOeSC2POnLmxYdMWbN6yFZkzZ4V767aOZoMGD0XjJs0xZfJUGSOZC7NIbNm6TYSdm/bgw0lTpmH0mHFYvHgJ+PwbKxiF4YGDh8kCSmG/StUa4uIwYcJExIkdF7dv35YFnkxKmjTpHQxCly5d0bBhYxnLjRs30KNXX4nn4rtYqHBR7NplY1gdgwUk+JrWO7r7Ucjn+1Ojps0SMXzEKCRMlBR9+w9Crz790b1HL2Fuc+fKI11QWKdAkC9/IRkf3QXatu8kTO20aTPESpk1y4eumxTAKlVyFXe0qdOmS19jxozDv9H/wZChI4QZ/O+/1Fi7boMc43OSMGESjB8/AVWrVgcZfNLsOXMRL048mR/I2FuJY2vapJkIORzL1es3UKxocYQLEw4LFy9Do8ZNQW0dNYFRI0d13KMRI0aJG9qoUaOFkc2dO6+DGaT7QYNGTTF58hRh2DJnyiKKE8q9R48eQ7cevbFu3XpUr14DVOJwIW/WrCX++y8Nhg4fhS5de2Dc+ElYu36TCCJDhg1HhoxZP3j2KNTTnYGupmvXrcf+A4dEiKpWo7bMB7du30aSxMnkunnNnGf5bnTo2BUrVqxCnlx5QNwpoJExSJs2A+hSQTpz9ixat2kvAiVj2KiMoBsiadr0mfjN32/o3LU7BgwcAjKcVk02BVJarSZNniYYZMmaQ55ftt24aTO69+iNVatWoVy5CnB1repwH6tSpZonIbVhwybo2Ml7FywqDah9nTplKpIn/w/jxo6XsXFN4jWSmVxCpVSOXMKA8SAF7Zat28k1URBLmy6DuA7z2Ny5C/C7P//yzq5fv1HeDx6ntZixjkmTJIOLSzGQkSbxnapeo5bE4vEeFizoAjK5dNHr3bc/1q3bIMqTVq3bSxnnhGh//4sxYydIe77PVCZwLixavDSme+N2e+LUGRFwKUgtW7YChQoVEWaYHTDeMtAfQTBlqs06+OLFS/To0RuLlnxoiabSa9SoMVi8ZAkKFi7qYPQGDRku7/K8ufNE4Jhnj7/h+08lkYl3W7dhEzZt2Yb169bL+TvZBa7Ll6+IcmPq1GkYOGCQzHE7d+6Sa8+VK48Irlxz6OKYLGkKpE2dVq6dMY/hw0dEW4vihesB8aDwtmbdBhEEGVdK92UKI6Sz586jdl03mYv53nJOTZkyFe4/sLlWrlu/QYQLo/iRRoD0mSVLNvD5mjF9JtKlz4i1a9fL4fTpMuCP3wPKPMp5IlWqNFi/foMoVnr27CP7PXv2FvdPxnPyHZkxfQZq1qglQrQRoPv1G4h69RuAWHCezJI5G+bNsym0Ro4ai169+2H+/AWi7EqcOKko8R48eoTo0f/FP3//i83bdmDMmLFIlDiZ4z0nP8Vnee2atTLWtes3SlzQpk2b0ax5SwQKGFhiNw8dPiL1JtqVqvSiCOD/d7k3FAD53Nar/97iTeHDza0RGNdI129iY1zDDWbmlwpIjmHQYM/uV3SHpVC6d/9BRI0URZSmZ86eN80cv2XLlEOEMOHAIHm6zv3z9z+g2/aJEydx4cJFFCzkgtWr12LsuPGYv2CRQyB1dKAbvxwCX00Aad26DRLES4hGjZuBkj0X0LH2hcCgNnrseJmIDx85juuWTEhdunTDsmXLpRq1CZky2GI1qB0LHSoMRo8ZK8fofxk6dDjRDrKAzE4a+0RGl6AY/8YUZs+cL2eO3GjRwl12ZXIvWky2qe1p1bqdCB+3b99BiWIlRDgyTJ9pT4vCth02JoWMZYFCLrKwm+O+/ZKB4ctKjbOVyLxyEaP2f/+BA9i/3xaINWOmzQVLBBBLg6pVqkk/ZOQoIFBDGyxwUPgkgFiaetqkKT3Ab/5RrVoNKT9+8pRYVkqXLO2o9+r1G8SJGRu5cuRylNHl4NjxE6JJunf/oWiFeF0FyEA6atk2qCEm8+sdsR9qeFyrVMetW3fEncmlaEls3LhJqlOQ46JipXd4J4zFlSu2YF9q5clUeCVqbCq5VpVniJaeMePGi8sbJ1VaeajVInExCRMmHLZsswml7dp1BJlFEgPl+ttd2rhd1CKUbtu23ZMAwkmei7JPfuCcjJMlSynMLPvmwhsmdDiYmBu+BxS4SR579iDlf2llEuY+F9OQf4VyaHv5rFDwpZDLxbRKtZoijHHBJxMZP258aUPtuGEYOX73Vu1E2Cc21G4ZYYmCY6069R3PT+fO7wUQWlBovTp46BDI8HOB4ALmlebNXyRMtSmnRYUWIZrhL168hMyZs6Fd+47mMHbt3O3J/XHHzt2oa18EFyxYhFSp0zvqMviYsWRWy5o5SEVF9uzvhWMyLTFjxAStIyQKmp072zTQtPqwb1KP7j0RI0YsMIsSBR4Kst4FS7LuiuUrxWoiDQF5X5InSwkyCCQyUGROaJEzMS85sufE0KG2RZnCasKEiXH8hC0pRImSZdC6jU0jyOcgatToYjFgX1279wKfQWI2ZMhQBPg9oLxrFKLousnATRJdKuk2Zq6HDKd34yejW9X+frPd0uWr0KZdJ4clo2fPXihpf9/79x+IKJGiYtUqm6a0d59+oqk+f8EWY0CrFV0tSVQE0UpgiElHGJ9GzTC17bTGUdAlPXv+wpMAIu9OEdu8y+O0dFOIJrPGQH5aDNhH9+69EDxYCIeF0qsAQuHPJwsIExFct1sIKRzVq2dTYPEZiBYlmmDKc2/dvgP//htLnv3Ro8eiYkWbNZjHmDAlbpz4olghtuynd+/3Vub48RLKPWLdggUKg4ykaRc7djzRwnOfFrxoUaMLHhRAkiZNDmJAOnToMKiIuXzlilgoqKkm0UpJCwBpz559DiFaCuz/jh47IRausWPHSQmficRJUmCGPU6Gguoc+zxH66Zhhq19cLtuvYYg00micsNjzz7QitW6bXvQTZnrYfZs2ZEgfiKpc/ToUXm3TTIZrmd8Vqmw6da9h6xNe/ftl3NbGVNaMLjmkmrWrucQOmkJKFashKd5ZfHS5Zg2Y7YInxReadEkUelSrnwlrFm9Rs5H5jl48BDyPNN6U6deQ2zabPNIoMWlStWajsx0TIhiVeRJh4Awutb7nipVamHAebxNm7YS27fbY49U79CxC5o1t/EPl69cQ2GX4vKs8mD7Dp0czwOTeCROlEQsW7RUZMqcXYK2WY8urnHjxJP5jM9Gzlx5cfbs+0QC/6VMJYocPitUbFAJYBJg8Nrn2GNO6XqXM2ceLF/+3sIvgwRQukx5UIFJ4n1KniyFCDDcnzd/AcKHC+9I4MB1lvGm
JFr002XILMHh3G/Xtj0SxE/ouEapZPnXvFkLud8DBw+1lNo2TYY6Wm3IH1itVaYyvS/Chgotzz6TSlBZbNy1yGNQiBo08L0F1TtFqulLf38NBL6aAEINI9056PZCV5rxEyaJ5s4KEzVIfDiNFt56jIzW7LkLhMHOnjWHHOJLlyhhEuy1a5qojWKMA33OSXQF4EJhiFoQq9DTqWMnYWB5fM2atY7Fl8x3w8bNxGzr1rARKlWsjO7de+KxF0sGTY10xVi0eIlo3tOmTQ+3+g0lhaA5p0+/U+0CSKlSZfH6zftgVU60DNj/J1p0UPvBxZ1EUzSxycYJyNKpsYAsX7FSJtR8efMjUICA4ILkEzFVI6079KEeMngoVq9Zi/37DyLgb7+LtobtOEEH+SMwyper4OiGAkjcWHE8CSCOg5YNxpIE/zM4rlx97xa0ceNmsRp45z5gmtKPtp5bYxyzj50axPDhIiJn9hwfYG/a8Hfv3v1YsmylaPLJMHtHvO8BAwRC6v9SO9wFli9fCTIOZMhJc+bMFcaJjCCpabPmjmeC+2TSKdRS08Z7tGSpTShmxg8y4tT0kMjw0jpw5fJVcWXiZDt48BCQqaFr0ps3b1G0WCnJ9sH61MZRM+thz9hCZo+ZZUhkKGgNsJJxV2QZ3wu6CpA2bNqMDBkygVYwujqUL18Bffr0xbwFiyVAlVnovBKFdgYGXrpsw4CuOFWr13IIJGTqmtkzSS1bsVo0ucwWR5M+47q8W0gYEEu3HUN0j2SQoaGGjZtj1Wqbpo5lzIxXqlQZcxhz5y1E/QY2qwtdBDJkyiqMAStQW20soI4G9g0u5ukzZBIhgEVULjAA3hDdI5gAwxDnD2oKmbGMi/Kdu/eF+aOrkplTTF3zO3XKNNECGmUENZ8UfA1TwHoUQClY3LxpS0JApp8uNCRmiGECCUPUgNawWwtYxmulGykX1wIFi4iFhVrtihVdZV6k9ZhZgvhMUKliiC5OIf4KJZYKr4K6qUMXVD67hkaOHitZ56hIIbVq1Rp58+SXbVrQGNRs0piSAaam2lDp0mUxcKAtxozuT3TbMUTmj2NfZmeGaLHjPfaOWKes5dkwdY4cOSYutZxPqYGlBYPjM26OVavWAC1Khho0bOyjBYQubQxyHjdhsihQ6tW1CSB0i0ufLqNDUUIGkIIjLQ98x1rarZ88B90TiblREnDMVmavSuVq6GiPQaGrF62rpJYt3YWhs87xDRs0Ehc/Hh83brzM61alDsu79+jjYBo5X/P6Q4UI7WCopXPLP87f+Qu6wDDHPMT1iG7NJGqUmVyEREXCVm+s0DxGfGk9iRQ+klgPWLZr9x555/je09LE+WWgnRGkkozvmLEscF5g0hE+19yuU6sOPPbsRcfO3RzKNBmE/R8F0rLlKorrmilv3txdNPFmn26OFGIp/NB6QxdXEt2s6KpdqnRZcdGrUrkqGrg1lGeWLqxcS2hRNkSlIuciEmMajbutOW79JeNLQTh6tOhi2eSx0aPHeHoHGjRqIpZkHqNSKXee/A6rF8u2bduBdh06SZB+zhy55J3mu5Yrdz5QAUnq1buP4MVtrgM8RkuzIbof0cODNHTIUNSp837+opsbk80YqlipCmbPtgmtpix3nnwSj2j2+UtXda4LpFOnTiNxkmTiIsl9ukJmy5ZT5jNinTVrDiywv7tU6HincJKOADRr2kye5cGW2Cweo6sreYKFixaLtZB8TNo06Rzu8aY9Fb3ke5gJy7gIm2P8peWLbfPnKwDyIkq/PgJfTQCh6XnSpPc+1fR3pX8xmb0b9oV6z94D6NSpi2gLuFgaYhYoarCp1aXPbvr0GeXQ0aPHES9uAuze7SH7TP1KbRP9sUlMy0gpmkS/zdSp0mLUaNsExDI3twYOVwFq2stXqCR1yUB279XX00NOrQv9b620YuUqcd+gBpNuI3SnYszIx2TeoF9qnDhxESxIUNByYKWWLdzlRTOLGI8ZAcQaA3Lw8FExUf7151+4bY8ZoIaXL6lPqVOpiXjw4BH6DxyM6tVripBFf1i2p4WKiwuJFpRQIUJh0GDPcQR0T6E7hm9ETTc18adOv18ouvboLe4jpp13bmlk3OrWbwS6rhlivETxYiXEIkBzvFciA0xmlVq67t16SOpVr3VMPAP919lXlMjRREggs033I6P9X7BgIeLGSeBIM8wYJLqTkdiW2r3Bg4dKoCGZEWpFSXRDoPsWtZckWj7IgD58+FDiPKglI0NTr14Die3golukaAnMX2Azu2/YuAlJk6aQhZrt6QNcumx56WvmjJmIECEybliCG8nMN7ELBWRG6ZNPWr9hI4qVKI0nzzx/f4Lmbi4qXp9LMs0nT50S9wCT+pmawvoNmjiEsh49e6G1PUZkztwFwsTJyez/7j985Ai+N+V0ealhd2liGRdJWrZITLfZtHkrT2548+fP92QBYTA5M+eZZCnduvWUhY9zAwV2vufeUfv2HRxzA49TC0lriaE69dwcuDFtJK1CFEK2bt0qcwMZZ5r/6arlUzYbuh9lyfI+DqtDh07iv27OwV8ygfQvN5ieOXNO9uvXd5P5gsoXQ4xjI9Pbtk07iUNgrAPfDVpPxAJoiSnjonvlyjVw3ilesgyodSWZuAXGhpQvV16EYzI/XolxC4yvMTR95hz07vs+MUjbtu1AwYI0ffpMwZIWN1L79p2QOUs22eY/WkBMnA5jO5hpzxAFLzJSFIpIFHoo0HhHtLCQkfJKHH/zFjatv/WYsSpRoWVcqXi8eQt3dO9hY/qt9RlPwaBwxk/w/WzSpKkIzqzD5B3UMJtsZYx3oABL4cerBcR2TXkdTDTjUhZZhCq+/2bOpeBmrKWdOnYWi6WJReR5KUzSpYz0+u078cenK1/efAUccxFjpmbOtrm4GZe1bl27i6KCLlJeickoqD1nnALpwcPHovAZY/cy4LdpOAfRasZ4QZ+Yb1rySe4t3ZE0WUpxu+L8McCLW4181+fVKzBFfImSpcVyyHbVatT6wPJLt1+3hk3Ewi2dW/7RaluiZFmHoo2HKOQwHsAQ5ykqNYxFyKzDVFBVtwjvpj5/6RJGN1Mr7gAAIABJREFUCwgVNIbI4FMwZiyiT0I65yfON92695JmdWrXdSSyoDsr47iM5YRzsJtdUbJl63Zx0TXfbqHCi/wG3R0p/NL1kWloSbREUwiggEt+wawDtPymSZvBYZFj3Xr13CTulds9evRELcv1MtmMub88zjmH6fwNMR4pdeq0H2S95LM7bJhNcKGHBd2iTSwK57dMmbI41jK6H9IaQesG51JaN30iermQ9xjh5fmkZZoWUsbvUrmWL3ce8bY4cOiIp65KliiFf6L946nM6w7nlNgxY4tL3T17PKDXOrr/6yDw1QQQatJp4ieZCZVMeJ9+g8R3lq4xh4/YXCWozUmZMjXov0jKlTuv5BvnNl/IuLHjSjkfRvrSb9iwUfbpb06XLGN2JcNCTTWJGqj/UqQSjSL3KWGXKFXWEZRM5pOBgyROeAwoZ5CsIWo4fPuOBft3a9jUU+Agr9NM6KY
f6y+ZvUgRI4NWC6PZ4/GWLVrKi9yv//ssVhMn2YLLU/+XSrqgqwgzL/GFN1YSHiBTkj59Bim3Tk7UAvLlX7N2gwN/61i43bVbdyRJkgxMqyexAXHiO4J+efzl6zcIHyYcMlqsSixv1rQFyLwYWr9hE7bZg1nJTDFIM1/efBg1chSqVa0m/rvefaSNcSiuVWqINp73gG44fEZItGQZNw5zHjIUCRIkFlc5ljFrRk17DIupw18KIDSXGzcBaor4jHBxihAuosNFhy5ZEcK/369Ff+0CNm11n74DULSYLUsaBYxw4SI4zN8UXunO57CAHDmGXLny4thx71My0qWGvu5Tpth8shlYGJ2+vXZtXZcu3aU97wMnWWoyy9tdXCgc58lbEAcOHJJLpJsEtVzU0NKdqmz5iuK2woNkvhgoS6I1kIGLrEfi88z4JcZ+cMGh+wdp+IiRCB7sL3TtZmPmOnfq7LCKUUCka5hVQGRsCeMHrMS+EyVK6nCb5EIbJEhQMJCeriGMeVix8n0CiBXLV8jCbjTxjRs3RZjQYSWeiszG6jXrxS2pU+euoLDmE5HpS506nWh4T54+I+6W2bPZrKVsQwUDM8GR+O4YDTd90qNGiSbzEGMSqCnesGGTpIskhlYiY877QXcdxjRR+58xY2aHpYB1t23fgdy58zlcFaipnTTlvRbQ9Mf5YcdODyxYtAT9+g+UoFKj+eMx3jM+cwYXvh+cM+i6kjlLDoegzLmTvvu3b9sUJPz+gWHWzLn4S3eLMqXLCQNK7SZdDZlBjYLyqzdvZB5iADW148xolzLFf45r4D1hbINh1qkJNQw3Y2UYT8fvDZGIDd8xYsfroEbcpMy1jofbdKXLk7cAhtjTdtI1aOPmrWA81cBBQ2XOMm2II6+VxPTKgywB7bSKMQjWK1FxQAaLQfWkVP+lRsniJWSbz2OUSFEcAbmM1aDigGO2uUWGd7yjFIjKV6jsiJ8jM8bYLRLnkWzZczkYb8YhMt6G7y/niuRJk6NCeZvAQQHStQqVM2ekL1pKTcIJxkaZNadJ05bgnE/q13+wxM9wm5gXLvSh2yOtGjFjxpH4HNajoq6yxHlJF/KPlu7f/QfEgoWL3xd62WLMj4nNbNSoKdwa2OIBGK9mrD9swjg+CiEHDx6U+YdxnCQKgm3avE+UQcs0YySpFOS1mmQZvI+cL3m/mYSD3+E6eeqsWM2bN28BWgystHf/ASRIkMThEshjtIhQGWN91idPmynlnJMY32QEMtan0rNylWpIkzajvOvW/s32oUNHECdOfDBgnsQ4D1pWSHTVZBp8o32nKzmFOhLvZ9ZsOUUZunnrdgQOFBSt3G3PI92O/o76tyhquK6dv3hJXNLommriS9gH33O6fxYpYru/fA4Zq8f4FhKfb6MQ4z4TaJi1n3wGhRkmMyDRGyRFilSOOYLXw/tP8TJb1hzoZXezo1X872jRHW59w4aNkFgwJpigKzrnOCbAobuiWZ/kBN78c6vvJnwH31tDvDf8PpiVGOtIvoX32CgHeZyKzb+CBhPh2Vqf21wzDV/nsXc/IkeO5rDQEVOlXxOBLxZALl2+CreGjcXFgZM2taPMTkVXiBQpU8sLSOadfpEmnSxfHjIg9JM/e/6imD7jx00AfmejfYfO8r0O+mmSoWCwJ4NkyahQIxDyrxAS5Hv5KoNSKyNokD8dbiKurpXlvLRY0G9+jj3wixNhqZJlECJEKEfmG7pgUFvCrB90x6IPIicEn+jO3buicVlqybJEjQuZLZO5x7u21MRkz55TJg9mXKG7BSe9YcNHOBhmuqRQkxE7RiykTpVGAjnz5S8orjPepf198uSZBLvGi5dQMqwwyJYapFGjx4EZenwiMiUMWGPwHJlaZvEwNHHiFDExp0iWQsbAxcRMCMwGEiRQUFlISpYsgwn2bCScXGrUqicTXJSIkRAi2F8y8XjnQnPz5m3x540TOx6at2wl95P+qD169ZEMJ/xwktEgmTHxfnCRpJBJBox+4PHixv8gSJO3bf7CRahYqbIEfs9faNPGsm3QwEHFPEz3P2bhCRsmrARHnzl3Qcz7fA6pkaclhkGqzI7UqUt3sRxRq8XJsHXrtggZIpS4FdLSRncU7jOLlleiBYqWlGhR/xatMTXMbM/vGpAJvXDpCoqXKCX+9h3sWX0onNJNikIYnw9aPUiMk6JlMFSoMOja1faxQGo2ixUrJUwx462u2bOj0WWI7gp8nosWKyGuZLzfpMlTpgkDxeB4BqozI83KlavFVY0xVDFjxBY3DNZlUDEXWvppd+7S3ceFnIGzFGaZcYtaQmYtYnBt9559kDRpMnELMMIlM70x2JhuFgxUprDId4JxSXSLauHeBu3bdRStMhkiCoPeLTpkSJj+mL7RazdslmxljJWgkHT81GkRHKJEjop16zfKWBiLQbeMHj374t9/YkgcBa+Rpv7ceQuI8Ob1nScTTCGsYeOmWLdxi7i/UenBMZHu3LuPatVryT0xWtQ+ffvJ+z185GjQYsJ7ZnybGVBMfMj80vee93eLPU0lBUrGrfB9pJuKiSniPEeBiUG2tPyRWVi3YTOGDRsp7l1Mg8kgZ69E33LeE/ZJ4YDaXiY/SJHiPwkEb9ykGajxPXTkGPgehwwRUjJhMUsf55sQf4WUZ4VZeGL+G0ueRwqlJMa4JEqUROYbxq4Yt0YGL0eOGEU+HOg14NeMjxYdWgDJXPHaDKNDwYjWQwrzzBRFwZbE9zFypKjynPL54PcX6JvOeYCWACsRm4oVKgnjyIB+ZsJjIo1dHnsETwpSTIZAgaluvQaenmcyd2TiqL3t1rO3QxlCxoxZlSh8tm3XUdwlrR+8mzt/AZIkTY7JU23JEBgzwm8jMKkFLXvMmkjiMzxhwmS0atNerJBMNkHF0phxEySrEl2G9+0/KK6ZTNrAQPbqNWo7mErrddLCwfTm1ILTJZN9erUY09OgTl03B77W9mabcy6VgrSUMXOcsRRQ8CeDy3mI98Rkx+LzyuefMSKc+yjA002Scwwte8wkaZ5FCny0jHFdpZBjfPiZiY3C06zZ83DqzDlJBMLYPKulkO82hXGTvcmMl+tyuQquMh/ROspnQdaxZi0RL14CiX8wWRXZhhZ9CiE+EQVfYsi5dsToscKrUBDnc8N4wOjR/hHlCb+3wvtDhZVJUEOPArosMRMZLWj8rgVjVjm/0NJGCwCJFiwKLrQMc61gjI+x6vAdZx9002zctAVoWeFzQqVAmjTpJI6V18z7FDZMOMmktWOXhwir5IXI81CxlDdPPoktYXIIWnppcWFKfLqn8/tgVHryPaeLY7A/g0vs7ZOnzyU4njwTrfAkBvsz3opzC9d5Pu+0FFuJQiTfcyooYv0bE6n+SyPfMeN7GiF8JJnXjYB64uRpSTaTIF4C/Bk4qLioLV66Qt6/dGnTI3q0v8Wt/ZSXby2Rv2KaY66NtDYyLohzGJU9NWrVxVWLu7d1bLrt3Ah8sQBCn3cy4lyoaLFgFgPur1m7TgKx+HLxQaJ0zgBs0vHjx8UdhZO6cWOgBopt6b
5CLcqqlavla93UPLBvanb37tsni8qJE6dEq8RjDNozPqMMsKb2kMIFNZyGOOnTnLtrl4dknjHlJ06elABlMtqGWTPHvP5yMmV8g/kWBI8zmJCMBhcH34gaFQb4rV69RnzEDYNp2jAbDjXdN27elkVw7RpbXaMtNfW8/vLr56tWrxZm0jfNsbUd4xNoEj5w0KYxNMf4IcKlS5aJhYVuKwx0ZWYKQ8Sa/u3MJmNSB5PBoXaQ7lxkjM+ePQ+a863WHtOeaUOZFYVpHekzbCwktFow4wqZMe+I947PE93w6NJH9xevC69hVo8cPSqawfv3balcOS4+c4cPHxVLA1MYcgHg88IJk9dEzR39ZEl8Fpl+l3Vohmf6YWpmiDPPe+rUKdGy8Tn38NjjcNewjpvXRY0x8eVzy1TU1LayDf37OdGyfz6jxMIQJ33GqJikBCxnW3lud3t4YryoVWZdrzEfFLSpcaO1j+6MVqLQTxcu63PONLJ8J5ipiddpiNfN+89z+0a8j7RKGmaFdYk37xUx4LUaomadzw5x5IJm3Bzo+sjz8L1lkgBmhGJAJbVo3hG/h0Is2QdxZNurV6+KVpT3e/v2HY4gT46DsUkUCvlMsx2JmmsubIxV844ohNDdifeSzwpjdwzjS4sf791uDw9xT6Hlkc8+XSMYBM73nPfl0JGj8jzRNWfTpi3y7vM55hzAVNQUeA0xToLvFjWkHBtdB4kjXf84txghiW5CzJ5jLHGmvfWX13jYLjSwnEwp29AN0Wh2WX7w4CHBjoIE51YyPWzLlOdsw20+49ZsdxwXYz2ssSkcJ79wzPvq27hoVaNLI2Px3lqy9JGhZZKINWvWOdx8mHWL7yWDm7m+XLx0SbaJh3fB91SEED8+c3yuaGFnYDuvgfEpvPcUuL273xw/g/tN8D2xYaKBchUqifWcz5DX+Zp1OD7jjsN9Mph0vWF/XonzDP3jzRzHOsSLLpNXrr5/DmgpPX7c9w8c8hmnFpzPnFdi/OXho95bZa11qbBh0hfOR1ailZRzB98NQ3zHOU8RA1oYSFyj+RzzufKKKa3F7MMIqKYfCp2cz/gs0wrBuZjrniFiYwKZTZn5JUZ08aMga4jzFp9PrinGRZHHOM/N8uNjwVQKsS+en1YePie8Hv7SAs2x02rFd4LPlHmfuD4YpRzPxXtIHMkT0CWMY+H8SuGXrnD08GC/5H2sWaHY96JFi2V9MddjYr+IC9+/s+fOyTrJdYQCFjNF8T3js8N5lWsgnwWu+4x1JP9AMs89lTscEy2DXNv5/vG+8V0gz8R+ONZtO3aK9ZJj5fPH9/D8hfdrAfskz8DngPwXFXmcY/ms0PWUzzDnO8OrMJ6HmUc5h548fVrOz/WP7yfnUgq8xNAIZOb6+UtegNYgurUZ4nPOa/RtbjF19df5EPhiAeRnumRqHcjEKCkCioBzIECrHdOxWomLztHjPidZsNb90dsMdKZlyyvZGJkr6Nvvw2MUmoxbjtd2uv/1ECCDRmvZ51ClytUcmYw+p/2PaEPBcL4v7lc/Ykzf+5y0Fhirzvc+N8/HpBXMWGclKsioLPnZiBnoTNZAMzYqQXxzRTf19FcR+BoI/DICCN0uaGpk8CS15kqKgCLw8yNA9wa6bNCnnv7vdA2ghdNZiBr9bDlyi2sjg1iZUpwuUNSQ05WHrh1l7fFxzKI0Y+YcscY5y/U56zjv3b8v3/j5M8if4PdiPpboa0+feLpmci2x+rB/bB/fu97cefPBzFLr1m+S1LXf+/w/+nzMItavb39xzf7RigtaEehizVi0vn37iXJi2fJVkuTkR+Pk9fy03jA7I2OdOFam5l5ud831Wlf3FYFvgcAvI4DQvaOVe2t5iXzyRf4WAGqfioAi8GUIMMkEF8EmTZqJAGKCsr+s1+/XmhpXftCSaSyZCc2aqYaj4Acb+a0K+ljTDUvp2yPAIGj6tzPDm0kG8TFnpQvc5KnT0KF9R4lZtLr3fEz7H1GHyTUYi/Cjme8fce08J11z27ZrLylvf9QYrOel+5ObW0O4u7eW7yH9jNYPM166GNar6yZB6IxJpIuWkiLwvRD4ZQSQ7wWYnkcRUAQUAUVAEVAEFAFFQBFQBD4fARVAPh87bakIKAKKgCKgCCgCioAiYEdg6vSZkvBEAVEE/EJABRC/EHLi48xM4VeGLie+PB26IqAIKAKKgCKgCPwkCDBVM9MMm48+/yTD0mH8pAioAPKT3pivMayevfqCX0dWUgQUAUVAEVAEFAFF4FsisG7DRvkWWKTwEXHBm1TR3/Lc2rfzIaACiPPds48aMfNsx40TD9myZP+o+lpJEVAEFAFFQBFQBBSBz0GAH5xm5rjAAQIgaMBAkhnwc/rRNv8/CKgA8ovea36l158/fwgeNBg2btr6i16lXtb/2DsL6KqVLQxzcbhwgYu7u7u7tDiUAsW1uLtDcXd3L0WKu7u7FXd3p9j/1r9PE845FKcX2rdnrTbJZDKZfEkms22OElACSkAJKAEl8LsJ7Nq9D/+ECYvY0aLj37D/IH7c+Lh05drvbpae/w8moALIH3xzfrRpT5+/QK6cuaUTCBsyFJycyv9oVXqcElACSkAJKAEloAS+SKBixUr4J1RoJIwbD/Fjx0GoYMHRqnXbLx6jO/+/CagAEgDv/8zZ7ggdIiRiR4+BaBEjIUrEyNi42fbXpgPgZeslKQEloASUgBJQAv8xgW07diFyxEiIHD4CEsWLL3/hQv8tbuDnL17+j1ujp/MvBFQA8S936hvb+fbdB2TJnA18+RPHTyAdQbC/AqNcObWCfCNCLaYElIASUAJKQAl8I4EypZ0QJFAgJIwXT8YciRMklCXdwNu2bf+NtWix/zcCKoAEsDs+ZeoMhA8bDrGiRUMSn06AVpCoESNjzbqNAexq9XKUgBJQAkpACSiB30Vg1hx3BA70l8ScRosUGUkTJhLlZ6TwESQvTOgw2Lx1++9qnp73DyagAsgffHO+t2nPnr9AqhSp5KWPGzMWDC1ErKjRJK94sRJ48+bt91ar5ZWAElACSkAJKAEl8AmBUaPHokGDRpg0eSrq1q0nsaeMA+nSpRvGjp2A2nVcsWTp8k+O0wwloAJIAHoG+vQbgL9DhkbcWHGQKH4CCQajG1bC+AkQK3pMhA75N9w9FgSgK9ZLUQJKQAkoASWgBH4XgQ9WJ16zboMoOxPEi4/7Dx9b7dFVJfApARVAPmXib3M2bd4C93nzceKkF4YMHY6/Q4RAwngJ4LHAE0ePn8Bcdw/s3bff316fNlwJKAEloASUgBL4MwksW7bCIoDEjY9r12/8mY3UVv0xBFQA+WNuxa9tyMZNm6UjiB83Hh4+evJrK9falIASUAJKQAkoASVgRWCR52JTALl4SWe/skKjq74QUAHEFygBIWvJkmVmR3Dp8pWAcEl6DUpACSgBJaAElMAfSmCR5xJz3KECyB96k/6gZqkA8gfdjF/ZlMVLllosIHHi4YLOw/0r0WpdSkAJKAEloASUgB0BFUDsgOjmFwmoAPJFPP53p60Acsn/Xoi2XAkoA
jvnZS2XAK1K/XQLZs2yGBHTsL+qAd1DDP82d+PjIDhHgB6EYTJ01WuhOdY/acREsbSoN/67Y6KH32xZeSMGuOYGkjXeFZd+qq35nIatasrd9nJMzSQTQ4ZID+BtHOmDGzrHCu7Epnq1athu7j5Tz7zLMafIcFzaCXLm0GSUiYpfvxOOB+NDJm3ET1xPC7R4+eqkibfeYTykrpUmW0M2AAFSlSVKpUrmoheea45D5R4okXQGhoULJe/MvzgkKJgDJQrruggHXs3E0IuEKGDR+pkyffJ0VN1snYnIOCgwGA4HKkDuicSHhYuOTNW0AuOWMJMBAiI4fpvpCQUK3b8PDB+hsr+7VXU1kNdOiQCOUks/PixUsSFR2nx/GvfLmKMnHCJPn07meSJVNm7chmwi5SuKgEBfXRTn3+/AXJmSOPFQcQExsv6zc4JjZoLBiMSEjIAL0XYogQFGhvL1/9DorAhG4Q4PkLFkuXbg6j48zZc1Krdj11JeMRee3V13SwY5DBa4QyUK9+I7l+zRFsi3sZ7xtotSfBOISuBBoyZ+4CmTQ5Rp/dfizKOQoEgoco1auvyz0nLWD+oiUuKAbHzJg5x0J2UDJnz55jL87l+5mzZzVmw67Ug5J2697LQviYnCZPjtHzQEAxavqHhMr48RMlflqCzF+wyKVMPFPznNtQ5DKkyyCdnX0NuseyFatdjocvvWiJQxEGvUTZ9RS/hIeC94lR6Emov0wZs8iFCxcUTa1Xt7688NzzsnylA0kn5maRkwO+atUayZghkxx10jVXr1knIHUI75h6w1jBiOjarafwHhG/5q002JnvTHwG+QOF7tOvvwIf7OvePUjq1K6r52zevFXaB3bW75SZ9Y1sEjslsW3rDg//UJaeeeYZ9Ui6754zb6EO8mw/f+F9yZsnv7z77mmdGDgHZeG/P/6kKBQTNwKPvVOX7oqC8xuDjfEK6t216zclb648kitHLrnsBAyKFSshAwaE6bnULQaGkS6du4oxohJmzZOffv5FgR4mODOZmmP5BNVFkTdcdu6vk9PDg9fP29vR9wjiDLfxrfPlKyBxcVO1KMZ5Yo+MmP7/z3/9W4KDB6jihTFC32jR0vHMeAV8m7WUy5eTIrrEUE2McrRr2n/uXLmt2By81CgQRqA8BDiVkiVLl0vk8FG6i3GzVp0Gwn0jTMoGdeQ3njCAIGTUmAmycZMD6Pr+39/LqLEThHtHiK0KCe6v3urWbdvLDz/9rEAWRpTdO6sHe/gXHz9N2wpGwM1bn+h4jOcXxZT2gPfOCKCU6Y9sK168tDWely1VRrJkekNMTERsbJye365tgNz74isBYKM86u7ajVuSPk1apRRSDgpvjqzZJV/uvOZSioCi9KLw165dV1q18td9jJ1w2ZGjR49qmYMGOuYFlE2uUapEKVWI/Vu2ktdfTSXfOj17VuHOLydPnZZsb2TTcwAFVntI/HLwzSO638xbnIrRnVzsQFRUtPTs6bg/9+vFTImTIUMdc5rZR7l2bzPjZO06DVy8oxyL56dwwcKS+vU0qiSyDSMPIxwvGJ7COnXqyzGnoYQ+AuUVoAcZPCTCYjgsWbJMAVbGFLwQzz3znCqJd+7clQ8//EiDrO1sCBRMYg4RQNMX/vKCLHaOu6GhYTpvn3dSJOmfjGEI3oyq1WpabZyYw9x58ltgzqhRYzQWVw9WveNrAQw2whyB7vNrsm37Lp3zMQrssmPnbqnfoLFuwjNB29i61UFdKligsLRt01b3ASpPT5it3+fPXyiTJ3sGeQD+AK/LlS2v7ZJ5O3PGTAr8MfYxVtVv0MRiMcTFT5fUr6e1vAIo1Vu2OPrx6DHjFIQ44wRridXNnCmL5TXt0zdYAGExjo6yZk7GzLJ4sYNqiEeibdsA61FXrl6nyj8b8JoWLFhU0GGJAewQ2EVu3XbEfGCw58qZW/U762TnF+bXTBkzK2jIpiFDI634FcoM7NTNAoEAUKtUqabePtPnAITQY0nkQqwiFEUEnbZJEy/9/u2330r+/IUs4JO5B9YMgkfnpedfFDLjIS1a+gsU8cdJHpkBQgBd44ZNhE4KQsPDJsx0NEDzwCDQOXLkthQPs50XC6p79vx7Ur5cBYEfijChFCpYRHAvI6BD2bLltDLrRMdMUaNHd4oIfNMFCxMVMFzsBsHlBbdo0UoPxRgYOHioTrojRo6W6tWqS6lSZeUrm3eDA0E0Kleupt4CfoPSZcmcVS1Tc837fWIQGA8Ix0GlYDAx0rN3XzUuzG/zCW0ENMEILmgmUpQ+0FIoTEagJVSvUVsVfuqwQYNG8rVTQRs7dpwiNuZYzg0KcjRi6HDlylV0UQa8vHyVksLxBOZiQCFHj53QIGc8VmFhA+WNzG9I1cpVdZ+/fxsJCUmkasFjBulEoDdQr5cvX5YffvxJvTYg4SHBA6RJk6aSLWsORUPfPnZMcubMI8TaIDMTZikKgpmA0lmjRm3L6GOiwm0OtQWDkk72xZcOhRQvQfy06VqG+YeXK94ZVPfBxUvJZjBh8uPemHhR9NKmzSCff5FI7zLlobQcfDMxcA2vAgMFsnX7Lhc3L8jkVhstZOjQCMGrkpwcO35cUbgvvnTQ2DgOlBTj2ggeBRBrZNfuPUoliY6eIsWLlVA0yBxnPqG9gOYZVATUD0MTr8F3//y3rLZ5QJhsMBAMZQ1DvkL5SkqtM+XZPxNmzRUC3RGC8olzMmgY2wI7drWQVGgsBMQbgRIxeEgiJa1T564CSICgYBM4z4TRoX2gZEifUc6ee08mT452ideAUmgoV4EdOkrPXo4JHcST+mDS7t8/VEEJklAcPX5SatWso5QJcx8E7w8f5hldNcfw+ePPvyi6RHahn9yyyYwaOVqqVqkmGPOUx6SMxwupVrWG5aWFNoqngfHn1KlTSi381gnCcCxeQAwzZNy4Ccph1x9QD2bNUcWY32RvGTNugtmlY5zxrCxfuVqVl107HW3GE+roMECaWXSrs2fPK5Xz+s1bqggYuhnXYUzAEwqQki9vAeU1c2H4/BiDRuCZ441CCO4OGTBQKZ8oGCNGjlGEEzABMeOT/hBRAAGjwigSbJ8+fYZUrVpd44zweNnBrK3bduh9Hj12XNq16yBznQkCOI84CwMIQBM0wBaUNJPUAa/J0MiRlvHM/eItA/0GjUYZwrOLzJu/SMei7dsd9DLTj3RnMv+mxU/XNgBYYRcMj1defsUlIQTIqx3hxXOHoQRo8Oorf5MK5SrIf39wBPXyjmlbBmlv2tRLnn3mGbniNFJZhNCrSaKHk4xGqV5LJXPmzFUjyngcOZ5yBvR3AHv2e9y1e7fug7+ObNi0RX+bOKkmjZpI9qzZkngT7GWgeBH/mTljZj3XPagZA7lg/kJqqKDE4p3CQEtOuHZQkGcDBG/boCGRFpWbMjBARtjiF9988y2pU6+hnPZAKUJhRyk1wvvo1csRxE1WJi9vX6u+OQYl9eRJRzvGWw0NGmH+AEhBITx3/oJUrFRVLtjofV7efpayy/FQ3wDHEDzsjRo1lW+cwfXjx41Xvcck1NA5Nt
hBIYNK261HYr9jjoIqa2iLgLIGyKOfbdi0VX7+JTEmEW/AgxggKOjDR45NQnHCyzt85Gi9b/4BkF1zGvy0GaO3fP/991K2bAVp2sRLvvo6kTUC/cj8mVhJDCSYHAZYYszu46SRMRc28fKz6pn+SdyiEebfWbMceibtu19IuJxwejkWLVyieqg5Fn2kePGSSodlG33e6Au7du12Aag7duwsdevU1XGvQ4eO8vKLLwvg57RpCeq9MmXi6UOPcqfqsf+tt46oQQToicTGxUtbJ3CCARLUq59Fw4JqDGuDRAIIZZo4HDypObLnUlCLfTBxiNFFmG/pb+imvFvmytmz5+o+9DfonUYA6MY5Pedm25/9+cgMEF4Y/HojoHga4/HLL6o4s/3q9ZsC1adY0eKWQsR2EGkmqc1bt0tkxDCdzNmO2xE39IkTDgOEOIXcufMpT5X9KF5ly5RXxI/fuKDm2Lwuw4ePsCZtDBDjAYHGETl8tHI8obFs27ZdFUd3bjLWfuduQYomUT6CK+tBs0MpjSnMQQHg3OnTE3SgcZTk8IAY1NZs43PhgoXSrVvSwDqUbpAce2wFwXt16zZUFzfoAVx4w8sGyS9XlgnMkUEKd3vHwI56KdABGjkKqRGsatN4cYuiMCN4aRh44ZyCGOzevVfgIn//3x8UWQ5zUrWoP/jyJngdNL1kidJy8+ZNnVSrVa8lc+fOl2XLlmuQ4rFjJ+Sbb77VdlOkaHGB6oJgwGK4IBideEo+uZPILcWwBckBuUZB+tzJLWegdo9loU4aNmyqiswXX3xlGTlauO0faC2IqaFYwB3Fte4u6zZscjFAMJJAhECkiQHBQDKCgg2FEOSib3B/bX8o/3Z3ujmWT2h+BK3ZPSC9+gTL3v2u8TnmnIULF6v3jt94iMqUqaBGrtnPJ96AufMXu8RXwI2FzrVi9VoLgefYTz75VI09v2YtlNIS0j9MA/OZRNyF58a7QwwJ3HIMaygyQyMcaCQGCeUYowIl0W6ABAZ2tjwWlA11j7gdhIkdQxduNXFhxHygSLMtZkoihcl+T/Rtg+IuXLBIPUNr167XNgv/Ff7v7n0HpESJ0tZAzvkBAR0FxPp+gidy85btioqWL19JSCpgF5Sj3r16CzzjlStWyttvH1f++aWPLyvV0MQKYRiBrt77/HM58tYRNU7szjgFRZwUhiFDhroYIEuXLtXJnOuC+k+OSfTaAACAMhohyxCZWVDE4Mm7C33X28dP69XsmxwdK1AO+of0txD4iGEjlDKGYUDyDXjnn33+uRpgUKtA+EHrEGJ9DIJNFhqdXG1jCxSel196RSdxc03zCYUVr+qRI4lpe+ln/m0C5P2Ll9QoN1QpzoFGwLgyb8ESpaFs2OCq6JtyUWhq1qqrXk9ok4Z+gScQ2o5Jyw5COGLUOIFTDkgED//jj69YtKGRI0bJi89ZVeS0AAAgAElEQVS/pN42TwHS5nrmMyY6Rg0D2q1dMNZf+esrluGDUct7AAE2wtgNIg5VrHzZclKyeEkrne7k6BhV6GnXiJeXtzz/zLM6nlBW+rTppVVLB8hmyiuYr6B6Lwi6v+3k50PTw3AZNixp9jfeNcYJiiyyavU6XVNkoDOw2cfLR7K/kc0jdZg2YGKeOPfjy1elTeu28trfXlMQy9wTnwkJs/U60BoB/oilSE4A0owHwP0YFEi8dIxHRvBMD7DNu8R54UHjvbsLwKk9UJn5hLaKfHrnUx2zoD0iZE7jfZnYCDypeLXchdiA0mXKWbTPL7/+RulC6BlGUHgbNHQYi9CB6tVrYCn7zJ8kEzF1iVeGbIeIv39bpXqacvisWaO2BaqMHjVGKZhsx6DBw00sipHYuGkyyYM3Aq8hz2cEjy9eQuMxNNsD2gcK7AVPAlCBsm4ELwjsBTy75tmVStmoidSt18CKGQZcxSNrspt169Zd6d6UA42zcpXqsmy5gyo5bvwkl+dnTpnlBLoZ47v16G0xFJg/YDUYQVGvWLGKEAKAQMuD5oTgxcEbaQQgN3RAmI4J6IgYk9AYvb2bqeFijmNc5Bk9xYZBpQLANrG3PDsGLcIY1i8kzMrEd+zYMWFuwePx7bf/UP2HeCGEzJN58uTXOGp+x8XFSzO/xIRKhCrgBQSYKFumnIxzggf0A+iORmAaRXjI+Gj2/xmfj8wAwWr1lDlj6fJVcuOmI3CO4DasVFAHsmUYgb8LlxLBfQQahmA5wgc2wcfK2ytQ2LIaUejJ5oKAtJF2zc5/J7YDagqCcmcCgkDIOnburqi87hSRO5/eU4+L+c0nSh0NdqcTTWRbw0ZNXYwc+/Hu36Ep2C1OFO/mTi8Mx/bq008mRkW7nyYHDhxUr4YdFTJ80YAAUsEmNj7Sg/o1c1DL4O8z0JrOQCYnuKhG4PMa42nbtp2SPVtOOXjoTd2N8YBrF+UJwe09YbxD0Vq/cZPGZphy+ITLD4IOAsEEbQTXftRkxzNBuQABBokhMNyvub8GW5ljr1y9Ljdu3tagKqgH8JyR+QsWSo3qtfQ7wV54VYyQJhcqGfL5Z59LjRp1zC4dUMz9WxtFpGfPPtLUy1d2OTm69n3273hQMFiMQL9gwLMLyop7+k5c7JkyZbXaF8d/fOWaHD56TDNMMcDyRxA8yHa4k5ZnL5fveDaaNPW2YjzYNm3GTJe+wjaTPpF20qVLd6sYvAHVq9cW1hCxy849SRckw+BPlSqNGv3m2EOH31bvB3XO/UI3IUganrFxa5tjUTaGjRitnol79+4phx4Dgj5qpFPnbpb3cs3adeq9M/tIYRsVneiWx2NKoD1CmsWFbgktPvv8S4mPny5tbJ5BjmVSQECsiVFCMEJLlynvYgzSl/FckCUGQ8YIRiaUFoO+me3m8+MrV2X23AUWwsU7gvZ3xBYcj+eFbEZG8OS9efiIfPfdv7T9Y6AhrB/QyIlckXQDr99yGwWOvmsUery3KB5GSClqkE3isSZMTBw3SK2JIsUzHj7iyDoIBad6tRpKiTVlmE/6IpOvQeYYF0c5KV0EP3d3Iqx4U9wDVlEIuA6yeMlS9brd+/wLK+CX7Vev35DIEWM0+BOjxASHk7IYOp1B9sz98F4aNmjsgmziRYVegsLfunVbi0LHORhgvDfirqCnhoY53jv7GPvswcrQUaHpnjlzTikeHAPY0Dc4VBOI8Pvd02cFQ98un332heAxNQGfeF2JU5o69f7GKmXMmzdflWv3oFcM0VdfeU1jNsy1oLuRmhfBw125UlX1uFy6cl3q160vZUslpvFMmDFTy4UugjAPYEgYj2WmjJksgEkPENF0vBgUKD9GvvzqG0mbOq3UqZU4dtLPMfLPnD6j1zDIMPxxzjegUjO/ZpInZ25TlMsn8ZgoUfY1EUCd06ZJp3VsP5isdXhsUv3ttSRji/04vqNw9UjGA0J/gQpqFyhXPn4t9T2zXduIMxbIfhzfoRI3b+GgovEbBR8DEAHFJyOUPVsc6LtB/DnWnqgFVBsdB1CPeCQT0I5xBDhG3KYRvFKG80/SHNK7m0yZzNvUo5F2AR0EwwKZOWu20ooMy
AjghYJuaMGMkWT8RIgZs3tY2bZy1VqZNNkVxPn+Pz9IXHyCyxg4a848jaV096AyztWu7fD4Ux5G0rYdjiDt2Nip1hiFvkKbQqB316hZRz66fEUufXjJkSDmwEErHhWmi/H8cjwUNOhtCPEdjZv6WFnMWFsquL+Disp+4rPIuIZAmxs0dLiVpRQwFaDCSMLMOUplZ/xDANhMsqJVK1dbDBn2MR9NcIuVYSwiSQ06qfHeAALinTBjvLkWn+gFxMmZGGHib8h4h3z99TcK0gD6IRj+0IEBeQhsZ7kHk9RDgfgCRSyPGtfs6izn3Ln3pImXr5ZHOeh7cbEOmizGht0AGzQ4QmY4aXF60cfg3+82QHCfQucoXrSETu4O1HG5uiOxuEDPydCChWaQG7h9L7zwktIsQD9a+7cVr6ZeQsCjv387dd2SUWvMWEcg3aRJUTrBQWliMMTtSsOHdsFvUHK8KI0agsI21utgpYcPilAEkEkQBIHc96DuCGU1a95SXf1YmigH9mwL5t3gZQEVASXF/TcjYY5Oiuynga1cvcZjOjvcZiD3IHvkmybLiX8rf0mdKo3GCRBkTprXokWLe0y5Gjt1mt4fVI45c+ZbigXc8ixZsinvHzfu+EmTNcgOAwIX6PPPvyh79uxTtzaBbS8+/6IO/kz+uXPmkTy58qiCixue+AE6IVx/At7hbKIMgHISzA76xkCCcka8CtQkPEwYLwSLkdElXZp0UhyP1tffaFYy3gcBlAT/Qzng91hn0DreGgbL8eMnyKLFSzQ/O++RCZjjQKpoK639W+vvU++eVmoIeenHjB2vbWDEiJFaZ6ASBMOTmAAUA4QEzjzxJLRJu2BkEQBmuLz2fXzH7YlnhvNJbQslBaE9UH7bNgFKo0GRQtns3z/MCrA0ZWEIMojibQJpweXu7t2ibkc7g0NRsNzXkCAmCvc4yr8REgCgyEPlmz4jQbZu3aHvgwBy0EUCWEGHDe1k5qy5algyGYC4Q0lkogMcMDx9yobfjIG+cNFS7TuLlixVAxT01y7UDYGXvGcCI02edFBAPEaGJmA/h74IbQx3MEg+oEP7gA4aL8MkTdunTFzOBk1s2aq1xSUm0wf0hIkTJynFixWdTV1B3erbL0Td7gQGmnVPYqZMVSrCFudCWgR0g35D2wIJO3PWsR4JMSCv/u01geawZdtO7QPExbznlpKW2JORo0bL66nSqKfVPB+BtvQb2mts3FTlYgOQQCEg2w5UqeiYODXMWEuB43jXZCMhKPylF1+WcU4ONhSnChWrCO8KpSN84BALcRw+bLiUKlVGUfq4qfES3D/cWreDxBIF8hfSZ8MDxSRbolhJDQhfvnKtUlzXrVunNFOyGrkLhsPrqVJLfLwD/YM2QbAi/RNKE/2AhAC490PDBws0VShRxPtADyKGACGDTOWq1TWo21wDJYBEIUWLllC+MRSR7kF9NNnFpo2bpEOHTpZRas7hEyMADyk8aeKkAKVMBjSCfaF/EbPAuBEVHWst0okCRoITAm9R/KHv2Q0QEjtkyvSGC4WL6zG+glzuP3hIKXHMB/QHsu3xnNBCAESIJQIxJiibe9+3d7/2HcZBQ/+zPwcULQwD3juGrkl1Sp0R38h2jEvojSjPL7/wkqRLnVb7Fv0zZ45ckiVjZkmYPV+KFCqsx2PYwz1n/QbOB2m+fOWajuX8xqDDW8/3zBkyucwnN27c0rHI/V6Zlznez8dPDUkCpLknABK24/XkfvCE8xsqB6ATMRP8NsHx9mfnGnh8SZOKJ4m5Cwpccvx/PPJQrk0d2cuyf4exQGpyvLl2YZwFqcbrjiJpPHAcA/BHXBieQRIAGGqL/XzGoqKFiyoVjHZ965M7GnPDvAdNcuPGTfLq31IJKX2JDyLrJv0GABXFHFZF+nQZZMCAUJ3rWDgWoyUmxhGrM27ceEtRhcefNWsOpYSCmtOm9uzbr3WOwfDyS39V5RNltVKFyhoTQt/FuC9csIjky5PPAnJpX6QzTpg5S6JjpipNjueCrgTowNoTjIsE1oOmGyFzXd069TTAGf0KyhhzPwAm9KJfbH6kNm0DXBKymDKYN5jbAEuIRSDAGjAKox7QkEQIoPvQ17r36K19Ej2AGFJjkJmy+MSwR69K9drryqzAMEVHgeqKxwrvE2MwcW4k/cEDlS9vfo2Tw7PAeVBGAagWLFis487IkWO0TbGuVJOmPgpK06/bBXS04t6OnTipfc3H21c9RBfef1/nP8ANvPeAuRimzNc8Z/SUqeqN4Z4BNIgFJgEC4Cb9wZ0iCOWccf/ZZ5+TfQcO6nN6e/tpnBAMEoxS9Eq8mBgvUK2ff+4vEh0dq+31hedf0NhjqFVQt7jGsGEjVO9t376DtjuWenjnndOSL09+mTtnrsaskCSiUcNGypIhtCDVq6k0/T/tP3fO3Br7ZeZM+3v4s77/bgOERoXyCg2HQTykf6gqCGRiIVAQCgBoXJs2AVZAOBxegh4J/gYFx1KHB0sGiCNHjyvi0b17Tw3ewfUL9xUXOihMv77BGltChoipU6crJcqkiIS7OW/eQlVMyWRiDAoaMgNpQEB7W/YAUQvXx9dPEWmDKnt6EdNnzNTMTfBa7cIkTcMxaIR9Hxkw+vbpp/cLuvjumXMSETlMGyrGydnzFzTegMnfUAHs5/OdeBo8DCifJkCS7SifICrkP2fiRhioIoePlA6BHZUvCd+ZRdQGDRos23fuVl4q3iEGZQZeaD50ahRF3oX92eCw4vLmfLKTIEysUAXITISSi0C1wCjhPUI3YVLq0rWbKlhXrl3XiZzfdERDAwO9w0iE101Hx/XOhNqvTz9d+O/uZ59rEHjXbt0tBIBBiAWSmBQQUBjqjWQEZPgAsSVeAJcpSDHXtgtpDFHu3APqzDHUYVCvvlpXGENr1yUGVmPMYSCjGDGxouhwPeOWN2XwSQAykymuUBahpL6IbzICMsb9BfcNVt4+cQ3uAj0i2g3Rw0jD8KEtwDlFCBSn3nm/1I1BQdmHsYanjExJKD0M0EOHDtOkAfbroWDgLUE5xphr0bJVEsQchL5//wHStUtXDcqHv43ET58pi5PJksLgC0WOdgVdCeWfwbNnUC819M+ce0/ji+iPJuMd8VlQpYyAYoIOg87bPVdMdDwL+0D7jIBIEYNhX/CRyaR+/YZ6vL3/YIyhuNCv4em7p/OmTGJWWrTwl65dusmU2ES6E8opwaMkeMCLQJwPQj3yjvx8m+u5bCMhQMfOXXTcI04JBI5FwkiqQWpUhPJw/TOR2SmgeDTIDjdzJqidI3mHnuD8FxU1WZ+BdROYmE3qTp4TxcXHt5nlebCfx3cmNKgSBG2iGIPoEt/FOIW7nwBsO1JGNhWy+ZggWXt5gEqsj2OEoGuQth7de2j5gBe0B9bZgepi9zCac8wnCiVUXuodj4Vd8KaieHXs1NWlrXMMXk/uHxrfLCf/2ZxLAgWCng0KabaD0rdp3c7FG8M4xbtA4cS4QphD4LmDUJt2AtgCIs94544QM7YCtmF447EyFDgMdQzmvv2C1bvMfAb9KDx8
oPQPGaDZ1bgeiCbjLqmVUVQ6duqsqaTpo1Pjp0tY+EA1iI8eOyljxozVfoXxgQEOZZaYR7uHCa8r1FBPsmrNOh3b4PUzvkOro00MHjRE+yfKIMAMBhNjLhmImIt5BoA5dzGedxBbGA68D/u84n48MVH7k7k392MZh6Dl2YVxlvksiHFwcISOY/b90EFpL0YPsO/jO22ddsO8CF0YLy/oO14N2hvjROfOXTTV7527n2m9Mq5iYJv5jHdE3Ag0W5OpknGle48gpaNi1BmBkse8jW5jjGTmO1IJM2+TYpo+0L1bkKD3EG/IfEA8Jv3VtD/KA2hDGccAMML8QgYw5gXqBuPcPqbMmjVXF1WNiIiUfv36q67G2maMPSZFNmWhnwxmvvBA32Q/HlDmQAxIYoSQe/c+13ZCJjyTdYuxiHmZjJUYJZ6ETIB4qAIDO6oRRwYyKHDoehiEzA+hA0L1eSdOihbi7XoG9ZTjJ0/pmMccg0F49vz7Ar2sV89emnrX6HN8tm3XXgEi+xxJkhPmehgVAK4IcWZ4DZhjEEAv3hWGgz21NEYd5xLvx/2tXbshSQwTnhttfx07a9ZNEhpxn4xvJE5AYOUArrLANc/DWI4sW7ZS2wPzG/EzLCDZr1+werau37glAPK0B7NqPRlNYQCwcDLjBvEzAMuEIDAe0M9oeyyoS9vGMH1c5HcbII/Lg3Af7dt3VPrS43RPj+u9EOzIBPH/QUCGtu/aayG3j/MzM7ER+EqWOPc1Ix6X+8YQA81/lEK8iEmp/SjLfVLLwrDHm5Iiv68GAIcOHExcL+P3lZZ49j///b28fexkEgMk8YjH4xvK98FDiUkzHo+7evi7wGAcM26SiyL48KWknPFrNUB8IJ4CQw36teNT9qfUwO+pgafGAAHpwi3GirIp8us1gDuWFL7wL59WAcUE3YWPD3f9SREyqRB3YF8k7XG5d7weQyNHuAQ2/tZ7g+KDYkGGovETJ98XHf+t13gSz4Pm1tyvuXqVn8T7fxzumYDW27dvq+d3/wGHl+pR3RdezeCQMIEC8bgKKCko7QcfXkri/Xlc7/nX7gvvGSmv7bGRv3ZOyv4HrwE8V336DZD33rvw4CelHJlSA7+jBp4aA2TBoiXK6cNV7u5u/x3181SeCp+YhYayZs2u1Jun8iFFNEd71eq1lI//JD6j+4rMj8MzkAPfnnHm99wTgZME28FBJn1qijhqYN+BQ7qoJush2XP4p9TPg9fAjBmzpGKlatbCfA9+5q8fSbpz0hA/zsLieq/89VU5fsI1E9fjfM8Pcm/Qdg198UGOTznmwWsAup5J1fzgZ6UcmVIDv70GnhoDhKwy8BjJ7GJPO/fbq+bpPRPeMsHlxIEQQPa0yp07d5LwxZ/WZ30Sn4vYrIsffmhlQ3kSn+GPuGfiJgiARhkgCUGKPHwNkJSBVYftqUUfvpQn9wwSXsDVR2FPkZQaSKmBlBp4HGvgqTFAHsfKTbmnlBpIqYGUGkipgZQaSKmBlBpIqYGUGkipAdcaSDFAXOsj5VdKDaTUQEoNpNRASg2k1EBKDaTUQEoNpNTAH1gDT40BAq2IdRTMYj5/YJ1p0VAjWDgqRTzXAPnCSUWXIp5rgJTAJE5wT+NpP/p+++zHParvyaUpflTlPy7l/JYxwr7GwOPyHL/1PngWs5bHby0j5bzka8AxF32XbNrv5M/8bXsYZ+3pVn9bKU/vWVCyU6iMyb9fkoGwYOH9xoRffnGs/ZN8KY9mD/fy/0HQjx5GWKPlfu/nYcr6Xx37IPrxU2OAkPGoX0ionDz5vwm6GzJ0uJW3+X/1Qp+k6yxctOSxCXBH4VqwYJGuL/Jn1qFdiT1w6E0JGzhEF1q03xMZkFjAa+DAIb95wPnxx5+E1cxZtNKsZ8DglZxBQ7D73PmLxL5y85kzZ/V8+72xVgAZUkjyQDD65cuX7bs9fmdl9FOnTsn1Gzes/YaXjlLAPbK6eHJCHv0f78NjZ3Vakio8iBw/flK++eYbXReAFZ7tue+TO59VkBcvXaHghv0Y7vn06TO/mjEmuTqnLBQjMiqxGOP169e1XvlNnbDWBIYS6bJZ1JOFsU6fPu2yurv9fjx9p1bIbHPq1Dty69Yt6xBy44+bEOXyTqydHr5wn6xZ8SCrgHs4/f/dJtY/CAsfIgeda8T80RXAGj7LV6z6oy/zxJbPAseDh0Y+suQZv6ciGA+WL19prfT9e8r6PefaxyWyLfYLDk2y0CPj6urVa6R3n2BdAO9hr8d6GcePnxDW4fo1YW4hW6VZMJL5ijmMRV6NcM9ff/21lkeMHHOLWcPDHOPpk/GUskz2NMbFn50G1ZdffilHjhyVW7duezpVt7FYYnLCPTmXxkruEAUHjh8/LleuOFaG50DW7mHB2gcR1i85/NbbYlZwN+eYOZ6FWn+rECP34aVLOpex7gsJNs6dO6fzGoA+f2RKZXFnrsOcl9y8yTyG3mCE80aPnWgtrmu22z+fGgMEy/nSRx/pwnb2B/yjvl++fEVX+3zU5ZOzHeXjSRMW6rGnjSWjxtVrv71jPMrnZyG/QgWLSKMGjR5lsQ9VFiu8f2hLeUygMYtdsWq4XRhY8+TOK40bN7VvfuDvLEgXGjZImjT1FtbWYLGxrVu364JR9knHFEi/GTFyjEyJjVeFFyMJZaZa9ZrSvHkrXejKrKKMMsvqsFUrV5PKlaved5ExBikWB/Pzayn16zdSY5QV3sl8xcKkrHGybsNmXam2QoXKuqqsuSc+mbQGDRoi9eo2ENpScsKii6xWez9hESYWgKxStYYuHEg9bNq8TQdHs3iap/NZyKtTl+5y4MAhy3gDaZ63YJEussriqtWq1dTFqzydzzYMwM5de+jiXu7HsJhl5crVJKB9oDRv3lLq1q0vAe076sKD3Xv0km3bd0lY2GApUbykLpDXuUt3yZ+vgK7u7F6W+2+SSwwIGyjNmreSBg0b67otR98+rot3sWgjK96zkOKDGJHXb9yUVK+llt69+rhfJuW3hxqgfjEc6eP/CyHV+N27dx/5pd4+dkIN30de8B9cIO38xIlT1lVQPO3girXjT/jC2Ef6+6pVqv4JV3dckhmHhYqNkAwEQMksrmi2swp3uXIVpEzpssL3hxFWKW/eopXUrFFb2gd2luO/AgxPiZsuO3bu0TmIMXloxHApV66iLhJ94tQ7uugx12eR5EaNmkrZMuWkbt0GsnPXnmRvC32EOcix4jgLqS5XxX/lqrW66jvKNtfRua6Fvxy3tRkKZZFsFrlkocb7SbduQbrQqqdjWIcobOBgHYPr1W+oCwVyHGnCWbB01Jhx92XtMPYOjRwplz66bM1B6A0jRo7W52KOZ1V6DJrkhHm7Z+9+csWDodK3X4iUKF5KevfpJ02aNJV69RpIj6DeOhex2CHvrWWr1lKhfEWd83inzFkseGiE+ZRFHKnn6tVr6WKwtCUANBZHZC4j3b4neWoMEE8P96Rte/+DD2XajNlP2m3r/bZo2Vr27dv/2N47q9q2a9fhT7k/kKTlK9fK9//94YGur/caEJjEOPm
1k3fs3CV58xaUmTNnK/2NFV1ZzdXL208HQU/nHz7ytsTGJa4ovoEVq32bK9qCsj1r9jypW6+RZdjj/SA97KpVqwVvjSfBoxLYsYsaHSjgUERAw8IHDZWBgyMEIwfk6qOPLotOfh99LOXLV3JZCX37zl068RXIl19u3Ljp6TKKxBQuXEzKlS0vrOruSTC+cuTILZMnx+gq9SgARljZmQUfk5PpCbNlr23VWBZ1q1GjlnTp2kMNC9zoGClNvXx1m6dyTr17Rl566a+6Aq19P4bfqXdP6+rJoFCsFN66TYDcvHlbmIBBx/btPyDUZe5ceSRuarwabQsXLpa0qdMmWfHbXjbZ7TBkoibH6PnU/zd//7sEh4QKnltjiC5avFSGjRhtP9Xjd44fM2acBIf097g/ZePTVwOsQh07dcYT+WCdOneTxYuXPrb3vnbtOmnl3+ZPu7+Vq9fJ3XsPlv1yx45dmib9YenUTZv66Mro//nvf+X06bPSpVtPYaV2T4KSumz5amsXgBjKOethocjGxE5VD42hIK1du16qVq2hYy/efk9y9OgxKVCgsEycGKVzzD//9S/1JLdp21569e6nc9Dbx08KxipUIRT4mrXryQcfJN7jjl17JUf2nNKqRStPl9BtrHCeJXNWaeAB3OR+Azt10ZXZcdKzkn3x4qVcPEJdugYlu7gmc+TosRMEL7yR8++9L/nzF5To6Cm6YjtzEOsRFS1WMllAcMPGzfLMM88I9eYu02fMUk8tRunQIRHS2r+NZs67cfOmjBs/Sb03AJCly5QTFqmkHlmlPnXqdLJvv2NF9RkJs8THt7kAvGBwYTguXLDIuhTlAHB6kkdqgMTGxUtAu/aybv0Gl2sdOXpMaQ8LFi6WHTscy83ToPv0DZaYmFiXY/lBQ+3du5+wGvDde5/pfpBiY0UfOvSmLrIElcAIiN/uPft0wjXbWGwvIKCDxMZONZuSfP7w40/aUXoG9XKx6i68f1FTuPJiQkPDZeHCxAqlkF279+oid6bAjy9flQGh4WotHz+ZiL5AXzhw8E1NiRg3dZrSgDy5sLbv2CW5cuaRqlWqyfQZjsUUz793QU6fOSuf3r2rFibXAlUbOGiIWub2BnXozbcE1AEJCuole/bsNbemnyEhobJ2naMBmvUl3nn3jO7btn2H1ieeArvs23dAOrTvKFOmuLoK2f7x5cvqsh01aqw+c5HCRaVNm3bqKqQM7n3/AUcDxY3H8yEMaH36BKuSZb/W+g0bpUNgJzlz9rx9s8t33sXFSx8LnTkuLl4iI0e47OdHbGy8dOzYWVavXuuyb+OGTdKhQyfdRvsMCQ6RefMXWMfMmj1XRo0eq79v3rwll69cVbdt27YBst3ZZs3BR468LZ06dZU5c+eZTfoJxYc00ChrIO7cL9KnTz/JnjW7BAX1FrwACAMJCKNdaKezZ89VVKd7957W+RxD2wGRSG4Qx1goU6a8JCTMshep3z+89JHETp1mKZ7mAFyucfEzXNDBuXPnS716Dc0h+rln3wHBVW2kVavW6rY2v90/u3XrIe0DAt0363vbuHmbXPzwkrhzfXv16pME2cdr1KxZC6UoJSlMRNvK5Og4NVSWLV2e5JBLH1+WkqXKyP4Dh5LsYwOu7THjJuo7cz+AiSlh1lwXdGrsuPGSIX0m+crNQ4lRlj9fQR2Y3ctZvW6DDB4cITWq19Qx0OxnYrVzZN98653pZdsAACAASURBVIgE9w+ztuEFxUuE271qleqyxPZ8TRs3lQzp0stPyVDPAgICpUuXbuZS1id1vmHTVm3bZuPEqBjZtHmr+ZnsZ2xsnEyKmqz7oQcuXrxEv0NFCw8fLLt2O5BIPFeMl6BuCGONafPzFyySoUMjdTv/+vULEeYEu9B/evbsrWO/ffFQ+sq16zfk3XfPSNduPVzGavv55juIXGCHjjJ/wUKzST/pu3fv3RPGm5UrE5WewUMiPFI0WRGacZ0xFa8GQr8+8vZx/YRi0qVrd7l2PdFIxlBlfvjC1mfOnjuvY+zo0WOTNdwpe+zY8dKjR0+Xd3L12nU5feacXhtvn/t8xnjMeGsEjyFzBPOrHSHmvukLKAlQZPEw3rqdlHrCfF2wQCEpW6asTJzkeOd4WfBeQpVhjKEtoWDhgaRuoLgawftg3v/AgYNd6pljqIPZsx2Gv5mLTp91PB/nBnbsbFFxTJlnzp6TTp26yIgRI80m/UTRpG4BMqKiohXJLVa0mPj4+Mo255zDvLhzlwMhpsts3bZDzz1x4qR07x6UhHqE3tC+Q0edt10u5vYDVBpZtGixBAcPEMYru1AnHdoHCu3eLnv37hPmFWTrtu0KTsycNccan5csXSb9+4dp/2e+/+DiJR2rmNdWrlpjL0qV027dgxRgse+gz5HGe9q06Tr//MMJ0IwfP0kyZcikcwlzMQLKDv3J7o1fsmSZ0i5554BJ9jFv0ZJlev9Hjr5tv6TL93JlK0j7AAfgx5yFom3vz+Zg7jGodz+9B7PNx9tP+675zSe6igGijh07rp54Uk0nJ3imaWfugs4YP22mxryY+dkcM2LUOFm91lV33bBho7T2b2sOSfLJeDph4mSpVLFykvXGuD+AWdqzEUA46tsIXha2eVoOYdOWbWqImWP5rFmjlvj4+Nk36XfeZZrU6axx2BxA35g9d7506thZvUb3MyQZV/oPCDWnyvfOGOfLl68qyEZbQH76WSRjhkwS3C9Yf8dPmyEVKla2zps4KVrQKY1gmEQOHyVHPbSXR2KAMJEuXrpSxoydIKEDwiR37nyywslJ5aFefOEl2bRlu3Tt2kMHqx9/+llGjRkvQ4ZGSuSwEdK5UxdVGpl8Pvr4Y2nUxEst1xbNW6kixwOsXrNO8uTOp9QUJi2UIP/W7fQZv//Pf5TKUbtOfbVy2Xjo8FsyaEikDB40RAoUKGQp9aZS+CRXPO48BoOIiEipXKW6ro1BxTMA58qeU5avWqv3WLRIcaXMcN606Qny/HMviKGmMPiDJGJtDxkSIUWKFBcaLsLk/Owzz8q4CZNkxYrVei8ok+6CS6uZX3MpVbK0LF22Qs5feF8yps8ouMg2bt6qVAo8JIMGR0po2EAZNHCwlCxRWq1OFLpX/vo3qVihkqxYtVZCQgYItJaLFy/pZUByuTesZpCIO3fvSfy06fJ6qtTSuXM3Wbdhk/j5NhdvHz/LgKMBsQ9rt0HDJhI+cLCWdfjwEbWmJ0VFy9r1m6Rixco6+NWuVVvatw9UpRDFL/XraWSMcwBAIX3p+Rf1vletXqcIev8B4VaMw5q169S9h3LCKtu8DyZIuzC4R0SOkKxZsqoCMn/+QnUX4mI1wmTZtl17VSQaNmoiQyOGWUr8ihUrJTDQYYBs3rzVegZz7rDho2Ry9BS5efsTfSaQD9D/sLCBAspuJjDqZVJUjMTETBEfn2aWYkine/H5F6VLl+4yZ+58qVKlmk5ulD9t2gw1QKDZ0FZAXkAshtvQZ85p43xuaE5MtggG7NT4GYroRQyN1HZgp7qZ+9+2faeiQl988YXZ5PLJmgDusmr1Gu
naLchlM5NFwYJFhAnkjFPp4QATv4LBjlGw26lwupwsosYM737WLM+ePBQY/twFt+8uN3f6l19+Ld5efsr1dT8ePmrCzLm6uUvnrlKtWg3rXZtjMQJZYT1h5mxFqPbs3Wd2WZ+8yykeAAqUN96Jke+++5fUqF5LwsMHmk0un4EdAiVdmnQC/98IdAEWFaTuc2bPpf3P7HP/xEDv1qOXcnDt+27dvCUVK1QW+oiRenXqSbY3snk0QFD8Ur32erIIMHVv4oIoj/GK8eLXBACmZImSsnT5KqWclSxZWq5fvyEY67je27TroNQ61hcqkL+glkuZQT16yUsvvKjjPcoTAEvr1m1lYlS0jBo1RipVqiprnMgcnGFfvxZ6LkAWfRhDDfoMShP1/9bR49KuXXupU7d+kvgprocSxbg0bPhoGTx4qOTLW8ACUNat2yAv/OUFfa/Dho/UOYTjY6dOFxTl0WPGqZHQtm07wWgHKGreorUq4e3aBig1gX6AcZEnb36pX7+hTu4AJ1DduFfKa9GyjVSvUVs+dYJnKE+Dhw7TcYl6A+E1aK6pdwxNDMfGjb1kxPARUrZcRWuxw+rVakra1Glk9twFEjMlTgB7UBgRjMdnnnlOGNMQ+sXIUWNlwoRJWr/FipXUmDL2xcVP13EvLHywzilQgZh/3QUaJ3VcokQpmTV7jty89Ym+U/r9nv2H9B2dOHlKoiZP0fiAMWPGSuHCRdVoQKF8I0s2KVyoiNbN8OGjpEzZ8pbByNyGkUU8UafO3eXjK1dlzZp1Ol+0aOGvtEwUbRYoNbx8jEbGqREjRql3FoUbYaz660uvyIAB4bJ77wGd88aNmyC+3r7SzLeZYEjxDvLmyS/du/XQc2hzzMeM02vXb5SmTb1VwcaAQRgj2nfopG1w+MgxCmC6jw8olqDyuXLmltZt2ul8jVII1cRgAgBaUFiHDx+h6DAL//7H6X3dtGmL0izN9Z555lmJHDZcr8+/8ROiFLS4+9kX4tXUR9KmSaceUebiQoWKWnPO4beOqF6BkcG1zH0uW75K2znbli5fqfNky5at5aeff5GVq1ZLvjz5lNqK8QZwSh8M6tnHMoDQtZo1ayUzZ85RHcDXx88ymnmuufMWyvBhI1Tneuuto9Z927+wnet4eXlLXNy0JOOaOXbm7Hk6ppjffGJQ/+2VV6V5sxaWPsf2v3/7rR4G08Lb28/jPMIBx46flLLlKliAgZ5k+8d4bLzAZjPgxoRJ0Unuc8WKFdIiGQ8IBtHGTVu1XsuXreAR9OEY2nWDRo09ejqgKUHNwqiyC4yB4SPHCkC4EYCIXDlyuwB1Zl7+748/qlHQtUtXc7h+AkBgUGOEp06d1qKyuRzk/MGYARDoLu+ePiMNG3kJIADy3x9+kvTpMkgf57Hcw6AhEVKyeCmPACjnAIgBELjLIzFAQJv8miW6qTA+nn/ueUUuDx48pBWzfftObcRw3FH0GLCN5MyRW5F2UNzNW7bJuPETdde59y7oBMBEB+8NLiJIIsKxxYuVkNuffKJoDEhMF6cyReAStAg8BwjxCR06dpXvv3fNPEDwUc2adfQY/hUvWkKmTo3X+/Tx9pU3Mr9hIUvdg3qrIsNxnFe6VBk5depdPbdf8AAdMExBvEQmWgSFNVvWHLLUiWBOmz5DkXBzrP2TyQTOOfLZZ59Jvbr1lIt359N7iq7gSRk5ysF356XXql1XXYi46ho3bCxZ38hmueswGlCSEW/f5gIPHsFaZdA5+vbbauDYkVKMLt4ddI2qVWtaSjcBSKlefV1iomO0IfMOOnbqKqAJxgXarHlLefNNBy/w9p07UrhQUYmOjtFrMqClT5teOnV2dA4m5MJFijtQiF9+Udfnlq3bHfcXP10ypM9o3a9uFNF3AoeTQd+0D5SfLG9k1/cA1QgDEgUIOfzWUXnlldeUv89vnqtt2/amOFmwaIl6FQwKYgY3Plu0aCkVy1e0snhhQBGLQJubnjBLNm7aouXMnTtPDT8m3UNvHtbBYZET0b185ZqkT5/JUp4Z5I85EQRiQWrWqiMTJzrQRQbUqtVqWQPqgP4DhPaHoARFDhtpDYzly5VXZdrct3kgDCWoOgQsP6hgJKBouMvZc++pMYsh4dXUW/7+90TjhQByDKnkDBDQvdy588qJX+H82q9J/dH2CTC0C+0LhYxgQ3f58NLH8qZz8rt27YYqbO/bAhYBOerUrivNm7VU5WfgwEGSIV1GwYC2CygwiL27YKwysRs5c+68vPT8S9oHzDb7Z3hYuI4XPIuRBYuXWaBFVNTkZCcyjmeiCOrVN0mAHyBJhQqVFEAA2U5ImKn93B0JNdcE+MiXr4Awdj6IQH9AYf01mT9vgWTJ/IZ6ZDkWTjGoLYJBHT4oQscNfs+eM1e8vHx0H56TVK+mUtCGDaC7gDd79jiMQTzPA/o7UDeChc149M7pM+oRvnb9uqKDeYmLatREy+Sft08zpR5YG5xfNm7crJOl6R8oacxFxLywejljJEo17QN0GXS6foPGVjEYCNApMNRQsiMiHYohbbGVfztF9hl3oCGCekIxQcqWKS/vf/CBKjbRMbGqMKNw4m1hrjMeAVa2b922Q5K4JgyHqlWrW4pRrZq1Ffyg7B7dg5R2ZzzKkcNHWu/swoX3pXy5irJlyza9DyZ5vKdGJkyYKEWLFlfP2smTpyRf3vyWJ2vd+o2qbJtj7Z94AFs5AT5WHvdv1Vqvc/X6DS3r1DunZUhEovHSqLGXYOCBtHcM7CQZ02eyPI8dO3ZRFJ7yO3buKus3btJL4SHAs0Nii9w5c4u3c8xjZ5XK1WTSxCj1lDdp6uPiYWEO6B8yQBF+QDvGI6iQUE71Gp26yPLlK/Q7xna1qtWtNkY8V45sOSwUGW8VRtoHHzhoLrwrw0DYvGWrgnTGU6AF4gH75Rf16DNf9uvrQIEJ5YMag2cNEKJmrbqC4oagu0BZMaAs7wrjwAigHACWmUuNtwFgF29YgQIF5bbTU4URBkgJkr14yTLLuwJ485dnn9f5H9Q7b558MnaMQ1cAvMuYMYvVRvBwr3IyBJg3GzVuKmFhg/R2zp49LxUrVbPiOaMnx2j9GYWdmIkPP/xIj/X18dW2BUDsSdBXoP40qO/qUbcfi/fQeMPs27kP3nPmzG9ISLAr9VMNEJ9mGhhtP8d8h4EATdUwQsz2+31itKx0Y01wPN4t2r4nIb7ywEGHzsO4iy4JGO4uGN3UA0Y5RqBdABh79w2WAwddvfTovOiwjAtGABnz5Mknu5xjp9nOJ+MZ8TZdbZ5vdKEVqxOBq/6h4RI+cIj9NJfvjBWMx+5y4YOLUqdOfU1IACjAnJklSzbVscyxV65dlxf/8qI+pyeAFLAC4N5dHokB4gjU6W2V/Z8ffpTcufJKXKyDutOwYRMLzeEgkCYq14ivX0uZNy8RbSQTDBMDyA8oGMFCCAhhghNZxS1Uo0ZtOXvOYZW9e/qsBIc4FImdO3YqGm/K5xOr11iLZjsoFJ0HKgaIfprX0+hEz37cd61aJfI04VTbXx7UriNHHMpM85b+MsNJm
+JclH0GINKsIihRxn0I5SzASQXSnbZ/8PFw2ZkUbcOGjRCUX7vAYWfQJvCpYP5C8tYRhxt01MjR4uvbwjoUDua4ceP1N8FVObLnEh8vHwuhYQceI7jnRqBGTZgYpdw+JhQ7mst9eDXx0kMHDRqqx5nzCFDjHa+3Ue+6du0ugwc7GjuDZdmyFS3OIGgviDXvlYGtTt0GsnzFSi2ODCG1a9U1Rbt8Mlg2a9naMgrZ6fDARKuHDFexXUC3Yqc4KH7QLUAYjUAha9zEW0CzKBc07aefHXzSuNip1uTP8VC2GjdqKtt37lE+Z2BgZ+WRtmrZWr0BnI/4+bVQVJjvDDQM9Bs3bNQJm2A+M+izH4rh+IlReh6oerANhSa+AsUZOXPuPVVm/P3bSrfuPVUJw7Njd5dz3MDwQVIgf4GHChacMT1B2tmMMndUdtHiJYq+EQRNFiaE/sJzGo+QbrT9YyDFA+oe0Gc7xOUryO+qNes9Zsr45M4d9Xi+956DzmM/EeQWGhBuXWiKRQoVkWE2FBFvKgF6q22UBe+mPhYtwJQ1NCJSB1Tz23zihUuYkThgoqjgBaWfGTGTMr8xQNKnzWAZgGQW4X2hdJCFivEhXdoMFmhhyjCf9IlevfuqB9Zs4xNlpGaNmlK9Wg0hCB3PFAY9QmwRysa2bdvVe8SEowZgrrzCePggQlnjxjva4f2OnxwV7TIetg/sJHgRkLXrNsqAsMHWeDdh4iSljbHv5ImTGqhvxnDGWRRtIxgckRHDzE/tiyguxGuhlBqjkrE+ykkHMpOzJ2odClqrVv5WeXwpU7qc9HfGrzAuUkdG5s5bIDVsIFT3Hr1lvBMA4xgoZSgQjRo1Ua8m4wTSp19/gXKMAIIREAsdC3nvwgcSHBKm9XH+/HlpaZtH2M9chOJhF9P3QGLnzF0gmTNmltBQh7cNJYh50EhI/1BtW+Y3SurKlY4sWHgK7NQT5p069RrK+fMOg5S+S0YeBGMQb6s79ZZ9tIum3r7WeDZjRkKSuAXQfpQNKIY5c+TShBWcO2/uPKldp55eg38dO3WR/k4jE2prqVJlpUqlKi5xWyjkpj45h/fYL7i/YOhggNiVSfSHypWqaPnTpycIHh27YJDYwQPmUahWRmrXrmuhyJRPPMGF99/X3XjgDHCH8Qpw5EmoMwy0/TbFEQMKoIMMe81a+LtQsgA0wsMc73PrVlcDBCps46Y+alQwzkDPNbrKsuUrLG849wG1qn69Boq8wwjAuOveo6eyBmjbxAMgzZq1lGPO98xv3jtgAFKlSnUXZZB+HDIgTPdBt25loxwBtjK3I9du3NQxCr0Ij0nTJl5CWzSeHT3I+Y8+TEYr6EYELeNp9SQo94AqnuSHn36WKVNi5aUXXpZ2AYFWewHUJubgiy++cjnN9CF0rMyZsrhQXl0OdPtx6dLHyhxxpwVzGH3PrgeaU5nzqQM8QlCTMDxTv542Cd2Q7ZOjY+Xevc9Vh2jcxMtl7iaOcuTocbJztytl/uOPP1Y96KaNrgUwWLBgYWv8x5Sxz0Fly5ZXyp+5R2Jv0FkZ76Ab9u7VV0HK7/+TGAdpjuUTlkyvXol6vNkH2IeXu2HDxqpD0X+gZRuBaYOH94svv1adEyPJbjhxHGySadOTvudHYoD06d1P+f/mhr786hspVrykgBB/8+236jr/4GKiK4njoFDhIgclRxF82+mCun7jljY2Jm6UHJRTgioRjAC8JwhZqJiULjonBDLK9A12IGm7du12QRj0BA//aHBTpkxVZRqkpGkTbxnpVDCIP6HTGiF4CcXPCPSbd95xeBVA46A0GcETUKdOA0XH8E7gqYBegETHTJG2yQRDE6jT0tb5mVjsnGk8ExERw2TYiFE6IKOUmkAgBng7glS/XkMZaePL0iCgcjRo0FhpEzw7isCy5Q7Fn3sj8Gv5ytWyf/8ByZ49p0sHpiF36uxQ8Hv26uMSP4GhgsvcxPdQFm7LEU4FhUm8dOlysnGjw3OAslW+XAXLxQ7fFCocin/deg3URa+V5faP+JCGjb1c3Ih4ejBaVJGokejN4lSQzLg4R/zPqlVrXDwgDFZY5aTDpYMRjG2EwRJ+upEVK1erq57BNH76TJ10yazC+/jhh5+UeoJXCYODdmmEOtmyZauQSpbgM7uxR6DkFKcCExzcXycSc97CRYtV2eT37r37ZUbCHA3Iu3f3nkCBwqAjGMwuKGq455NT/Imlsg9WnDtr1hxp376jVQyTHjxdd+OGicZu3DG5eUo4QDvAWwbiOmTIUKtc+xcGbtBKBPR47PhJyWbwwAvIwOeephfeLBQ1xhCoWygq3l4+2r7NtbgPQA5SFRoZPWqMRcMz20Bz6OvuQryNO40sKKin5MuT3/1Q/U2cBhQVI3goaH8YmvQX4giIAxoy2HO9EL/Wo2efJBnw6LctWvqrkUUmEUMV4TogprShJk28FLjYu/+gENBeqEBhGT16jLkVl0+QfP6MQDN7kEB0lBeSExhpG9DBou/gvRkzbpK2c/ZPjYtXeiLf8ZQRm8Q7Q6AQYYzTdxCADZRN5B//+KcGyhPvQn+CInTsmCNmqlKlKpZiDQrXpIm3FWOmJzv/Qe9h7DMCMojCNXKk4xr0UWII7YICx1iL0spcZOJZUAZ79OilXmM8AvXqN1JqFuf2COojY8dN0GIwzpmUMTQR6FHE80DJunDhgirQuuM+/+ibPDcIM2ML8StGGUChqlO7npVuGo97DxtSibG8adNmLR06mH2eYn6E4vGJs89BGzvs9FST7AJFjjUg3IUxr4mXrwX+MS/aPSv//Oe/BE8PiQ1u3b6j9GoTpzR5crTOB4xVCMpjnz59rUvg1QUVh6ePAY00bNTYmtv5TTuB5sSYj2fXxCuyj+sCLiDEhEBZsgvtdLYtuQTv0HgqMBzQG0ysHOWWLVvBGifQOQAMmTfoz9CdPaX5vnHjlvj4tXBJxc+8jgIPLadc+Uo6Vpv7IrgX2jSyffuOJEotNMAdu/YInvODb75lTtPyiCMxgmefbHlQBGfPXahZ7TDuiesBCAS85Bnr1Kknb72V6O1FlzGGFR5V4+WhXIyIgYMc4xLvzhgc7NuNLuWkIJ08dVomx8SpPsZYRLIMUtQSLG4X5o8GDZoIHjfk6tXrmgiEOdhdAFLd47SI9/rqq0SaLl59wJ8pTgOK/ouninnQLvR1gKfPv/hCadP2OAT7cdy7MfCIvVi6YrV6Ku3HmO9Lly4T6GvucvTYCc3uSNuCesf4XqhAIWnZIhH8oM8BrhughHgnPAlvv51ItwJQJs0xAd52gW5F37THaGHw472zeznMOZ/e/UxeeeVVq12zPXrKVCFTI3MlxlK3rt01oyLJRzwJiVqIV3UXjPR+IWHOOehuEi8PYx1xMEboB8Ti2oWx0lPa4UdigDB5Zs+R23IjwdmkA6L00CDg7oJaGUEhZdIiJoHYAlARhIbr49vCUsxAgvPnL2ROE/82ARp8zIabt25pQI7xLDBBhA1yWNl37tyRho2augTCgAj++9+J/GzK2L17r5QoUcYqv2iRYoLLESGW
hWAoI+3aB8rY8Y4Jh20gNka5mTRpsmTPltNCP0HMSHGJMDgwedy86Qg8wg3VIdAVqTfXYFAgUPrW7U90TQCQGxQkI7jXM2fOan4qRxfXIRIaGqZKstnZoH4jiXUG+GOwGGSENGvcL4KbFuXXCA0e1ycDCmlrcY8ioN61atezJmYUKnvQIe+N+lixcpWFWBK8bpQgBgRQL+h1CO2lVKkymmOaiffLr/+uaWJRBKFQJCcE4xGfACqBwMWuW7+RDoQEiMJ9N+5cELOmXn7WWhZM0ASO2wWEK03qtLJx02aXYGOQANz2hs/bunU7Ge5Ukvr0DVHqlikHlJ4AVKhbpKpbvMTRuWlbFStVFdoiA2iBgkWs4EeQi6BefVSxoRySCeTOk9+i6zAJvJE5q2ZBop2Txcoe0Mo7p424CzEwoA9QR+yyYNFSK/2sfTsGEYaQXaDCsc1QS9hHrMrU+EQDm8nMXYkjNa1BgKE54fEiK5dd8I4MiRhuBZguWbbSharFMxkaBedB98KodkdTyCj2Tw8LOTGZkfnJSK9efTXFrfk9IGyQ0hbMbz4nx8S6TMZmH5MyCpBdMIReT5UmCZqHslS7dn2LPsWAvX5D0nZM1i9iAwyKbi/75Kl3ndlZXOkMTJCg1PfLRW8vh++AEoy/7utQ0CYJBjS59jkWgMM+BriXZX6j0MG7NtKhQ2dLYcToDezUzeovXTp3kVR/e03eOnpM3fTVqtaw2mt8/HTt+yaDGgaIGY9QckHLEeIQ0qVNrzGB/EZJHDfWQSlhTAX0OWkzLs19wVPOlTuf9Z4BDcjKYjwp0PLediaC4BzaFoofcXnEDBnaDPvwOA11UrBAgrNnz6W0H/b17NVXxk+YpJelb0LRNIYyZQwIH6zeVehLeEDIXmYEpdoeh8P29957X4oWLaHZ4viNx6uvk94zffoM/W3O7z8gTJVG8xsvEl4wBOAufbqMlqG0d99+8bPRfUjv/bYzGHT1mrVKKzPl2D+Jj/Dxa67sAAwxFGuMaSMk+Xg9dVpLya5evaaF/qK84+E2goGEhxYBDQYdR3j3BLAjjLctmju8vvwG8GD8YAyg/RhKH/vwUkChRfCEGkNQNwBUBnTQuDvTz4YOjbDqkrEXsMAkEMFbhYcM1gXyzd//oe0Bb8X9MmmRRUrbpNMIhWJXq3Z91XkwBrJnzSHE7CFQoFq2amspmYA33KNdaDPEeixbtkINV7OPdgMtkEQXCGCLoSmSNAU2ghG8pTwPQvuZ4PTkkUWpdJny8p4zMQRgIOCTEQxWDF/k2PET6sE2SvOyZcslTao0iroDegG8QpkzcurUacGDYBcMtmJFS8hI55zJvs5deljtw37s/IVLNAbLDo7BVMAItAvnQzlDWNcD2uR/fkhE8gFeZs+ZL2SlQgBWsmbNkQQoW75itWzf4UhIAF3q3TOu9F7KseuJa9escwHXKZt0tsbY1os5/5GJET0J/QYBOK9Rq64G85vjvLybCQlHjPzwoyMGxMydZjvMgIhhoyzjxWyPjZuqNCcT88V2AASAFWKXDMULCvShNxOvY84nHAFPiaf084Qf2IFXcw5gUNjAoR49XRyDcUMGRyPET9LP7UJfN5R8+/ZHYoBQ4IKFSzTPvn/rNtKlWw8rQBs+Hvy3vn1DLKuT7BT9QweqMQGPGJTBNHjSYsKtxMojwLhWrbqKXJPTnuBGBo+7n32u2YIol8nrwsUPNagL2tcIJ5+ZiYjAGQY/0CImeGP1mgqg49AZQetZbAxDiTgTOPcok0T606EJJiWQLVvW7HL02HE1np5//gU1FphIUdZAXRjIsCCnxicIFiyI8IDQgUKQGS8X5BHFmyC9ufMXWhO2uR8UA+guZKzasXufYBBhCJiGBN+3StXq4u/fRpVCkGi8IGQ8IUA0V648GrT03vsf6L1zPko7BhFuPhQ0gr+vXbum9AAQZFLScc/cPwosGasQUOqq1WqoRwILf4NToUKJIUAMFAlXNJ2xpwAAIABJREFUsZHu3YIc1JuT72jO6mxvZJfSpcvI+QsfaKYUbQP9QuS7f30vQ4cO0zZh1m/AIqfhz5m3QMaMHaf8ZjuX3lyDgCyeFyWWOoL2sN+ZCo5jUOSZfIlHiRg2QgjaRxgIoFC8+sqrVvwG23Edo9xDO7PLjISZOtCSphX366jR46wgPFyZIOu+vs0kpH+Y7Nq9T9sVfHIf32baZkFDQK1NlismUdoyqezozPBGc+bMLUWLlLAC00aOGiOFChfVjguqnzN7DsszAELEszJpjR0/US5fvaYZUuz3bL5DvyhTtoKi7kzuvPOFi5clWcSI44nHiomNtzKP/fLzL0ofxK2MQUFWMzx/8JMRkh1AK8qbO58+Pwob1DFiewoXLu5i+JDWEBofAce8W1Ae6oCFjBASUhD7RTwP75KYIiYQkxkKpZb1MQjspv1BlcMgpG9C8TIGoRamwaP71cilnZHwgT4J+EGsAnnMoW+S4YR3YQTaBVmw8Ja4C8hsVHRcEpSNJAKk4aUeeC6M085dummGJsqg35MRizgme/9g34ZNWzQGATSPgGIjGIIoWaz/Qp0Sa4QostwtSBMv1G/QyAVtNecm90k8BZM4cU8oskwKJOVg8rbkF1EP1JtOxJVc8CYTnHUM3uYrV3VsS5s6nWZ6AczJl6+gKlmkCgYNJztLpUqV1YMKcEEMw+Ejx/S9PvfcXzQfPJ5xPFq8IxIDgPbmyZVHxznoHfRl6gD6Ju8FLynBvYz9xOMR/4SHDiOGxCYg6/QFdyHLUZUqNaSVf2s1pk3OeqhNXJt3ZmhHIMosCAq6ylwEILbBGaNATEOhQkWkbbsATYAB8gy9h/GWGDfonwRoQ++kXLzTZOnDq5gtWw6LGoQR5O3TXMsh+Bmvpt3A5/7pi3hM8QwwlxJ8DK2QRC1wu4nHIrHKlWs3pEyZcspxx5NMPyGZSJPGXlYCka5dukuxYiUkqGdviYqJU48tSiHj+1+ee16zPtI3iKlJlya9jgF4a+zC/dAm8ajsO3BYqlSuqujr5q2O7DZ4oUDKeT+sSUBCGDwGYeGDVPnNmCGzxmhdv3lbChUsLJkzZlE6FbFeGMG0RwAClFnmZShK1A3n+/o1V288HlCEd4XByRoDjAXELSFQnYlLKlGitByyxXZFRg7T1KjbduzWIHTmdJKKkBKbd8y7ol8ADBETyW8yryEAMOgLxLUyL6CcE/fiLjdu3XZQywaEKwVN6bY28Aya0BtZsgqUG+gpUGkRlFTa0EsvvuyyeCRjG2MldDa7YCRSz4zTKHmh4YMs7yExR3gn8ICiBBKbCLMBwLBlS39NYENyFbx6MESMwLQAyCMm8cIHH2oCASh0UCkRnj1P3gI6xsJ8IOEFge3Ips1bVMnmXhmLmZMJpHYXEH36jpe3rwweEqnXcT+G39ARYX0Y451txN6wuCYUs3btAlSxJrkO8sGHl8TXp7lkzpBZoELTXrgPUrGzNpVdABVKliwrJC0gLpf2NWPmHNVxoMdDV2SMjIwcrjpQu4AOamiZvnDi1Luq50CHnDDJAWyhuOO
5o83AnrALmfGKFCmm4KFh6rzzzmltuySY4f3B1LDrocTT0B/wCroL79MdIOQYZS506KRzKIo9Rg2JLdA3kSVLV2jqd5IIGYPMlM18+9eXX9G09yYpC14YdBxiqumrUKWMpx2KJewU5jR0AcYyd8EDjM6CXkpSBrKTfvpp4tpdzOk8IyAQ4x7lEIeHPDIDhMJAOElVaYKR2QYaSgYXJkQTZAUlhUA0KnLpsuWKWk+Jm2ZZ0gQoYi0x8aEIszI07mJSy5K2jsGTFRcpFxSb4+Dpr1u7Tlg/wAiVB5oNkmH4gWaf+YQnFx8/TV28t2/f0TJB97kHKEUoFFevX1cqAYFtKBUExZJNgmeyrysAdYbJ0fDlQS1xA65Zt06RBegn69at1/UOCCi0W/3mfmgUIHQ0SAYfOjzcPyPcDylo6eBw7lC6CfoELQJZAY3nDxoBHiSzGCAoFh4UOMYIVCEG8+Mn31E0iDLcBYNpwsRJLmmVoRmQUYr1IoznivNw9Zs4DoJl8WRQPxcvfaRZLXgO8mBTJyD0vC8T3Ie1TnYSzp8zZ56m5iRY071+OG9IxEh568gx5fC6B25xH2T2IBOKSRnHNgwQ7gcPjTvS8PU33yoFy/7sGEYkNSC2gAnKXVjwcnJ0jLqOrzvTb4K2oXhwj7QDk6DAnAuqQiIC3L/UPV4X0iKbTGocx/OTEYg6M3El5vyNGzepC50JnAnmfoJCCcUAQ8q8k+SOx0jC/W9XzDkWBZysGMajwzZScc6bt0Db2cZNm2TBgoWyYOEipSqRgthdiK3C8OYccoWblWjxmEFNW7NmrUC/I/5r7rz5VtukHIwdst2B7DIQM2jTHmhTtFUQIDs1gvdOnTI+oECaYECUDLKQeRoDSE+I0ZOckO1l6zZH2nD7MaBkZGFjEKW/g3gawYtDVh9ogfb87Twzyjr0C9ohAYVGuHfKw0sHZ9ik82Scg4sPpZR+4e7ZMucn9wlgwVhB/c+cNTsJvQtlDJTNCOMdbdQ9XSbACO8K7joegytXrun4Aj0BTwWCBzJqcrTg/Qat/bczhSPjFOM0fQmEFiODd0dmFoK5qQ/24+lD6NPwvmk7eBfpS/yR5IE2c+fOXeXBM36QTYg0uZ4ELxRBlXYaBugufY6xwEyw0ISYM1g7x8xFMVOmWmMunHvmEGIoUMppq9wP1DLKpq6YD3gGPE5QzWij6zds0Oc0hgZef6hUtF13+qS5f54ZGhgp1e/e/UwVsfnzFzhifHbvUa/G7U8+1bGfmA/GYsYdVqvmmQytjfJog2SghKqDMPcy3rIGBeMDbQtFjzYHqGSULnMvfFJfe/buVe8u74wYP3s/ZyxjnKHNfvn1N0qZhrZIHySTEdf+9NN72u6ZL41HgkB1+s2Zs444JRZlQ0k8cOiwzi20Wff7gb5Cu0APMAKAQLn0+3O2OZJ2RewE4wWA2ubNW7R+UIBoRwQMUweMDbTLDZs2aV1QLgHjgKacz7ueN2+hZqQEALELwF7kiNF6z7Qb1htyF3QFFD47XRW0mjGc+6av2OUf3/3LAs3Mdgx1YoHw0OBpdW87JNphLqJ/M2YieNegwdD3oD0ZRdOUySfjE22SOZ65HL0EY9wI75uxGQWZNWHsQrl46YlptHus7cfwHa/O+AkTrQxt7vvNb5IhrFi5xsXzwD7ugTlog5NeyDZAC+iBjKXoIfRFxnbmD0PHN+XyiYFO/BLt0s7aoA2SGIk4QtoU4y9jgKHKcy7thfGCcQeQCO8Z4BD3yzunf9iFsYJj6WOM26RRR9CTAMo5xy7opIGdump/t28333mPsXHTPNKq0THpJ/Qj6skuZCADtOTd273d3A99c8fOnTqfGDAKEJ17Y8xgTGNcMHHIjPcwThgnqGd3vcRcl8yxtEHGOPc2CrhKBj6EZzb1Q3jCIzVAzM3c75MbII+0+8qaKNlmsLzf+Sn7Hk0NQA1o1aqty6T1aEp+uFJAs6KiEmkz5mxWanU3QKBVQbNzn5zMOQ/7CQoCGukuKMsjR3rm0Lsfa36TNq9du0AXZdTse5w/6Y9z5i1Ur517YOzjfN+/594wtkD7Jk6KSWJ42cuF6hMxbKR6udzbov24J+07ExGGbtfuQRbHmDoBOAAxc6cHPWnP9zD3u3zFKmutKXMe6zuQ6CNF/nc1AGX5gpM+9L+7quuVMLBB3d2FNNCMk3bBeBk0ZHiStmM/5mG+0//27E0M7DXnrl23waKPmW0P8tmufUcBgHhShPTei5Ys90gNelKe4WHuk7klfOBQNSrtHhH3MlDsx46PUhaMext0P/Zx+/2f7/8jMxJmCymoYYggALGANoRnYIz/zw0QJr/Q8MGa5WTJkqVqVbFSowlMfdwq8Wm9HxCe1KnTKMJvKB9/xrOCXhUsWFRRRviWrE2BUeBujGJVE+hEMDfIrd3z9LD3TZwLC+4pwu7kg5oycDE2btxEg95A/B9EQDnhp8Lhha/9pAlue+rceO6etPt/2PslWxTPaxZBu9/5oLys5QEn92kR4komx0xVb5x5JiYIKIXGo2G2P+2fI0ePlWYtWinKt3TJMlm7doNcvZaUcvO018Of+Xwg9BkyZFQqjKeA+P/VvZFRr3iJ0kLsJR4/qHVk+zFxFeY+AMCIMSCuEW87aPpvFYwuvFzEtLDwnF3wUuFVz54th0vchf0Y9+94haG/pkuXQWPVfs1b7n7+n/Wb+Rx6q6fYxj/rnv7I6xK/SrjBgwiL0gKI/J529iDXedTH4E2Mio51yfiHp7xvvwFWVtj/uQFiHhKXFHx+OPZTpyYGuJr9KZ9/bA2QDpMYEBCfe7a1C/7Yq3ouHY42wW1wx+Fp2l3B5gxcf8RWsOAbAXMgUL9V1q/fKI0be2u6S/cyyEBCykbiH0ywpPsx7r+JIYA737p1G00xDd3mSZQnDWH5rXX8W57zFzcE9Lde+3E4zxPihocHQ/T/owBowJknNgTu+ZPaf5/Ud4fHmfon/olYoD9TiB0loQpxbSQtIWjXXYhbILYiMLCjZgGz01zcj/2131AxiUNksVl3OX32vJBhlHnankHR/Tj7b4wjsg0xf7G2x8Mkr7CX82d9/y1j8591r7/nuiYJx8OU8aTVjaf5hGcwoRg8+59mgDxMxaccm1IDKTWQUgMpNZBSAyk1kFIDKTWQUgMpNfB01ECKAfJ0vMeUp0ipgZQaSKmBlBpIqYGUGkipgZQaSKmBJ6IGHpkBQlD57+HlPxG19YhukuwTZmGoR1RkssWQIpPsGP9fAoyTrYj77CBLBQG4j5N870zF+mffEyn0yMBisgZ5uh+T0tTTvj9iG/dyv+wrf8Q1/4wycWHfc6YifdDrk/HtaQmY//mnn+W77347v/5B6yy54/6PvbOAr+pK/ni7u3WKtrS0UNw1EAKE4BZDAgESHIIEgoZAQnAN7l4guLs7xa24SylQilS38t9tu92d/+c7753LfS8vlLa0ZbeZzwdu3r3n2rnnzBn5zQy80xNR8MxeK8BTm5R9jh744ssvNUPa79Ef8CqK3pFVK4U89wBZr562OAsgbU8LkUHtUb
GQ8AR79sWn5bn/W5/jVysg4LnIZEQef5Pi9b+1M36v5ybF65xkqlE+6WcgtWOnLtHysS0v85O+x3/b9RDs7Jm0+g8g7eLDivB/5PuQQq9xk+Yuedv/yOe5d/++dO4aYxVysz/LzVsfab0C0gE+LpFcgJTCBBzu2rVbU6mSwtReAO5R16Kg5rQZs4S00hD1CEihaE9nzP5Tp05rGu29e/dpukWyjjyKSEk4b/4CTS1J0B9EljRDpFckZSMZcTwRqWdJkf0oQnB9lJGGYnczZsyUTz75RC+D0WDo8FGaivtR1zXHNm7cIouXrnDJ2EM6ZVJukrLSFOMz7T1twbN7UmDoH9I0km70zJmzmk6X9Kvr163X9L6khCUVON+V9LikbKVgV3L05ZdfCrFYnHfo0BFN80gqUFJZGiIzEKlZ97qlKzXHf6vt5ctXtD4A6XTdacuWbVqT5+emRHa/zp/lNwWCySRnirr+lu8Nr2K+kNY9hR72gH0+L1y81KUG0cNWv/9fpHitVrWG1gT6/e/uuCOGPnsdE2pgUKvDnVhvunWL0ZojvzQWg9g7aiGl0MMe+NUKCDnrCR72K1fxDw8ge/haT/dfCBbff//97/KQMB+sdb900iT3kOTjtgtoybV7Gvdfvf6BS7pciguZfP1/9PNOmDBZChYqqsUu/4hnuf7BB1bRKe7PuCFLmqfA5QOHjsirr6S2Ku4+zvNSEKtmrTqSM0dODfbs1LmbFvR6I+ObMnPmu4+8BHV9Itq0VyGXhtR6oOAjxdAI4pw4aYoWEuMYedgp+lm0iJdWrfZUsI52pAsg/ztVr0NDGwrFmyZNniadu0ZroTPmz/yFizVrH8GpVarVSPJtNmzaLNmz55TBQxKSfX5qIFSvEZQkd7w5gUQMzZq3FCpK8xwfOuv1YNQJC28qFMV7FC1bvkpie/V1GdcUVaOAW/0GYVrUkVSn1DVIjvr07qt96X78+Psnlb83b95K2rSJ1GKpFFOL6thFC8WuXL1WlRwK5HkVKy7tO3SU+g3CtRAZSosnwgpLkS2KzlKdvGt0jHB975KlJDo6xqoZQ0rg4SPHytp1SesseLruk9iHkvXCcy9qvQj365EhiQK19EkK/XQPwDdMTZ6fbv1rW/xH+bin4Ndfc2WUzRUr1/yaS/wh58K7WOvsAjZ9Y4q9/iEPZbspa8ErL6WSyZOn2vb+vn9SwNLUT+HOyGWeZDPqkpUu4yesV7+U8JxUrxHosYr6L73mf/t5v1oBoQOohtmxc7RLYTpPHWPXxD0dT9n339EDWCZ7xMbL3XuOSrX/HU/teEqKQGEFelqJaqZx8f1kpbPy/O/9nFTBpYjV4xIVyQcMGPi4zbUdQmUpnzKyZ89e6zyKjaZLky7ZYntY06mmikUfwqPXuGlLLYhnLkIlb9KqGlq0ZJmeY3572rZr10GyZcvpkgac4mTP/e0FmTdvnnoDuI4hCl8VLFhEbnz40NsxJ3GeVp6eOi1pMU9zHtnVqJ47dOgws8vaHjl2XGr4B1tVZqOiOmmVXtPgw5s3pW1klBZfNfvs2337D0r/gUMEuIsh6nwU8/J28SyhwKVLm0FiY3uZZi7bpk1bCIqgvYAiDSiquXipoxIxv2vUCNBiqfxNBjhTKBVPlr9/oHVNMvFQEf7ju/etfe5/NKjfUIYPH2HtRthL82paGTp0uLUPQwf8Zp1bmlKrwW/wB0qUJ4UYgxsV2CkemUJ/jh6g2nZ09x7/dS97+/YdrTXxND84RRZJZfxHEN7sPv0HaeHUx7n/2nUbtHDrLzVWnjl3Qd58I5Ma4B7nfn+GNk9EASF3fK/e/bTACPCiNm0j5UdnKlIWkg4dOqqlkRogMT3ipGPHLlqEBEF26bKVWtGbWiDVqwfIpStXtd/nzl8oQUG1XLRTctV37RYj9erWly5dulk4cAYGBXugqI6dk11gzQdlca5apbq0bdfeqvqLcrRz1x7Zd+CQCjndontotVjOGTV6rJT1q6B1I/htYBTf/fAvaRfZQa2GptIjxw8ePKzPA+QiuntPaR/ZwUrtiEUZIQ+4haFLl69K48bNJCys8SPxmes3bpbadepJk6bNhVz+EJjFpctXCtXlgcKQ1vie85jjnXbL4iXLXapnk/awcuWq0iu+j3kEjREZP2GSClbAZKpWrSG797xnHTd/YDHu3ae/ZEibTipVrKyWZo7Nm79QqK8RE9NTq3Oyj+rn3CcmJla++/4HvQTWFyozkyMad2edOnVdqthu3LRZC9dcuXLNgoKQGhFrLuOsZs060tutWNTJU2cUtlQnpJ7MTpxrHlW/H+5UYjx69+4rzZq1kBLFvaVokaI6DvmO1BdhrBGXA1EhF2gIAi+CBufZGQ73qlU7RNq3j5IfnBXJ7RYmc3Oqd1Pplm8bFFxLxowZZw7pFiEPC3DlytVkx87d1jGqLQN5OnjosFYdbdWytUREtLFgKOSmDwisaRXzWbt+o1CsaMiQBAkPbyzfu6VS7dt3gN6Das8QFslVa9YJ8Clyi1er7q8VjDkG3KVw4aJSMH8BGTBoqI6pb779VosJff7559Yzkia5QVgjTQ2ZMHyk1nAxB6nijsWdFJYPkknvTG2NihWryIkTjkrI5twc2XPqXDO/7Vuqlg8YOMTaxTfNm7eAi5Wasbl6zXpr3CxYuET69Bso7lWMzUVQgHLmzJOk5gzH9x84JHv37jdNXbYtWrbRSt72nRMnT5URo8bYd1l/M5+mTn9XhiUMl7ffyiwU1LRT69ZtdSyYfVjkaofUd4GVUbcmsn2UaeKy7d4jzqUS8/Hj70vaNOlkwYJFLu34MWnSFFVC3J8Bb9LGLdvVYwRvtZPdcMRYDwmpJ/sPPORfpu2CBQslpE5d81PwkOXJnU+rCVs73f4ID2ssM6a7Km7+/kGaftTedP6CRRIX39e+K9m/qVAcEBAsjRo3dakyTNFBKppTrZy1ZcF818rE1HuoVbuubNqyTeJ795E5cx7yEnMz4gvCGzXRderoseO6ZgE7g3bu3iO1a9eVnTsdqVtZC6njYGB7ixYt0TWGdS8svIlMmTpNz9uwaYvOQ1LT22na9Jmq7HXuEm2tC1Q7nzU7UT7/8kvlvbyHub/9XPM30BHg0VWrVndJKXv4yFGtHs38HjBgsMIhOQdeS/8PHfZQATTXIhUs6wLp0E3cFwaTd2clypWrV2XCpClSr14DXQfMOe+9t1eYG3aK69VHyleoJEuS8Y7Rduu2HdIwrJHUrFlbFi50jGPg3lQGp1YT60ONGoFaEdtcm3HPWm2PD3tv7371XgYF1bSMF7QH/smaQuxqu8go6d49xlzGZbtw0VLJnTuvFPcqLjPena3eA7yV8HUqUA8Y5OBJIAKaNW8l1avXkL37HHyDtWXJ0hVy5+492bZth46V/QcPW9fH0NGoUVPhGR2Q0v8IMVzbd+6WL778SgYPGaZ94J5ynnUbIwB1sQxRI2LBoiXy0Z27ei+8qKH1GkjuHLmc/eRYY8aNm2jBHOEzy1eu1lS9cXG9JTIySr765ltzSd126RotlSpX1cr27
PDkHcCbiafz9NlzOtbato0Ue1wHz9Qqoq3KbXwfOyELAjsFosc6Vzeknqx2VvamJkxo/TBhHkCLFi9RuYH5Wb58JQEOaSf6GhnSDm+Hh+PRvPfgU+WxnTp11lOAy8KbsmfNLm3bddCK8BzAmATc1hByADIHY3zRkuUydNgIC779j+++kx49ewlyx+Wr18wpyW4XL10prGMlvX1SPKjOXnoiCgjwAixfKAYoEsHBtSTeuVhMnz5TXk2V2gpEAwJQonhJ4ZwtW3fIW5kyi3eJkrJ0+QqBYZcq5asC8+kzZ6VuSKgye1xXCIHto7qoQM8A4Rq7d+8RLFI5c+SWdKnTypBhI7SQXdmy5RSO4WkkLF22XIKDa0vinLkCw+d+CMNguH1K+UqDBo1kyrSZ0jOut9bHYMGuU6eevPvuLKGkfIXylQSm9MP3P4h/jUBVBiha5ONTWiioBJUp5avWztFjJyiTQug1ebxhelhCwTtDO3bsVPgHEIuxY8cpg795M2khLPqxfPnKOjnaR0ZJn779NZ8yTAhhA2bMtZs2ba7CH9dGSCtQsIhCXIwQAQOi4Ni8ufMltH5Dhc/RdvnKVfpcvmX8ZN2GTTJgwCBVuugbd+I9C+QrqEI4VlUmrVp3h42QseMmaHXP2bMT9V0QSgL8A1UJ5ToTJk7Wtq3btJMjR44pI+3UqaveggJxKFngwMMaNVU8PgdqVA+QvzzzF2W4S5YsVQhIXK/eeg649m7de+p3p7Blrlx5LYtqbFy8PPPMs0KV1YSEkVpRtldcvJ6/avUaVcqiY3rqOTAkijaVLl1WUr2cSlasXitLlizTwodGEUMo7di5m8YCjBw1RgIDa6ryieBup0tXrsnrGV6X19O/JivXrJc5cxIlfdoMlqv5wsXL0qffIBXMBg8eqhb4zZu36iU++eRT6RrdU2uhIBzUq1dfxzeLLsQiy/VY1Khb8Nxfn9MYn23bduqCg9UZYr5s2bZTvQxLlyyT0Abh8uHN27Lnvb3y4vMvSgkvb1mzbqMWTixS2EvnEgtJhw5RUr5cBV1wWPBZFLCKf+UstkUfDBw8TBYvXiJTps6QLFmyWpZi3mvt+k0qgLFIto/q7LHY3507d8WvbHmNuWCMgrlHUeRdDfRIX8L5H5DF4SPHWEYGc4y5ybgIaxiu/WH2m23i3AVqGEkuoL9nzzhpFN7YNH+s7aHDR3Uhci9M2K//IBk9ZnySa/z47x81nmf/gYPCgpU1a3ZhTtgJAwVwMUPfff8vKVzYSxdmsw+LOzzpgTM+xOxncWcsnT591uySkSNGySsvvWLFpDD3zfw/evyE5M9fKAkMYNSYCXL/wadaGJZn+dwZB2Nd1PkHQZhA3vZ4ME5Mmz5DlWBzDjyP4pwIbMkRsKu42F4C9O/KlasqeMJL4al2AibZf+BQj0YRe7vNm7dI9uy5dFyPHDlaqlbzVwgK68nfnv2r5M9bQHbt2Sf9+vaX8uUqWoYcvPjdonsKPGTM2PGSPVsO2brVVcDhPijwwYHBCpXbvHWHdOvWXQKDaul1mD+lSpWxeN22bdu1WN2uPe+p0pkh3WuS5e0sur5ghGGdbNO2vWzesl2mT5+hbekDqH//gVKoUFGZmzhPeTqCIMSaBa9t0CBMYx34Fnj+PBEVh5mD8+Yt0P4oVbqsbNy4SZsy/yhwt2vPfvW2wRdINoHRBd49fuJkNdJ17OgQ2Fj/MBosXbJUlQKMdxA1vHieyMiOwtwICAhSIyDH8I5lz5ZT+Yk2FpG+/QbK6NFjlWew9k6YMNEcsrZ73tsnNWvXU8Vj8KDBUqFCZeXNGLS4V5kyZWX12g26RpXxLWcFDVMYsLiXt5gaCxhK+g0YIsuXr5TobjGSL29+nRPw+SKFisiLzz0vEyZP04KDPj6lZKtz/bYeRESVO/hL7Vp1FJK5bMUqKV68pKD0U2Rt8tQZqkAwBpYvXyGjR42R8MbN5KuvvlZlM/PbWdQ7SowhvJ4+RIklkcaqNevl6NFjWosLqCRKXf8BgyXNq2nUkMg6i3GiYVhjy4DYMzZexxdKGYYevheEUTVHjtxqOBo3YbLEx/eR8eMnSLWq1WTKlKnKVydNnqr9By+CItq0k1dfTa0eb5Qp5JkePeP0GMZi4JYYPVhvUBQqVqgsa9a6xkX9oGtEW70ucgWyWfnyFSXMyVc///wL9V4Sh7Z40RJdX1u0aKX34D+uP2GCow5czx6xkipVGsGjC3362WcyaNAQIf6M4op8e4T3A4eOypAhQyUkXonrAAAgAElEQVRN6nQa60fb9Ru3qEFt08ZNem/qq0BlSpeVZ595VsaMmyirV6+V3DlzS8eoTlpHbPSYceLt7aPjkbV2246deo9RI0fruTdv3pJWEe1UVty4aYsqYjE9YvUYisnGzdvVw4wCReX5RxXTJi6JtREq7lVC2rRuq3//2f97IgoI1oCixYrrwkeHrlm9Vhc5BvHfv/pKKlWupoOIY0w8KiECpUDbr1athtSrG2p9h3cyvyMjRozS3wg73iVLC4LpP/7xTxnlXODxInDeuPETdUEYNWqMCnzbt+/U83r16iMhIQ+vaS7Os2A9hsEa8vOrIK1aReh1GJAwCLt1EEVngzMYkUWxcKEi+jxffPGF2CdSKZ9Syiy4bt++/STzW5ktLXfg4KES09MBe+BdsEbBrCCsHCg2hijOaGAmZh/bufPmW5bqxLnzpH7DRvL1198KQaNFChezLDiHDx/RSYfAgSVo0OBhFgyFYE+v4j6WRY4F4pWXU6ni89lnn0ulipWU2RuMaAnvUiok2p/D/I3ljYBpCMXnrbcyq7Jljg8ePEQQBqCI1m2krK+f/k3bNKnT6kLPjg0bN6qVg7+vXb/u8ODcu6/BWqYS+ciRY6REiZLy2WeOQGIUnlw5c6vVguJVhmlyjdGjx0jRosV1Ebp46ZKUK1dRCEQ2xILnHxBsfqo1sXKV6lr9mD4bN3a8FCvqJXjboMj2HQWPEdSv3wC1nOoPERVu+vUf6AJ94RhjvHVEG6lYoZKOW/aBr0eR+r9//EOaNGkhFII01KF9B7U8s3CiOHTpFiN4UCCsaM1aRFhZ0/AwYLWDsNC+lelty9INtAmL/oULF1XoA/N/+fJlTQ5Bu5YtWgltqlapqgYDg5XmWbE0QoxLhEJD4HN9fcvpc7OvYXgTwZILoZRky5ZDpk+brr/xBg4fMVKfFQGLBWPr1u16zP4f51WsUFEND3j9AhHoylcSu3XQ3v7s2XPaB3asrjnOfd7O9LZkfO0NiWzXXj2t5thPKSDEMSD8Py5h/U8YMVoFEfdzUEDGeAjGRxCZnbjAaj55ynSNj7DH1Fy/fkPxxZ27dJN9+/bLxImTVfi0B2RrscvBCSrsWBcTETwDCJlYog0heGPdM0qrXQG5dfuOziUUckOHjhyTrdsfFlyD506e4mq1Nm1RQBgrnhSQpUuXS9UqePR2yZYtWyVn9pxSsWJlQUhJjlAC8+TOox7gWjXrSJWqNSQ5KBtCL33zKJo1e44MdMLw8CK+kyWbeqvg/b6lfdUYYjyW8FCE
XYi4JOPx+OyLL1RoXLUqKe4fb5pvmbJSp3aInof30at4Sas/MLDY+REeODz08OJOHTtLtqzZ1eDFyU2aNJNyfhX0OvxXG+vvase879y5q3XN+PjeFn/AAIEXbdCgoXoegk2duvXl44/vWdcxfwAjw/JsCMX0tfSvC++3adNmKVrUyxL2aMNzI2hCeFLhGfBTqGvXaF3X+ZtCrX5ly+l+rpM+bXpJTJynv1HasFrj5eEfnlliRCGESAR3Q3jsjaHS7GPLGoFnFyKxAYY3xi3KNzy1QvmKYowKKFV4haFJEyfr+sHfjPka/kGy2AafRIkoUriotu3du4+OT8NT4uL7qPCvB93+69u3vzWmKIjr5VVCwho2sgqqMYZmznQELuMh53scPHREUR5Yx4MCa1pXROk6e/asxtXRFwYJgYAL/0fp4LvMmOGIh8PA+s472TWxA7BT+CTB9hDzO2PGTKpUkjijTkio4H2g3yHaY2Q0hILpHxAkVOGGiMGAT0x1rm/w0hrV/fUY5xYsUFiQCyCMBMxtPPR2op+Z94UKFbHkFsZklsxZ1cjMe+CZNwRfyJ4jl2UcAR2DDGSoa9fullIK9PL2Rx/robt37+mYi7Txa4yu/fr1V089iVuQN+7dfyD1QurJyy++ol4VjD28xzVn8hDGVv78BfWaGJ2Da4ZYmQZPnTmr69m8uY6xPHHyNFXozLMxX1njoOUrVkrr1u2EQPoz586rIRDjb3I0d/5iwcMKbd6yVSpVqmoZ9ZI758+w/4koICdPnhb/AFycDisc2i446S+/+lotW6VL+1q4Yj5Yq9btrN/8bVxsaJxopCYjEVlWyles4gLD4uNR6TNHjpxWJes1a9aIt3cp+f4Hx0LXpUu0NG6cFMcO1MWntJ+cv3DJ+rZor2bSrd+wSXrF97OO8QfeEsMMEhPnJlFs3tu7T4NUWRQ6OwOUaIeSYQgYFhPEULt27WWhEx4xZtykZAUv095sYfZYgrGyhdQNtayatUNCLeUEnHS1av5WoNmKlastBWTqlKkuwjfXZVK1jmittwCiRPVUiMmJJ4nsOe707bff6gTiO0P0Z/ESPhYu3LQHikZWHj9fP4VrsR84Sr68BeSME8u/cNFiCQqubcGcWjRvJa8Q2GyDADAOcMcbwtJerWp1WbNug3pZsIQbwppevkJlS/giGJasPYZQDh2T3+FqRkgJCKxlMc8VK1dJeHgT01xatmqt3hN2rFmzVipUrGIdC60fnmxw4qhRoyXCtvijGBYuVFTGjJ9see/MhbDUFSrsZcVQ4WWBURpCAUORhJhjKPzQ6dNnpGTJ0lZ2I5QXICRYClGWsRZ16tJNLZy4trFAQcDHgFEYmj0nUQOk+Y0FuH5oA3NILc5VqlQXLNAIwbzzkSNH9Tiwv2rVA2T6dIcCglAWHFxTA/WaNWsprVpGqFJpXcz5x0d37kiFCpVUYUWpJOvT9h07BT5BILk74SmrF9pQgDkYwriBJdPQoAGDNDgYuIqhxLnz1brIs3sivDvEWzwuHT32vuDx80RU0B49NqkHhPbtO3SSqVNn6FzCw4RX4IMbrl7Ow0ePafB57z79pGSJklr92H4fFGL1ADgFZnNsypRpCiEBfmYIJTrVK6/KB85Adgw2CAoQi2ChgkVkvs0LA8aduY8gB5SCQHLGiydi7gNJMF5BextgJiW9S0mTpi1UoWjapJnlScGyP278BBk/fqIK+kYJgE8PHjREg+exeJINDQETGBhWSTsB7cWa+1OE92bbjl3SIaqzZHk7swU9atgwXI0InE86UpQdlAzGEl6/D51xPShzvCMeUHfCgAD/MEYJPPllyvjJPiekFgtpfG8HVAzPHYH6WKehsWPGuRgwUIAjIx2eBAQ9PKogCAwhHG/fuUf7g7nxz+9/UGMHHgAD84FPlKtQ2VpPzblsq9cIcJlPJDRgXb565apCd1EUMHAYYgzDLw3hvbAnEdiydZtas/PmzS/BQQ6h+sKly3rN06dP62lLli5Tfm4yyWERb9HKsb4gaKIwPw4hcPJu9FE5v/IKLeK8pqxRMQ/XKAxM8+c7FPxDBw8pWoF2jFPWyZMnH8I8589fKG9mfFMNOPA5DB+G2rTrkCzEsWu37tKpk0Mxo32DsMYKozbnsj167JiMGTtBvy8ePIxAEPx35CgH7AihHsitMYgBN8ZI06RJUxWcaY9HuHadUIXC6QVE9JwB/QcIEMJ6oWEuRhZgViiWUN/+gxXyZc47ceKk1PAPFHsGQNYzI19hxQ8MrmM9K8o9qBMIQ0O5cpUU9sxvPH3G+m+ub7bnz1+Q4KDa8g+nEeSrr79VTxjZUfleMbb4GcYb3iIQCBAKSIItBozsefAP5g7Jc/DiGUJWmTDe4S1h36SJk9RzPO3dRAmpW095TsdOXaV5sxYS5YT9HzxwUBVRwx/v3run8C3G1+Ur16Ry1RoCFA7iuTFIMzagkHoNFD2hP9R7N0CA5kPMi9KlfKVjpy7qgWxQv4HMdCqNpr3Znr9wQcfl0GEJsnjJUoVjv/D8Sy5z3bT9s22fiAKCZs1ANkwYeAaeg2++/T/VwoG1GA30wSefSZduPcTAjJo2b6WeDDqewYZAtchp0YABVasWoIsT+eBjY+OVoQMNIRtJgnPiISRjrUdohlBAcJu6E9aJN998W4Udc4wAKPChEHEkg4a4Yl/B1yLUsVBgrTVWMSbH4MEPvRdYtxj4EAIx7koz6KNjYiXC5nJr0TJC01TSFlwzMSF2gmEYgYH9LNZYKFkwEKaxYAQE1dJTyDASXCtECK6GNm3aojhIg3tftHiZDBjosJbNnj1H0qd/zQWfiasUdy2EpcIIpriDS3j7CJAnd8LCj+VunfOeZ86elyJFvOSgLTCTZ4RRM9HxiOEKhVhUc+fOp65nfjMhiWtBmDXEO+TOlVctDPQhwjQwOEMIpCzCYES7d+/hYnHEpV2xUlX5+uuvVakBk332nAOjzflxcb3UlWpEV+IAagTUFNIVQ6RitVtssHiMGOFwyXIcCAGQMix3eAqAXXii+Ph4ad68pXUIxurnV14QYn19/SwrDw0YdzUCgjWdKb+79+gl22wWaYQchBOsgnZIHFZoLy9vS3nCcwUO+fSZc2qxG+W0XloP4fwjLCzcsnSyi4UisoNDEEfAsyvvpCJFmWY8MtZ4jkOHHDhmvhn4YKxBEPhbvrc72ccyxz759FP1tB0/fsKlaWjdUHknc1aXffwAlgUsDQuVISA1ifNchRngeCwgKEoQ1k9Pmal27d4reH+w1BOAfsNNGTD3QCg0z86cZJwnR4OHJsikKQ5FzN6G2A+sdswrAlmxZvvXCJBeTiHV3pa/8fQSYwJu2U4IzAOHJFiLpTm2Zdt25SF2yBSB3CggkyYnVebwEGTOktVa2PGKArVA4EfQQrjr32+AQoyIC3EnPDfEgGBccCe8Z1gFPRExB8SuIZAQeIqyDBH7hhHITmvWrlXBzK6EczyqUxeNH7O3df/76tVravXHUIV3Aj5hoFR1atcVA91kDALPwiqv2WmqB1jKJZZoYkgQEN0
JZQ7L6wgnTOPqtQ/EW40vDtgIMWPGqg/vwvNtYryAjXCuoUaNmkgrp3COsoUFmzEJ8VzMZepFEVNSsmQpIaySNRII0GYn/h2oCR5ce1IEc/1SPqUlopXD+8A+1j/vkmWEGCy8ihhaWEsNkcSA2BS8L8TmwXOJNWT9GThoiM4thDXmVSWnIYY5lzdPfk2nzHWAOyG4GaV/7tz51pgAAuS+vjK/GPN2AnLDmgBf5H1Z9wy8hZgJIwTC/xEClzjlBWICgCtBPHORIsXUOm+uzf2BtaFwwufsCRMiO3QUrO+eCCGZdQaCbzQMb6xxg6YtBjjibFDgQSRUrRZgGU1Rguk7CKWMGBrez3ifUZDgByjuwJV4z8Cg2hoDaK4PaoMED0AaeX6zdnEcY+OkSQ6vYN9+g2T9hodjlhglYkXs/UuCkdVrHEouxgjWHdpB8AsD4eU3XniNx4rqpDCtw4cdhidtbPsPuY81wmTFpHaRj08ZNTbOmOma1ASeiuKP4Q8CWgjyxE4YbIjDPHLsfWtN5DhjEvieoR49eioUEagxSAVPtGjhYk2b7QxJlosXL2kCFCC0GDtQQEyMFt4dnnvpUofhgTE4LOFhggxgxX37OxKuEPs4dpwrfNAgE9yfg7W8caMmAlqDVL4o4uXK+rnIGe7n/Fl+PxEFhMC+smUraG54Oo58+QSU3n/wiS7gWI0NPIqJlDp1Wg3uJUi7Vas20q//AO1vsjbkzpnHcucuWbpcfEqWVpciisDzz71kCWDgdwcNdkxsFjDSYH7yqcM9iGUzKCDI4zfEwg404O9//0qPE/A73xnkNnvOXMVZ2y2rWF2xpPaO7+Ni9WOhA2v88ccOFyHWZgYZNHLkKIVqGSwqQU6NmziwmhzHFWwCL6dPmyF58xSwXJtAZBCGjeBDeyArvn4VLKgNaSoLFiys98Jah8V/gfMdiCUpXaqM0LcQ2PoePeNVGcKFDf7QuAoJhCYtHO8C4VXge0DcM3euPDJndtK0nSwu4HKZhDdu3tagzTy582o8i54soovVNCc0p2XLCMmTK48eIsgxXdr0sm+/I1APfClMGSIQHCgLhMu9SfMIDSA+sP+guooNNA0GZaACGzZukldTpREEHAgrkWFGLJ5YK2H4hoYMTZASJXysBAPgtOk/vCoQWOcqlaua5qp0DnBCOvbu3St4I8ZPmKixLp9+5lB4rca2PxIShitMDKsuBD52wkSHQIirO1fufEJqPwjrkrkHQgCxTnjj7MQCmDdfQcv7wTFqXaROlUamTZuhTREmQ+o6vBcsbKGhYXLOabFHUNq7zyEkNazfUK3cRkklHsdcY+q06aqAvLfvoC7+QAQQsL76yjFfiPMCBgHdvvOxQh+9ihbXgHnGWk8n1JDjO3bt0Qx52tj2H4ssMLet2x7CswhghGcEOxVrW3P9k+9mD4xFEcubt6BLznZw7CzIWKmhWXPmCjA0DCGGgHUOGDTM8hAilGZ8/U0h05Qh+NaKVWuthQmBjXoGZoEhoQJKnhGyOA/jyHCnUGquA/8iyNOdgNHBr3gWO2H9xAI92wlnsR9DgWXcG3iFOYYw1bptpGzY4KqwEJ+TOXNWjfkxbRHK33ori6YdZh/wMDyqx2zzw7TF4gvMhhTMdsIyyvzYvTtpggqgoQQh/xwKqVNPBg16mLkMHlW5chWFHNmhsAgJg4cOFwPLTO4effr0VYGD4yi4QPOw3EN4VkwQKoVHiR9AYIaIx0DAga598KFkyphJ29sVO46hOPmULGXN18uXr0qunHms9Q34FwIrhLGN1MRcm6QsCcMSdC7pQe4ZVFPIAgYB6YWnGgWEINsEpxeYd8qVI5cmMrn90UeSI3suC+6KQcyruLdcdPIvc222xAkAjTWxiQjCJHGBUMqI1wDiA/373//RNQihD9w9sOZ/fudIHELsGJb62bMcawEe1Jw5cul5rMsZM2S0rPrEm8DPv/7GMedmzZptwaJQov0Da1oBzfQtY95AfvWCItKsaQvl0fw+ePCQ5MtX0PJo+1f3t4x5GBry5ysgc5yQarwtKI7MCSguLl5K+pSxYETqzXYG+sfGxkmZ0r7mlopQQBH3RGSNi4rqKCdPndV1AkWahC2GmrdsbaVnBQb0+mtvWB4p+I/JoAVfQZHEiMg4GjZ8tLVOM+8xtGBExUuFpxAC3oRCwnpO7RwgfB2cySiQE5o2j7CMu3jO7QHYrJd4r+2F9WrXDrEgayBXSC1LshSIbHQGDYKi/f6JUzJ12gwBgoYRLDnCGMR3AD2g1xkxUkLq1pd//+ffKsukTp3OQlKwRmGYxMMBNW/RKkkWLBINlClTTuUK4ymlLV5ZgyxBPsEbb5Qn+o8MhYZmJ85XAwTGsvTpXtOYSo6Rrr2DE2aI9445h+cWJZJsfaAUTFwS75Mvf0FLPgqtV1/Spk2n0DUUqFp16ioEi+vCnxLnucb2sZ95Nnq0q4LFftYOjId45f7M9KsVEJhKeOOmkumNTGqZePDpZyrgvvLKq5rliM6FqVInBM8ERcRgfAj7YH3JA4+34ObtO0KmlxdfeEk6de4qH318T70dGdK/Jh06dNLBBN6e9J1k0sK1SV7+iNbtpHp1fyF2hFz49+5/onnlgTkY17f9AyN0sSCQQSeiTaRmauE4mSjQfvPlK6BZf8w5LMS169SV6TNmqWsdC8KFS5e0PgLWO4LepkydqVYe8KUdOnZWrOSbGTPJpi1b5fqNm1LWt5y88UYmzfpx4OARDerFFYvGjaKB8FK0WAmFGSE0eaq0iYCIQoDHok/fAQIjwT1N7YB8efNpxpdPPvtcsbqpUqWWOYnzNVgLCzXvNGGSA7rAxMUtW9KntNY8OOu06hJXQWwGWavufHxPrR+kRUWAB1fpTgTBg0fdumO3pgDlO/FMXB9iIYZZ8p1w7YObhnETOMdYAVP88b37KnASLzM0YYRmTYKJYHlr0DDcEtCw9oGlHTl6nAQEBuv72xctYn4KFCik1i8SCNCvFBrsHhMradOkl1at2lqC2/37n6ilnsXm62//T4DsAJ8jkBOhGzcvChLCLMFwb77xliYquHj5qqxfv0HHCEGqLNDEAJHVw64smn4iWJSgaizjKHkoH1hkDeGuJ7MaMAms9AiDXKffgEGqbOFNQCg3RPIE+sROPCOB6CgvLLwEMjKODQEPrFuvoVSoWFkVIDKmQfQ96WeBBoLRx6vD4gahuOTIkUuhFggIcb3i5eWXUwnpdiFgBMBBgDegLAIFJPMRWVgg3pfnwFMyY+ZsyyqmBxX/uk2Ca9bRIFCyiZGBBpw6lj2yCxFU7YnwlLHAGuLZ5hJcO3matG4TqVBJFj2svNCCRYs1qBdFh2xIBPsyXwsX8XKBt2GlxsNVt14DPQ7kkgxgwNxYhMkWhgeU2DAy7BGjhZX46LETVnYteBm4YjKIkUgDwoL94ouvSM3g2nLOCcfQA9xv5GjlV/CgnTt3y3mSeAQE6zhxt/qbc1D2jSXV7DPb+N79ZKpbJimOYRHGiodgjWLQunWkJmvgGIYLPC2+ZctJ9x6xloWZYyjz8L
W/PvsX7RPGJoQSSz+SgILvj0XZEMqXV7ESgiGCLHkmS5I57r5FuQBqQUwd8XyMAww11AEhQBrh3U5YPo03C4GndZv2HoUi5gSWb+bFxMnTFfpJsTPSGBfIX0DHO15LMqRlfSebehNQZvHQE6TMvBuaMFIDcrt1jXZRXnmehOGjNHCXLEMPPv1cZsycJfBbBCC8Q8Ba6HMgoyTzwMswdux4ef/kaY3Vw8BGzM3Z8xelcMHCqlicv3BZDQDp0mVQa//nX/5dExWQfalXfF+FX7K2MI7599abrLXdFeKMQsFax5g0vNfeb6yBBQsVkarVamj/OaAt/9SYlwwZXpchCSOsGB2CyOvWq6/ogoGDBqtBDWEQL8mwoQnaP0BOSHXNmoY3gvckBgsPGokxiOMhuJ2g3xOnzioPZG0gcQuEAMpYL1e+omaoQ6F1J83OV81fkx3E9eqraxyxDIy9/Hnz6xp1+85dWbVmrWRIl0E9ciT+wPBI2mfWErOGxsb11nsFBtVUXsG98KoQU/h6howaA0QCAmBTmd7KbGW6tD8ThTWzZc2hAiYeTRIZMP7JmgnpfCnurcJxvwGDVf7B88CYRlnF440hE+jTK6leVWjguQuXdBwwnvDEkcEKQqGFn4FaAMJbpWp1lyxnKC+MC+DRBKSbTIP0d968+TRe7/CRY3ot1kiegTpC8DlqI6VNm15hxfsPHtFspcT5oGAhOwC9JU33CmfsE5BFguAZA8hkGPaMJ0pv4PwPoyLJEHgXvFW0M/GaNCGOEBmE9RAPmIGfbt+xS9PSItPxPIZQijESkqDHTm3btFVvLbFKjCGC+A0xhwn+B/GAvIYBD/6NYSYoMFjRMngfSedtj2NB2QWdQZ0o5spbb76lvM/Ez8HLUAiRExh/zD9ThBQDE6EH8C88XXaDCc/FOPPyKqnrJ/E2dsLAVSB/QVXkiSu0eyLt7f7X//7VCghuULRQNGTw/QT7Acnit4Fd0Yngxnfu2uXiDgSvj8CDJR73IxYAMNNYubBuoFXi4rW7/kiTeuDAQbVGgivGuoUlGOs5VZFZLLkv2rvJKOLpIxLchoBmCOEM+M7775+0FjZg0yy66zdsFNIJkrqPyr0XL19WGBPMc/v2HQIGEusobm3c5Wzpizt37qhF49KlKwqdwNOANYIJyz+zsPMMKGm8Cxah5IiFF4s/Fl3ORbsmaPXqtWuK49R3v35daGfuhcscLCjPaIhvhkUVz5UhMKu0pc9J+8dzApfCTWms36YtW9y6HGdBAybA96ZvjPWZNlgf6DOCUFFiYCiMA7DI3A+XNM8KRtJeyXrXrj3aHyZQd+7cBZoikAxRMBS7h8o8EzEW9I2B32ApxgJEnATvZVzetAcHynvBoBmruGVpy3Px3fg2xCnwj785ZtIeMz63b9uh3jCwvDAtO1zKPA/BtQQDE9cElMMTg4GRURUceCGEAoJFkTnBWLRnPMJahdBkJyx+7aM6KZ597dp1VkYQexveD6/jlSsObwvHWODWrt+g9+E8k1banEd/mWrczD/6kO9kCBwv8TAXLlwQUlG7E/EcGzZsEvdMUbSjjxESYfAE1JLJjjECrO6naM7cBer2Zlz8598PgQg7d+3WcWG8PVyHb8r4gy8wR7gP4w9e5akqM9+Y+USclfEIch08kggYXAsesG37dsvaa56Xe+Fl49rH339fd/PdOY/xjpHGTnxfvgswplu3bmnCA6Bu9j62twcyMWz4qCTCsGkDfAehGT7lTrzLps2bdW6YMUwbPBvMOTxcZMUx8AmOwUcZA2SGw9JnvCDMVwLjWezpC/rMEO9PW8bLsePHLYiVOe6+BSLJ96AfOIcxwHcEEmk8TeYc7jNl2rsWbyQVKAs68WWeCB6GZ5P5Ay/j2+3es0d5G2sOXsZbt29r9jWe2fQL34k5RZzi/yUTNwRvZ46SMAOlm3WD56e/DL9i/QJGynVNWmw8HIwHng1LOLyMZ2HtIzEJ34nrsv4YoRwewFoHrwXawxjGIwC/Z42jHdeDlzLOzHdy7xPmL3wG2DBEO56X8cY98Rjw7Cj/eNAJFOZ7sEaMHT/JWrPpK74RvJV3sPNz5gBxSgjI9AfPRGD8+fMX5f0TJ/RdzXPxzlybNskRfcJcJDsaAinjlGeCPzvWqL9baxQ8+d69B3pfxgrvZBQQro/MgPEIgRTC2AKvOHfuguAJA46HN4H5wHu4E2sH3jT6j+ci3Twp4ZnjhhDM4aXMHeA+Ki/s3qPXpQ+AcDJW6HP+GSGY+Q/c0owdvBHUgQIehZfR0/MQhE5cpJ1PMS7hK9TssfMbDDWMO4i+ZHxdunxZ+S9yE2s44xDZgT5gvWN8QXgF+AbEe8HTeW4MM/Y0x7SDJ5BuGp7O2LHXI9ILEXt29rzOLbuQDmyQPuc57F4azqG4KePETkCDyRbJe9K/7sSc4nkxvJj+BIWBckw/MLdNv5tz+W0UCtAs9Ad9aI+bYf3gfvbUwuZ8nn3VqtVy+sxDXmiOkTKbecZYMWn+zTHGH+MW2Yx1Aw/kn5F+tQLyvx+HB3MAACAASURBVNxpWEbtbj3eFcZGhgv3AMn/5X74o9+NnPB4Mf5IQunDa2EWcfMsQHFYEN2JbFAGJ+5+7Jf8Pn7ilKUYm/MR9LAi/1wCyrPRmabw5577R7aHSUd3j1Vrv4kh+COf57e+N95aFFw8RAg+jyIUu4jWkZr9zJNH7lHnPq3HEBhJujBsxGgLesuzEmdEYUTS16bQk+kBhE9iUNwJAxwxfyn0+/QAHrSY2N5JjEK/z90f3gWUCd4qOxG/gRJiRx9w/OrV6z+reK39mp7+RoHEC+ZOQLk9Zd9zb2f/jfGGrG0p9HT2QIoC8ojvQqYDv3KVFEsK1IYAw67RPVxSFz7i9JRDT6AHcGMSGAcMyFNQ6BO4xWNdgoBrMtqwSBOLQEpeoHD79rvi+LkYXizcq8BLsCL/GgJ6B1QKbLadwJDCkKmxA7zBbvGzt3P/G0sWbnjgch86IVnubZ7235u3brdieJ72Z/01z8c3pgCWSbv5U9cCjrFx81bL+vdT7Z/24+C/jxx1DUTlmYnHISvc/4qi9TR8Bzx9wHuJGRg6ZJjCOvG4YgU31uSn4Tn/l58BKz3JKkilDvTOjpD4vd+bBCbERpLsZ8TIUUJ2vvg+/V0MATyTPnO37vLCCy8lSabzc58ZSBYwfZQfd88PiRxefukVadggPFlPsPv9vvzqK4Xbs0buP5h0nXZvn/L79++BFAXkJ/ocNyDZYQjEIpsN8JEU+v16AGEeKBPZq+wpKn+/J3C9ExZpMlkQGwQ21hPhFUFBGTBgoJVZx1O7x9lHrAlVag18wJwD3pvq6qQVpgq9Cbw0x5Pbgo2lL/HOEAydQik9kNIDKT1geoBieTHdewqJTlasSJpAwbRL2T75HgCmTIFbii+SGcodLvTk7/joKwJzBL6E4ZX1jmr37sQzU0iQ7H5kPPs1RC0U0rcDW3In1i3uQSyYyXbq3sb9NzBB6m1xHoHmKfT09UCKAvL0fZOUJ0rpgZQeSOmBlB5I6YGUHkjpg
)

ChemProt RE works well with `ner_chemprot_clinical` to find relationships between the following entity types:

`CHEMICAL`: chemical entity mention type; `GENE-Y`: gene/protein mention type that can be normalized or associated to a biological database identifier; `GENE-N`: gene/protein mention type that cannot be normalized to a database identifier.
###Code
ner_tagger = NerDLModel()\
    .pretrained("ner_chemprot_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

ner_converter = NerConverterInternal() \
    .setInputCols(["sentences", "tokens", "ner_tags"]) \
    .setOutputCol("ner_chunk")

chemprot_re_model = RelationExtractionModel()\
    .pretrained("re_chemprot_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)

chemprot_pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    ner_tagger,
    ner_converter,
    dependency_parser,
    chemprot_re_model
])

empty_data = spark.createDataFrame([[""]]).toDF("text")

chemprot_model = chemprot_pipeline.fit(empty_data)

loaded_lmodel = LightPipeline(chemprot_model)

text='''
In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels (Kir6.2/SUR1, Kir6.2/SUR2A, and Kir6.2/SUR2B) reconstituted in COS-1 cells, and compared them to another meglitinide-related compound, nateglinide. Patch-clamp analysis using inside-out recording configuration showed that mitiglinide inhibits the Kir6.2/SUR1 channel currents in a dose-dependent manner (IC50 value, 100 nM) but does not significantly inhibit either Kir6.2/SUR2A or Kir6.2/SUR2B channel currents even at high doses (more than 10 microM). Nateglinide inhibits Kir6.2/SUR1 and Kir6.2/SUR2B channels at 100 nM, and inhibits Kir6.2/SUR2A channels at high concentrations (1 microM). Binding experiments on mitiglinide, nateglinide, and repaglinide to SUR1 expressed in COS-1 cells revealed that they inhibit the binding of [3H]glibenclamide to SUR1 (IC50 values: mitiglinide, 280 nM; nateglinide, 8 microM; repaglinide, 1.6 microM), suggesting that they all share a glibenclamide binding site. The insulin responses to glucose, mitiglinide, tolbutamide, and glibenclamide in MIN6 cells after chronic mitiglinide, nateglinide, or repaglinide treatment were comparable to those after chronic tolbutamide and glibenclamide treatment. These results indicate that, similar to the sulfonylureas, mitiglinide is highly specific to the Kir6.2/SUR1 complex, i.e., the pancreatic beta-cell K(ATP) channel, and suggest that mitiglinide may be a clinically useful anti-diabetic drug.
'''

annotations = loaded_lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df[rel_df.entity1!=rel_df.entity2]
###Output
_____no_output_____
###Markdown
Train a Relation Extraction Model
###Code
!wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/i2b2_clinical_rel_dataset.csv

# if you need to customize the DL architecture (more layers, more features etc.)
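# NOTE (assumption, not official documentation): tf_graph, imported below,
# generates a custom TensorFlow graph for the RE model. In the build_params
# used here, input_dim is understood as the flattened feature size, output_dim
# as the number of relation classes, and hidden_layers / hidden_act as the
# sizes and activation of the fully connected head.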
from sparknlp_jsl.training import tf_graph %tensorflow_version 1.x tf_graph.build("relation_extraction", build_params={"input_dim": 6000, "output_dim": 3, 'batch_norm':1, "hidden_layers": [300, 200], "hidden_act": "relu", 'hidden_act_l2':1}, model_location=".", model_filename="re_with_BN") tf_graph.print_model_params("relation_extraction") data = spark.read.option("header","true").format("csv").load("i2b2_clinical_rel_dataset.csv") data = data.select( 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel','dataset') data.show(10) # you only need these columns>> 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel' # ('dataset' column is optional) data.groupby('dataset').count().show() #Annotation structure annotationType = T.StructType([ T.StructField('annotatorType', T.StringType(), False), T.StructField('begin', T.IntegerType(), False), T.StructField('end', T.IntegerType(), False), T.StructField('result', T.StringType(), False), T.StructField('metadata', T.MapType(T.StringType(), T.StringType()), False), T.StructField('embeddings', T.ArrayType(T.FloatType()), False) ]) #UDF function to convert train data to names entitities @F.udf(T.ArrayType(annotationType)) def createTrainAnnotations(begin1, end1, begin2, end2, chunk1, chunk2, label1, label2): entity1 = sparknlp.annotation.Annotation("chunk", begin1, end1, chunk1, {'entity': label1.upper(), 'sentence': '0'}, []) entity2 = sparknlp.annotation.Annotation("chunk", begin2, end2, chunk2, {'entity': label2.upper(), 'sentence': '0'}, []) entity1.annotatorType = "chunk" entity2.annotatorType = "chunk" return [entity1, entity2] #list of valid relations rels = ["TrIP", "TrAP", "TeCP", "TrNAP", "TrCP", "PIP", "TrWP", "TeRP"] #a query to select list of valid relations valid_rel_query = "(" + " OR ".join(["rel = '{}'".format(rel) for rel in rels]) + ")" data = data\ .withColumn("begin1i", F.expr("cast(firstCharEnt1 AS Int)"))\ .withColumn("end1i", F.expr("cast(lastCharEnt1 AS Int)"))\ .withColumn("begin2i", F.expr("cast(firstCharEnt2 AS Int)"))\ .withColumn("end2i", F.expr("cast(lastCharEnt2 AS Int)"))\ .where("begin1i IS NOT NULL")\ .where("end1i IS NOT NULL")\ .where("begin2i IS NOT NULL")\ .where("end2i IS NOT NULL")\ .where(valid_rel_query)\ .withColumn( "train_ner_chunks", createTrainAnnotations( "begin1i", "end1i", "begin2i", "end2i", "chunk1", "chunk2", "label1", "label2" ).alias("train_ner_chunks", metadata={'annotatorType': "chunk"})) train_data = data.where("dataset='train'") test_data = data.where("dataset='test'") !wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/generic_classifier_graph/RE_in1200D_out20.pb documenter = sparknlp.DocumentAssembler()\ .setInputCol("sentence")\ .setOutputCol("sentences") tokenizer = sparknlp.annotators.Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens")\ words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") dependency_parser = sparknlp.annotators.DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["sentences", "pos_tags", "tokens"])\ .setOutputCol("dependencies") # set training params and upload model 
graph (see ../Healthcare/8.Generic_Classifier.ipynb) reApproach = sparknlp_jsl.annotator.RelationExtractionApproach()\ .setInputCols(["embeddings", "pos_tags", "train_ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setLabelColumn("rel")\ .setEpochsNumber(50)\ .setBatchSize(200)\ .setDropout(0.5)\ .setLearningRate(0.001)\ .setModelFile("/content/RE_in1200D_out20.pb")\ .setFixImbalance(True)\ .setFromEntity("begin1i", "end1i", "label1")\ .setToEntity("begin2i", "end2i", "label2")\ .setOutputLogsPath('/content') finisher = sparknlp.Finisher()\ .setInputCols(["relations"])\ .setOutputCols(["relations_out"])\ .setCleanAnnotations(False)\ .setValueSplitSymbol(",")\ .setAnnotationSplitSymbol(",")\ .setOutputAsArray(False) train_pipeline = Pipeline(stages=[ documenter, tokenizer, words_embedder, pos_tagger, dependency_parser, reApproach, finisher ]) %time rel_model = train_pipeline.fit(train_data) rel_model.stages[-2] rel_model.stages[-2].write().overwrite().save('custom_RE_model') result = rel_model.transform(test_data) recall = result\ .groupBy("rel")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("recall"))\ .select( F.col("rel").alias("relation"), F.format_number("recall", 2).alias("recall"))\ .show() performance = result\ .where("relations_out <> ''")\ .groupBy("relations_out")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("precision"))\ .select( F.col("relations_out").alias("relation"), F.format_number("precision", 2).alias("precision"))\ .show() result_df = result.select(F.explode(F.arrays_zip('relations.result', 'relations.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("relation"), F.expr("cols['1']['entity1']").alias("entity1"), F.expr("cols['1']['entity1_begin']").alias("entity1_begin"), F.expr("cols['1']['entity1_end']").alias("entity1_end"), F.expr("cols['1']['chunk1']").alias("chunk1"), F.expr("cols['1']['entity2']").alias("entity2"), F.expr("cols['1']['entity2_begin']").alias("entity2_begin"), F.expr("cols['1']['entity2_end']").alias("entity2_end"), F.expr("cols['1']['chunk2']").alias("chunk2"), F.expr("cols['1']['confidence']").alias("confidence") ) result_df.show(50, truncate=100) ###Output _____no_output_____ ###Markdown Load trained model from disk ###Code import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") clinical_ner_tagger = sparknlp.annotators.NerDLModel()\ .load("ner_clinical")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") loaded_re_Model = RelationExtractionModel()\ .load("custom_RE_model")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"]) \ .setOutputCol("relations")\ .setRelationPairs(["problem-test", "problem-treatment"])\ .setPredictionThreshold(0.9)\ .setMaxSyntacticDistance(4) trained_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, 
clinical_ner_tagger, ner_chunker, dependency_parser, loaded_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") loaded_re_model = trained_pipeline.fit(empty_data) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ loaded_re_model_light = LightPipeline(loaded_re_model) annotations = loaded_re_model_light.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.relation!="O"] ###Output re_clinical download started this may take some time. Approximate size to download 6 MB [OK!] ###Markdown ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/10.Clinical_Relation_Extraction.ipynb) 10. 
Clinical Relation Extraction Model

Colab Setup
###Code
import json

from google.colab import files

license_keys = files.upload()

with open(list(license_keys.keys())[0]) as f:
    license_keys = json.load(f)

%%capture
for k,v in license_keys.items():
    %set_env $k=$v

!wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/jsl_colab_setup.sh
!bash jsl_colab_setup.sh

# Install sparknlp-display
! pip install spark-nlp-display

import json
import os

import sparknlp
import sparknlp_jsl

from pyspark.ml import Pipeline
from pyspark.sql import SparkSession

from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *

params = {"spark.driver.memory":"16G",
"spark.kryoserializer.buffer.max":"2000M",
"spark.driver.maxResultSize":"2000M"}

spark = sparknlp_jsl.start(license_keys['SECRET'],params=params)

print ("Spark NLP Version :", sparknlp.version())
print ("Spark NLP_JSL Version :", sparknlp_jsl.version())
###Output
Spark NLP Version : 3.0.1
Spark NLP_JSL Version : 3.0.0
###Markdown
Posology Relation Extraction

This is a demonstration of using Spark NLP for extracting posology relations. The following relations are supported:

DRUG-DOSAGE
DRUG-FREQUENCY
DRUG-ADE (Adverse Drug Events)
DRUG-FORM
DRUG-ROUTE
DRUG-DURATION
DRUG-REASON
DRUG-STRENGTH

The model has been validated against the posology dataset described in (Magge, Scotch, & Gonzalez-Hernandez, 2018).

| Relation | Recall | Precision | F1 | F1 (Magge, Scotch, & Gonzalez-Hernandez, 2018) |
| --- | --- | --- | --- | --- |
| DRUG-ADE | 0.66 | 1.00 | **0.80** | 0.76 |
| DRUG-DOSAGE | 0.89 | 1.00 | **0.94** | 0.91 |
| DRUG-DURATION | 0.75 | 1.00 | **0.85** | 0.92 |
| DRUG-FORM | 0.88 | 1.00 | **0.94** | 0.95* |
| DRUG-FREQUENCY | 0.79 | 1.00 | **0.88** | 0.90 |
| DRUG-REASON | 0.60 | 1.00 | **0.75** | 0.70 |
| DRUG-ROUTE | 0.79 | 1.00 | **0.88** | 0.95* |
| DRUG-STRENGTH | 0.95 | 1.00 | **0.98** | 0.97 |

*Magge, Scotch, Gonzalez-Hernandez (2018) collapsed DRUG-FORM and DRUG-ROUTE into a single relation.
###Code
import functools
import numpy as np
from scipy import spatial
import pyspark.sql.functions as F
import pyspark.sql.types as T
from sparknlp.base import *
###Output
_____no_output_____
###Markdown
**Build a pipeline using Spark NLP pretrained models and the relation extraction model optimized for posology.** The precision of the RE model is controlled by "setMaxSyntacticDistance(4)", which sets the maximum syntactic distance between named entities to 4. A larger value will improve recall at the expense of lower precision. A value of 4 leads to perfect precision (i.e. the model doesn't produce any false positives) and reasonably good recall.
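To make this trade-off concrete, here is a minimal sketch (illustration only; `strict_re` and `loose_re` are hypothetical names, and the column names assume the pipeline built below): two copies of the pretrained posology RE model configured with different syntactic-distance caps, which could be compared on the same documents.
###Code
# Sketch only: a stricter and a looser variant of the same pretrained model.
strict_re = RelationExtractionModel()\
    .pretrained("posology_re")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(2) # fewer candidate entity pairs -> higher precision

loose_re = RelationExtractionModel()\
    .pretrained("posology_re")\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(8) # more candidate entity pairs -> higher recall
###Markdown
Either variant can stand in for `reModel` in the pipeline below; only the distance cap differs.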
###Code documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") tokenizer = sparknlp.annotators.Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens") words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") ner_tagger = MedicalNerModel()\ .pretrained("ner_posology", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_chunker = NerConverterInternal()\ .setInputCols(["sentences", "tokens", "ner_tags"])\ .setOutputCol("ner_chunks") dependency_parser = DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["sentences", "pos_tags", "tokens"])\ .setOutputCol("dependencies") reModel = RelationExtractionModel()\ .pretrained("posology_re")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4) pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_chunker, dependency_parser, reModel ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) ###Output embeddings_clinical download started this may take some time. Approximate size to download 1.6 GB [OK!] pos_clinical download started this may take some time. Approximate size to download 1.5 MB [OK!] ner_posology download started this may take some time. Approximate size to download 13.8 MB [OK!] dependency_conllu download started this may take some time. Approximate size to download 16.7 MB [OK!] ###Markdown **Create a light pipeline for annotating free text** ###Code text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day. 
""" lmodel = LightPipeline(model) results = lmodel.fullAnnotate(text) results[0].keys() results[0]['ner_chunks'] results[0]['relations'] ###Output _____no_output_____ ###Markdown **Show extracted relations** ###Code for rel in results[0]["relations"]: print("{}({}={} - {}={})".format( rel.result, rel.metadata['entity1'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['chunk2'] )) import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df rel_df = get_relations_df (results) rel_df text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . 
""" annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown **Visualization of Extracted Relations** ###Code text = """ The patient was prescribed 1 unit of Advil for 5 days after meals. The patient was also given 1 unit of Metformin daily. He was seen by the endocrinology service and she was discharged on 40 units of insulin glargine , 12 units of insulin lispro with meals , and metformin two times a day. """ lmodel = LightPipeline(model) results = lmodel.fullAnnotate(text) from sparknlp_display import RelationExtractionVisualizer vis = RelationExtractionVisualizer() vis.display(results[0], 'relations', show_relations=True) # default show_relations: True ###Output /usr/local/lib/python3.7/dist-packages/sparknlp_display/relation_extraction.py:354: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray relation_coordinates = np.array(relation_coordinates) ###Markdown Clinical RE **The set of relations defined in the 2010 i2b2 relation challenge**TrIP: A certain treatment has improved or cured a medical problem (eg, ‘infection resolved with antibiotic course’)TrWP: A patient's medical problem has deteriorated or worsened because of or in spite of a treatment being administered (eg, ‘the tumor was growing despite the drain’)TrCP: A treatment caused a medical problem (eg, ‘penicillin causes a rash’)TrAP: A treatment administered for a medical problem (eg, ‘Dexamphetamine for narcolepsy’)TrNAP: The administration of a treatment was avoided because of a medical problem (eg, ‘Ralafen which is contra-indicated because of ulcers’)TeRP: A test has revealed some medical problem (eg, ‘an echocardiogram revealed a pericardial effusion’)TeCP: A test was performed to investigate a medical problem (eg, ‘chest x-ray done to rule out pneumonia’)PIP: Two problems are related to each other (eg, ‘Azotemia presumed secondary to sepsis’) ###Code clinical_ner_tagger = MedicalNerModel()\ .pretrained("ner_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") clinical_re_Model = RelationExtractionModel()\ .pretrained("re_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated) pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . 
She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge .
"""

lmodel = LightPipeline(model)

annotations = lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df[rel_df.relation!="O"]
###Output
_____no_output_____
###Markdown
Clinical Temporal Events RE

Temporal relations, or temporal links (denoted by the TLINK tag), indicate whether and how two EVENTs, two TIMEs, or an EVENT and a TIME are related to each other in the clinical timeline. There are three types of relations; below are some examples, with square brackets indicating the EVENT and TIME connected by a temporal link:

**`BEFORE`**

The patient was given stress dose steroids prior to his surgery. ([stress dose steroids] `BEFORE` [his surgery])

The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `BEFORE` [admission])

His nasogastric tube was discontinued on 05-26-98. ([His nasogastric] `BEFORE` [05-26-98])

**`AFTER`**

Before admission, he had another serious concussion. ([admission] `AFTER` [another serious concussion])

On postoperative day No 1, he was started on Percocet. ([Percocet] `AFTER` [postoperative day No 1])

**`OVERLAP`**

She denies any fevers or chills. ([fevers] `OVERLAP` [chills])

The patient's serum creatinine on discharge date, 2012-05-06, was 1.9. ([discharge date] `OVERLAP` [2012-05-06])

His preoperative workup was completed and included a normal white count ([a normal white count] `OVERLAP` [His preoperative workup])

The patient had an undocumented history of possible atrial fibrillation prior to admission. ([possible atrial fibrillation] `OVERLAP` [admission])

| Relation | Recall | Precision | F1 |
| --- | --- | --- | --- |
| OVERLAP | 0.81 | 0.73 | **0.77** |
| BEFORE | 0.85 | 0.88 | **0.86** |
| AFTER | 0.38 | 0.46 | **0.43** |

This RE model works with the `ner_events_clinical` NER model and expects the following entities as inputs:

[`OCCURRENCE`, `DATE`, `DURATION`, `EVIDENTIAL`, `TEST`, `PROBLEM`, `TREATMENT`, `CLINICAL_DEPT`, `FREQUENCY`, `TIME`]
###Code
events_ner_tagger = MedicalNerModel()\
    .pretrained("ner_events_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

clinical_re_Model = RelationExtractionModel()\
    .pretrained("re_temporal_events_clinical", "en", 'clinical/models')\
    .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\
    .setOutputCol("relations")\
    .setMaxSyntacticDistance(4)\
    .setPredictionThreshold(0.9)

pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    events_ner_tagger,
    ner_chunker,
    dependency_parser,
    clinical_re_Model
])

empty_data = spark.createDataFrame([[""]]).toDF("text")

model = pipeline.fit(empty_data)

events_ner_tagger.getClasses()

text ="She is diagnosed as cancer in 1991. Then she was admitted to Mayo Clinic in May 2000 and discharged in October 2001"

lmodel = LightPipeline(model)

annotations = lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df = rel_df[(rel_df.relation!="O")]

rel_df

text ="On 9–28-92, the patient will return for chemotherapy and she will follow up with her primary doctor, for PT and Coumadin dosing on Monday."

annotations = lmodel.fullAnnotate(text)

rel_df = get_relations_df (annotations)

rel_df.confidence = rel_df.confidence.astype(float)

rel_df = rel_df[(rel_df.relation!="O")]

rel_df[(rel_df.relation!="O")&(rel_df.entity1!=rel_df.entity2)]
###Output
_____no_output_____
###Markdown
**Admission-Discharge Events Extraction**

We can also extract `ADMISSION` and `DISCHARGE` entities in addition to the `ner_events_clinical` model entities, using the `ner_events_admission_clinical` NER model, which expects the following entities as inputs:

[`EVIDENTIAL`, `OCCURRENCE`, `TREATMENT`, `DATE`, `ADMISSION`, `TIME`, `FREQUENCY`, `CLINICAL_DEPT`, `DURATION`, `PROBLEM`, `TEST`, `DISCHARGE`]
###Code
events_admission_ner_tagger = MedicalNerModel()\
    .pretrained("ner_events_admission_clinical", "en", "clinical/models")\
    .setInputCols("sentences", "tokens", "embeddings")\
    .setOutputCol("ner_tags")

pipeline = Pipeline(stages=[
    documenter,
    sentencer,
    tokenizer,
    words_embedder,
    pos_tagger,
    events_admission_ner_tagger,
    ner_chunker,
    dependency_parser,
    clinical_re_Model
])

empty_data = spark.createDataFrame([[""]]).toDF("text")

model = pipeline.fit(empty_data)

events_admission_ner_tagger.getClasses()

text ="""She is admitted to The John Hopkins Hospital on Monday with a history of gestational diabetes mellitus diagnosed. She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine, 12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge.
""" lmodel = LightPipeline(model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df[(rel_df.relation!="O")] ###Output _____no_output_____ ###Markdown **Test-Result-Date Extraction**There is another Relation Extraction model to link clinical tests to test results and dates to clinical entities: `re_test_result_date` ###Code trd_ner_tagger = MedicalNerModel()\ .pretrained("jsl_ner_wip_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") trd_re_Model = RelationExtractionModel()\ .pretrained("re_test_result_date", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations") pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, trd_ner_tagger, ner_chunker, dependency_parser, trd_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) text = 'Hospitalized with pneumonia on 11 June 2019, confirmed by a positive PCR of any specimen, evidenced by SPO2 </= 93%.' lmodel = LightPipeline(model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df[(rel_df.relation!="O")] ###Output _____no_output_____ ###Markdown Human Phenotype - Gene RE https://github.com/lasigeBioTM/PGRHuman phenotype-gene relations are fundamental to fully understand the origin of some phenotypic abnormalities and their associated diseases. Biomedical literature is the most comprehensive source of these relations, however, we need Relation Extraction tools to automatically recognize them. We present the Phenotype-Gene Relations (PGR) model, trained on a silver standard corpus of human phenotype and gene annotations and their relations. It extracts 2 label: `True` or `False` ###Code pgr_ner_tagger = MedicalNerModel()\ .pretrained("ner_human_phenotype_gene_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") pgr_re_Model = RelationExtractionModel()\ .pretrained("re_human_phenotype_gene_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setRelationPairs(["hp-gene",'gene-hp'])\ .setMaxSyntacticDistance(4)\ pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, pgr_ner_tagger, ner_chunker, dependency_parser, pgr_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) text = "She has a retinal degeneration, hearing loss and renal failure, short stature, \ Mutations in the SH3PXD2B gene coding for the Tks4 protein are responsible for the autosomal recessive." lmodel = LightPipeline(model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!=0)] rel_df ###Output _____no_output_____ ###Markdown Drug-Drug Interaction RE In clinical application, two or more drugs are often used in combination to achieve conducive results, such as synergistic effect, increasing therapeutic effect and reducing or delaying the occurrence ofdrug resistance. However, there is a potential for harmful drug-druginteractions (DDIs) to occur when two or more drugs are taken at thesame time or at certain interval, which can reduce or invalidate theefficacy of drugs, and increase toxicity or even cause death. 
Therefore,in order to prevent harmful drug-drug interaction (DDI), medical staffoften spend much time in reviewing the relevant drug alert literatureand drug knowledge bases. **ref**: *Drug-drug interaction extraction via hybrid neural networks on biomedical literaturehttps://www-sciencedirect-com.ezproxy.leidenuniv.nl:2443/science/article/pii/S1532046420300605* ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAACgIAAAHOCAYAAABQRIDbAAAgAElEQVR4AezdB5gsRb03YDJIkIyCIMGLwkUuiIICSlIJl4yAIgICIoIEUST5ITkICF6ikiQjUUU4ZJEkOUtSMoJkyflQ3/Mf7bZndkL37Oye2d23n2ed0NXVVW9X9wzO71RPlMbA8u6776Ybb7wxHXvssemCCy5IDz74YPrggw9GfM/feeed9MADD6RLLrkkHXfccem0005LV111Vfrb3/6Wxo8fP+L7pwMECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0Flgos5FlCBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgT6VUAQsF+PjHYRIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIESAoKAJZAUIUCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAEC/SogCNivR0a7CBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBACQFBwBJIihAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgX4VEATs1yOjXQQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoISAIGAJJEUIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEC/CggC9uuR0S4CBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIFBCQBCwBJIiBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgXwUEAfv1yGgXAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAoISAIWAJJEQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0K8CgoD9emS0iwABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIlBAQBCyBpAgBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEOhXAUHAfj0y2kWAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBEoICAKWQFKEAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAj0q4AgYL8eGe0iQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIlBAQBSyApQoAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIE+lVAELBfj4x2ESBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBEgKCgCWQFCFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAv0qMGaCgG+88Ua64IIL0oEHHphOOOGEfj0ew96um2++OZ144onpF7/4RbrhhhvSe++9N+xtsEMCBPpP4IknnkjHHnts7Zp522239V8DtYgAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQCAXGNVBwH/+85/pV7/6VVpttdXShz70oTTRRBPV/j796U/nAKPlSYT4DjnkkHTXXXeV6tI111yTFl100dwks5lnnnnSM888U6vjzjvvrNUZYcHRtlT1Gm3915+hERjp4+qee+5Ju++++4Brw+GHHz40YGolQKClwEi/nrTsmBUECFQSuPZv/0g/OPv6tn9Pv/x6pTqHu/Bv73y0bfujf6+9/e5wN8v+CBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQKjTmDEBAHffvvtdNlll6Vdd901bbTRRmnFFVdMyy67bPrGN76Rdtxxx3TEEUcMCMGdf/75A4JuEXgbbUHAffbZJ+/nZJNNlq699tq2A/X0009Pk08+eW2b6aefPt82CwMed9xxtVkTs9fx+Jvf/KZtnSNpZVWvkdQ3bZ1wAqNhXK299toDrgdx/o+UIOC7775bm9k0AlTt/u6///70zjvvVBosQ1l31pAIYrZrd6yLgHvVpWzb2+27bMi8atuUby4wGq4nzXvW/t04L88666z0k5/8pPb97ktf+lKK69L2229fe7/qefu3v/2tFm5ef/31U9T1ta99Le28884Dvi+2b9V/1j777LPp+OOPT2uttVZaaqml0lVXXfWflSWfxfl43nnn1dr1rW99q/ZddvXVV0/bbLNNOu2001J837UQKAr88Jw/p8V/+pu2fw880/qz4d33x6er//p02uMPt6QVf35B+tJ+56aL//JEcRctnz/0/CvpN7c8VPnvyX++Vlfn+r+6tG37o38vvG7s16F5QYAAAQIECBAgQIAAAQIECBAgQIAAAQIECBDoQqDvg4Dvv/9+Ouigg9I000zTNKBSDKvF849+9KNp4403rs1q9/jjj6eTTjqp9hfhv6zsaAsCzjXXXHnfoo+77bZby6EQAZhsdsQlllgixQ/SxxxzTJp66qnzOi688MK05JJL5q+jzg022KBlnSNtRRWvkdY37Z1wAqNhXEWoJa6Zcc3NrpfxOFKCgE8//XRdu4t9aHw+ySSTpHnnnTd99atfTVtvvXUtgNPu1uhDWXeM2pdffjlNPPHEHdsf1+eqS5W2NzplrxdeeOGqu1V+EAKj4XpSpftx7h1wwAFp9tlnb3sOzDLLLLVAYKe633rrrbT55pu3PafWXHPNUsHav/71r7Vr4tJLL53iupGdE/FY5doY32f333//NMccc9TVUawvns8888zp7LPP7tRF68eQQDEIuOwB56UTrn9gwN+Lb9SH6F596910wV2P1Wbhi+BfY5Dw0CvKzSAeIcDGbcu8vuy+J+uOULSlsd1bnXFtXd2CgHVkXhAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEuhLo6yBgzED0uc99bsAPptNNN11aZpll0re//e20/PLLp5l
mmmlAmdtuu60OJGYQzH5sHW1BwIUWWijvW/QxbofcallhhRXysuPGjcuLPfjgg2nllVdOe++9d+29lVZaKS8XdbYLF+aVjJAnVbxGSJc0sw8ERtO4ilm0sutlPFYJu0zIQxGz5a2zzjq1mboaAzvzzz9/+uxnP5si0BafIcX+Zc8jGBjXz2azjg1l3WH2+uuvp1VXXbXW9iysnbUr+rL44ounuC7feuutlYmztn/hC18YEIzKXMKm8e8jH/lI7iQIWJl9UBuMputJJ4j4Bwlx3mbjfcopp6x9x9t2223Teuutl4rjMMpMOumk6eSTT25ZbcyoF6G9rL44fzbbbLN09NFHp6hziimmyNd98pOfTC+++GLTumJ2vgUXXDAvm9VXfCx7bYw2xSyC2bbRh0UXXTStscYaad11102zzjprvi7KxPozzjijabu8OfYEikHAlQ69oCPAzy69I31hr7PqQnaN4b3hDgI2a3QEA4vtEgRspuQ9AgQIECBAgAABAgQIECBAgAABAgQIECBAgEA1gb4NAh522GH57WuzH04jFHjHHXekDz74oK6XEdqIWazih9Os7FgKAl5++eVpzjnnrM1UEz8qv/Za/e24MqxiuGeqqaZqe/u522+/Pc0333y10MhXvvKV9NRTT2XV9OXjdtttl2abbbZ05JFHdmxfWa+OFY2gAlV8RlC3+qqpI2VclRkLxWtFXFPLhl366YBE8Cf7PIjHSy+9tK55//jHP9I111xTu+Vo8bMjyn7xi19Mr776al354ouhrDv2c+KJJ9a1/Qc/+EFx94N6/s1vfrOu7kaXYuUREM8MhzoIWGZcFts2Up+X7edIuZ704jj8+Mc/zsdZnItPPlk/k9grr7yStthii7xMjMmYPfPMM89suvutttqqrmxjoO7qq6+uC8TGP4JotnznO9/J65l22mnT5z//+QGzJR9xxBHNNh3wXvxjiuxcin+88thjj9WViRlBYwbDrEw8RoDx5ptvrivnxdgUqBoE3Low096Se5+d1j3mkrT6ERfVhe4EAcfmWNJrAgQIECBAgAABAgQIECBAgAABAgQIECBAYPQL9GUQMH74bAxm7LDDDk1naSoeotVWWy3/EXUsBQHDIMKR8UNyu2WPPfbIfSLkV2aJmaT6fXnppZfyWxvHLU3LLGW8ytQzEsp04zMS+tWPbez3cVV2LIyGIGCE54qhmssuu6zlkLnzzjsHBHwi9NPqmjqUdUcjzz333Lq2//SnP23Z9qorvve979XV3c4l6p5hhhlq5YcyCFh2XFbta7+Vr9rPfr+e9Mp37rnnzsfk5JNP3rLaVVZZJS8X53bMZhlGxeWJJ56o+/4Ys2g2WxpDd9dee+2AYnfffXf6/e9/nx555JF8P7/5zW/q2lA2CPhf//Vf+XbR32bL+PHj03LLLZeXiz5G6NhCoGoQ8JEXXkmX3PtEisfx4/91jpx0w4ODDgLu+rub0stvvlPq7933x3c8cGYE7EikAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgskDfBQHjFnEROCgGOH74wx+W6tguu+ySbzfWgoBlgDbYYIPcJ24ROVqWgw8+OO9X2SDgaOl7mX7wKaM0NsqUHQtjLQgYRz9COHGrzuJnz+qrr950YFQJAlatO8r3UxBwmWWWqZkMZRCw7LhsejBG0JtjpZ9VDknMzFk859oFARuvS7HdfffdV7e7Pffcs66+Bx54oG599uKFF15IxVtwf+tb38pWtX3sJggY15bivloFAWPHl1xySV37YyZsC4GqQcBmYr0IAu7xh1uaVd31e4KAXdPZkAABAgQIECBAgAABAgQIECBAgAABAgQIECDQUqDvgoD77LNP3Y+gM844Y4pZdMosJ598cr5t3EK4uKy44or5uk9/+tPFVS2fx4+3zz33XIpZYeLH5rfffrtl2WYrYqaap59+OkUo8aGHHkqvv/56s2ID3utmuzfeeCPdcsstKWa2arUsv/zyuUE877TELZf/8pe/pHvuuadT0br1zzzzTIpbC8fthMOw7NKN97PPPpvmnXfevF9lg4BlvJq1O/YXY6vVTGHNtun0Xhzv+++/f8CtAjttV2Z9tz5l6u62TAQ/Yny8+OKL3VaRHn/88XTrrbemqjNWdnNuVWlkt+Mq20e/jIXGwE3jrYHffPPN2nUxrg9xneh26fY4ltlf1bBe1BmzghVDSfE8xmrjMpR1x776KQgYQaq4hXJjuL7RpNvXvbxGPfzww7VrQ6e2dPNZ06rOsp933fRzMNeTXn9WDeW1ac4558zPu3ZBwDgGM888c142zs+LLrooPzTvv/9+mmuuufL188wzT76u2ZMs5Br1TDXVVKU+k7oJAsa+t9xyy7xdMTtgqyW+NxWvQQsuuGCrooN+v+zYbbWjXo+xsudvsT3dfJ+I7+Mx83in7/Qxu2SUi1tTd1qG+ruFIGCnI2A9AQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAJtBXQcAIBk055ZR1P4IecsghWVs7PsYML+PGjUuXX375gLJlg4BvvfVWOvbYY9PKK6+cpphiirq2TDLJJLXQ2a9+9av8NnEDdpRSLTTx5S9/uW4GmOyH3Y9//OO1W73FfhqXCFtU2e75559PJ554YlpjjTXyfX3sYx9rrLYWMPvlL3+ZZp111rw/MSPNrrvu2vTvxz/+cfrKV76S1xltj5ka2y1x+7zYZrbZZsv3EdvF8fzmN7+Z7rrrrqabd+sdff/+979f18bYXwQKYgad4l8EJGMp69XY0FNPPbVmXAwrxL4+8pGPpI033rh228DGbYqvH3300XTYYYelr371q2nxxRdPZ511Vm38nHPOOWnNNdesCzZMP/30ae+9904RaBjMUsUn2hDtavUXYZRsOemkk5qWW3XVVbMiKX48P/LII2vnUNbfeC9m9iyOwTCMmSmvvPLKfNt2T4477riaYTEIErcQj1u4HnrooUNyTrZrT7auyrjq97GQnSvNgoDvvfdeinNhhRVWSHEtjOMXfzPNNFOK2VjLjtnBHMfMvMxjN2G9qDeuV1nf4vFrX/vagN0NZd2xs34IAkZwvVl4/Wc/+1nTa0B2/fjrX/9a89p0002blttmm21q66tco7JxGcGlGD/xmRf7O+CAA2p1RZA6rvnZcZt99tnTk08+WVuX/U+3nzXZ9sXHKp93VftZ5XpSbFM8H8xn1YS4NmXtv/fee1OMlzj34jOm3fLZz342P85xvIu3to7n2RiIx6iv3bLzzjvXlT/qqKPaFa+t6zYIGEGx+EcEf/rTn9r+Q4Kbbrqprk1xK+9eLlXGbrP9DmaMdXP+9ur7RPzDgZ/85CcpgpXZGDnmmGMGdDH+0c++++6bFltssbxcfCdutVT93t6qnk7vCwJ2ErKeAAECBAgQIECAAAECBAgQIECAAAECBAgQIEAgE+irIGDjD6wRNomZp3qxlAkCRvhlkUUWyX/8i5BRzC4T4bqJJ544fz9+RIzZXZotjTMaRtkZZpihbtt4r3E2tG62i/ZlP2hmj82CgNm6wT
y2CgLGjIfrrrtuXTsiDBgz7Uw99dT5+9NNN11tZpWi2WC8L7744rzuTv3KgmZlvbI2xkyU6623Xt1+4vZ+xRkIY98xg9Fee+2VbZY//uEPf6gbT1k7l1122RSzUmavmz1uvfXWeT3dPKniU5xBqbEtMUvSa6+9ljchfhAvhsCy8vPNN1+69NJL6348z9bF7EfFsZC9X3zcb7/98n00PolZh1ZbbbU6rwiYNp6T66yzTiqGFrN6ujm3sm3LPJYZVyNlLGTnSmMQMAKbER4uHrPG55tssklbrsEex7aVN1nZbVjvhhtuqOtnjLPGmSeHsu7oyoQOAsaMWdnxbex74zUxKxePYRUz6MbyxS9+Ma+jWOZ///d/a+urXKP+7//+rxYabjznIyR13XXXpfh8Ke4jnhdvCTuYz5paY//9P9183lXpZ5x/Za4nxTbF88F8Vk3Ia1NjP8q8nmOOOfJjHVbFz6cYJ8VxcMQRR7StMkJxxfLxjws6LY3fUzvto1N9jesjVF1s0+9+97vGIl297mbsFnc0mDEW52h8hlQ5f48//viefZ+I72dF0+x540y3EXLM1hUfd9hhhyJF/nyov1vkO0opCQIWNTwnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBBoJ9BXQcAtttii7ke4CFz1aikTBCzOKPTRj360btaWCBUsvPDCefviB+iYhai4xMwgxZBUhKYee+yxWpG43VtxpqliELDb7SL4FH/FmeqaBQE333zzFH9xm+Xsx80IUmXvN3v8n//5n7xsbNMsCBghzQh5ZXVGSK74o3Xcsi9bF4+Ns/MMxjtu2XzhhRfWZg8q7iNmior3i38xU2QsZb2ibNz6N24rmNUdYb/4kTibyfGRRx5JK620Ur4+yv385z+v7Sf7n5h9Jtu+2eMss8ySllxyydqMdtNMM01d2ckmmyzFbfK6Xar6hNECCyxQ14YIDjRbYmaj9ddfv1Y2ZueLcvFezKLTrJ/ZexEkW3311dNCCy00IBAQZU455ZQBu4tb8hXHd2wbQa2YfS7WNRr/6Ec/qquj23OrrpIOL8qMq8Z2ZibZYz+NhehuYxCwsZ0xK1ec79n78Rghj1Yzfw72OHY4BE1XdxvWi1sdN85MG7NJFZehrDv2M6GDgMVrd2MQMNr397//PcXnW/H4R6C4GMqKcsXQ1FprrVWbPTWuFbFUuUbFLITFfWXPY1bAZiHAWF8MAg7ms6bW2JRq/yihm8+7Kv2M63CZ60nWpngc7GfVhLw2FftR5nncijw79vEYx7+4NI7JCPq1W2KGvmJ9MTtup6U4pmPbXgYB4zOwGJbbaKONUszEOthlsN/VBjvGYqbFonP2vN35u/322zfdJtu2yveJM844o3ZeFf87IOppDAJGEDc7/7L9xGOzIOBwfLcoHvd+CQJucNzl6Zir701bnX5NWvPIcWmFg36bVjr0grT5yX9Ke114azrvjkfSu++PLza97fMTrn8gLf7T3+R/L7z+dtvyVhIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECHQW6KsgYOOPdPG6V0ux7piNrXEpzoAUP/zFj4yNy9VXX133w2TjbeSK4ZAI9kSgpLhcccUV+fbFIGC322V1f/3rX8/rbRYEzMoVZ6HrZNs4K02zIGDc6jX7sTR+vI5b3hWXxpmQij/a98I79hWBlKwN8XjQQQcVm9D0eRmv4jGJek877bQBdcVtM4sBkzC4/vrr83IReBk/fnx68MEH69oYt1aNH5GLS9wyb+mll64rd8IJJxSLdPW8is/BBx9ct//TTz+96T5jXGe3+N1xxx3zMq36u/baa+eB2KxwmDTOrBgzSTaeM8XjEMHMZrcqLR7PmHkwZp7LluL2Vc7JbPsqj8V2NJ6HrWz6dSw0BgGjnddee20dxz/+8Y8Bs75FYKvZUjwO3RzHZnV2eq+4zziHi7cQ7bRtBE6L15Wzzz67bpOhrDt2NJxBwLjVbgTWi39bbbVV3v9mQcBoYwSUGm/TWpwZNcLvcfv0cPzUpz6VXn311TrD7EWZa1R2/sT1tNnsonH75rjV+i9/+cs8GJgFAXv1WTOYz7voa5l+Zibx2O56UizXOBarflZlthPyc6rYn3bPi+MyxlVjCG+DDTbIx22sv+qqq9pVVwvbF8/zGM+dll4FASM0G+ZxXY3P+vhHAcW2xOyEcWx6sQx27PZqjFU5f+Mf2jT7/jSY7xONn2uNQcCidTGQ2SwIWDQZ6u8W0a5+CQIWQ3utnkcw8KoHnypytnwuCNiSxgoCBAgQIECAAAECBAgQIECAAAECBAgQIECAQNcCfRUELN6WN34QLXObtrI97xQEjNBA8ZZ8MfNP4/L222/Xzday88471xWJW75mP+R+8pOfHPAjbvzoG/uIv2IQsNvtsp2XDQz0Mgh4yy231M1+uOqqq2bNyR8bg4ARNMuWXnhHXVXDFbFNJ6+YdShm5MuOZTHAmLU/e4xwUVYuHiOQ0riU/fH55ptvrqsrZmoa7FLFJwJ0MfNh1p/ll1++6e7PPPPMWpn4oTz61riU7W+0LX5Az/YXjyeddFJeXdxitHgcGsNYWcHGYxBhoGwZ7LmV1VPmsdO4ijrK2kzosVC2nffcc0/d8fvud787gKoXx3FApSXeKAY1YmxVCQI2hnIPPPDAuj0OZd2xo+EMAhbPv2bPWwUBo51xrSzOnhjn64033libQTYznHbaadN9991X51d8UeUaFdsVg4BxDTrssMOK1dXCwvH5GiGiWHrxWTPYz7toR9V+lrme9PKzquw5PxTXproD2OJF3Hq+GM760pe+NOA7VrxXHMO33357i9r+9XbMwFgsH8HVTkuvgoDnnXde3b6zdsQMuDEjZ6+WwY7dXo6x6FPV87fsuOz0fSL2XbauKFsca82CgMP53SLa029BwGX2Py8t/7Pf5jP5NYYCv7DXWenKB/4eTW+7CAK25bGSAAECBAgQIECAAAECBAgQIECAAAECBAgQINCVQF8FAeeff/66H0b32GOPrjrVbKNOQcDYJmYwuvPOO2t/8aNis2X66afP29gYVIzbymU/5sbjTjvtVLuFabN6iu91u11WR5nAQJTtZRAwjk2xr/EjfeMSwcnjjz++FuiM2eXidXEZrHfUVTVcEdt08opZ7op9azdrTIRNYva3rHyEPBvHTtkfn2M2vOKtpb/3ve8Vubp6XtUngoxZX1oF/ZZbbrlamVazSpbtb3Ro9913z/cX+y0GSnfdddd8XbQlbk/YbIlbKGdtjse4PWS2DPbcyuop89hpXEUdZW0m9Fgo287oU/G21mHQuPTiODbWWeZ1L8N6e+65Z90uh7Lu2NFwBgEj/DvVVFPV/RXPp3ZBwGhrhCSL5T/xiU/UbjufvRcz9bVbql6jikGiTTbZpF3V+brBftb04vOuaj/LXE96+VlV9pwfimtTfqBaPInjl81CG+MqxkDM9Ni4FL/jRLmYVa7dEjPMZuM0HmeYYYZ2xWvrhjoIGO2I7xHxWdwuQNuxof8uMNix28sxFk2qev6WHZdRd7vvE
7G+Sl2dgoDD+d0i2j4hg4A3P/pcWu3wi9K+426rhftefOM/3+XfeOe92ux/ax918YBQ4LIHnJ/efKf9ra0FAePoWggQIECAAAECBAgQIECAAAECBAgQIECAAAECvRXoqyBgFjDKfpjddNNNe9bbMkHAVju799570zHHHJM23HDDulkDG4OAMRNZ1vbscYkllkhxS+B2S7fbZXWWCQxE2eKP5K1CXFmdnW4NvM4669T19aWXXso2HfRjWe/YUdVwRWzTyWuVVVap61unH+M32mijuvKNs/lU+fG5+CP5hAgCjhs3rq4vjbNexuxa2dg+//zzmx7rKv1tPH4LLLBAXucaa6yR7yv2ueiiizb9a7yN67rrrpvXMdhzK6+oxJNO4yqqqGIzIcdClXYWg7Bh0Lj04jg21lnm9WDCel/4whfqxl5c/4vLUNYd+xnOIGCzmRJ/+tOf5v3vFAR8//33U6NXdo0o3jq86Fd83ngN6HR7916eF2U/a3rxeVe1n2WuJ738rKpyzvfyGBTHQrPnMb6Ks6/FDJSXXHJJs6Lpy1/+cj5uYwzGbHjtlpgFNxur8Riz8XVaehUEfPLJJ9Pvf//72u2L4zHGfXz+Fdsz88wzpzvuuKNTk9quH+zY7eUYi4ZWHTtVxmXjOVb8PhH7rlJXpyDgcH63iLZPyCBg2wH275Xjx3+Qdjz3hgFhwJNueLDt5oKAbXpPOEIAACAASURBVHmsJECAAAECBAgQIECAAAECBAgQIECAAAECBAh0JdBXQcAI/hV/BF1mmWW66lSzjaoGAW+66aa01VZbpY9//ON1bSq2rzEIGLPLFPdTLBu3rPvzn//crGm1Wxl2s11WWZnAQJTtZRAwbp2c9W+66abLmtL1YzfesbPGH347hUhim05ejce81Ux0WWcbQ5NHHnlktqr2WOXH56o/ktftqMmLqj4xw+Fcc82VH9u4VeJ77/1nRpcf/vCHtXURmIiARrOlSn9j+5iFKRtLH/rQh/Iqi2MsW1/mcfXVV8/r6PaczCuo8KTTuIqqqthMyLFQpZ2dgoC9OI4VDkNedDBhvcbZaa+88sq83ngylHVH/RM6CFi83XanIGC0NwLCce4Wz88Iwbe6RhQxq16jBntedPNZUxzD3X7eVe1nmetJLz+rqpzzgz0GxePf7nnMIrz22mvn42qKKaZIF154YctNNt5447xsjMXG87Zxw8Y+R6C109KrIGCz/XzwwQepGMKNPsw444zp6aefbla81HuDHbu9HGPR4Kpjp/EYtZuhOepv9X0i1lWpq1MQcDi/W0Tb+z0IGG187e1300qHXlAXBtz2N9fFqpaLIGBLGisIECBAgAABAgQIECBAgAABAgQIECBAgAABAl0L9FUQcK+99qr7EXeOOeboumONGxaDdhGIa7Wcdtpp6TOf+UxdO+L1z372s3T33XendrcGjjrffffdWoAwbu1WDEVkz9dff/305ptvDth9t9tFRWUCA1GuV0HAaGvxFrYf/ehHB/Sn7BuD9a4arujkFX0r/gA82WSTdezKPvvsU3es43VxqfLjc9UfyYv7afa8G5/GIMJ5551XqzpCGTFDUYzlxlulFvddpb+xXdxKNDs/4jalETyMv+IYW2qppVLMoFTm74UXXig2p+tzsq6SEi/KnIdVbCbkWKjSznZBwF4exxKHoK5It2G9V155pW7sxdiMcVdchrLu2M+EDgI+9thj6cMf/nBthrRmn1dFi+z5vvvum5/HYTbffPPVQu7Z+laPVa9R3Z4X3X7W9Orzrmo/O11Pev1ZVeWc7/YYtBoDzd5/9dVX0worrJCPqbh9dcyc127Zbbfd8vIxBlvNWpvVceutt9aVX2+99bJVLR+HMgiY7bRxFr/G7xRZuU6Pgx27vR5j0d6qY6fKuIz6m32fyJyq1FX8HrjDDjtkVdQ9hk/8g6Gq3/frKin5YiQEAaMrh15xV10QcN1jms/emXVbEDCT8EiAAAECBAgQIECAAAECBAgQIECAAAECBAgQ6J1AXwUBb7/99rofZeOHuMYARrdd7xQEjNk94paiWSApHldaaaUBt5YrzjbSOCNgsW1xy8H4UbkYZsrqjrbED4jNlm626xQYyPbTqyBg1DfNNNPkVnGcIiRWZemVd9VwRbSxk1fM+JQdq3iMtrZbGoNzRxxxRF3xKj8+V/2RvG5HTV504/Poo4/WhSHj1oCxRJAmPCIc+dRTTzXZ27/eqtLf2KI4Y9Hss89eqyRmRirOMPbf//3fLfdXdkU351bZuqNcp3EVZarYTMixUKWd7YKAQ3Ecyx6TbsN6cWvv4vk/7bTTpuhHcRnKumM/EzoIWOxr2edrrbVWnVsYbrnllh03r3qNqnpe9OKzZrCfd4FQtZ9lrie9/Kyqcs5XPQYdB0FDgeeffz597nOfy8dTzEx74403NpQa+PLoo4/Ot4nxd+CBBw4sVHjnjDPOqCvfKvBV2CQNRxDwxBNPrGvX1772tWITKj0f7Njt5RiLhlcdO1XGZdTf7PtEBlalrjJBwKzeof5uEfsZKUHAi+55vC4I+I1jL8uYmj4KAjZl8SYBAgQIECBAgAABAgQIECBAgAABAgQIECBAYFACfRUEjJ7E7QSLIYzNN9+8dAfvueeetMYaa9T+Hn744brtOgUBd9ppp7r9br/99nXbZy/KBgGz8vED4Ve/+tW6uqN/f/zjH7MiTR+rbFcmMBA76WUQcKGFFqrr01/+8pem/Wj1Zq+8q4Yroj2dvP7nf/6nrm8PPvhgq27U3v/Od75TV77x2Fb58bnqj+RtG9ZF+CSrrzhmI8z6+OOPp7i9dYzdmK2o3VKlv3Er4rjdY3bOxwxQ2RLhv+z9CAW+9tpr2apBPVY5t6rsqNO4irqq2EzIsVClne2CgNHnoTqOnY5NN2G9CPw1fgZ9+9vfHrCroaw7dtZvQcAIrsdMm61C0QcffHB+rmbnbPZ4ySXtZ4Sqeg2vel704rNmsJ93cUyr9rPM9aSXn1VVzvmqx2DACdTmjYceeih96lOfysfTwgsvXPv8adzk5Zdfrs3SXPyuF7d9zsZdPMb3wXbLNttsU1c+wu6dluEIAl511VV17YqQbbfLYMduL8dY9KHq2KkyLtt9n4h9V6mrShAwOzZD9d0i6h8pQcDL7nuyLgi45WlXZzxNHwUBm7J4kwABAgQIECBAgAABAgQIECBAgAABAgQIECAwKIG+CwI2zoQSt9y6//77S3Xy8MMPz388ve222+q26RQEnG222fJt41a3rWbsaxUEfP/999Opp55a+3vjjTfq9h0vdtxxx7z++IF6//33r5XpdrviDsoEBqJ8L4OAjbM/bbfddsUmdXw+WO9sB43hiv322y9b1fKxk1fcvrkYJjj55JNb1hUriq6x3bPPPltXvsqPz1V/JK/bUZMX3fhENWeddVadQcxumZlcfvnlTfb0n7eq9PeGG27I6436i7NsrrrqqnXrjjvuuP/spMSzXpxbJXaTF+k0rqJgFZsJORaqtLNTEHCwxzEHrvikm7Der3/967oxF2GQO+64Y8Ce
h7Lu2Fm/BQHj3Ivzc6655hpgcc0119RmCY31K6+88oAZ02J8vPTSSwO2y96oeo2qel704rNmsJ930deq/SxzPenlZ1WVc77qMciOdafHCNHPNNNM+TkY7nGL4GbLQQcdVCu3zDLL1K0uBtdmnXXWunWNLxZbbLF8X7Hft956q7HIgNfdBAHjFumnn356iuMVt5Jt9f0y29lRRx2VtyvOq5h1uNtlsGO3l2Ms+lB17FQZl+2+T8S+G+vaa6+9WrK2CwIO93eLaOSECgI+/fLrabuzrkvLHnB+Wu3wi9Lrb7/X0ixW/OLKu+uCgIddeXfb8oKAbXmsJECAAAECBAgQIECAAAECBAgQIECAAAECBAh0JdB3QcD4gW355Zev+xE0wgVlbj37rW99K9+uShAwfpSNH1uzv7i1WKulVRAwfkDOto9ZaRqX2EfxVqf77rtvrUi32xXrLxMYiPLFwFoEI9stu+yyS96f6FfjD9fnnXde3fro2z/+8Y92VebreuGdVdYYrigTSOzkFTNYZccyHouz1GX7zR4bb2f9+c9/PluVPzb++ByB1VZL1R/JW9WTvd+NT2z7zjvvpJlnnrnOISw++clPDrhNarav7LFKf+OWh0XrSy+9NKsmnXTSSXXrFl100Vq78gItnrz55pu1Nb04t1rsounbncZVbFTFZkKOhSrt7BQEHOxxbIpd4s2qYb04V+IWpMXx2OpWoUNZd3St34KA2cxpM844Y538M888k+J23mH28Y9/vDZrYBRoDA9985vfrNuu+KLqNarKedGrz5rBfN5lfa3azzLXk15+VlU556scg6z/nR7jtr5x2/kYSzELbXxHarwld7GObIw1BgGPPPLIunP4yiuvLG6WP49/YBL/0CQ731ud6/kG/37STRAwQoDZfuJx3LhxjdXmr2NWu/geUSzfqg/5Rm2eDHbs9nKMRTOrjp0q47Ld94nY9z//+c8612984xst5doFAYf7u0U0ckIFAfcdd1tdsO/0m//W0uzF19+uBQYX/+lv8m0uv//JluVjRZUgYFwP4jvi7rvvnq677rq29VpJgAABAgQIECBAgAABAgQIECBAgAABAgQIEBjLAn0XBIyD8dxzz9VmHir+ELrIIoukTrdojZBStk2VIGDss7ht1NHsVrdXXHFF3W1Mi7OXFX8YjB99G5cIOE477bR5+7Ifgrvdrlh/mcBAlO9lEDD6M++88+b9CbMFFlgg/fWvfy02LX8et5bNZkGMNwfrnVX89NNP17Xhs5/9bLaq5WMnr/ixsXg70/hBOGa9alyiXPGH5wgv3HLLLY3FJmj4qxufrAONYac4xj//+c+z1S0fy/5wHwGDMMvO2cZbDkeIpxgyi3IRCs6Cfo0NiGBHzD4XMyDF0otzq3Ef7V53GlexbVmbKFs1MNGubbGuylio0s7iMQqDxmWwx7GxvrKvG8fvZZdd1nTTCN5EOHe66abLx2KMtaWXXrplAH0o645G9lsQMLsteDEIGJ8Byy23XM0sbu9dDMA///zzqTgTX3iec845Tf2rjMuooOp50YvPmsF+3kW7q/azzPWkl59VVc75qseg6YH/95sxW97WW29dd+7tvPPOKW6P2+4vgqcxrhqDgHHL4GL7FlxwwQH/kCF23fgPTsrOPN1NEHDbbbet61+z241nRocddlhd2bhNcnyWdbsMduz2coxFH4rH5nvf+17HbpUdl52+T2Q7imtYjJv4i+vWAw88kK3KHx977LFUNghY5ft+voMunkyoIOCmJ12Vh/oi4LfWUc1DrG+9+37a6oxr68pu8us/pvHjP2jb2ypBwO9+97v5sYvj98tf/rJt3VYSIECAAAECBAgQIECAAAECBAgQIECAAAECBMaqQF8GAeNgRJBvjjnmqPvRJ4J0Bx98cC1wUAwDxY+kBx54YF3ZYhAwfsj8zGc+k6+fc845U/w4Wlwaf4heaqml0j333FO7nd/ZZ5894Efj+BFqgw02yKsoho5ilpHXXnstXxf7//GPf5zvP/oVP1bH0u12eeUppeJtj6effvqmP3pHG6Lf2Q+giy++eLGKAc8bf3BrvN1tbBCz1MQPqVmd8Rg/sv7oRz+qhT5uvPHG2q2St9hii1q56He2DNY7qyeOYzaLUNaOCBxG+Chmkfzzn/+crr766qx47bGMV8z0VwwGxUyQd955Z15P7Hf77bev6/uWW26Zry8+ufnmm+vKtbodXYyZySefPC/bLFRVrLfM8258snpj/Gem8TjVVFOlF198MVvd8rHxh/sINhSXCF6dcMIJdX2dZppp0hNPPFEsVnt+wQUXDDi+MTPgPvvsUwuJxL7+8Ic/pJgNNBsHq6yySm3bXpxbAxrU5o0y42qkjIXGdsaMcM2WmDkyrjnZOPnKV77SrFgazHFsWmGHNyNctNpqq+XtivZFG+K27TErVJzfZ5xxRm1moeJnQ9aPNddcs2XgdCjrzroVtzjP2hKP8VkT581glzhecYyKdf/2t79Nr7zySsu/CHfH+RnbFIOAxVljjznmmAFNawwzzjLLLClmEGxcqlyjXn/99brrRplrZK8+awbzeRd9rtLPKF/mehLlevVZ1XjOD9fn1E477VQ3Hotjs8zzxiBgmPziF7+oqzO+kxTPnwi0F+uOz/JOS3yHiutH8btc1LHZZpvVvu/F+lZLfEYV9xffJ2699da64vF51RgYjFsbP/zww3Xlunkx2LHbqzHWzfnby+8TYVc8r+KYxOfXbrvtlo4//vja58FXv/rVun+gEGU23njjOvbh/m4RO+8mCPjym++kx158NT3wzD/TbY8/n75zyp/qgnobHHdZuuWx52vro9wb7wy87e/x1z9Qt02EAQ+94q4Uwb9Y3h//Qbr9iRfSmkeOqyu33IHnp2dffaPOrdmLskHAuEV4MUQaxyUC6hYCBAgQIECAAAECBAgQIECAAAECBAgQIECAAIGBAn0bBIymvvTSS2nDDTes+wE1+zE1bukWM73EbCmNYbQoc9ddd6X4YTtmdWqcuS7WzzXXXCnCfhdffHFNJQJjjT8yZfvKHuP2ho3hxIUWWij97//+b12gL8rPNNNMtSDK5ptvnhZeeOG8DxFWuvbaa/MjUfxBscp2EeiIH8BjFr6sfdljzNAVffvjH/+YYpabMJh77rkHlPvEJz5RK1ecqWmNNdaotbc4G0rUGz9If+ELX6jNVJU3PqV01lln5QGsbP+tHmP7bBmsd1ZPPEa9jfuM8ZHd9i9+5C3rVaw3ZhArzuIY+1hsscXS2muvXTu+2T5jVrsIxUT4sLiceOKJtVv8FYNSsU20K+o54ogjasVfeOGF2jH66Ec/OqAfMb4ilDmYpYxPq/qL226yySatitW93/jDffQ3ZmqM0E78CP/hD3+4rp8x2+cdd9xRV0fxxamnnlp6jM0zzzwpZmyKpdtzq7jvTs/LjquRMhbi/I/bUjaO2WzsZ2M2+v3FL35xwPUwysUMbDEzY+PS7XFsrKfd6wgsx61qm30mZOdru8e4LXxc05qFeoay7uhThMPj2t3qc2jKKaes3bo
4rutVl6ztxaBxO4dW6yIIGDN3Ns72F5+xMR4yt7gddLPP3biFfMxM2zi7b/E6k+27eA2P7wER+mh2jYzZW+PzrtXsUL38rOn28y47Xp36GTPhlflcz+rLHgfzWTWhr03xDwWyY97NY7MgYLhstdVWdfXGd6UI1DYGf+OaF+HeVksEImPcNn4namxrrI9zN8ZIs2XvvfeuC7HG9tGWaFP0IQvbZvXGZ9kNN9zQrKqu3hvs2B3MGPvTn/7U9fnb6+8TMXNpZtzqMWYVblxX/C42HN8tGg9yN0HAXX57U104r3jL3mbPj7vu/sbd1oJ+O5x9/YB6ltrnnFr4Lx4b61r7qItrwcMBlTV5o2wQMEKk2T/2yI5NBPYtBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECAwX6OgiYNTdmU4nZnTqFO+JHoggE/OxnP6sFv2ImuOwHo1aPZ555ZrabWnCwWdAgZhjLZpVrnCkv6o3QTITAVlpppdoPxq32FT9a3X333fn+4slgtuvkETNexax8rdqTvX/00UfnbYqAZPZ+s8dDDz00L5s9iRmjIuTV7IfyCHMsscQS6bTTThsQlIugZrfe2b7jMX7YbVZPhF7i2EUoNJzLeBXrjecxg1UECpoFaOK9z33uc7WZ6Rq3i9c/+clP2lput912tc2eeuqptuUGO+tJGZ9m7Y/34pap2TiIsVRmafzhPtu+8TFu7Rghi8YAZbN9RHAoAkiNPwRHnfFeBCoi0FIMdHR7bjXbf6v3yo6rkTIWYua/xuNUfJ2N2U79Ls4cV7Tr5jgWt+/0PGasLN5uutj2xucxK9f8889fC6jsuuuutdt/F8dP476Gsu7YVwTfG9vY7PWFF17Y2LSOr6u0vdk+s/fiuM4333wt25kFAffcc8+WZaKu4uyq0fhO16jGGd6y9hQfY9a3VkuvPmui/m4/78r0M2aJ6+ZzKuru9rNqQl+bBhsEjNvFt1qOO+64pmHlGDfxjzUOOOCAPLzaqo64NhTHWafn5513XquqauM+wqDNvitl9UYYOGbYLPO52HJHLVYMZuxGld2Osd/97ncdDVudv0PxfSJu5xvXssw8eyz+46Bmxyj7LjYc3y0aD+FwBAHPvf2Rxt3WXsd1/ezbHk4r/vz3A0J/xRDgij+/IJ13xyMdbwdc3EnZIGBsU/x+EqHbuHW4hQABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKDAiAgCZs2OWzqOGzcuxY+7ESCK2XMi9BcBoOuvv77lrRyz7cs8xqwT11xzTTr22GPTJZdckprdErddPXE72pjd7KKLLqrdaiwCcDEj0XPPPddus9ptbLvZrm2lw7wywibR15gNKn5ojf7EzCntlsF6Z3XHbcMuvfTS2nEL85iBJuru1RLHNW6VG7MnxniLwEa8N1KWbn2y2x9H0K7s0vjD/Q477FA7H04++eTaOXHdddflt8YuW2dWLmaii2BnjK9TTjmlNsbivXZLt+dkuzpH8rpux0Iv+9zNcezl/tXVfwJDPS579VmTyXXzeRfbDnU/R/pnVebbq8cI98b3xrhFfdyG+OCDD05xO+w333yzV7uoXE+MxQi/nnDCCbXZhONWxfEPXiKo1y6MXHlHLTboduxm1Q3nGBuq7xNxO/T4rh+3BI7vdbGfqstwfrfoJghYtT+dykcgMG4xfMqNf02HXH5n7RbBER685bHn0vOvtf+u36ruKkHA2H+cN/FdMv570EKAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBcYEQFAZt3wbsECAyFQPxQnt0iNsK3ZZfGH+5jVkELAQIECBAgQKCKgO8T/9LqhyBgleNWtmyVIGDZOpUjQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECIx1AUHAsT4C9J9AC4F99923duu8WWedtdLsSX64bwHqbQIECBAgQKC0gO8T/6IqBgHjdrzxuvHv6Zd7Nwt26QNUoeBhV949oM3rHnNJ3e2GX3h95My0XaHrihIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEhlVAEHBYue2MQH8K/O53v0tHHnlkeuONN2oNfOqpp9K0005bCwLuv//+lRrth/tKXAoTIECAAAECTQR8n/gXSmMQMMKAjX8PPvtyE8H+eWv9X106oM2NfXhRELB/DpiWECBAgAABAgQIECBAgAABAgQIECBAgAABAiNWQBBwxB46DSfQG4EnnngiTT755LXQ38wzz5z+3//7f+nzn/987fXHP/7xSrMBRosefPDB2rYTTTRR7fGwww7rTUPVQoAAAQIECIwZAd8n/nWoL733iXTEVfe0/Dv66nvTe++P7+txcfZtD7dsf/TtuOvu7+v2axwBAgQIECBAgAABAgQIECBAgAABAgQIECBAYKQICAKOlCOlnQSGSOCss86qC+5lAb5JJpkkXXzxxaX3ev/996fDDz88rbTSSnX1feITn0hxm+GTTjopvfLKK6XrU5AAAQIECBAYewK+T4y9Y67HBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECvREQBOyNo1oIjFiB++67L0066aR14b3JJpusFuqr0qkI+2UhwlaPd911V5UqlSVAgAABAgTGmIDvE2PsgOsuAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAzwQEAXtGqSICI1fgkksuSZtssklaeuml01ZbbZVuvvnmyp257bbb0jHHHNP274UXXqhcrw0IECBAgACBsSPg+8TYOdZ6SoAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0FsBQcDeeqqNAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgMq4Ag4LBy2xkBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEOitgCBgbz3VRoAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEhlVAEHBYue2MAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAj0VkAQsLeeaiNAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAsMqIAg4rNx2RoAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEeisgCNhbT7URIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFhFRAEHFZuOyNAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAr0V6Jsg4EQTTZT8MTAGjAFjwBgwBowBY8AYMAaMAWPAGDAGjAFjwBgwBowBY8AYMAaMAWPAGDAGjAFjwBgwBowBY8AYMAaMAWPAGBjMGOhtxG5k1CYIKIAogGkMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8bAqBkDIyO619tWCg
I6gUfNCTyYFLBtpciNAWPAGDAGjAFjwBgwBowBY8AYMAaMAWPAGDAGjAFjwBgwBowBY8AYMAaMAWPAGDAGjAFjwBgwBoyB0TEGehuxGxm19U0QcGRwaSUBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEOgvAUHA/joeWkOAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBCoJCAJW4lKYAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAj0l4AgYH8dD60hQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQKVBAQBK3EpTIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIE+ktAELC/jofWECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBSgKCgJW4FCZAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAv0lIAjYX8dDawgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQCUBQcBKXAoTIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIH+EhAE7K/joTUECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKCSgCBgJS6FCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAfwkIAvbX8dAaAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBQSUAQsBKXwgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoL8EBAH763hoDQEC9gMkGAAAIABJREFUBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQqCQgCFiJS2ECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBfAoKA/XU8tIYAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECFQSEASsxKUwAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBDoLwFBwP46HlpDgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQqCQgCVuJSmAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQI9JeAIGB/HQ+tIUCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAEClQQEAStxKUyAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBPpLQBCwv46H1hAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgUoCgoCVuBQmQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQL9JSAI2F/HQ2sIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEAlAUHASlwKEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACB/hIQBOyv46E1BAiMAoFbb7017bnnnmmzzTZL+++/f7r99ttHQa90gQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoF8FBAH79choFwECI07g1VdfTWuttVaaaKKJ6v4mnXTSdNNNN6VHH300bbXVVrUyf/zjH4esf88//3zaZZdd0sorr5zOOeecQe/nwgsvTKuvvnracsst04svvjjo+lRAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECDQW4ExGwR8/PHH04Ybbjjov4cffri3R0RtBAi0FDjkkEPanrObbrpp2nvvvdOZZ56ZYla+V155pWVdvV7x7rvvpiWWWKIWAPzMZz6TNtpoo7ow4NFHH53WWWed/L2pp546vfXWW71uRq2+H/7wh/l+JplkkvTkk092vZ/nnnsuTTnllHl9W2yxRdd12ZAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGBoBMZsEPCOO+7Igy2Ns3dVeX3BBRcMzZFRKwECAwRWW221yuftkksumS666KIBdfX6jbgVcFw7Jp544vTggw/Wqv/973+fZp111vSxj30sPfbYY2nuuefO2x8BvZdffrnXzajVt8wyy+T7iTbde++9Xe8nZi4sXhPXW2+9ruuyIQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAwNAICAJONFHaeuut05133pn/xa00i8GXY489Nl8X5Xbbbbd8vSDg0AxMtRJoJvDOO++kp59+ujYrYHaOxmx1DzzwQLr//vvTNddck0444YTabXEXXnjh/DyNsosvvnht22b1Dva9t99+O80444y1/cV+iss///nPfOa/n/70p3mbNthgg2Kxnj4/8cQT8/0stdRSLet+4403ate/X//6123LLLjggrX6pphiivSHP/yhZVkrCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEJoyAIOBEE6Ubb7yxTv+vf/1rHqKJANGVV15Zt/5vf/tbvl4QsI7GCwLDInDooYfm5+BUU03VdJ/vvfdeinLTTTddXnb++ecf1G1ym+4opXTWWWfl+9hss81aFau9f88999RuW9y2UA9WPvroo7Vg5AcffNCytl/96le1dq+wwgoty8SKCGDGzIDPPPNM23JWEiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQITBgBQcCJJkovvvhinX6nIOD777+fYmasCAkKAtbReUFgWATKBAGzhtx9991psskmy4N6CyywQIpzuJfLT37yk7z+XXbZpZdVD2ld2ayJnYKAQ9oIlRMgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECAxaYMwGAZ966qm00047pQjwNC6dgoBR/oADDqhtf9999zVu3tPX48ePTw899FC69NJL0+23316bmavKDv7+97/Xtr3++uvTu+++W2VTZQn0rUCVIGB0Yvvtt8+DehHgPeWUUzr2rcq5s/nmm+f177XXXm3rjlsYX3755bXzum3BhpUxw+HNN99cm+UvbjfcaXn88cfThRdeWLutebOyJ598ct7mTkHAuHZcffXVtfrefPPNZtUNeO/555+vzaYaM66Wae+ACv79RtV+t6rH+wQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACB0SwwZoOA7Q5qmSBgtv3pp5+eZpppp
qZ/EQ7KlritZrNyEcaJ2cmuuuqq9IMf/CDFrUujXAT3DjrooDTnnHPmYZ0IMMXr4447Lqu26eNLL72Uvv3tb6fZZputbttpppkmrbfeeumFF15oup03CYwUgapBwAiizTLLLPn5sOCCC6YI2TYuVc+dmG0wwsSzzz57XnfMOLjmmmsO+Ft11VXryjWGkCPwdsUVV6RtttkmzTfffLXrwG233VYL/q222mpp2mmnzfcR14Io9/bbb9d1Ia5de++9d1psscXysptsskldmbvuuistssgi+fqoK2ZMbLw+xe2Azz777PTNb34zTT/99Hn5uOVwq+WVV15JW265Zfr4xz+el4/64+/Tn/50Ov/88wds2ot+D6jUGwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBMSYgCNjkgFcJAl5zzTVp9dVXTzPPPHMefInbBq+11lrp6KOPzmuPmby+/vWv50Gg6aabLkUwKGb6iwBOFpYp+7j77rvndRefRKAwCw9OPPHEaZlllqkFeeaee+58HxEyitnCLARGqkDVIGD0c4011sjPgTjPHnjggbrud3PuFGcCLHvuZuWKQcAI0M0wwwx17Ytys84664D3su3jsfE60Kw9jUHAs846K80444xp6qmnzuuOIGC8V/yLa0RxX9nzVkHAP//5z2neeefNt4ngX8zEGNfCCCFn26+//vp5CLNX/a47kF4QIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBMaggCBgk4NeJQiYbf5///d/edAlwjQxm1azJWb2ikDMrrvuWlsdt9yMsEz8RRgnC8t85CMfSdttt12KGQfPOeectNVWW6VJJ500Xx/lfve739Xt4plnnsln7orZwy655JJ8/RtvvFELBWb1N4aD8oKeEBgBAt0EAXfYYYe682fcuHF5T7s9dx555JHa7XqXW265vO44VyP42+xv6aWXzssVg4Bxfja7DnzoQx9KG264YTrqqKNSBPgi+FecGTCex4yi2XLvvfemyy67rBY67nSuH3744Xlbmt0aOGYbjLouuuiivFzU2SwIGH4f/vCH83Lf/e53sybVHuO25hF+ztq07bbb1t7vVb/rduYFAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGAMCggCNjno3QQBX3zxxTTllFPmQZff/OY3A2p+8skna2G+SSaZJD322GMD1hdn6LrxxhsHrI/bdBbDgksttVRdmY033jjf/yGHHFK3Ll6ceuqp+foIFT711FMDyniDwEgQ6CYIGGG6LIgWj0ceeWTe1cGeO+uss05e93777ZfX2/jkW9/6Vl6uGAQsliteB6699triqtrzI444Iq8j+hFhxMZll112ycu0Cv12CgJmdUYgsOjWLAgY+8jKfOYzn6kLJ2b1nHbaaXmZmK30/vvvz1bVHnvR77oKvSBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjCEBQcAmB7ubIGBU841vfCMPunz5y18eUPOee+5ZWx+3BG62FIMwN910U7MiqRhYiuDNX/7yl1q5CANFuCbei8e45Wbj8txzz+VlolzclthCYCQKdBMEPPfcc/PzM8b/HnvsUet6L86doQoCNrsORIg42p/9xe3JG5fhDAI+9NBDddeVCCo2WyJQGLOlZu2OmROLS6frX5l+F+vznAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwlgQEAZsc7W6DgFdccUUecokw3sMPP5zXHrfvnHPOOWvrL7zwwvz94pNOQZgoG7cbzYI08Ri3C40lbhOcvT/55JOnzTffvOlfViYejznmmOLuPScwYgS6CQKecsop+TkS4z/qiKUX585wBgGjzbPOOmvelz/96U8DjttwBgHj1uXF68rjjz8+oD3ZGxtssEFe9lOf+lT2du2xzPWvU7/rKvSCAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIDCGBAQBmxzsboOAH3zwQZp33nnzoMuuu+6a137BBRfU3p9nnnnS+PHj8/eLT8oEYd57773a7YWz4M3PfvazWhX7779/vt9sXafHgw8+uLh7zwmMGIFugoAxA2DxnMgCub04d4Y7CPiRj3wk78uEDgLutddeeVvitucRem617LzzznnZuJV6XDOzpcz1r1O/s7o8EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGmoAgYJMj3m0QMKraZ5998qDL7LPPniK4F0vcDjhCSBE6arWUCcLEtjPNNFO+jwjhxLLpppvm7y255JLp6aef7vj3+uuvt2qK9wn0tUA3QcANN9wwP0fiXIzzPJZenDtjOQi4ySab5K5x6992S/H6GLOmvvbaa3nxMtc/QcCcyxMCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQJ2AIGAdx79eDCYI+Pe//71uxr647WjcKjNmyppiiinSs88+22SP/3qrTBAmSk433XR58OaEE06obfyDH/wgf2+RRRZpuQ8rCIwGgapBwJh57r//+7/zc2TaaafNQ7q9OHfGchBwxx13zF0nnXTStjMC7rbbbnnZaaaZxoyAo+Fk1AcCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgLwQEAZschsEEAaO6bPa/mHUsnu++++618Ms3vvGNJnv7z1tlgoDPPfdcHqSJ+v/85z/XKjj88MPz9yNw+Pzzz/+nYs8IjDKBqkHAk08+OT8/4ryJmemypRfnzlgOAh511FF1tk888URGO+Bxo402yssuvvjidevLXP/MCFhH5gUBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAIBcQBMwp/vNksEHA3/72t3nYJUJHMUtWPF599dX/2UmTZ2WCMEcccURed9T78ssv12r605/+lL8f+/r5z3/eZA/eIjA6BKoEAV999dUUt+mO8yL+5pxzzvTmm2/mEL04d0Z6EHCJJZbIPRqfvP3227ld+D366KN1Re6888669REMbLbEbdJnm222vOx3vvOdumJlrn+CgHVkXhAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEcgFBwJziP08GGwSMwEsxsBLhmYUWWug/O2jxrFMQ5q233krzzDNPHqSJW5oWl4UXXjhfN+uss6a77rqruDp/HrdJPe2009IVV1yRv+cJgZEkUDYIGOfycsstl58XcS7G2G9cBnvujPQgYAT0xo8f38hSe90pCBiFll122dz4s5/9bN0tf7NKzz///LxMzFr64IMPZqtqj52uf1GoeF2NAKeFAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIEDgXwKCgE1GQgToIjCU/V144YVNSrV/a6eddsq3j3piJr9OS7sgzIsvvpi++MUv5nXOMcccKWY6Ky6///3v08QTT5yXmXHGGdOpp56annrqqVqxv//9
7+nMM89Mn//852tlDjzwwOLmnhMYMQL77bdfPs6nmmqquna/8sor6aabbqrdknvKKafMy80000zprLPOqiubvRjsubPGGmvk+9lrr72yagc8xu3Bs+vKzjvvPGB9hO6Kbb7qqqsGlIk3Zp555ryeZoHe7bbbLl+/4YYbNq3jlFNOyctEm4499ti8XMw0GoHhWF566aW6cn/729/yctmTuEV5HIesb9tvv322qvZ4//3317V5l112qVvfq37XVeoFAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGAMCQgCFg72vvvum1ZcccX0sY99LA+0RLBllllmSV/5yldS4wx8hU0HPI3ZrrJQzDTTTJPfwndAwcIbxSBg3KrzRz/6UTrggAPS+uuvX3dLzfnnn78WdCpsmj898sgj8/1m+4/HGWaYoe796NPFF1+cb+cJgZEgsP/++6cVVlihLnQW43vRRRdNMatfBGSL4z57Hud1Foht1c9uzp24BffGG29c156YtW6LLbZIe+65Z76rCAJ/+9vfTnEtyNoUYb4od9JJJ9VuVRzhvaWWWipfH+ViBtBtttkm/fOf/6zVdfDBB6c111yzrsynP/3p9P3vfz898cQT6fTTT0+bbbZZmnzyyfMysc9NN900HXLIIXl74sntt9+el8naFLP5LbbYYrUw4kUXXZS23Xbb2utsfTx+4QtfqO3vL3/5S1195513Xn4b9Ci35JJLpj322CNtsskmKULJWR3RvuzWzPHYi37XNcQLAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMAYFBAELBz0VVZZJQ+rZKGV4mOZ2/sWqkuLL754rb4I+5RZikHA4n6z53G73wgHZiGaVnVed911tdkDi7MDRh3xOkKEEV56/fXXW23ufQJ9K7Duuuu2PUdjnH/oQx9Kc889d23myx133DFdeeWV+ex2nTpW9dyJsFt2fjY+zjvvvPnuirN5NpaLGfsaZ91rLBMhv1iy2Twb18frmMl08803b9meCPA1Lt/73vcGlI8ZCddee+10yy23DFhX3O8ll1zSWF267777UnF2xKx8XHs+9alPpXPPPbdum171u65SLwgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECY1BAEHCIDvpbb71Vm0kwgjAx81aZpRgE/PWvf53GjRuXTjvttBS3/nz22WfLVFFXJm4dfMMNN6QzzjgjXX/99QNuJVxX2AsCBHKBsXTu3HvvvbWA3tlnn127TnQKGudIbZ689tprtSBh3Ir86quvTnG7ZgsBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMDQCQgCDpFtBPkiBNhsFq5WuywGAW+66aZWxbxPgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgRyAUHAnKJ3T9577720wAIL1IKA55xzTumKBQFLUylIgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAv8WEATswVA4+eST01xzzZUOPPDA9PLLL6f99tuvFgJcZJFF0vjx40vvQRCwNJWCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIPBvAUHAHgyF//qv/6oF/+JWwNNMM03t+SSTTJKuvvrqUrXHDILjxo1LsU3UEX+/+MUv0iuvvFJqe4UIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYOwKCAL24NgvuOCCeYAvC/IdcMABpWp+880306STTjpg+6yeDTfcsFQ9ChEgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDA2BQQBOzBcb/uuuvSiiuumGafffa0/PLLp/PPP790re+8807aaqutWv4df/zxpetSkAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGnoAg4Ng75npMgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNIQBBwFB1MXSFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsScgCDj2jrkeEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMAoEhAEHEUHU1cIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYOwJCAKOvWOuxwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwigQEAUfRwdQVAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBh7AoKAY++Y6zEBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjCIBQcBRdDB1hQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGnoAg4Ng75npMgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNIQBBwFB1MXSFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsScgCDj2jrkeEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMAoEhAEHEUHU1cIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYOwJCAKOvWOuxwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwigQEAUfRwdQVAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBh7AoKAY++Y6zEBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjCIBQcBRdDB1hQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGnoAg4Ng75npMgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNIQBBwFB1MXSFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsScgCDj2jrkeEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMAoEhAEHEUHU1cIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYOwJ9E0QcKKJJkr+GBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDAxmDIy9GGBKgoACiAKYxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8a
AMWAMGAPGgDFgDIyaMSAIOAEFBpPgtK0EsDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGgDFgDBgDxoAxYAwYA8aAMWAMGAPGQIyBsbiMzV6PxSOtzwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUflYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAY+VI6ycBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjEoBQcBReVh1igABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGioAg4Fg50vpJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNSQBBwVB5WnSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsSIgCDhWjrR+EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCoFBAEHJWHVacIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKwICAKOlSOtnwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUflYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAY+VI6ycBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjEoBQcBReVh1igABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGioAg4Fg50vpJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNSQBBwVB5WnSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsSIgCDhWjrR+EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCoFBAEHJWHVacIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKwICAKOlSOtnwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUelSWZsAAAgAElEQVTlYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAY+VI6ycBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIjEoBQcBReVh1igABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTGioAg4Fg50vpJgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAqNSQBBwVB5WnSJAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsSIgCDhWjrR+EiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMCoFBAEHJWHVacIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYKwICAKOlSOtnwQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwKgUEAUflYdUpAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBgrAoKAE+BIf/DBB+nWW29Ne+yxR9poo43SueeeOwFaYZcECJQVcM6WlVKuVwIvvfRSOuOMM9IWW2xR+5yIMWghMFYE3nrrrTRu3Li07bbb1sb/ww8/PFa6rp8E2grccccdaa+99qqdF6eeemrbslYOjcBDDz2UDjvssLTxxhunQw45ZGh20oe1+l7S/KDE59VFF13k86o5z4h/138DjvhDqAMEeibgetAzShX1UMD3sx5iVqjK9aAClqIECBAgQIAAAQIECEwwAUHANvT/+Mc/0iOPPJIef/zxNqWqrfr+97+f5phjjjTRRBPlf7vvvnu1SkqWvvDCC9Pqq6+ettxyy/Tiiy+W3EoxAiNXwDk7YY/d888/n3bZZZe08sorp3POOWfCNmaE7v2aa65Jyy67bJpsssnyz4j4vBg/fvwI7ZFmEygv8OSTT6a11lorTTPNNHXj/6abbipfiZJjSmCsfO78+Mc/TnPNNVfdebHddttN0GM91v4746ijjkoLLrhg3TFYZZVVJugxGI6dj/XvJa3G+aOPPprWWGONNPXUU9eNCZ9XwzEqh2cf/n+b4XG2FwIjQWA4rwdlPMbK998yFmO1TJXvZ8ZLb0dJv10Petu78rW1+o5cvgYlh0rAsRkqWfUSIECAAAECBEaegCBgi2P29ttvpxlnnDH/P/Zj9odeLAsssMCAH/KGIgj43HPPpSmnnDJvf8wqZSEwmgWcsxP+6P7whz/MrzmTTDJJilCPpZrAscceW/uMmG666XJLQcBqhkqPXIHbb7+9Nv4bA0+CFSP3mA51y8fK587iiy8+4L8ftt9++6HmbVn/WPzvjJjFvfHaNBaCgGP5e0m7cX7DDTf4vGp5hRgdK/z/NqPjOOoFgV4IDNf1oGxbx8r337IeY7Fcle9nxktvR0i/XQ9627tytbX7jlyuBqWGSsCxGSpZ9RIgQIAAAQIERqaAIGCL4xazWQ3lrH1f//rX8/qHIgj4xz/+Ma8/+rHeeuu16Km3CYwOAefshD+OyyyzTN115957753wjRqhLbj//vvrLM0IOEIPpGZ3LVCcZUkQsGvGUb/hWPvc2WqrrfLPhgkZBBzL/51x0EEH5cdgLAQBs4vIWPxeUnac+7zKRsnofPT/24zO46pXBLoRGOrrQdk2jbXvv2VdxmK5Mt/PjJehGRn9cj0Ymt61r7Xsd+T2tVg7FAKOzVCoqpMAAQIECBAgMHIFBAFbHLu4pW4xCDj33HOnDz74oEXp6m8P9X8wvvHGG/ntq6aYYor0hz/8oXojbUFgBAk4Zyf8wTrxxBPz6+ZSSy014Rs0gltQ5v/QrdK9s88+O2299dbp2WefrbKZssMgMFzHZrj2U4asTFsEK8pIjt4y8T02rlm//vWv23ZyrH3uDEcQsMz5OVr/O6PMuOuXIGCZtrY9eSqu7PX3koq7nyDFy45zn1cT5PAM2079/zbDRm1HBPpeYKivBwFQ5nvYWPv+2/cDYwI2sMz3M+NlaA7QcFwPhqblg6+17Hfkwe/pXzWUuS72al/9XE8Zh+E+Nv3spW0ECBAgQIAAAQIpCQI2GQURlJhsssnyQEsWCLzyyiublO7ureH4D8Z33nknxb8EeuaZZ7prpK0IjBAB52z/HKhHH300XXPNNT0NTvdP74avJWX+D92yrXn//ffz2xnGsbH0j8BwHZ
vh2k8Z2bJtEawoozl6y/zqV7+qfQ9fYYUVOnZyLH3uDHUQsOz5GQdlNP53Rplx1y9BwDJt7XjyVCjQy+8lFXY7wYuWGec+ryb4YRrSBvj/bYaUV+UERpTAUF8PqnwPG0vff0fUIBnmxpb9fma89P7ADPX1oPct7m2NZb4j92KPVa6Lvdhfv9ZRxWG4jk2/WmkXAQIE/j975wFuT030fxUQkK6ISC8iKEWKgFIFC6BUKS/SBSl/ehVFehF8QUClCNKLdKSIgKI0AUFFuggoRUGlSdVXUPf/fFZnnZOzJe2ce+79TZ7n3uzZzSaTbzKTyWQ2MQQMAUPAEDAE/ouAOQL+F4vq6vjjj+9zAsQZcPPNN6/SpF5M6hPGVPzsfUNAI2A8q9Gw64mAgK9B16eul112WTWmmSOgD2LDSzOsthlWOT7I+dJijhU+aE7cNIsuuqi3I+DERaG/ZoN2BPTlz37KJsYdn343Ko6APrTmbJWceklOukYhLxuvRqEVBkeD2W0Gh63lbAiMNwQGLQ8mdT1svPWHUaDX9LOxa4VBy4Oxq9lolWxy8d/tYTiMVr80agwBQ8AQMAQMAUPAEBgvCJgjYE1LLbnkkuXiI5O6DTfcsHKgwMj/yiuv1LzRfevZZ58tbrjhhuKmm24qHn300WKDDTao8j3ggAO6M4hI8cYbbxQ333xz8f3vf7/461//2pkDXwzde++9xXXXXVfcc8893nX9v//7v+LnP/958YMf/KD4/e9/31mOJTAEciNgPBvGs+D/hz/8obj++uuL2267rUBW5ApPPvlkKXOQIW3hj3/8Y1k2NNx9993F888/35a88xlfR1IX5N1f/vKX2vSk+elPf9qahhdjaUvB9M0336zk7y9+8YvijjvuqMYIHNH/+c9/1tap6ya7ZS677LJVXiGOgNB03333FT/84Q+Lxx57rAC/3OFf//pX8cQTTxQ/+9nPimuvvbYsz2e8aqKDHXCh9ze/+U00ZrF5hOIV2zYvv/xy8cADD1Q6Bf0OHJtCbDnkl9Kn6+gJoaXJseKFF14odx2lndGtQsIg9ZW//e1vxUMPPVTqULfeemvxyCOPFPSJrpCLpmeeeaaU6U3y76WXXirlH3ISWn3CWPHn2WefXcksnx0BfccdqTM8zi7fyEN2xxhUQN+/8cYbC3i2LYTwWYgj4KBlReg8I1RGtmFGXnfddVfZhk19vu39ume+/a7JEZBx+sEHHyzHMuZFbXJZlx/DZ7606nJCr8FY5oWxeklI34a+WDkaO4fVmDTxq08/rxuv0GXoB8yrGbNDQsy4EIudD12hsqQtz0HwrpQXw0vyro4nst2GeobypcbG5zqnrG/iSx862tL45BvDh1JmrEzKiR2yi/kV8rsuwC/Ml5iLMxeLDYPQP4WWlDaQPFJlToo8CJWdIfMk6heq/+bsX7HzZWmXkNiHX8kvVra9+OKLxZ133lnyAuN1iM0jtI3b6u1bT9oxRj8L7S9CaywPpeAqZXfFucb9rnLkeYo8kDyI0dk4vQknTuqgA7bIa665ppzn6Pv6+umnny5tmvTbrjDIdvDRkZvo85UhoXJRlxcrE3QeXXyZQwb42CZCcQhtm5zjQ6zM0LjbtSFgCBgChoAhYAgYAoZAPgTMEdDB8v77768WH3FsYwImRwMTn3baac4bzT+Z1K2//vrFXHPN1ZOHzo9r7Qj4oQ99qHjnO99Z+8eisoTPfe5zfWmWWGKJctH54osvLjbZZJNihhlmqMptW+hkkXLppZcupphiiio9dE055ZTlLogsuteFK664olh88cX7jlFeaKGFivPPP7/uFbtnCGRHwHj2LRXfdvEsRqCtttqqmGWWWap34PVpppmmdHqOdcZDNh166KGFOGSS55ZbbtnX1hi5zjzzzGLeeeftKZ/0/C288MLlgkTfiw03WHTH8W+bbbYp3v3ud1d5aoOYpNl6662LmWeeuUrDwogOsbSlYAptOBTgqDfVVFNVtAkeOiZtaPjSl75UTD311D35Tj/99D1jx5FHHtmXLWMfNLnv0r923HHH0lGy76XAGxg4d95552L22WfvoY86v/Wtby3HsDonL5zNTzjhhOITn/hE8a53vavYaKONSiebww47rHj/+9/fk9fcc89dOoe6pOXIQ+cZg1dM21xwwQXFxz/+8b4xF8ze+973Fpdeeqkmq7yOKSelT/cRoG6E0qIdK+BXcF5ttdWKySabrGrnySefvOR/FlzbwiD1ld/97nelrqTpEt6dbrrpin333beWtFCaMGSfeOKJPX0f/ezLX/5yT99/+9vfXqy33nrVxxw4u6277ro9Oh763t5779238CCEjhV/sqiFHiz4EdPGrl6MPPQdd6ROr776arH77rv3jX+U8YEPfKC46KKLJKl3DB04Vn7xi18s84BOdHDK2njjjat6vO1tb+tzBIrlMx9HwEHKCgz7ofOMGBlJOXxAxTgx33zzlX3gl7/8Zen4t+aaaxbTTjtthS9tSDocBmJCSL8jf9cRkIWZr3zlK33jGfO/Nuf7GD4LpTUUjxx6SUzfjpWjIXPYEH7lY5aQfq7HK+bgiy22WAHfa1nGvTPOOKO1SULHBTKLxa6VkP88jJElw+RdqUMML8m7Ek90u00MXwo2vnGMrA/hyxCH2pR8Y/hQMAqRSfIOcQx28BofNeyyyy494ySOfauvvno5vxcZxNz7wgsvLIvEaeGII47om4/PP//8pTO7povrYeqfUnZIGwxC5sTIA6GdOEZ2+s6TQvVf6InpX7nnyxof9zqFX2NlG7Yf2sm1H8Az2IzQKxmz9R90Sohp45R68m6M3cinv+TkoVRcBd+uOMe431WGPI+VB8wPTj/99GKdddYpbZVLLbVUmSUfv2q7JPYsnDSPPvroYrnllittYfTDNdZYQ0go46eeeqpM89GPfrRKg6yvC4NsB/qLj46cQ4b4ykWNQahMiOXLGBmg6QyxTfji4Ns2mo6Y8SGnzNC02LUhYAgYAoaAIWAIGAKGwGAQMEdAB9d99tmnNNrPOuus5ZeAfA3ItRjRVlhhBeeN+p/nnHNOj/GNRSwWBbfbbrti5ZVXLp3sJE/tCIjBYZlllqnKIw3OMSwqMNmVcPjhh5cOAZIHk8pdd9213JFP7um4zhGQL4RwoJF0OFVsu+22pYEBBz+5z0RTB3Y4+MIXvlA9xwGByeunPvWpngV6HDMsGAKDRsB4tptnaQMWJuaYY46Sb3G0WmmllUq5grOU8DpyCiNUaNByRPKqcwREJshzHN8+/elPF5tttlnpxCT32bHPN+AEI+/pWDsCslOQfibXriNgDG0pmGIYxJlN6GERmTbByYPdaHW7kEYbnn3xwTFzpplmqgyF5INjEvfk76tf/WqVHU4UjCP0D9LiaIFD0W677VZ88IMfrGjlPjsWxgZ2ppBxFSdUxg/KXWSRRaoyKB88dDjkkEN6npOGP+oi126MQ7x2Zs+Rh9CUgldo2+gxl/EZh0xwAz9dZ74s1yG0nJQ+rcutuw6lRTtWuE4/us5c03/qwqD1Ffh4ttlmq9qAR
dTPf/7zxSqrrFLxEXyuQwxNRx11VJWfrjuOvfq3vv7Yxz5WLgTre+71cccdp0krr8eSP3HGg5912+MIKPJKYuSh77hDpdiNW3+Us+KKKxbIfBwDRd4RszATEtB/XUxPOumkAr3Zvc/OtxJS+KzLEXDQsoJFHbdu/K6bZ8TKSJwUZpxxxr5ytMN/HQ16PiVY+8Qh/Y78tCMgdOj+6tKFQz1OFG6I5bNQWt1y237n0Eti+naMHI2Zw4bwK7LAbUt+1/VzMHX7AHKLj25Evui8DjzwwL5miBkXyCQGu77CG27EyJJh8y6kx/KSrvZEtttQzxi+1Ph0XcfKevIN4Us9jnbRFJNvLB9CS4xM4r1Y7NhNio8PtGzhuu6epOFDEfSetnnTe97znp5do4epf4JHaBsMQubEygPpkzGyk3d950kh+m9s/8o5XxZc2uIYfiW/FNnGR6LCG9jAjj322NJBtk3XBE9CbBvH1jNFP+vqL7l5KAXXtj6in+UY93V+bdex8uDkk0/u+yAEOwF6vPvx4Nprr13aMqQ/Suw6AmKTk2cSNzkCDrIdfOaCuWSIr1yUNoyRCTF8GSsDhM5Q24QvDj5tIzTEjg+5ZYbQY7EhYAgYAoaAIWAIGAKGwOAQMEdAhS1Of7Kgy04pEriWiRYxW4O3hVNPPbVKz64r7ODiBhw9JE934YqdbXDKk+d1C7XkhzGUNCyWYTQjoMxzXJ67k2HdwoWenLFAytEgEnAOkfKJmVBIYHcbefbZz3622nWG5+zOJc+Y4HZhJXlabAjEIGA868ezHDUgO4TiUMMxZRJef/310uFK+LbOgU/SNsUchYfc0XLNzQcZIrujIBu0wyGLGbJLYYgjIF8iUi5/7FYnddCOgDqNdprSjoAxtKVgSn0xOAu97L7nOgrw5bE8J45xBJT20otCbbsTadnOIpHeeYOvijfffPOKJsYovjCPCYwbUjccyHW45JJLqmek0e102WWXlU6JOOTL+8Q4KR588MHlTrR8dY0DlH7OIpaEHHlIXjnw8mkbjmfW9bn99tuFhAL+1R8PNBmDfcpJ6dMVQR4XPrSQjXaswJlm0003LXeD5Ovr/fbbr+c5/I9R0g26jQahr/Bxh7QNH1Lo8PWvf7185joCxtB05ZVX1vZ9nGfRIc8777zi+OOP79uRDFnL7mk8YycYdg9kIVhoZhcMN4wlfwot3/zmNysam44G9hl3yO+5557r2TGWL+p10HiwWBYS9thjj7JdZPwSXIlxutbOzeLAkMpnbY6Aw5AVIfMM3ddDxhTkGotd/OFQJbiKHGBexUIafV87CXMdcpyb29Y+/Y53XEdAPtg66KCDyjHolFNOKXA0FZqJ4Ts3xPKZ5ONLq6TvinPoJbF9O0aOxsxhQ/gV3cN3Pg22erz6zne+U+1OybjEMc7IA90nLr/88p4m0bwSMlbFYNdTcMOPWFkyFrybyksT3W4Ty5cNXaP2tu6/IbKezEL4UsbRWiKcmzH56nqE8CFFx8gk3tNlhmCH42HTOMlHdt/61rdK3VDP3UQGzTnnnAUfUiKbkFfMQ+UZ8Xe/+90KzWHqny4ePm2QW+akyoNY2VkBXhQ9zpx183Zf/dfFM6R/5Zwv67o1Xcfwa4psO/fcc6s+j+Of/mjQbUPsW8zp0Luwh7jPQ+blMfVM1c+6+ktOHkrBtalv1N1PHffr8qy7lyIP0CGR0Tj5afkqpzBxihI8yTPSPPzww6XeiT1B0ruOgJJGzwXrbD+DbgefuWBuGeJjP4qVCaF8mSID6GcptokuHHzaRvp6rP6RU2YILRYbAoaAIWAIGAKGgCFgCAwWAXMEVPhyjIZMulDuJeAMIfeJ999/f3nUF7/yyiuVQwtpOQazLmiHGdcRkPTsDChlsshfF9gRiDQcB+IGJgDyPrHrCEj99OLnzTff3JOFdgRkYU0W2Jl8ynt8OUV93cCXblI2k1QLhsCgEDCe/S+yTTxLii222KLiyWOOOea/L/3nShuLcBx5+umn+9L43MDBQnjfdQRk0VOeseDgBo4n53mII6DOQy/CakdAnUacISlHO5jF0JaCKY5pggWOOHXHmg7bERBHcJHt0OYuUoMji04coSm0s8ttTNBf2ruOgJSh2+mss87qK0I7YXBUrhtwmtRl0FZuSM0jF15dxjzoxulFMCfWCw48/9rXvlY9n2eeedyqlr99yknp07WFNtz0oYVXNU/feuutfblxjInGhd0BdBiGvsJRj0KDu5ucfJChHQFTaerqt8hPoYe47kMSnGZ1GhaYdNC8M1b8GeLk1DbuUC99RC87NbpBy726nbrc9HW/P/zhD/dgygK74MpCCHo+xxQRUvlML/6wwKTDsGQFZXbNM3LJyC45gLOD7s8ckxobfPtdFx+yUPyRj3ykogtnXDek8pkvrW65Tb9z6CWxfTtUjsbOYaXuIfza1c8lT91P63RQaNZOrXq3/ZRxIRQ7obcrziFLNCZ1Y3gu3k3hpYlut6GdY/myq4/I81yyPoQvpWyf2DffFD6MlUm5sOviNb0DPY4n7sdl6Cs42stYinOAG7rGvRz6Z0obQG8XDl0yJ4c8yCE7fedJXfpvjv7V1e4+c263L7X99uVX8kiRbehl0t/rbPF6bHU/EsrRxiH1zKGfgVdXfyFNKg+l4NrWL9xnKeO+m1fT7xzygLz1Rgn0OTZgEKd2HMKYlzNXlKB5znUElDTM76X/1jkCDqsdfHRkXZ9Yux319pGLKTKBMnz5MlUGpNgmfHCgLl1tk2N8oJxUmUEeFgwBQ8AQMAQMAUPAEDAEBo+AOQIqjDl+lwnVEkssoe7++1JPCpi8uQY0eQFDgkzKcJhoStflCHjXXXdV+ZCfu/MSOwCKs8T9998vxVdxl+Kvt4rnWGE33HbbbeUuYWxBztfCErbaaquKrr322ktu98TaiVEvdPQksh+GQAYEjGf/C2ITz7IoLkeTEYtT73/fLIpnn322SoO8wcEyJrQZGE877bRKdnAssOu0wwICRrDnn38+pugeI0TdIiyZisykjtoRMJS2FEz54lzToXdd0BUftiOg3jGCY6HYSbEuaGMei9ruToZ177j3rrrqqtLoyQ5KOKG6QRsvKc8NmoYmAynHs8pYXLejWGoeufDyMebBn0ceeWS54xSY6R16wQZekrpijKsLXeWk9Om68truddEi72rDog9P0690GIa+Isetgz96ndb54HVkmnaASKWpq9+yG5rGl0VPN7iLtU899VRPklHgzxAnp7Zxh7oJbxC7fYSK77TTTqWDM/oEOwnEBD1HqFtAlzxz8FmbI+AwZIXUpWuekUtGdsmBJ554oqeN63bPEZq7Yt9+18WHlKMX6ZhLuSGVz3xpdcut+51DL0np26FyNHYOK3X35VfSd/VzybOrn5JOy3/k0WOPPVa+ru+Hzq1DsRN6u+IcsqQLk1y8m8JLE91uk8KXXX1EnueS9SF8KWX7xL75pvBhrEzKhV0Xr+nxiB3d64J2FqQ+buga93LonyltAL1dOHTJnBzyIIfs1Hp8m07Tpv+CR47+1dXulNM1
53b7UttvX35NlW2zzjprpTuef/75fSRpXZsPO3TI0ca+9cyhnwntXf2FdKk8lIKr0OkTp4z7PvmTJoc8IB/tCAhvd31w7cNzWqbXOQIOqx18dGSf+vjIkC65mCoTaCtfvkyRAam2iS4cpH93tU2O8YGyUmWG0GuxIWAIGAKGgCFgCBgChsBgETBHwP/gyxdfosSuttpqxdVXX93zpx33MN7fcMMNtS2jj+g77LDDatNwU+fHJLMusF28LGCy+K8Dznk8W2655fTt6rpL8deTnLojq6qMnIvFF1+8ooldvfhC0v1jwUvoxqHEgiEwCASMZ/1QveKKKyp+5CgKl1/lt/As8cknn+yXuZOqzcCIcUaXwc4DODdxtECOIPKbMnychrQjYChtKZjqXczYiarpGMNhOwJytKG0D8a4psDXo5KOmGNacwUc3s8444xiwQUXrMpgtzs3+BgU9Ze6HBXshtQ8cuHla8xz6ef3a6+9Vuoiu+66a4UXfFUXuspJ6dN15bXd66JF3vXh6QUWWKCqO8eX6TAMfUV/eQ4/LL/88gUfcjSFVJp8+u3KK69cYVLnCMgOpJqH9THtTXRzf5j8GeLk1Dbu6MUXHOHRjQcRtE5d59wsZebgM7046e4IKOW4cU5ZIXl3zTNyyUgfOaB3BrnpppuExODYt9/58CELy8JndR+YNRHny2e+tDaVo+/n0EtS+naoHNX8FjKHlTrr99v4lfRd/Vzy9OmnHBMnfYL4uuuuK19PGRdCsRN6Y+MQWeKDSS7erauPDy9NdLtNCl/WYVp3L5esD+HLOjqa7vnmm8KHuowQmZQLuy5eu/HGGyvZ0+QICN0in+rmgT7jXqr+mdIGtH8XDqRpkzk55YHbH0Nkp+88qU3/pfwc/cun3bvm3C4Wbb81L7WNz6mybfbZZ6/6OzuyuUE7ydTZEdz0/A5pY9965tDPhNau/kK6VB4aBK5Cv0/sM+775EOaXPJAz0U5Oakr+PBclyPgsNrBR0f2qY+PDOmSi6kygXbx5cumNvSRAbo/xNgmunAQ2rraJsf4QFmpMkPotdgQMAQMAUPAEDAEDAFDYLAImCPgf/DF+UCMXz7xZpttVtsy008/fZXPhRdeWJuGmz6OgPqoz/e97309ea244oplOXXHJpKwTfFnxxp9/Me3v/3tnrybfrjv+eA0zTTTNGVn9w2BJASMZ/3g03LEh2dJc/TRR/tl7qTqMjBqhyWhBYM8xqy643Gd7Ft/+hgh9E582hGQjENoS8H02GOPrcYInL2bwjAdAdn9Tx+P6Tqeaxo5PkrajhgsUgK7pe2www7lMSk6X7mOdQTEmVXyqDPg+xglm/LIiZevMU8w5nhRHEA4WkW3mdQ11hEwpU8Lbb6xb519eJqjtaXu2hFwWPrKAw88ULz3ve+taBBaPvOZzxQcFadDDpp8+i39XeiocwSEDnlO3OYIOFb8GeLk1Dbu6H7N7giDCr4LB5oe3QZt1+547OsIOChZIRi2zTNyykgfOcBHR4LhqDgCstuv0NTlCEusG6AAACAASURBVBjDZyE8Im3WFOfQS1L6dooc9Z3D6rr78ivvtPVznadPP2WBWvoEMR/1pY4LIdhpekOuY2WJDya5eFfqE8pLE9luAyYpfCmYtsU5ZX0IX7bR5D7zyTeFD913fWVSTuy6eI1xUWRPkyOg3v0q1hEwRf90cRR622LXztiFA32jTebklAeUFSs7fedJbfpvrv7lM+9omi+7vOjz24dfySdVtm266aYVT7g7/v3rX//q6SdNH+2ntLFvPXPoZ4J7W3+RNKk8lAtXoccnDh33ffIkTS55oB2/huUIOKx28NGRc8mQLrmYKhNoc1++1H0oVM5rOmNsE104CG1tbZNrfKCsVJkh9FpsCBgChoAhYAgYAoaAITBYBMwR8D/4yhes7GbHV4F1f/qrMBRe94jNP/7xj5VBAaMVX982BR9HQLYNf9vb3lblKY4r4iAy44wzFhwRXBfaFH/3WIxzzjmnLou+eywWa2McX10988wzrX+xx6z1FW43DAEHAeNZB5CGn/qoBY7q7uJZnvM1Y0zwMTAef/zxPYY1kSnzzjtv33HBITT4GCHaHAEpy5e2FEy33377So7Sh5uCyHnBhwWS2NBlMHr88ccrmijvG9/4RmtRk002WZV+7733bk3b9JCv/BdZZJEqHxZmdt9993LcXGeddar7o+gImBOvrrYR/OBLjswSJ36+IMYZ8NRTTy13FZJ+EusImNKnhUbf2LfOPjzd5Ag4TH2F/rDSSitVfVbaAv1tl112qY4LzkGTjzE9ZSFW2nCs+TPEyalt3NHHzM0333xSveyx78JBDj7rcgQctKwQ8NrmGTllpI8caFvYF3p9Yt9+58OHPo6AKXzmS6tPvXPoJal921eOxs5hNQ6+/Mo7bf1c5+nTTzniT8YHYvpIjnHBFztNr891qizxwSQX78bw0kS329DGqXzZ1U9yyvoQvuyiSz/3yTeFD2NlUk7sunhtPDgCprSBtHcXDqRrkjk55UGq7PSdJ7Xpv7n6l4++MxaOgKmyDecs5tKMxdg1+C1B14e2eOyxx+RRFae2sY9corAc+pkQ3dZfJE0KD5FHKq5Ch08cM+775EuanPJgLBwBh9UOPjpyLhnSJRdTZQLt7suXpI2VAam2iS4cpI+3tU2u8YGyUmWG0GuxIWAIGAKGgCFgCBgChsBgETBHwKIoUITFEHDJJZc0In7NNdf0GPC/853v9KTF6U0b+DkWqin4OALyLgv9kicLygQcL7i38847N2XfunDBkaqSJ3HbEca6gJdeeqnnve9973v6sV0bAkNDwHi2+dhxtxFwrhJ+/9CHPuQ+zvrbx8BIgc8991zxxS9+sZh22mkr2qARw8Zvf/vbKJp8jBBdjoC+tKVgyu530h4ca9oUhukIiOOn0ETctiOge6woxr2QwFf27Kgr5fG19VFHHVWw06CEDTfcsHo+io6AOfHyMebdfvvtxcwzz1xhgqPX3XffLXAVDz74YPUs1hEwpU9XhHhe+NSZrHx4uskRcCz0FY5vYlcJ6dsSgy0hB00+xvQUR8BR4c8QJ6e2cWfPPfes2oMdNFMcqtu6t+/CQQ4+a3MEHIasEBzaFhhyykgfOdC0sC+0+sa+/c6HD9scAXPwmS+tPnXPoZfk6NvQ2iVHY+ewGgdffuWdtn6u8/Tpp+wSK+MCMeN4jnFB6OjCTtL5xDlkiQ8mqbybwksT3W5DO+fiy6Y+k1PWh/BlEz11933yTeHDWJmUE7suXhsPjoApbSDt3oUD6ZpkTi55kEN2+s6T2vTfXP3LR9/RjnPMQVKCD7+Sfw7ZdtJJJ1VjMh9vLbXUUsViiy1W3WPufcstt/RVJ0cb+9Yzh34mFWjrL5ImhYckj1hc5f2uOGXc78pbnueSB+Q3Fo6AlDvodqAMHx05lwzpkos5ZIIvX6bIgFTbRBcO0ofb2ibX+EBZOWSG0GyxIWAIGAKGgCFgCBgChsDgEDBHwKIoHeEwyM8000zlZKYJ7n/84x89R8Atv/zyfUm1Ivz
lL3+577nc8HUE5NggWTSYZZZZitdff73gKE3u3X///ZJdX9ym+JOYY0Ek3/XWW6/v/aYbeuLBJMaCITAWCOC8ajzrh7xeLMYRAie8QQUfA6Mu+89//nPxuc99rpJFtOmRRx6pk3hfa9mLcaYu+DgCyntttKVgimObyF7a49VXX5Uie+JhOgJSsIwr0Lb11lv30KJ/4Kgp9BPjIB8SLrzwwup9vsC/+eab+14fdUdACM6Flx5T6xYbKIsjpAXz1VZbrcAArkMOR8CUPq1p8bn2qTP5aJ6+8847a7NucgQksS5nmPrKZZddVqCvSZvh8PzGG2+U9KfS5GNMT3EEHBX+1P1x1VVXrW17udk27nA0srQD8b333iuvZY19Fw50vWLH4zZHwGHICgGua56RS0b6yIGmhX2h1TfW7dPW73z4sM0RMAef+dLqU/cceommJ7Zva1rb5GjsHFby9+VX0nf1c8nTp5/yAZ3IIz5AZE5PSB0XhAaJ27CTNF1xDlnig0kq76bykqZxItptcvNlXb/JJetD+LKOjqZ7vvmm8GGsTMqFne7HdfryeHAEpP1S2oD3u3AgTZvM0e/HyoMcslPj0DQ3pC5t+i/Pc/QvH31nLBwBc8m2n//85wUfyjI2TzHFFMVcc81VfoiPXtRko8nRxr5yKYd+Rl8gdPUX0mgeqJMlpGnjobKgoihicJV3u+LUcb8rf3musYiVB+SV4gj4qU99SsjpiY877rhKn1x99dV7nukfg2wHyvHRkXPJkC65mEMm+PJligxItU104SDt39U2OcYHytJ8kiIzhG6LDQFDwBAwBAwBQ8AQMAQGg4A5AhZFwa5MTP754q4r7LPPPtWki3ceeeSRnlfEkMAzFoKbju71dQTka51pppmmKlO2POeIz7bQpfgzYYRG/vgC8je/+U1bdtUzvcDMV5KUY8EQGDYCxrP+PKuN//D717/+9YE1V5uB8dBDDy1wnr7tttt6ysepaa211qrkETIuJmgnv6bdSnUaOWqdskJpS8FULwTTHsccc0xtdQflCNjkuIdDuIwJs846a4Hje13g2GBJx0L273//+7pkjfd23HHH6n2MbXVhPDgC5sJLG/Pq2ubZZ5+t8AJ3jMluCHUErCsnpU+79HT97qqzvO9jWGxzBBy0vvKHP/yhlGnbbLONkFzFv/jFL3rajSPPCKk0+RjTdRkYm93ArnjCw8RCG+lGhT+1MX+ZZZZxq9Dzu23cQc7rurIb6SCC78JBDj5rcgQclqwQ/LrmGblkpI8c8FmUFLrbYt9+58OHbY6AOfjMl9a2+sqzHHpJbN+OkaOxc1ipry+/kr6rn0uePv104403ruQRTv0StMwOmVvHYCdltsW5ZIkPJqm8m8pLE91uE8uXbf3DfZZL1ofwpUtD22/ffGP5kLJjZVIu7Lp4TfcDnBbrwgEHHFDJp7q5uM+4pzEM1T+hSb8fIgulPl04kK5N5qTKg1yy03ee1Kb/Utcc/cun3cfCEVD3afT8WPvW7373u2LGGWcs+/4DDzwgXakxztXGvnIph34mlenqL6RL5SEpKxRXec8nTh33fcogTao8kHJCHQG1kx801AWdps0RcJDtAF0+OnIuGdIlF3PIBB++TJUBqbaJLhykv3S1TY7xgbJyyQz5aFbot9gQMAQMAUPAEDAEDAFDIC8Ck7wjIDtHySJh0y5SGnK94M57X/nKV/TjQk/KeM5xvu7uPbywwQYbVOVieGsLW2yxRZVWaD3zzDPbXumclHF8kORF/IlPfKLxq8eXX365qgO7DOj3cJ6sqx/E4SDSVbfWSthDQ6AGAePZfzvw+vIsEC666KIV3/L1X9OuSPDyeeedV9xwww01yHffajMwbrrppiUNLF67AUc8kSsHH3yw+9jr9yKLLFLl0bSjXZMjYAxtsZhilNG7hXHNuOIGjNGCCXHKkZbaYFS3MEPZN954Y095V155pUtSScPiiy9epdtyyy370nTdYEyUemF456hhN3zyk5+s0mA8dEMOg2JqHrnw6mqb559/vsDhUjCrw+OOO+6onmOMqwtd5fBObJ+uK6/tng8tvO9jWGxzBBy0vvLoo4+WuFMfN2DMnGqqqcrn7I4l/TyVJp9+qxdS6/i9zRFwVPhTOzkhI9vkX9u4Q7vgSCj8wy6kV199tdtc1W+OposJPgsHkm8qnzU5Ag5TVlCXrgWGXDLSRw60LewL7j6xb7/z4cM2R8AcfOZLq0+9c+klMX07Ro7GzmEFixB+7ernkmdXP/3xj39cyaEpp5yyoN4SYseFGOykzLY4lyzpwgQaUnk3lZcmut0GjGP4sq1/uM9yyfoQvnRpaPvtm28sH1J2rEzKhV0Xr2kHiVF2BExpA9qhCwfStMmcVHmQS3b6zpO69N8c/ctH3xkLR0DaMods22uvvaqx+YILLiiwM+Lo0xRytbGvXMqln1Gfrv5CmlQeEtxCcZX3fOLUcd+nDNKkygMpJ9QR8NJLL636JPNWnPncoGlrcwQcZDtAk4+OnEuG+MjFVJngw5c5ZECKbcIHB5+2yTE+UE6qzHjooYeKlVZaqZh88smLz3zmMwVrjxYMAUPAEDAEDAFDwBAwBPIjMMk7Am6//fblRGvuuef2RneppZaqJmdzzDFHdeQbGaC4TjfddNVzFh/XWWedcuGRXfdwrlh//fV7FvaZoLUFnHJkEZMY54mmnQYlnxdffLHnHb3gQBocflC4db5MnK699triL3/5S7kbFLsd4pSD88xPf/rTMmsWY5deeume9zjaE0cEFrr5w4CCoYFJAQvhFgyBnAgYz/7bERDe9eFZsEfuaGcijkE/99xzi6effrpsGnYVwfi57LLLlrx91FFHRTXZrrvuWskGnOt0EGe7L3zhC/p26eAhOzxSJxZLYwJyVeQZRjNkF4YFjlCnrhytIc+Jb7311qqYGNpSMD3wwAN7aEGms6j/q1/9qjxK5eijjy5mm222njRNx9NUlWi5mG+++aq8Fl544VLGS3LkvQTGKsGInRjcHW932mmn6jnjwp/+9Cd51Tt2v2zXX/CzUKV31YCWI444oi9v2lboXHnllfuecwMHKEmzwgor9KXJkUcOvHzaRn+RDq8I7hgi4VVtEMTBoM4536eclD7dB3DLDR9aMCxTF2lD+kZdmHfeeas0GM51GLS+Ik4Y0CiyVMpH5gjt6FoSUmny6bfauMyRUm5AlghtxBz3LWFU+POcc87pofHUU08VEguc9XQfbxt3eOn6668vGBOkzuyCfdBBBxU4W9MeyMDrrruuQJbgYB8TNI+ecsoprVmk8hljqNSFXTF00HQMUlZQZtc8gzSpMtKVAyxa1AV9LGPshwzk69vvNB/ieFsXzj777Kqd+FBBhxx85kurLrftOodeEtO3Y+Ro7BxW6q/5pItfffo5+TYthPH+fvvtVzmGw7ssiuoQOy7EYKfLbbvWGMXIkmHxbiovTXS7DW0cw5dtfaPuWaqsJ0/d57r4so6Gpnu++cbyIeWmyKRU7Hx4TTsqTj/99D22S8FNO4xsvvnmcruK9bjXNPdK0T
8pKKUNfHCgjDZ9IYc80P0tRnZCo888iXRd+i9pUvuXT7t3zbmhwzdo/LrkQA7Zht4vOrWOsYNgW+KjDveUBE1jbBvrPLrqmUM/A/+u/pKDh6SdY3CVd7vi1HG/K395nkMekJeml7Uj5FxbwHap+yL2XuY2jz/+eGknRafUDs0f//jHG7MbZDtQqI+OnEuG+MjFVJngy5c6XYwMSLFN+ODg2zap40MOmeFuevLVr361sT/bA0PAEDAEDAFDwBAwBAyBeAQmWUfAs846q8fIwiLhcsstV2y11VaNaHIkIgs++qheJmnsVsJRvxJQ7GUnGD2Jk2t2h5FrYn6zsHfzzTdLFj0xk8U555yzemfnnXfuea5/4BjAV3JLLrlklZ4yPvKRjxQ4cegjD1j81F9NCU04DE0xxRQ97+vdEp977rniAx/4QM9z3qXObr3XXHNNTZ5dGwLRCBjPFqXDQgzPAvoJJ5zQx7PwrRyHIvyP4ROH4JBw/vnnF+zCp+UGcpKjheToW3G2o5wVV1yx3C0UZwztWLzddtuFFNuTVu8UKXXRsd4xkPvgiEx86qmniljaYjHFaLLGGmvUtofQPO200/Y85wvVfffdt6fOvj/WXnvtnrzYFZI2mH322XscX3AwZxwUGnAwow3Z2ZUxRO7jpPiTn/zEt/iedDjx6EUY8sQoKveI9fjCMcX0Cwx7GFK5nnrqqStaeJ8ddi+88MKyHMY4dszSYyZfmNLGOBPlyEMqlAMvn7Y57LDDeurLODv//PNXzk0syEnbENO3dtttt/IrbaHVpxzSxvZpKccnbqNl1VVXLWnX/ZA6YfRE9xHHVRadtPMvaTDE0vb33XdfRcYg9RXthIEcZYdkjN0stIguxIchfASiQwxNPv0W58N11123py8gR6BLMNl///17ZC64ffSjHy2xRRaONX8KTnfffXdPPaCTj3DQbXEQ5eMUn3FH8jvttNP68iNPVx/HYTwk0N58DINzIfnxh9xETrV95BPDZ4cffng5R9FzEPoXY6847w9DVoTMM2JlJO/BR64cmGeeeXrkAE7zehED/BnrZWwPaUvSdvU7HGvrxqCNNtqoGoMee+yxUg5pJ2XmVaQRx9wUPpM6ddEqH3BJ+q44l14S2rdj5WjMHDaEX0P6OdhqR8APfvCDpaxCnxa5QLzgggs26k4x40Isdl19geexsoR2GSbv5uCliW63oT1D+dKnj+g0sbKePEL4UpfZdR2TbwwfCh0xMol3Y7HzGSfZ2QzdWc+JkEV8oCLjEfNn7JBaH0J3ZP7nO2/KoX8KjqFt4IMDefvqC6nyIFZ2MgZLaJsn4dgTov/G9i+feUfXnFvq4xPH8Cv5psg2xnnsIXqcrrtm93m9K1tKG8fUM1U/6+ovuXkoFleffkKaHOO+b1kp8oCNEpC/Cy20UE8fg4e53/bh0mqrrdbzju6X2LWwecg97K+bbbZZKa91vQbZDj46cm4Z0iUXpe4xMiGUL1NkgNAZa5vowsGnbYSG2PEhp8zADiV9mfjII48U8iw2BAwBQ8AQMAQMAUPAEMiIwCTrCKi3xteKJ04HTUHvwqHf4ZrFKR2Y2C2xxBI9u3AxSWORkWMgOVLRzePEE0/UWfRcf/nLX67Sy4JuT4L//OA4Xjdf/ZtdT3RgZxiO5eTrYJ2Oa5wtWOD8+c9/rl8pr3kPJx591Ka8j/GQxei77rqr7z27YQjEImA8+2/kYnmWt1kYZnc0vTsgfMtvDErszvbaa68FN9E222zTJz9EHuBARsAAL85e8kxivmo9/vjjq+Mzgwn4zwvIUL0YS/7kjUGBozrr5BXHJKfQFospRt1tt922zxETJzt2i2E3PsFHYhz4YsI999xTMLZJPhK/733v6zMaghMGNH18saRnnNhkk00KDEwpgbppAyb5k/c+++xTHsdz5pln9tG6xx57lM+FFjeW46BxYnWfye/11lsvSx667ql4+bYNBkqph8TLL798wXEzBHY1lvsS8yW7BN9ySB/bp6WsrriNFr2rhNRDxzirEdwdjXWaSy65pIeEQekrf/7zn0vnNO0EpunAUNukr4XSBG/ovPW19H0+ZNH39bXslljH15KOdiGMJX/qhsOBUWiTGCdA+JgdGH3GHZ3fbbfdVu76J3npGAcddmWlXUICztQ6H33NB0ZtIZTP9A4AuhyuWWSSMGhZETrPiJGR7g4Tbn1FDsguxu5zfjO2x4S2fqePZXbLFD7ESd59Jr/ZkVdCLJ/J+8RttLq7lOr3mq5z6SUhfTtVjobMYUP4NbSf6515aW/0apxvPvnJT5Yf6H3nO9/pcc6va4PQcSEFu7ry3XsxsuSJJ55o7P/gMgjezcFLE91uQ9uG8KXbF3x+x8h68g3hSx86JE1svqF8KOUR826ITJJ3Y7DzGSfb+FF2qj3ppJMaeRZ73jD1T8EjpA18cCDfEH0hVR7EyM6QOVuo/hvTv3zavWvOLe3pE8fyK3nHyDZ2mpdxmznAeeedVzpYYt/meErXNk4aHWLbOLaeKfpZV3/JyUOpuGqM265zjPtt+etnsfKgy6aBM1lTePLJJ/ucVNErWUtifqOPBpY5BvNjCYNuBx8dObcMabMf6ZMDwCBUJsTwZawMkDYijrFNdOHg0zaahpjxIafMOOOMMyodhA9r6fsWDAFDwBAwBAwBQ8AQMATyIzDJOgLmh7I+RxZi2KmI46z4cmZUw9/+9rfyCM3LLrus3HmJHS26tq2nLhyFwrFyV111VbnTEu/pI9tGtb5GlyHQhMBE51nq/corr5THeXPcCQYIfg8jIFPYHYvjf/k6maOKHn744WQHQE37Cy+8UMpb6saXuBg3fEIqbbGYvvnmm+VusIwT+ohOH5pD0kAfuGPoBveuspDjHEFCWhyrMLjmlO0YtO+8887yyB2czRmDxnNIwcu3bTDssTPi1VdfXXAdGnzLkXxj+7S83xaH0tKWl+8z2mgQ+grGUD58uOKKK8oj1pE77MLiEwZFk0/ZbWlGhT/5cAYnxosvvrgcq3Lo0c8880w1RrATjl58bcNkUM8GwWfDlhU+2KTISJ/8c6YZRL+roy8Hnw2C1lx6SUjfTpGjsXPYujYZhXuh40IKdl31TZUlXfnnep6Dl6DF5oDpLTKeZH1bbUP5UOcVK5MmCnYai5TrlDZIKVfeTZEHqbJzEPOkid6/fHUObE+yAyYfjNZ94Mi46n784e7wntrG0s9C4lz6WUiZvmlz4epbXq5x37e8FHngW4ZOh22SXTeZB7PWQn/zCcNuBx+acqUJlYu+MiGWvlwyINQ2EYqDT/3Gcnygz15++eXF66+/7kOqpTEEDAFDwBAwBAwBQ8AQiEDAHAEjQLNXDAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMAUNgtBHgFArZRY3TGZrC3//+92K66aar0v76179uSmr3i6I83cNwHfuuYP177NvAKDAEDAFDwBAwBAwBQ8AQMARGDQFzBBy1FjF6DAFDwBAwBAwBQ8AQMAQMAUPAEDAEDAFDwBAwBAwBQ8AQMAQMA
K/a2F8S996UtumWWWiTn49Ydd+4qAZXg8/vjjbpdddmnhbH7ON9987vDDD08VNZSf6lQp/indFl544TgOt99+e+yvudGxgQsttFDsRvnAN2Xr0rLuLaw65UhxUfnVhyJzzz13HBdj6tuh7Y3JJVsKmt/61rfcmmuuGfu92267+U4iBc66/dwWDwN/qJ5XHaxy6sdT14svvri79NJLA31Kd9ap9JQy+09+8hO30UYbuQUXXNBtv/320SK32rWPfOQjLXFbdtllozo7KXGVcULZfNdE38iXO7RM++/ouqzcyffTfv/yl78c0l5Y+/G1r32t5ZULL7wwdqujbH2jNLP3fPv1118PKktl0zGpCChlA+3YlOyjqT4v+oBMbaDqkeR4c6655nJjx46NlKH9uFo9WreuDe0DVam/qtQRdfN5Xl4K7av5nIuuy9RTvdQHLFP+Q9ohKa8q76vut/ZglVVWcVtttdWQvy9+8Yst7vRelikjZ5YfWfebGFNV6bMl5aniR2i/L7ROC0njpNxl6yy9X7d8+zLUSb8mFAFD06CJPkZa+dLYxS9fyke+qZI+quc1Lj3ssMOisZHaUc1/qBx+9atfjcv2bLPNFn0AIgb6sOrAAw9smWeVYt+mm27a8qGg5kUmTpwYiahxohQZk3MlK664YqRE78fDv67SpoSWgdD09OWpwrjJMuDLwjUEIAABCEAAAhCAAAQgAAEIQAAC/UcARcACRUBNMn3oQx+KJ61sYtpfnLZ7vu0vsCvbaAF2gQUWcFK0MXdaMNc9+zvxxBP7L4cRIwi0gUCeImBTZVZKPSqbviKPlBWsvJodurCpSXwr+76dpgjoK8VJMeS//uu/3M4779yy6KUdsMoafU2/2GKLRXJI0UvKCgcddJBbbbXVWmQbPXp0Wa+HuC+rtKfdDn0uWixImnb42YQiYGhe0a5sW2yxRaQoYnGdc8453dZbb+1OP/30OLraYVZHedliqNoKLXpqUUQLKvZuqJ1sjywgKRSa8qDaJqW7FFWlqGJ+S/ldO3uUMVI0XWeddWI/5JeOC9xpp53cyy+/HHuleFo4srVwZEblWO4/+tGPRm4k38Ybb+ykwGHGL5tJhXzfX10rn/umTHnUe0o7X7HuM5/5TKS8KoVMa9dla3dN39SR0fcn9Pq8885rWTRT+qks7b333m6DDTZwUggxNn6+COWhhc8lllgi9kMLbv/93//tPve5z8UcpAyVNGX4aUdHk9G3fUVA7bziP7PrpCJg2bq0rHvFs045Ek/xMvmVX1QO99tvv6gO8Mui3IS2Nz5/Ka6Y/2b7ioBNtZl+mCHXe+21VyyXFIKlqKQ2yd8dVvJKMbqq6UR6ascq4+rb6iP4v/1rfQygfO6bsuOEsvmu6XQuU6b9eJaV238371pKcmpf/TZS9d0222zjfvGLX7S8qj6VpccXvvCFlmdSuFebPMccc0RulltuuWgnVX3gVVSW5FHZdPQVASWT32aYjGZLwU87KCaNZFMbZ22R2kPF++CDD47bUPmh+9pl1ExTdW1oH6hs/VWljmgin1teklKvsS/TV1MeL2NC6yntwuvnb5OtyPbbel+uOmWxbPkPKTtpboriZs+zFAHLyunzKbpuYkxVtc/my1bVj9B+X2idlpZ+fl/Dl7lqndVE+TY56qZfE4qAIWnQVB8jLX2s/Jht47Cq6SO2NnY1P2VrjLveeuvF9ak9U3uUVqel3bN3VBdr7JXXx1p00UWd6sukqdKmyI/QMhCSniZTVcZNlgGTBRsCEIAABCAAAQhAAAIQgAAEIACB/iWAImCBIuD06dOjhRQtpvg7lmgxRkoKp512WrQLkCbZfUUEXacdxeRPbBXt7NC/2Y6YQaAegTxFwKbL7I9//ON44rrO0cCa8NUxbddcc03snya1k4qAmkTWV/J6pp3NfCUsTWrbbmRVFAG33XbbOGwpVfnmkksuiZ8p7KRCje825Lqs0p4mtm2SX7YUyZKmHX42oQhocobmlR/96EdxXLWQkdytzvzTjrNi8c1vfjO6pWOG1BYl2yMteEgJQLvYKB2lROTviic/rrjiCvM2srUzn+1MqfZKR5GaURmSMpKlR9ZCnrlPs3Xktb8rkRYqk2bcuHFxGApLu0ckjTFNyw++woS1ydqVS7s9HHnkkS0KFVIIUR4zE1oe5X7q1KktO9NpZwjfaFHKWGmhzTd1ZPT9Cbk+88wzYzmkxKL+SdJIwdRk9ZUDQnlIodDeHzNmTIv3/7+9O4G/b6r3P+5q4F403FyhJCE/IVTKrLplKBmuoSRDpsxKUXSFlJ8uoluI+/OTMs9KwlVShqiEfooMGTJLxrqp2/4/3rv72f/P2d89rL3X+p7v+X6/r/V4/H77DPusvfZzDXt/z/6ctY8++uj8vXIgYFc/zXShsVL/fOCiDwT06/jAMT9udR1Lu66vnY/pRxrPFahpnprJqxzko9lZ7H0tuwbSqIy6vagsfd37Pp36mDnQKGqeaGYav1/XXXddsabK4wOJNSNMnzSs+jz//PPzMXnNNdcc2CcFMR9yyCH5uHzyySdn73znOwfeP+KIIyp3K+TvhD7tLmU9d+3TtqN9ym2fDV0efPDBhbP6Vzk98cQTRZCf2qDGSh+grvXVz2ymUc3WY6mtL9l6WobUo9YrBwJqxmftg47nmq1QQee+r9j5gN+WZmG1dXROMWfOnOJtzTanGVztfQW3aeYuJT+Oxoy1tjE7XmtbMefLyq/vGJGynfc9VzOPkGWXcWpUzgH79P+QvqPZhvUDET9W6nxWr1X9W2ONNYp2XRUI2KecIXVm68T+TRVzzmZliMkj9LzPttU2poXUseXVd8xK2b9j6y9FIGBIHaQ6xwjpX5q1T6lv/eizmoFXf6fa9xV27NFSP2zzPz7UeX3V37X6m04/gPzqV7+anXbaaQPHMMtvscUWy/bbb79Mwfua/V7n0Paelv6HYypX32NKDvJ//7X1gZD6tPz6GqfsA1YWlggggAACCCCAAAIIIIAAAgggMHUFCARsCQT0Ve8v6CsQqZz0ZZX/AkpfuJVT2xdI5fV5jgACYwWaAgH92in6bMoLmyqbviT240Q5EPCCCy4o3teX2uW0yiqr5O/3CQT0M5mWAwF1gdECw1S+8gw65XK0Pe8atKf8fPl0Eb6cxiPPiQgE1Cw8/qK73ebI768uDiuYT0Gh9957r38rf+zbtg9+shV1GyYfvL766qvbW/ly2223LdrZUUcdNfCenug2xtZOVY4HH3xwzDptL/hZl3T7YZ8UnGCzEdp2lllmGb9K/lhBQHp/5syZY97zBlXHZAVwWN5aavaPcmrrj1rf38pKM9+Vkw8E9Lcu1nopyljeXtXzZ555ZuCim24tXpV8MJgPBLR12zze/OY3F6bl2Q8tyLkcCJjKzwcCWnm19OOW7wtdx9Ku62vbMf1IgWDWPnX71IzkUgAAIABJREFU2KqA4BSBgGalAFbbng8EtPe1bGuvIee5Pr+6xwpStbJo6QMB9ZkvfelLxfuaka1PGnZ9+oAuHVfKScFl/hintlOVQv
5OiGl32mZsPfft07HlrvIqv1aeabZ8/PSBM9YGFTzuk8YRvadzjqp+GdKXQupR22xrNzpWrrrqqkV/0A8EfLrjjjsyfwxSuy8nnd/pFpC2v6q/cvJtoutYa3mlPF9OMUb4fao6R2gbz1Kcq5lN3bLPOKW8/L75455tZzzPAfv2f5UtpO/4AC3d+rMuKVDI2nRVIGBMOeu26V/343nXv6lSnLOlyEP703beZ/scOqa11fF4jFl9+ndM/ckkRSCg2YbUQduxIvQco61/paqft73tbUX/VD9V0J7N0qfgRv0N8uSTTxrBwJhWVZ9+9mwFvJd/GKO8/Q/QFGjnU4pjSmgfaKvPVMb+OFBl1naM8z48RgABBBBAAAEEEEAAAQQQQACBqSlAIGDPQMCqiyS62GRfSGtZNeNf6BdIU7O5sVcIpBHQl52+r/nZT/wW/BekfftsygubKlvbl8OzZs0q9k23BS4HLykAUF+ga1abrunb3/52PkORZptRoFc52Qx0stUFh5jUJ2hPM+FYvSqITbPV+DQeeU5EIKD2yV+grAoa0UxSstAtgatSW9vWZ3zQhfKyfqIgdbuFoJZ+pjzb1mOPPVaso8/qtsRdk7/ooqA/n3R8VL669am/vdPtt99erPbcc88VAZMKiCqnEAMfJKb2X05t/fH+++8v2qTKW5XHHnvskQdZ6LbKmvHKpxRl9PnVPdYFNZVP/xTwUb5AZp+LDQT0wZvKy29HQRMam/zFqGH7+YCIrmNp1/Vj+pGsfNssz1xi9TWRgYB9j5lW9qalxhcF9+pYpH8KfPZJxz1rz+pDfdIw61Pl8xfpN9hgg8oi6zbatl91M6a1/Z0Q0+6sUG3jUtPfM337dIpyW/mblgqc0yy55qx24JPc9d7KK69crFMOjPv3f//3/D2NcVWpLdBFn2mrR8s3pN0cc8wxRVl1i0Kf/Gx/2u/yeZOt67ej86vy7KNtbUL5+DHLj7W2jZTnyynGiLZ9amrntk+x52qWT92y6zhl+bTtm9Ybj3PAvv3fyh3Sd9oClSyvpkDA2HLaNpqWMX9TpThnS5GH9q/tPNgMQse0tjoe1pjV1r9j6k8mExkIGHOO0da/UtWPDwQsB+VZm/LLtjHNHwc1s21V8sGCO+yww8AqKY4poX2grU+lMm4za+sDA0A8QQABBBBAAAEEEEAAAQQQQACBKSlAIGDCQEC1EP/r4h/+8IdjGk3oF0hjPsgLCCBQCKQKBFSGbX025YVNba/ty2FdLLcL2Frq1+0KmNCtYMYr6TZxs2fPzjQjm21bMzLFpD5Be/42mfp8OY1HnhMVCHjllVcW1grGu/vuu4vd1W3lLeDqkksuKV73D9q+/Ne65ZmRzj777DwL3SbY6lm3RdTtbKv+2TpannDCCX7zQY8126XPwwfz7b777vl7upDpL1b7dmflnDFjRuX2QgyWXnrpogwXX3zxmHza+qP8bR9UT1q/S0pRxpDt+VupHnbYYbUfiQ0E9HUlF92aT+2sLg3Dry44petY2nV9a59y6NqP/GyVms1Lfb4qjVogoMrYdsys2o/Q1xT8q7FRtzq3fqdjYJ80zPpU+XygVd1Feh8crdtfVqW2vxNi2p1tL2Rcqqvnvn06Rbmt/G1LzUBr7ccH8z388MP5LLva/1tuuaVYR2OIZs2ztNJKK+XvlWcKtPfbAl20Xls9Wl4h7Ua3Cbb9UQCjT/4HFAo0rUuagcjy0LK8byFtom6stW2mPl+2fP2yyxgRsk917dy2GXuuZvnULbuOU5ZPyL6Nxzlg3/5v5Q7pO22BSpZXUyBgbDltG32WIX9TpThnS5GH9q/tPNgMQse0tjoe5pjV1r9t3/wypP60/igGAoacY7T1r1T14wMBq3586M31uG1Mu+qqq4pjWF0g4AEHHFCs03Q89NvuckwJ7QNtfSqVcZuZ9rNPH/A+PEYAAQQQQAABBBBAAAEEEEAAgcktQCBg4kBAPwsFgYCTu3NQ+tEVSBkI2NZnU1/YbPtyWOo+CMIu3OqLXP0avuo2dX1qSoa77rpr9rrXva740ty2paUPyOqTf9egPc1i429lqwsI5TQeeU5UIKBmLVpiiSUKe128sKRZKlQHuiWmn3HN3tcy5Mt/meq2vlavVqeHH3548Zq917Y88sgj/eaDH/uLHbpFkZKCnhZaaKG8DAp0uvDCC4vy+FsY77TTTvnruqhYlUIMdMtV27c+gYDeauGFF64qRuNrKcrYuIH/e9P3japbTVsesYGAmlVykUUWKUzNVjNX3nrrrbaZYjkMv6bglK5jaZf1/b6ZQ9vS+tGXv/zlwrAu0FWIoxgI2HbMLCo/8IFuDafjrMZif4tTs+wbCKjND6s+ta2QgC4FVNt+9Q0EjGl3ViUh41JdPfvtdxkT/efMoG1p/cXKHbo899xzC2edO+l4q2TncxYcuOKKKxbrXXHFFfk6milP5VJbrJotVyu1BbpondCAgZB2oxlDzcoHAuoY7/uMZtesS7ptouWhperDp5A20TTWKi/zVf51M176bYY+7jtGhOxTXTu3ssWeq1k+Tcsu45TlE7Jv43EO6Ptxl/5v5Q7pO22BSpZXUyBgbDltG6HLrn9TpThnS5GH9i/k71KtFzqmNdXxsMestv5t9du1/vS5UQwEDDnHaOpfKesndSCgvle1Y1hdIKCfJbMpELDvMSW0DzT1qZTGIceB0D5gfYElAggggAACCCCAAAIIIIAAAghMLQECAQkEnFotmr2ZFgJTPRBQlXjsscdm/iKPffmt4LHy7YK7VLp+lb/88ssXX6brC+KPf/zjmX5pv/HGGxevW9BYl7z9ur7sRxxxhH+r8vGdd95ZbFv7qtusltN45DlRgYDaN83cZvWq4CpdHFBSUJVeL1+o9x4hX/5rfX/R4tBDD82z8LenXG211bKHHnqo9Z9mTOiT9t9//2If1b6UFHCh/bOAhj/+8Y9FYOPcc8+dX1zTxfdFF100X6/qFoTKJ8QgNhDQzyylGSu7phRlbNumZrmydqSl+nJdig0EVL6a6XHttdce2Ka2q7rba6+9BoJXh+HXFpzSdSwNXT+mH33sYx8r/NZZZ5266prSgYAad9Q+FOin9qMZNzUen3TSSdlll11W+MQEAgp2GPWp7YQEdIVcpPdjtm6hXk4x7c7yChmX6i4e9+3TKcpt5W9bPvXUU/ksnTYu/uIXv8g/okBzvWZB4YccckjRznSLd6Wvf/3r+Wvrr79+7WaaAl3sQ231aOuFtJu6QMDyrLtf+cpXLNvKpf9hwKc+9amBdULaRNtYmzoQMHaMCNmnunbucWLO1Xw+TY9DxynLI2TftK5vhynOAfv2fyt3SN9pClSyfLRsCgSMLaffTtPjPn9TpThnS5GH7VdT0JKto6VvS1XHJlu3qY6HPWa19e8+9Wf7ORUDAVPWzygGAsYeU0L7QFOfSmkcchxo6wPWnlkigAACCCCAAAIIIIAAAggggMDUFCAQkEDAqdmy2aspLTAdAgFVgY8//nimQKr555+/uFiti9j6ItrfSjakshVY5S+aKahOAXqaJ
cbSFltsUWxn2IGAum2tXbTXsip4sGsgYEieExkIqJmH/IV53TrxvvvuywOqNMuPLjLVpZAv//XZBRZYoHA9+eST8+wU+GnWmhFpPNPVV19dbEtBBJoNcIcddshf87M9bbrppsV6s2bNyoNdVUYFA9psTuVyhhjEBgLuu+++RblUJ3UzNJbLZs9TlNHyqls+8sgjRRllpttI1qUUgYCWt25vu+qqqw5sW9tX+7I0DL+24BSVpetYGrJ+TD/SbKzWB3X76ro0VWcEvO6667IFF1ywMNDseDfddFPBcNtttxXvxQYChtZ/TH1qGyEBXSkCAWPLqbKGjEt1F4/79ukU5S4aSMCDd73rXUUb0rFGx1YFm+r8yWZW9rcHXmyxxfJcN9xww/xzCgisS02BLvaZ0ICBkHZTFwioAH0bR7RsmhFQ++zX1XZ9CmkTbWNtykDAFGNEyD7VtXNvE3Ou5vNpexxy3LE8QvZN66Y+B+zb/63cIX0nRSBgbDmtvHXLmL+pUpyzpcjD9q0paMnW0TJ0TGuq42GPWXX9O6b+zGQqBgKmrJ9RCwRMcUwJ7QNNfSqlcchxoK4PWDtmiQACCCCAAAIIIIAAAggggAACU1uAQEACAad2C2fvpqTAdAkEtMrTxYatttpq4CLuzJkz7e2gpW4XaheBFXymAK1ymqhAwBdeeCFbZpllivLplj9/+MMfysUbmCGxKlDQfyA0z4kMBFR5bfY/1Y0e222NPvShD/ndGfM45Mv/xx57rDBV/roIouQv1iu4TRefxytplsNXvOIVRTl0a6dXvvKVebCjLq5b+uY3v1ms84EPfCCzmZp22203W2XMMsQgNhBQtzO2fqOlAke6pBRlDNme346/zXT5sykDAS3v888/v7jVs4wUuKz+p5TSz9qvbdeWbcEptp6WXcfSpvVj+pECra1dqQ8+++yzvpjF46kaCKjbIdv+r7feemOCfVMHAhroeNWn8g8J6EoRCBjT7szBjxc33HCDvTywrLt43LdPpyj3QAFbnij4z9rYuuuuW9TPLrvsMvBJzfRq611zzTV5kKQCBjXjVl1qCnSxz4QGDIS0m7pAQG1Ltz628ivIvi7pxyO2npbf/e53B1b1baLvWOvrOPbWwCnGCL9PXdv5AE7EuVo5n5DnTeOUfT5k38bjHLBv/7dyh/SdFIGAseW08tYtY/+m8vXX95wtRR7av6agJb//oWNaWx2Px5jVtX/H1p9c1E/9mFr1d6v3a3ocUgchx4qQc4y2/pWqfkYtEDDFMSW0D7TVZypjPwZ07QNN7ZH3EEAAAQQQQAABBBBAAAEEEEBg6ggQCDiBgYDlizBTp1mxJwiMr8BEBQK+/e1vj96xti+HP//5z2drrLFGdu211w5sS7MXKEDKLjroNntd0u677158Vl/OV6WJCgQ86qijirJp/3QBryp1mREwNM/xCgQMbSsXXnjhwL7bDIFVgZreJOTLf38hVPnqdolKCsazdqTl0Ucf7bNO/njLLbcstrfKKqvkjzUDmE9PPvlk9uIXvzh/TzOAvelNb8ofX3755X61gcchBrGBgLotsbfSrJpdUooyhmxPMztaORdaaKFMt1uuSjGBgArc1Ni04447jslatyu37Wup2beUYv18kJ/6SlXy6/jbSHcdS7uuH9OPyv1e41VVmoqBgOXglEsuuWTMrqcIBBxmfWoHUl2k9xeaq/5OiGl3Bh0yLtUFAvbt0ynKbeUPWf7qV78qxiQdU5Zbbrn8ucrh0yc/+cliPQtcWG211fwqYx63BbroA231aJmGtJumQEA/m+7CCy+cz7prefulbhtsY7QCHR944AH/dubH0a5jrWXkAwFDz4Hss36ZaoyIaee+PHpcHrNDz9XK+ZSfdx2n7PMh+zYe54B9+7+VO6TvtAUqWV5+lvPPfvaz9nK+jC3nQGYVT2L/pkpxzpYiD+1a29+ltvuhY1pbHacas0L6QN1xLLb+ZDJVAwFT1Y8dT3Xc0S2Y21JbffpzCP1gsCrZj+m0Tf89SapjSmgfaOtTqYzbzGRU1we8n/14y7/GYwQQQAABBBBAAAEEEEAAAQQQmBoCBAJOYCBgXbDL1Gha7AUC4ycwUYGACrDpemvQskLbl8Nbb711frFWF1TLSRcM7UKuZkzrkvbaa6/is5qhzW6N5/N473vfW6yji9MxKSRoT5baT/9F9rLLLptpFrmqNB55jlcgYGhb0b76L+lVvwpYaEverGoWAN3y+fWvf31Rn7oto08rrLBC8Z5mJqib6U4BqKeddlp25ZVX+o93enzqqacW27L2e9JJJ43Jw9eF1lNgQtPFiTYDbSA2EFB5KKDByq2L/9/5znfGlN1esGBLe56ijJZX0/KYY44pyqiyqr+r7spp8803L9bTBbNyahqf7rzzzvyzuhBWTqqneeedN39fM9z58SXGb/nlly/KWzfblQ9g8YGAXcfSruvLoG8/krPGCGtXeqzgt3KaM2dOsY7WjTn+tF2c17ZD2qsfr8pBVeXyVz1/4okn8lu02r5XHWuuv/76Yr9Vpj5pmPWp8oUEdIXM1uMvNNf9ndC33ZljbD337dOx5bbyhy6XWGKJoh2pvb32ta8dMy6Wzye1nmbsbEohfSmkHrWNkHbTFAh41VVXDezjxRdfPKboGjdWWmmlYr3ttttuzDoxY61l5gMBQ8+B7LN+mWqMiG3nvkx9z9V8HlWP+4xTyqdt38bzHLBv/1e5Q/pOikBAbSumnFV15V+L/ZsqxTlbijy0T03nfX6fQ8e0tjpONWa19QGVve58Jbb+lPdUDQRMVT+jFAiY6pgS2gfa+lQq45g+oDasH0ysvfba+Q/xdFeCp59+2nd5HiOAAAIIIIAAAggggAACCCCAwBQQIBAwy7IrrriiuDiiC0A33njjmKrVFzrzzDNPsZ6+wKlK+oWqXdysCpzwt6BSsIe/jYh/XJU3ryGAwN8Fhtln/S1L1bd9AJOCfqqCbZrqSTOf2RihpYJrfLKLgjvttJN/OQ8AWXrppYvPfv/73x94v+1JeTYTPwucgjnWX3/9Im+V64tf/GJblrXv64KpZuCx/fS38dUFad2i7qKLLsr8RQKtu8kmm9TepnY88tQOrL766kU53/e+99XuU8gbfdvK/vvvX5RBDnXBH74MTV/+//73v8/WXHPNIs9FF100e+aZZ/zHMwULaFYgqyPdrlczNjz44IP5epr97cwzz8ze8Y535Ov4OhzIKOCJZmKYe+65i20pUEz9oJy+9rWvFeuoXB/+8IfLqxTPy8fkuoAkHwhy3nnnFZ+3B239UetpVkKb/Ufl0r4cfPDBmYK01J517L7sssuyddZZJ3vPe95jWecXV/15Q98yFhk2PNDFmwUWWGDAb+ONN86DFu+44468vjfbbLOBOtdMWOXU5GGBgDKwdmKfV9uxtqSLSj719VMeKrPlqzpQALQuXP3yl7/M26tu+Wnva6mgHktdx9Ku62s7Mf3oc5/73EDZFaCtQJpf/OIX2U9/+tNMtzZV3/X7V3cLYdvnpuXee+9d5KV9Ladyn+p7nlvO
t+q5n8lIx7VHHnkkX00XazXW+Iut6kNdj7PKbNj1abczV31pLKhKfoYujdFVKeTvhJh2l6Ke+/bpmHJXWbW9tueeexZtXvXyqU99asxHNIb7YBGt95vf/GbMev6Ftr6kdUPqUev5dlOeKde26YPpFbBXThrrbZxYcMEFx5R/jz32KN5X4LT1N59PzFhr+fQ9B7LP+2XsGJGinfvy6HGfc7VyHuXnfcYp5TGR54B9+7/KHdJ3Ntpoo6K9HnrooWWy4vmHPvShYr1Pf/rTxev2IKaclkfdMvZvqhTnbCny0P41nff5/Q8d00LqOHbMiu3fsfUnF/2dZOOulvpbp28KqQN/rIg5xwjpX7H1Iwc/hp944omNNCH1eemllxbe+mFg1Q/F/Ay/22yzzcA2fXn6nneG9oGQ+ow1DjETQNN309tuu21hqjZ8+OGHD5jxBAEEEEAAAQQQQAABBBBAAAEEJr/AtA4E1K3YNAOWZkPyX+QtssgimS4u65fWur2evtD0wSJaVzMt6QKTBe/pgq3/Qkfr6GKNLr7cf//9RUvxX75pHW17rbXWyl7zmtcMBBAUH+ABAggUAhPRZ2+66aaB8UH99q1vfWv2lre8JQ8Ovuaaa4ryNT1QgINmINDnlIf9W3XVVfNxQkFFSnZRUO9rbNCsXQo6sluq6vVddtmlaVOV7ylo0X8ZrHw0O469pqVm47Ny6RZz2o4u3Ieme+65J9twww2zZZZZpshH+emCpb6AnzFjRjFrmG1HSwUwzZ49u3Iz45GnbomnsdjPFqJyKDBOtwTUew899FBleZpe7NtWFKhlHvPNN19xC9+mbfmLwNoPXfyYOXNmptvw+pnGdLGjasZA5V0OvLMyKCDJHmupwILvfe97TcVpfc8CCpWffKuSLqr54MRzzjlnzGq68LPPPvuMOSbr4ow/Jivgxgc2aLuy2G233bJbb701C+2PVoBZs2YNmJiPghrtsZY6d0hVRtt26FIXvW1WPl8me1wuq54r4Fi3oQ7x8IGAaiO77rprHsSicyTbrvqy2nM5dfHzn73uuusGfG1fbOlnsdJrmnHMzru6jqVd17dy9u1Huoi3wQYbNO7f/PPPP/C+gqerAh6sLFXL008/PdNsii95yUuKvDTO6LZpOqamPs+tKkP5tcMOO6woi+pN7WfJJZcsAm51kdvqWEs5qd/LLDQNqz4VUKBjpQ9+V5k1++ZZZ52VF1fHd409iy22WLFfuhW6yuh/XKCVQ/9O6NruUtdz3z7dtdyh9V21no5bvh3pGF2Vdt5552I93Za+LrX1Jf+5tnrUzLJV7UbHcGs3d911V95ufEC7jpFax89aqLr1f6cqkFb9W+ePOsc0AwUW/+AHP/DFLB7HjLWWSd9zIPu8X/YdI/R3ecq/232Z+pyr+c9XPe47Tk30OWDX/h/Sd/QDJQWm2PmE2q2CdNU/FQRlSed322+/fabjmLVt/Q2j9b7xjW/YavmyazkHPtzwJMXfVDHnbFa0mDxCzvtsO1q2jWkhdWz59R2zUh3HYurv5z//eX6e6f+uUTtUcL/+Drn55pttN1uXIXWQ6hwjtH+p0H3rR59VX91qq60GfgCmY5KOd+UfIIXUpwIs5erPn+StHx3ZcVDHL/094//O0fmzjoN2jtX3mOLPO9v6QEh9WqPoaxxipm2EfDet7zxsDNVS3yOQEEAAAQQQQAABBBBAAAEEEEBgaglM60BA/+W7/xLEHutLrPIvOu09W1qQX/nLQHtfS3/LRX05qAAb/74eL7XUUsUXVVOribE3CKQTmIg+q9Ir4KXcZzVL0aabbjpmZqy6vVXwWTkP/1wziinpi1sLzvPv67EuiB177LEDt92s217V65rlRsFQPl/9qn6//fbLZzI45ZRTBt7Tep/4xCeqsqp8rXwbS78de6zZ1BRYpqBDzQD49a9/vTHobjzy9AFNVq7ysiqYqXKnSy/2bSsW6KkLmSHJXwQul13PFWSuCy66YNCUFMiqi1c+AE+f13O1FV04eu6555qyCHrP39b67LPPrv2MBWeqf1XNfvb888+PaaN+/+2YrAtE/nX/+Nxzz81C+6Mv6LXXXpvP9OXzsscKftVsbipzqjL6bYc+1kzEK6+88kB9KgBMAYq69axuCWlltuVxxx0X5KHboCkI2s/uaHloqQtkCrKsS6F+5c+rfOX2rrFQF6wUdOlvDWzl0XlX17G06/q+nH37kS4wqs+Xg28VsKNbhmrMtn2ypfp2l7TjjjuOycPyUpBQ6vPc0LL52W2sPGussUZ2ySWX5FksvvjiY8rd5bZlw6pPHT+t/OWl3c66HJDm19N5hE9d/k7o0u7Go5779uku5fY2XR/r9qg2duicoy75mYYOPPDAutWytr7kP9hWjyHtRkF7vq34x5pV3ieNhQqy9D8EsPV1nqcZdhWk0JT6jrU+z77nQD4Pe9xnjLj33ntrzeRh5wihf7dbWWzZ9VzNPle37DtOWbu2Oi4vh3EO2KX/h/SdclCK3ycFw1ryM177dfRYfyeWU5dylj/b9DzF31R9z9l8ufrm0fU8uG1MC6ljX+4+Y1bK41jf+ivPfFpugxbI7fe17nFIHYQcK0LOMUL7l5W1T/3os/pxc9nEnmtmb59C6rNpTLcZco8//vjabeq7Bkt9jin+vLOtD4TUp5VFyz7GIWbKO+QYpx9hWt3o75D77rvPF4/HCCCAAAIIIIAAAggggAACCCAwBQSmdSDgRNWfbtGo23qedtppmS4+6TaZJAQQGG0BBdHo1qKapUwXldoCrGL2RreqUyCaxgnNsKBx4vbbb+8dAOjLosATzRB3xhln5Lef1IVyUlqBrm1FdaDgSH0ZXzdjUbmE/iKwAjjVRnRM0QVBBWx1TTouXX/99Xm7UPsu30q4a35TeX3NFqnbpqoPaRYKf5FoVPZbt+7VxUiVM/VYpYtQN954Y357b90+Wu2myy3R+vjpdtdmru3p4llI6jqWdl2/XIa+/Ui3PtfMjKqz6XROqIummnVWs6Ppceo0UfUZux9d/07o2+5iy2mf79On9dmJLreVf7yWXesxRTl0G+3f/va3+TmBgt4V7NLl1tp9x1pf9q7nQP6z5cfjPUaUt9f0vM+5WlN+9l6fcWqUzgH79n/b/2Etx6Ocqf6mSnHOliKPtroYjzEtdsxqK3PT+6nqr2kbk/29iayf8bCLPaZMtT6g75cuuOCC/Eds4+FNnggggAACCCCAAAIIIIAAAgggMLECBAJOrD9bRwABBBCY5gI2E6Nm5wpN/iJw3a1/Q/NiPQQQQAABBBBAAIF6gT7navW5xb3DOWCcH59GAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSmugCBgFO9htk/BBBAAIGRFdBMYDNmzMhnA9TsPaGJi8ChUqyHAAIIIIAAAgj0F+h7rtZ/i82f5Byw2Yd3EUAAAQQQQAABBBBAAAEEEEAAAQQQQACB6S5AIOB0bwHsPwIIIIDA0AROPfXUbLHFFsuOOOKI7Kmnnsq++MUv5kGAK664Yqbbw4UmLgKHSrEeAggggAACCCA
QLpDqXC18i93W5BywmxdrI4AAAggggAACCCCAAAIIIIAAAggggAAC002AQMDpVuPsLwIIIIDAhAkstdRSeeDfXHPNlc0333z547nnnju7+uqrg8qkWWkuvfTSTJ9RHvp37LHHZk8//XTQ51kJAQQQQAABBBBAoF4g9lytPufIkJLyAAAgAElEQVS4dzgHjPPj0wgggAACCCCAAAIIIIAAAggggAACCCCAwHQRIBBwutQ0+4kAAgggMOECyy67bBHAZ4F8M2fODCrXH//4x+xFL3rRmM9bPltvvXVQPqyEAAIIIIAAAgggUC0Qc65WnWP8q5wDxhuSAwIIIIAAAggggAACCCCAAAIIIIAAAgggMF0ECAScLjXNfiKAAAIITLjANddck6277rrZIosskr3rXe/KLrjgguAy/fnPf85222232n+zZs0KzosVEUAAAQQQQAABBMYKxJyrjc0tzSucA6ZxJBcEEEAAAQQQQAABBBBAAAEEEEAAAQQQQGA6CBAIOB1qmX1EAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCYsgIEAk7ZqmXHEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEpoMAgYDToZbZRwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgSkrQCDglK1adgwBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQGA6CBAIOB1qmX1EAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCYsgIEAk7ZqmXHEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEpoMAgYDToZbZRwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgSkrQCDglK1adgwBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQGA6CBAIOB1qmX1EAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCYsgIEAk7ZqmXHEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEpoMAgYDToZbZRwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgSkrQCDgCFTtL37xi+zQQw/Nttlmm+xb3/rWCJSIIiCAQJMAfbZJh/cQ6C7wt7/9LfvZz36WHXzwwfmx8LzzzuueyRA+8ac//Sm79NJLs7322isv59133z2ErY7+JnAZ/ToalRJOlr4+Kl6TuRx33XVXdswxx2TbbrttdtRRR03mXaHsU0yAY9YUq1B2BwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQGBAgEDAAY7hPtlvv/2yxRZbLJtrrrmKf3vvvfdwC8HWEEAgWGCy99nHH388+8xnPpOtv/762bnnnhu836w4uQQuueSS7AMf+ED2sY99LPv9738/8oXfY489skUXXbQ4DuqYeNBBB41UuR944IFsk002yeabb76Bct5www0jVc5hFwaXYYtP7u1Nhr4+isKT7dh93HHHZcsuu+zAWLnBBhuMIi1lmmYCHLOmdoVPtrFyatfG4N5RN4MePEMAAQQQQAABBBBAAAEEEEAAAQQQQGC8BaZ1IOCuu+6abb311p3/ffSjH82effbZ6LpZZZVVxgQC7rPPPtH5kgECU1WAPhtXs/vuu29xYX7uuefOdEGUNLUEHnvssWyeeeYp6nnnnXce+R2cMWPGmGPhqAUC3nTTTXkZy8H70z0QEJeR714jVcDJ0NdHCuz/CjPZjt2a4bw8VhIIOIota/qViWPW1K7zyTZWTu3aGNw76mbQg2cIIIAAAggggAACCCCAAAIIIIAAAgiMt8C0DgR81ateVQRL+Fn5Qh6nvB3gbrvtVpSDQMDxbvLkP5kF6LNxtbf22msXY43Gudtuuy0uQz49cgI/+MEPBup4iy22GLky1hXogx/8YFH2UQsE9GX+p3/6p6Kc0z0QEBcvwONQgcnS10P3Z7zXm6zH7v/4j/8oxkoCAce7lZB/VwGO5V3FRn/9yTpWjr5sfAmpm3hDckAAAQQQQAABBBBAAAEEEEAAAQQQQKCLwLQOBPzjH/+YPfTQQ9lHPvKR4kKVZlK6+eabK/9tttlmxXoEAnZpZqyLQBoB+myc4+zZs4sxbPXVV4/LjE+PpMDzzz9f3JLxpS99afad73xnJMtZVajJEhw03YIH1KZ233337JRTTqmqtuK1yeRyzjnn5Pv06KOPFuWfzg9C6ziV0WTp66n2NzafUTx2h/QhAgFja77+88Pus/UlmbzvTKZj1uRVHm7JhzlW0gf/XrehDsOsm+G2OraGAAIIIIAAAggggAACCCCAAAIIIIDAaApM60BAq5Ivf/nLRXDMvPPOay+PWV544YXFegQCjuHhBQSGJkCf7U/929/+NvvRj36U/e1vf+ufCZ8caYE///nPmWYGfOSRR0a6nOXCTZbgoOkWPHDiiSfm5z7vfve7y1U28HyyuPz1r38tbluqsZCUZaF1nMpqsvT1VPubIp9ROnaH9iECAVPUfHUew+6z1aWY3K9OlmPW5FYefumHNVbSB/9et10chlU3w291bBEBBBBAAAEEEEAAAQQQQAABBBBAAIHREyAQMMuy0KCiX//61wQCjl4bpkTTUIA+Ow0rnV2e8gKTJThougUPrLDCClMqEPD8888vzuUIBPz7sBJax6kGocnS11Pt71TLJ7QPEQg4fjU/7D47fnsycTlPt2P5xElPzS3TB/9erzhMzfbNXiGAAAIIIIAAAggggAACCCCAAAIITH4BAgE7BAK+8MIL2UEHHZT/e/LJJytr/+mnn87mzJmTXXnlldkPf/jD7He/+13rzFu77bZbcVF6n332qcxXL/7pT3/KfvWrX2WXXXZZ9uMf/zj7zW9+k/3lL3+pXd+/oXJcfvnl2bXXXptpP0gITGaB0EDAie6zMn744Yfzfqf+d9NNN2VPPPFEEP149dn77rsvu+SSS/LbnzcVpG+5m/Ksek/jmmavU6B10yyFMR6aGe/73/9+PhOiZqMITY8//nj+uZ/85CfZH/7wh9CP5etpbL7rrruyK664Iq//xx57rNPnyytrH5TXHXfckf3v//5v+e2B52r3V199dV7Pup32eKX/+Z//yX76059ml156afbAAw903oxM7Fh55513ZptvvnlxLNSxdlRTXfDA73//+7yNqZ661nespVl1aSf2mablqaeeWtRJ3xkBY1xi+n3VfulWwO94xzuKfQoNBOxzble1fb2mtn7VVVdlyrMqpRg7urSnLnVcVd6Q11L19abjhY5Z3/3ud7Mbb7yxtkgPPvhgPi7ecMMNtevojZg6iDlXby
pU6LG7nIf2RSZq612PY+W89LxLH6oLBNQx7Lbbbsu+973v5ceQpmN/uQxd2nb5s23P+9ZdqjKFjt8xfTbFmBpazrK3Ziu+5ZZb8r9jb7755uyZZ54pr1L7PEW5y5mnPJarDd97772ZzhfVrm+99dZsPM+/yvui56naYVXeVa/1bQcak+SjcyWdJ2uG0ZRpGGNl3z6Yoo6ajoNyTNEWQ/tqV4eudRPzd1i5TfVtr+V8eI4AAggggAACCCCAAAIIIIAAAggggMBkESAQsEMgYFOlnnnmmdm//uu/Zi9+8YuLC8xzzTVX/niRRRbJzjvvvNqPtwUC3nPPPdk222yTvehFLxqT9wILLJB9+tOfrsxbwYrbb799ttBCCw18br755su22GKL4ICkysx5EYEJFAgNBGwq4nj2WV2EOeWUU7IlllhioO/ZmLDccsvlAV3l8o1Xn1XQ8Oc///nsLW95S1Ge7bbbrrz5/OJRn3KPyaj0ggIHTj755GzjjTfO/uVf/iV761vfmq+hi6ULLrhgUaYtt9xy4JMxHs8++2z28Y9/fMz4pzpYdtlls7PPPntgW/ZEwTkf+9jHste97nVFuazell9++eyCCy6wVSuXCojbcccds5e85CUDn/+Hf/iHbPXVV8+++tWvZrNnzy7+/exnP8vz0ee+9rWvZe95z3uyV73qVZksVJbDDjsse+Mb3ziQ1+KLL54HF/oC6MLqOeeck334wx/OXv7ylxfrlwMfFQT1z//8z0H/lFdVuuiii7KVVlppzPFuxowZ2emnn171keI1BXxuttlmlb7mrGWfQMAVV1yxdr90AdAnlV8OSy+99EBgpcxf85rXjMnnc5/7XPFxHzygi/4KhFxvvfUGjtE6F1A70MXUptTVMradNJXFv6dgDXn6OtE+lduOD0pN5RLT7/0+lB9/5jOfyf7xH/9xYJ9e9rKXDezTzJkzBz7W5zghE/3oYv/998/HGpmpb2pM+tCHPlRsf+65585/OGIb7Dt22Oe17NKe+tSx31bb4759vcvxQkEFRx55ZD62aoxVe91ggw0Ginb//ffn66y22mqZrbP++usPrGNPYuqg77m6bbtqGXLs1tivgOo999wze8Mb3pC355///Od54N+GG26YzT///EWbk4/WUzBKn9S1D5UDAVW3n/3sZ/Mx1o8tOt62BeV2adtd961v3XUtU+z43bfPdh1TY8tZ9lfg8yqrrDLmvGieeebJ/75VsHhV6lruqjyaXktxzFIAsvqUzht8m9ZjjTc6jyr/MKDpXEV93tJWW201cHzSsWTllVe2t4tl13ZYfLDlQep2oHMlnYOWj8NqB7vvvnv+w6mWItW+Payxsm8f7FpHXY6DhtKnLdpnbRnaV7s4hNSNbV/Lvn+HpW6vvkw8RgABBBBAAAEEEEAAAQQQQAABBBBAYDIKEAgYEAioi2y6kKYgkKq00047FV/+K7hAX2YriEMBd/6igGa9qkpNgYD6InjRRRct8llyySWzj370o9m73vWu4oKmAkfKSV/kvva1r80/pwsRa6+9dn4xQgEkViZdMNRFVBICk02gLRBwIvusLBW8Zf1s3nnnzd73vvdlH/nIRzIFBdvr11xzzQD7ePZZBSTZdm1ZFQjYp9wDO1Hx5IQTTsgU8GLb1VLjmALxysHNG220UZFDjIdmY/WBfGuttVZeJwoMtEAQLRWI6NN11103ELypMV+ztG6yySYD47nGdx8AZXlohiMfdCH3r3/96/nn/f77xwqsOPTQQwd87P1XvvKVla/rfQX7+YvnugBmn/PLciDgMsssk6+nC68K3PP/yscsBS35pNlt/PFO7VkW66677kBdqh1VpW9+85sDjjoGaRu77LJLts4662Qqk5W9TyCgLsa//e1vL/JQXrpl2dZbb5099dRTRZE0O5ZtR0sFbVnSBUit/6Y3vSlfR+1E+3fGGWfYKpkPHvD17fO0x3vvvXfxOf+gj2WKduLL0PRY/VPtz++rAgH1mv/n+4Fft69LTL9v2h+9p0Bold3GANWRfkzh9+fwww8vsvFtvcu5nR/nrR0cf/zxmQLR7LktNUusUt+xwwrbpz31qWPbXtuyb1/verxQ/zJLW5YDATWG23u2rAoEjKmDvufqbY5tx26NV694xSvG7J8C7m1fq5Z9xleVtWsf8oGAKocfI8rlUnCQZn0rpz5tu5xH0/M+ddenTCnG7z59tuuYmqKc5q3ZiX0b1o8cdt5550ztQmOqtQGNjeXUtdzlz4c89+2xzzFLP+RYeOGF8/3Q+ZPOhzQm6dzR9k1L/Q3uU9W5in4gpKBBBXNZ+sIXvpD/yNDy0o9o/DlFn3ZoebctU7YDBR6r3HbslfWmm26an1/buZb2Ua9ff/31bUWrfN+3M/Pyf+ekGiu79sE+ddT1OCiQvm3RMLv21S4ObXVjZdCy799hKdurLw+PEUAAAQQQQAABBBBAAAEEEEAAAQQQmMwCBAIGBAJqpgh9qayZq8pJt7axL5y11BeYlp5//vmBoISqC49atykQUAESlr8unvh09NFH5++VAwE185HNCKUv1XUrYUsqky5IWJ7+S3JbhyUCoy7QFgg4kX1WAVkW+KZANx9sq9s52QydPhBwvPusAhx0C64PfvCDtX2/T7lD2om2q0AMBfnZuKOlzZinQDQF4ug1CwSM8dBtpHwQhALtfHrpS19alEMXpyxpm5odzMqosdcnBewoaMje32uvvfzb+S3XNdOgva8ZGH3yt73VxWeN3/qnW+uef/75udGaa65ZfF756ALpIYccks+ypxkV3/nOdw68f8QRRxSb0IVWWevWmFYGLesCAf/rv/6r+KweaOY6PwOcyqjbEPuk2Wct73/7t38buK2fbjdt76nd6/anPp100knF+6r74447zr+dP/bts2+gii66+tlmFJBUTvvuu29RFpVZM7eV03/+53/m6yiwsJx88IC2pcBBzeao2eMOPPDAgWAXBTfqAnQ59bFM0U7K5Wh7bg5y6nJr4D4uMf2+bT/8+5pVydpq3SxkMed2n/jEJ/L+bGO9bUtLjSE+SETjii7A9x07bL/6tCf7bJc6ts80LWP6etfjxe23356Pezo/NudyIKCt48+1y+fjsXXQ51y9ydDeazt265xex1f98zOSW//TOKuADY2nPtBJj2NuxxnSh7QP5UBABTsdfPDB+THtxBNPzBSkb/Wm5QEHHGC7Xixj2naRScODPnXXp0wpx+/QPttnTE1ZTh+cox9n+HMKBV37utc5qKU+5bbPdlnGHst1HmT7oB8M+HTuuecW72kdzR7sk865FBhpnz/mmGP828VjHSO0jgJ+dX7jU5926D/f9DhlO/Dl1Pn+nDlzik1rBnXd9cAcZOJnRSxWbHkw7LEytA/6fQ89b+56HBRNTFvU5/v21RCHtrqxqo35Oyxle7XysEQAAQQQQAABBBBAAAEEEEAAAQQQQGCyCxAIWAoE1BfRCmjw/z75yU/mX1BXBQLqIpt9ea2lDwRU4/jSl75UvP/617++sr34i5O6mOfTm9/85uLz5dmrLOCjH
Ai47bbbFp856qijfHb5429961vF+wrYePDBB8eswwsIjLKADwQctT6rW8famKBbYJWTbo+m930g4LD6rILirGzlIOA+5S7vW9NzHyimMuiisM2GpeA9BbzpQo5SjIe/9aZmTi0nHwjob/cqD7PRrdeqgiROO+20Yh3NbKJbX1rSbBz2ec0CqQANn7yv1vMXvW09HzShW82Xk2Zf80GOcionBQRaObSsCwQsX5T2Fyv1uXKgnoJpzE4znj3zzDPlTeczPdq2dVyzpHV9UJRuP12VUgQCKl/NwGnl2H777Qc2pYvONluuraNZEstJgUJ6v3yrWK3ngwd+/OMflz+a3yrY8tbSbv9sK8ZYKo8U7cTK0rYMuchrecS6xPR7K0PIMiSIKcW53dve9raiHaod7LfffpmCwZU01ik4S7e+jB07YttTlzpu803V17scL1Qm3yfKgYBWZgXaWL8sBwLG1kGfc3UrV8iy6dhtn2/rf7otve2/lvrBRN8U0oeUt6+XqmOaxuNVV121KJduZexTbNv2edU97lp3sWVqMwk5zof22ZgxNbacCqa2cwa1t6uvvnqgCnwgoAJTfcB8TLkHNtLypK3P6Fa2vs+Uj+X+fKwcCKjgYvtRnvL4xje+MaY0mhnQ8q/60YE+oLsMaJ3yj09i2+GYwtS8ENsOFPzp24HOhcupHIhdno26vH7T82GNlSF9MLaOuhwHY9piTF8NcbD6aqub2L/DtJ3Y9mplZYkAAggggAACCCCAAAIIIIAAAggggMBUECAQsCIQ0L6ULy+rAgEfe+yxPFBAM1zoXzm4w19c1AWHqtQUCOgDFhQk4W/Dp9tC6mKyD0TQhT279Y6W/sKKbVtltnW0j5dffrm9xRKBSSFQDgQs91V7PhF9dtasWcWFPQWElS8cKgBQ/faJJ57IrYfZZ5suwnQtd9eG4i9oKYigLgA5xuP+++8v7NUGvv3tb48p5h577JHPvqVbsGn2CaW77rprYExUwERVUpCdzV6o/H2wm24BbO1u6aWXHvNxjcX2vpZ+plZb2V/Aqgtm0a3hLZ+qGdraAgE1C+J66603cFthHUNsFkvlXQ6SUfkUUGfbVXB8VfIXtf2t/hTwZJ/VzGf+OObzSRUI6IO4dAz1STPAqSyLL774QF3qgq2l5557rrhNsQ/2tPd98MANN9xgLw8sfQBAuR3GWGojKdrJQGEbnnS5yBvjEtPvG4pf+VZIEFOKczsfCKhA27oUO3bEtqcudVy3D/Z6qr4eeryw7Yb0iaZAwNg66HqubuUOXTYduy2Ptv537733FuOwxsC62TAtv6ZlSB/S57vWi2497FNs2/Z51T3uWnexZQoxaTvOh/TZ2DE1tpw77LBD0d50S9tyuvbaa/MZ6lXnp556avF2bLmLjAIetPUZZdF0LNexXT9i0d//+pFdOSmw1c595FlON954Y/G+1ivPhKcZAG37v/zlLwc+HtsOBzJreBLbDvxsf69+9auzv/zlL5Vb89vR7KZVtwmv/GDpxWGNlSF9MLaOuhwHY9pi374q+hAHq6Kmuknxd5i249tR37+jrLwsEUAAAQQQQAABBBBAAAEEEEAAAQQQmOwCBAKWAgH15bMu2Pp/CrbQF/RVQUV1DUCBBFdeeWW29957F1/y61ZdVakpENDPiqAyrLHGGpkuHNSliy66qNiebr+ooI+qf3ZhQssTTjihLjteR2AkBXwg4Kj1WV3E9P1L/V6zipVniDPYYfbZposwXctt5Q9d+gtaSy65ZO3HYjz8NhTsrKC4kFS+hZu/nXP581tttVVRv34mOX87TAWA2sxf9nld5PLt4oc//KG9VSxDLmD5IDfdKric2gIBy+s/++yz2RJLLFGUTbdle+ihh8qrZSuttFKxjma6rDqu6IK+7aMu+FrSTDf2+mGHHWYvj1mmCgTULIi2PS19MJ/NrqOAJX981ey9lqwN6rbVVSkkeEDBoFaGiy++eCCbGEtllKKdDBSo4UmXi7wxLmYus/E+dwkNYqpi6XJu5wMBq4JELP/YsSO2PXWpYytz3TJVX/djedPxwsoR0ieaAgFj68CPJWrDbefqVu7QZdOx2/II6X9+1qiqY5Dl1bYM7UMh9XL66acXY6Vm4/Uptm37vOoed6272DKFmLQd50P6bOyYGltOP/5V3fK5rj5iy12Xb9XrIX2m6VheladeU0Df7NmzM50j2nmAP8fwn9N5hq2jgEKfFCCp91ZffXX/cv44th2OybDmhdh2oNuB2/4pwLUuaeZAW0/LM888s27VxteHNVaG9MHYOup6HKyCCWmLffuqthfiYOVqqpsUf4dpO7Ht1crKEgEEEEAAAQQQQAABBBBAAAEEEEAAgakgQCBgKRBQARzldNttt+VfTrcFAuoWb/pCVLe/8rfBsS+2+wQCzpkzJ9NtGC0PW77//e/PdCuXcvK3WrJ125ZHHnlkORueIzDSAj4QcNT6rOB8ALD1P12AVyDCn//85wHbYfbZposwXcs9sBMBT0IvaMV4+M8uvPDCAaX6+yqHHnpoMcZqZryq2wJbZv4WuvPMM0+mWxsq3XnnnUUeqvPyjH/HH3988b6OD5rppZxCLmApcNvaVIpAQAX0WX5annfeeeVi5TP46fjl12t7PN988xX5vOxlLys+e9ZZZxWvlx+kCgRUvv7is83wqHq1WxQrOPDCCy8syuUvtO+000756+ovVSkkeOCNb3xjkbcPBNRsiDGWKk+KdlK1X1WvdbnIG+Pi+25b27L3+567hAYxmUffczt/cb0pEDBm7EjRnrrUsZnULVP19dDjhZUjpE80BQLG1IHK0PVc3codumw7diufkP6nAG3rP6MSCHjGGWcUZfKBgCnadohvl7pLUaaQttp2nA/ps7Fjakw5y06acTM0xZY7dDtaL6TP1B3Ly9vRzMq77rpr9rrXva5oz9bXtKwLBPT7u9RSSw1ku9Zaa+V5lW8rXPb126l77M/JBjbS8iSmHWj2P/99SDnQ0W9aP57xZZdLnzSssbKtD6aoo67HQfPq0hbL5ezSV7W9Ngcrk5ZNdZPi7zBtI6a9+rLyGAEEEEAAAQQQQAABBBBAAAEEEEAAgakgQCBgQCCgAjY0u1RdIKBmT9LtX+zivtZVMKBmGFEwiH2x3ScQUI1MsxutvfbaRT6WnwJW9tprr4HbLPrbSenWjCpb2z/NcENCYDIJtAUCTnSfleWxxx6b+aAI67eafc3fLniYfbbpIozVf2i5bf3QZegFrRgPfxuuN7zhDaFFy7bbbrtifNWtf5uSZrSzutRYrxn1LPnZsDSTrI2tus3oCiusUHxun332sY8MLFNcwOoyI6AC1GxftNTMSFVJMyT69TRjT9txxW67/PDDDw989qqrrqraRP5aykDA/fffv9juxhtvnOd/xRVX5K9ZsInGCQsE0PH00UcfzQM7F1100Xy9n/zkJ5Vltc/IpO7WwHXBAzGWVpgU7cTyalt2ucgb4xLT79v2ofx+aCBg7LldaCCgytd37EjRnrrUcdnSP0/Z10OPF7b9kD7RFAgYUwdWhi7n
6vaZ0GXIsTuk/02mQMAUbTvUN7TuUpQppK2mCASMHVNjylm+DfU3v/nN0KrIYssdvKFEgYAK8l5++eWL8w31sY9//OOZznV07mHnTnWBgPfff3+m8w9bz8479GMFvfaKV7xizA9HUrTDUKeYdlCenfkrX/lK42Zf9KIXFQ6f+tSnGtete3NYY2XbcTNFHXU9DvZpizF9VXXQ5uDrqaluUv0dFtNefVl5jAACCCCAAAIIIIAAAggggAACCCCAwFQQIBAwIBBQFX3LLbdUzsB33XXX5QGC9gW+Zmi66aabirZhswnq/b6BgJbZpZdemq266qrFl+S2TV1wsKTH9vqKK65oL7NEYEoJtAUCamdHoc8+/vjjmQKS5p9//qJfqn8qEOXuu+/O62SYfbbpIoxvICHl9uuHPA69oBXjse+++xbOmoVEM12EJF1wtCOTzBcAACAASURBVHFTFyKbZgQ88MADi3U1w4rNCKjtyO1Nb3pT8b5mgVxnnXXyC7mWv24tXJd/igtYoYGACk602fFUtsUXXzx7+umnK7meeuqpYp+0rmbSC00KCLR911K3gaxLKQMBr7766mK7L3/5y3PzHXbYIX/NzyS36aabFuvNmjUrD9JVORUM6OvWlzkk4KYuEDDG0sqQop1YXm3LLhd5Y1xi+n3bPpTfDwkETHFu1yUQsO/YkaI9danjsqV/nrKvhx4vbPshfaItELBvHVgZbBlyrm7rhi5Djt0h/W8yBQKmaNuhvrZeW92lKFNIW00RCBg7psaU85lnnimOqzqe6gcUoSm23KHb0XohfabuWK7zg4985CPFfuqHP0cccUSmme0sbbHFFsX7dYGAWlc/HrTzJP3AT8nOS/fcc0/LrlimaIdFZi0PYtqBfgxj+6Vl04yAmi3dr6vt9knDGivbjpsp6ij0OBjTFmP6quqnzcHXYVPdWHtXG4j5Oyymvfqy8hgBBBBAAAEEEEAAAQQQQAABBBBAAIGpIEAgYGAgYF1lz5gxo/jier311hsTOJAyENDKcP755w8EcCjI6IUXXsjf9l/IKhBGFzZJCEw1gZBAwLp9nog+q5nGFADmL3LNnDkzL+Iw+2zTRZgqr6ZyV63f9FroBa0YD90C1hsrGDQkHXfccQOf0wwtdWmbbbYp1l1llVXGrKYZAi3gRDMG6uLwSiutlG299dbZ9ddfP2Z9/0KKC1ihgYB+phrNRqPAuabkg6cUcNkl+YvtBxxwQO1HUwYC6pZ0mknH2oNug6nZHrWvv/vd74oyaKYiW+cDH/hAdsghh+TPd9ttt2Kd8gO/P11nBFReMZb6fIp2Ut6nuue+P2qWy6YU4+K3M97nLt7/Rz/6UeUupThOdAkEVCH6jh1+f7r2TW3X27fVcSWWe9G3gZi+Hnq8sE37PrHuuuvaywNLG5fV39dff/2B9+xJ3zqwz/tl07m6Xy/kccix29vXjUuTKRBQLrFtO8S2ap2muostk2+rG2ywQdXmsxSBgL5f9xlTY8v5qle9qji2KuA+NMWWO3Q7Wi+kz9QFAp511lnF/ilwqeocKjQQ8NRTTy3y0g80nn/++Uw/JNFY9ctf/rJyl2LbYWWmFS/GtgPbD+2LfoxRl/TjKDsX0/K73/1u3aqNrw9rrPTttO64GVtHocfB2LbYt6+qIkIcrMKa6ibV32Gx7dXKyhIBBBBAAAEEEEAAAQQQQAABBBBAAIGpIEAgYEQgoGZU8l9a6wvbcooJBFSwwhprrJHtuOOO5WyLWYts+7oFjZKCHew1LY8++ugxn+UFBCa7QN9AwPHus3L9/Oc/n/fba6+9doBZMzYo0Mj6p25/pjTMPtt0EaZruQd2LuBJ6AWtGA/dUs18tdRsLSHp5ptvHvicLkhVJQWX+Vn0dtppp6rVsr333jvPb7PNNqt8v+7FFBewQgIBZ8+ePbC/++2335giLbPMMvmF6AceeCB/T7Pdmu2CCy6YaTuhSbPT2mflp1vyVqWUgYDKf8sttyy2q6BNlUH74dOTTz6ZvfjFL87f06y9NqPj5Zdf7lcbeBwTPKCMYiz1+RTtZGCHGp74i7y6fW1TinGJ6fdNZap6z1+crwo2SHWc6BoIqLL2GTti21OXOq7y9K+l6uuhxwvbtg/yq5sN269TFwjYpw76nKtbuUOXTcduyyOk/41HIGBVH7IyhYxVZ5xxRjFO223b7fOxbdvyqVv2qbvYMoWYdAkErBuXY8fU2HKqj9lxXwH4d9xxR101DLweW+6BzFqehPSZukDA3Xffvdg/jfVVKTQQUDPnaYZp87LbI6+22mpV2eavxbbD2oxLb8S2Az/r8sILL1w7I7ZuG2z7rx/R2LlnqTitT4c1VoYcN2PrKPQ4GNsW+/ZVVUaIg1VaU92k+jsstr1aWbW0H9n613iMAAIIIIAAAggggAACCCCAAAIIIIDAZBIgEDAiEPCJJ57I9GW1fXGtLx/LSTNA2fu64FCVNPOQrbPPPvsUq9x5553567poXU76cnLeeefN39dMD7qljqUVVlihyE+/xK+bFUuBSaeddlp25ZVX2kdZIjApBPoGAo53nxWeZn5Tf9bFkXJSsJ31dc08ZmlYfbbpIkyfclv5Q5ahF7SUV4yHLoqbsWZp+c53vlNbPN26y5Ju4Wufe+tb3zpmdletd8EFFxTraNyturCtPLVd5aXAz8suuyz79a9/PTBG2zbLyxQXsNoCAX/7299mCyywQLEfb37zm8eUzd8q7N57782LqRmSzEfLXXfdtdJIK+sC7kEHHVTsng/A0Wd16zsdf8pp8803L7bhP19eL/S5n2XHyn7SSSeN+bi/LZ/W062Emy4AxgQPaOMxlvp8inYyBqHmBX+RV0GcTbfbjnWJ6fc1xa982QcCahbRckp1nOgaCNh37IhtT13quGxVfp6qr3c5XqgM5513XjF2aPy95557ykUrZmpVH68LBOxTB33P1ccUsOGFpmO3fSyk/41HIGBVH7IyhYxVTYGAsW3bylG37FN3sWUKMekSCNg0LseMqbHl1G2W7bir5Xve85581tOqunj66acHzgliyl2Vf91rIX2mLhBQ5zG2f5p92P8dbtt773vfW6wjz6a07bbbFutavqecckrtR2LbYW3GpTdi28FVV101sF8XX3xxaQtZfl6hmbNtv7fbbrsx64S+MKyxMuS4GVtHocfB2LYY01dDHKzu2uomxd9hse1VZf3Vr36Vrb322vmPhN7//vdnGp9ICCCAAAIIIIAAAggggAACCCCAAAIITEYBAgGzLPviF79YfPn8kpe8pDEAoFzJfvaTpZdeOnvkkUfyVXQh+Ygjjhi4rdU888wzcKHD8tKsUvblt37VbckuUOm9Bx980F7Ol9/61reKz+jLSp/0JbsPUNTtELW+5aEZMM4888zsHe94R56HyklCYDIJjGqflaEF1JVni1MAjcYI6+vf//73C/Jh9VmbbUplUDl96lNu//m2xxdeeGGx76997WsbA4piPDSTmwXiaT81E83BBx+czZkzJ9/mH/7whzw4TxecdGHa0nXXXVcEV+tzPihb6yi
Yz98+Sxe0qtI111xT7KfVtZYqkwIM999//8ogFeVlt6XV+ipfVfK3P15zzTXHrKIZ7vx2dRyxpDa41lprFe/rmHTrrbfa28XS74MFAuqzNque5a/bXSvYXRfA9U+GctHFdQWqW9JFNB98qM/r1sQK0lQwpepbsyf649YnP/lJ+3jvpWZ2U/1beRW8KZ9y+trXvlaso3U//OEPl1cpnivQUm6Wp2YvqkpLLLFEsY6ClHyKsVQ+KdqJL0/TY3/rZO2zD6RU0JQFdKZwien3TftQfu8Nb3hDUTfLLbdcpjHBkj1OcW7n8zjxxBNtE7VL3++sfWnZNnbEtqfQOq4tuHsjVV/vcrzQ5nX7TG+mQCL9yEWBzzrWHnjggZkPglPwb1XqUwd9z9Wrtl/3WtOxW58p9z8F31QlfwyL+RFQSB/S9v1YpdmxqpIP2F5++eUHVolt2wOZVTzpU3exZfImfY/zoX02ZkyNLaeODfr7tNwvv/e97+Vj7l//+tfsN7/5Td5GFHyvvmcpptyWR9uy3Ge6Hsv9GKV99DPwKy8/y5re198tTUn90VspuLBu9mTlE9sOm8ri34ttB8pL53u2b5pRWvXu0x577FG8r7Zg36X4dUIfD2usDOmDsXXk21jT301+vT5tMaavhjhY3bXVTYq/w1K013JQ7uGHH267wBIBBBBAAAEEEEAAAQQQQAABBBBAAIFJJTCtAwGPPPLI7N3vfnem2wHaF9Ra6tf/+gK/7vaQvoYPO+ywgc8q+GHJJZcsAlH8r5uV9wYbbJAHmOgCxBe+8IVs++23H7gdkIIldthhhzyI0F+g0gUBzcCkLzj1RarNBqj1q2alKgc22P4pH3uspb6Q10UZEgKTQWDU+6wMLaBO/UtBV5rZTIFoPohql112GcM9nn329NNPz8cVBTpb/9dtyHT7saOOOiovS99yj9mR0gsKFNtzzz2zGTNmFNtWGRSEp9frghFiPGbNmjWwLdtnBYLZYy3XXXfdgdJq9g4fRKjbsqnuNDuJAqrtsxqj6y7Qli/I2Wf8UtvQWG5Jn1GbKB+LNEPeWWedla+mQEbNHrvYYosV5dAtbVVvCs5S8LlmBXnLW95SvK9trrrqqpkusurz/phi5VFwYvmf31cLBFQhHn/88WzZZZcdyF/56HhkxyTLd8MNN7Tdy5cK0CyvY+tqWa4bPVcw7dVXXz2QT9cnFvCubWy00UaVH1dwvA9CPOecc8aspxkCFRy6+uqrD+y/AmLUji2ATIGaCmr0+6YAYNWdD7rsY5minYzZsZYXbrrppoF90X6pvaidKSBSAQ8pXWL6fcuuFG+rHfj60czJGqtf85rXFMHBMed26tsKkvVBqJqFUH28KcC1z9hhO9WnPdln2+rYB+jYZ5qWMX297/FC5VlvvfUG6tXXscZKH4ivY6FuHe8DW5VHnzrw42rXc/UmR73XduzWcUh/E5THpde//vUD45LOnXwwjmwUdKdjw/33399WjDHvt/UhBXlXHdN0u3Y7pt111135uOiDpjUOa50vfelLxTZj2naRSc2DvnXXp0wpx+8ufbbrmJqynDou+tn9rE+qnv35qF5XEJBPXcvtP9v0ONWxXIHwPrBW+6BgLXtNS3++pNviqk8oyLEqKWjMn9/pvKIt9WmHbXna+ynbgcYpP0bpeKi/P/T3kc5RrV0suuii2Q9+8AMrQqflsMfK0D7Yp466HgdTtMW+fTXEoa1ufEX3/TssZXvV337WJrWcOXOmLyKPEUAAAQQQQAABBBBAAAEEEEAAAQQQmDQC0zoQ0Ae++C/87LEC70KS//WxfXaNNdbIdEsXpcUXX3zgC0Wto1lT/Iwa9jlbKmjm0UcfzS+4+4vJ9r6WuhDnAwvKZdXFW80Y5QMc9Dk91wVRzV7w3HPPlT/GcwRGVmDU+6zgdMHdLgT6/qrHmpHo2GOPrbyFmD47Xn12xx13HDMGWdl0ES623HkGNf/5Gexsm36pgJu6FONx7bXX5rPq+W3Z42WWWSa/dfOzzz47ZtO6JVQ5yEGf07ipz5Vnd7MMFNytmeRsXQXlKRhCF7N33nnnyovhmplVab/99qutHwUdKilg28pfXm666ab57XjLr/vnukWxbtnrXwt5/PDDD+fbt/9kpuBIzdpS/vz888+fbbLJJtmNN95oqw8sFfS58sorDxyTFAyggMzbbrstD7gs5xkSkD+wkdITfzvus88+u/Tu/39qt5RWcFtVu3j++efH7K8vqwXTlGdA8uuce+65/3+DWZZvp4tlinYyUIDAJzoX8vuhx3JSu1MwT/k9/7yPS0y/D9mlm2++OVNQhi+nHi+11FIDgWF9z+0UUFjO254rCLicYsYOn1dM32yqY5tN2m+r7XHfvh5zvLjvvvsGZjyVucZtjS+33HLLwK2BrT7UhpVi6iD2XL3Jsu3YXZ4B1vbLltb/fEC0vWdL2XRNbX0oZKxSwI+VobzUTJ0+xbRtn0/5cUzddS1TiEnbcd6Xv0uf7TKmpi6nnHQcftnLXjamvvXjBwVN//SnP/W7VjzuUu7iQy0PUh7LNbOdDzBWO9Z+ylAzEuvWvuW2/YlPfKK2hAcccECxftPf9z6Dru3Qf7bpcep2oABMnQ/rVtZlE5np/Fk/aOmbJmKsDO2DXeuoz3EwRVvs21fbHNrqplznff4OS9leZ8+eXbRRBffr3IKEAAIIIIAAAggggAACCCCAAAIIIIDAZBSY1oGAKStMARb6lb8CP/Q4ZdKFPgVWXHTRRfktffVLcV1gCE3PPPNMfvvGM844I1NwjJ6TEJjuAuPZZzWzh2bq1C0JNRPCpZdemt1+++21AYDlupioPhtb7vJ+pHoe4/HQQw9luk2ixj/NOKMg7JCkC2K6OK1gPc1K1/Y5fyupuuDG//7v/85njLGLoLoAPlmTbiV29913Z9/+9rfzWbQUEGa3iW3bJwUWaVYo1UvdzIpteUyl92Msh+WgQE0FwWq2RJ1HDKPeYvp9m4vy1vh82mmn5eOz2nJVGs/jhG0v9djRtz2NRx0Pu6/rGKbZT9VWNTaFno+nqIPYc3VrD5NlGdqHUu5P37bdVoaYuhuvMrWVWe937bPjOaa2lfdPf/pTfgtvzfilv5d1zqD+GpImstxt5VMQ8Q033JCfY+qcUfs5EWki22GX/VU5dct2/V2kH0cogE2vTdbUpQ+Odx2laot9+moXh9C67vp3WGi+Ievpb/YLLrggU+AwCQEEEEAAAQQQQAABBBBAAAEEEEAAgckqQCDgZK05yo0AAgggMBICupitW51ZgJ+CD+uSbjFl633wgx+sW43XEUBgGggwdkx8JVMHE18HlAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCCdAIGA6SzJCQEEEEBgGgro9osW3KdlUyDgSSedVKz7jW98YxpqscsIIGACjB0mMXFL6mDi7NkyAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAegECAdObkiMCCCCAwDQTWHjhhYsAv913371y73V7uyWWWCJfT0vdjpCEAALTW4CxY+
LrnzqY+DqgBAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAmkECARM40guCCCAAALTWEDBf35WwF133TV79NFHc5G//OUv2VVXXZUtu+yy+ToLLbRQduedd05jLXYdAQRMgLHDJCZuSR1MnD1bRgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSCtAIGBaT3JDAAEEEJiGAi+88EK2xRZbDAQDKjDw1a9+dTbPPPPkr88999zZ1ltvnd1zzz3TUIhdRgCBKgHGjiqV4b5GHQzXm60hgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiMnwCBgONnS84IIIAAAtNI4K9//Wt28cUXZ1tuuWW20kor5UGAyy23XLb55ptnBx10UDZnzpxppMGuIoBAqABjR6jU+K1HHYyfLTkjgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggMT4BAwOFZsyUEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEkgsQCJiclAwRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGJ4AgYDDs2ZLCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCQXIBAwOSkZIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDA8AQIBh2fNlhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBILkAgYHJSMkQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBgeAIEAg7Pmi0hgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkFyAQMDkpGSIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAwPAECAQcnjVbQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCC5AIGAyUnJEAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHhCRAIODxrtoQAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAcgECAZOTkiECCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACwxMgEHB41mwJAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgeQCBAImJyVDBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBIYnQCDg8KzZEgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALJBQgETE5KhggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggMT4BAwOFZsyUEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEkgsQCJiclAwRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGJ4AgYDDs2ZLCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCQXIBAwOSkZIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDA8AQIBh2fNlhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBILkAgYHJSMkQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBgeAIEAg7Pmi0hgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkFyAQMDkpGSIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAwPAECAQcnjVbQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCC5wMgEAs4111wZ/zCgDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0gZg2kDzKbhJkSCAgAYgEYNIGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0gSnTBiZB3F7yIhIISAeeMh04JgqYzxJFThugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuYGm0geZTdJMhwZAIBJ4EVRUQAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBg5AQIBBy5KqFACCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIQLEAgYbsWaCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIycAIGAI1clFAgBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBcAECAcOtWBMBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBkRMgEHDkqoQCIYAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIBAuQCBguBVrIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDByAgQCjlyVUCAEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEwgUIBAy3Yk0EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEERk6AQMCRqxIKhAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEC4AIGA4VasiQACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggMDICRAIOHJVQoEQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQCBcgEDDcijURQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAA
QQQQAABBBBAAAEEEEAAAQQQGDkBAgFHrkooEAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALhAgQChluxJgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIjJ0Ag4MhVCQVCAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIFyAQMBwK9ZEAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAYOQECAQcuSqhQAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiECxAIGG7FmggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiMnACBgCNXJRQIAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgXABAgHDrVgTAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgZETIBBw5KqEAiGAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAQLkAgYLgVayKAAAIIIIAAAggggAACCCCA8guSBAAAAhRJREFUAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAwcgIEAo5clVAgBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBMIFCAQMt2JNBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBEZOgEDAkasSCoQAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAuACBgOFWrIkAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIDAyAkQCDhyVUKBEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAgXIBAw3Io1EUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEBg5AQIBR65KKBACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC4QIEAoZbsSYCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACIydAIODIVQkFQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCBc4P8BSRjfgJoJns8AAAAASUVORK5CYII=) ###Code ner_tagger = MedicalNerModel()\ .pretrained("ner_posology", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_converter = NerConverterInternal() \ .setInputCols(["sentences", "tokens", "ner_tags"]) \ .setOutputCol("ner_chunk") ddi_re_model = RelationExtractionModel()\ .pretrained("re_drug_drug_interaction_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\ .setOutputCol("relations")\ .setRelationPairs(["drug-drug"])\ .setMaxSyntacticDistance(4)\ ddi_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_converter, dependency_parser, ddi_re_model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") ddi_model = ddi_pipeline.fit(empty_data) text='When carbamazepine is withdrawn from the combination therapy, aripiprazole dose should then be reduced. \ If additional adrenergic drugs are to be administered by any route, \ they should be used with caution because the pharmacologically predictable sympathetic effects of Metformin may be potentiated' lmodel = LightPipeline(ddi_model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df annotations[0]['ner_chunk'] ###Output _____no_output_____ ###Markdown Chemical–Protein Interactions (ChemProt RE) Accurately detecting the interactions between chemicals and proteins is a crucial task that plays a key role in precision medicine, drug discovery and basic clinical research. Currently, PubMed contains >28 million articles, and its annual growth rate is more than a million articles each year. A large amount of valuable chemical–protein interactions (CPIs) are hidden in the biomedical literature. 
There is an increasing interest in CPI extraction from the biomedical literature. Since manually extracting biomedical relations such as protein–protein interactions (PPI) and drug–drug interactions (DDI) is costly and time-consuming, some computational methods have been successfully proposed for automatic biomedical relation extraction. To date, most studies on biomedical relation extraction have focused on PPIs and DDIs, but only a few attempts have been made to extract CPIs. The BioCreative VI ChemProt shared task released the ChemProt dataset for CPI extraction, which is the first challenge for extracting CPIs. Computational CPI extraction is generally approached as a task of classifying whether a specified semantic relation holds between the chemical and protein entities within a sentence or document. The ChemProt corpus is a manually annotated CPI dataset, which greatly promotes the development of CPI extraction approaches.

ref: https://academic.oup.com/database/article/doi/10.1093/database/baz054/5498050

| Relation | Recall | Precision | F1 | F1 (Zhang, Yijia, et al., 2019) |
| --- | --- | --- | --- | --- |
| CPR:3 | 0.47 | 0.59 | **0.52** | 0.594 |
| CPR:4 | 0.72 | 0.81 | **0.77** | 0.718 |
| CPR:5 | 0.43 | 0.88 | **0.58** | 0.657 |
| CPR:6 | 0.59 | 0.89 | **0.71** | 0.725 |
| CPR:9 | 0.62 | 0.84 | **0.71** | 0.501 |
| avg. | | | **0.66** | 0.64 |

Here are the relation types (the CPR groups evaluated in the shared task):

| Group | Relation types |
| --- | --- |
| CPR:3 | UPREGULATOR, ACTIVATOR, INDIRECT_UPREGULATOR |
| CPR:4 | DOWNREGULATOR, INHIBITOR, INDIRECT_DOWNREGULATOR |
| CPR:5 | AGONIST, AGONIST_ACTIVATOR, AGONIST_INHIBITOR |
| CPR:6 | ANTAGONIST |
| CPR:9 | SUBSTRATE, PRODUCT_OF, SUBSTRATE_PRODUCT_OF |
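As a quick sanity check, the bolded F1 scores in the benchmark table above are the harmonic mean of the reported precision and recall; the short snippet below recomputes them from the table itself (small deviations, e.g. 0.76 vs. 0.77 for CPR:4, are rounding artifacts of the two-decimal inputs).
###Code
# Recompute F1 = 2*P*R / (P + R) from the (recall, precision) pairs
# reported in the benchmark table above.
scores = {"CPR:3": (0.47, 0.59),
          "CPR:4": (0.72, 0.81),
          "CPR:5": (0.43, 0.88),
          "CPR:6": (0.59, 0.89),
          "CPR:9": (0.62, 0.84)}

for rel, (recall, precision) in scores.items():
    f1 = 2 * precision * recall / (precision + recall)
    print(f"{rel}: F1 = {f1:.2f}")
###Output
_____no_output_____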
IQgAAEIACBRgnggGoUJ8IgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAARwQKEDEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACjRLAAdUoToRBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjggEIHIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEGiWAA6pRnAiDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhDAAYUOQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEINEoAB1SjOIuFveUtb0m22GKLZNttt+UPBugAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOoAPoADqADgy5Dmy33XbJ61//+uSLX/xicQcwMSAAgZQADqgURTc7L3rRi5JXvOIVySabbMIfDNABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHRhyHXjJS17SqyM5oQgQgEB5AjigyrNqJOarX/3q5DOf+Uzy3HPP8QcDdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAHhlwH1q1bl7z4xS9OvvnNbzbSR4wQCIwXAjigOq7pf/zHf0y23nrrjlMlOQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEBiEwP/+7//2RkBpKj4CBCBQngAOqPKsGomJA6oRjAiBAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEINAJARxQnWAmkY2QAA6ojisVB1THwEkOAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEANAjigasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAjUI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOqx8HVMfASQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQA0COKBqwOPScU0AB1TH1Y8DqmPgJLdRE3jhhReS9evXJ88999xGXU4KBwEIDB8B7M/w1UlRjlRnzzzzTKIXx2EI6NAw1AJ5KEsAfS1LingQgAAEIAABCGysBHBAbaw1S7naJoADqm3CRv4wOKAeffTRZPHixckdd9yR/O1vf0ueffZZk0t+QmA4CajzY7PNNks23XTT5KUvfWnyohe9KNlkk02SD3/4w8OZ4SHO1dq1a5Pbb789WbJkSfLQQw/hxBviuuoqa0888UTv3rB69Wr0IQAd+xOAMgKH3v/+9/fuGS972cvSe8YWW2wxJjlHh8YEO4kOSMDXV7/9fPSjHx1QIpdBAAIQgAAEIACB0SaAA2q064/cjx0BHFAdsx8LB5QcTGeffXby+c9/PlH66rC3f+973/uSXXbZJbn44os7JkJyTROQY+F1r3tdrb9bb7216Ww1Ik83+3322SdR54evw9/73vcakb8xC3n++eeTWbNm9ezAa1/72gw/sfx//+//9bj+/ve/T2z9F+nUVVdd1Yfum9/8ZlQH//CHP2TiF8kvo89+nh988MFo2n/84x8zaef9uOeee5I3velNfbLe+ta3Zi4rk/83v/nNyXvf+97kc5/7XDJhwoTkgQceyMjwf5SR95a3vCVRJ/oXvvCFRDyvv/76RJ2FVcK6deuSww8/PPnRj36UvOc970k756UPcvDusMMOyYIFCwpFKg9l6qgoju5BwxxGzf4ceeSRyY477tj7O+CAA4YZbWt50+hYtbcPfvCDGZv3wx/+sLU08wSPmg7llaWJc+hoExTbk6H3h9122y2RE9d/5vrpT39aK1HqvRY+LoYABCAAAQhAYAwJ4IAaQ/gkPdIEcEB1XH1dO6Bmz56dvPGNb8y8OL74xS9O3v3udycveclLMsf1cvmlL32pYyIk1zQBjWLYfffdo85GvxMhtK9RRY899ljT2WpU3ooVKzK6e8ghhzQqf2MTdvXVV/dGjvn1LWdAzA5Mnjw5gyCmU3JayaET0peTTz65r9NK6X/1q19NFi1aVEq+n9+8fauzGuWpTrOXv/zlGT1x6WcSz/mx7bbb9l3/L//yL4nK5ocYn7w8yw7/7ne/88Wk+4PIU1rbb7998uSTT6Zy8nZuuummnkPMz6OcbRphqHp1x7V/6KGHRkU9/fTTicri4tfZHnzwwdF0hunEqNift73tbWm9fPvb3x4mhJ3n5c4770xZSEePP/74zvPgJzgqOuTnuY19dLQNqs3L1IwJvm0/6aSTaiVCvdfCx8UQgAAEIAABCIwhARxQYwifpEeaAA6ojquvSweURor4HYkf+MAHkvPOOy95/PHHe6XWOgi/+tWvMi+Ve+21V8dESK4tAjvttFOmbi+66KJEo2D8P30dvmrVqp5TwHUubL755m1lqTG5tjNx3rx5jcne2ASdccYZGWezRs1ce+21aTE12kajlVz9a3vFFVek5/2dnXfeORPvz3/+s3+6b//SSy/NxJdzI2+9rqZ19sc//nEmfZVNeSgTbN4dn5tvvjl6uUabuHjannXWWb0pTuUU0gitU089NbGjz4466qjS8s4888ze+jUauaRpE1VPX/va1zJpfv/734/K0wnxl4PRdxppVJY6GF2QQ/Eb3/hGKlcOPjkxQ+G6665L46nMSl+M5JiSrZE9cUw+9alPpfZHTsLp06en5xTnhhtuCCUxdMfGyv6sWbMmue+++3o2uwiKptd13LXNcyIWydoYzi9btizDY/ny5WNarLHSoTEttEl8POholTZr8LT6s2q+7rrrrkz7ufvuuwfO33io94HhcCEEIAABCEAAAkNPAAfU0FcRGRxSAjigOq6YrhxQM2bMyLwsfuxjH0vU4WeDOiPVIe06qv7617/aKPweUQK2M1/TksXCNddck+qAnAzDHk4//fQ0v//wD/+QrF+/ftizPCb5O+ywwzLTqm255ZZBOyDb4KbnlLNBTqlQ0PR8zlZoq07dvDBnzpxM/KJpwJrW2V//+teZ9JXnV7ziFXlZ7p2TPr397W/vu1b88oJ10IXanBwIGn3mOL7mNa+JirTy5Cy2QdPubbfddqk8yfWdSTb+pEmTMnH/9Kc/2Si937o3aLSXy6emMAuFqVOnpnE0LZOcTi5oJJf/EYR1WCqNV73qVb3rNSJXjrVRCGNlfzSFo+pDbTR0P/fZnXvuuWm96
Jo8x6l/3ca67/PQSL+xDmOlQ2Ndbj99v042Vh2t0mZ9Nm3vV82XPmRx9wJNJ1snjId6r8OHayEAAQhAAAIQGG4COKCGu37I3fASwAHVcd104YCSM8Hv9Pv4xz+eqCMwFrR+iOvUCk2lFbuO48NNQKMaXIeBXbPG5vzhhx9O49rpxWzcYfjtOxa22mqrYcjS0OVBU6z5dkAjIB955JFoPv/93/+9pwPqmIqFbbbZJtUTrUFUFI499tg0vnRxwcL89YSa1lmte6d03/Wud2XykcdBZdp7770z8V07+stf/pJbZD//cmDFgkYJOZna6ovwUPDl/eu//msoSu+YRif58k488cRg3BtvvDGjE5qyz3cY2YsOPPDAjNylS5faKOnoOTnV7P3Dd2wrfxqBa4PuTzqnjyRGJYyF/dEIOlfHZdreU0891RstJYen/vLqeVS418mnHK2On0ZGjnUYCx0a6zLb9Dd2Ha3aZi2ftn4Pkq/f/OY3jbWfjb3e26o35EIAAhCAAAQgMBwEcEANRz2Qi9EjgAOq4zpr2wElR5P/db06oJcsWZJbSo162nrrrZOxWpQ7N3OcHJiArwc/+MEPcuWoc1LTY33iE59I6kytkptIgyc/9KEPpZ0hGpVDyBLQwuFyOLkOV40uueeee7KRzC8tCi47sO+++5ozG376OlXGXvznf/5nmgeNPNKIl7zgy6+rs9LpV77ylb307VSj6oCLBa3N8tKXvrR3nXVcXXzxxbHLesf9/Od1ck+bNi3lojryp0T0E/Dl7bDDDv6pzL6dXmzPPffMnNcPTQP4jne8I01XTgx1BOaFyy67LI2vfJ5yyil90V//+tf34oTWb9KIN6eD2oYcbV/84hd7cWLrYfUlOAQHxsL++A6LomkWhwDR0GVB9zaniyeccMKY528sdGjMCz3OMjCsbXaQfGn07zC1n3GmShQXAhCAAAQgAIEhIoADaogqg6yMFAEcUB1XV9sOKNvhpw7gpoIMrf5sUMemnBarV68u/ZX1ypUrCzs/bTpN/1ZZQh3iWiMr1FHadPptyrMLnGsqtmEL6vzWOhy2E1z880aoSN/8kT1aZ4eQJaD131xnkbbqcKobrE4dccQRhSI1bZvLh0Yj5QUrv67O+musaETQq1/96jQveY4kNxJMTig3OtSVQSMFY8HmP29tpwkTJqR5kezbb7+9T6yVl8fj8ssvz8g7/vjj++TtsssumThyOBYFrcnkyq6tptuzYcGCBYkcVVrzyYZtt902vT42aue4445L/ud//ieRnKqhqXuSnJW6f2mdEzuKy+apjv0Z1OZpSkU5kV1dlF3PSfc3OaOrhkHvz3n31AceeKD084HLr+TJcR7SLRenzFZTamqqVsfPfmShqTLz7jlFaVTRH8mqo0PDUjeOiepGDnA7yl66rnXqikJZHW1at1y+6ubfybHbQduslTNofVs57vcg+VJd+s9cdv00tZ/Q9LAuzdC2Sr03YQNCedCxuvXf1D2o6XqOlTfveF4bG/V3orxycw4CEIAABCBQlYDumXqv0DT4BAhAoDwBHFDlWTUSs00HlF4StZ6I62TRiAN1qg0a1Impr9s1isAtWK/868VRf/oa3s0j79JUh20oqCNit912S/S1u+sIlkxNu6RrYqO01DH5tre9LfP3zne+M7n33ntDySSazsnFt52wZ599dvKGN7whUx6tcaKgdVRmzpyZbLbZZim/f/7nf07yOpGDGRiSg2eeeWZaDtXN/PnzK+dMo2EcS7cNdShpBJI777YHHXRQMD11JMgZ8uEPfzjVKeVPa3Jo2jOtM6A60honmkIuFOy0XppeirCBgOyA2qlrky972ctq2QEn2epUrH5cfNvJOnHiRHcquLXyB9FZX7CcTo7BvHnzks033zz9HXO+XHLJJWkcrZX07W9/O/2dN6We0rX5X7RokZ+dzL5ds0lOVxusvDwHzX777ZfmU2VeuHBhRpzqwo3qcu2tzLppmrLPMdRWNrxKeO1rX5te/73vfa/KpcG4Td6TdA/TdKNf+MIXMs4dOXq+8Y1vJDEnZVX7U8fmqdNPerzpppumHF19OFvrtvr4ROVx9zjnsJItzVsTTKAHvT+fdNJJvWcE2RvXQa12pqB7qnTYf0aQLZKdzwvqzP7FL37RW5vSL4McmHp+0GhG3curBF+P3fo16njWFJO6zzumerZQ/ZYJg+qPZFfRoWGqG8dFzrrf/va3vbp19S6GGrGpaUpnzZqVaNpfHbvyyivdZb1tWR1tQ7dcRurk38mIbau2WStn0Pq2cuzvOvny9fWNb3xjT7SeM/Shiz9KV+8fp512mk2697tsvStyGzbAz1Sd+m/qHjRoPfNO5Nck+xCAAAQgAIGxIYADamy4k+roE8AB1XEdtumA0lfRriNF21133bVW6WbPnp1oGixf5ic/+cneiJWvfe1rmeMuzqmnntqX5kUXXZR5SVUH0Je+9KVEHUruOr3UhhwJGt3ir4Oi+OqU0suvDXppdfK01SLffpg7d27y85//PO0oUxyVQ1+Iq3PUv9bfV0fIqAV1Drky6MvvZ555pq8IP/rRj3pr46jTPRQ0FZvv0JS80NRl6mR83/vel6aneN/5znf6RKrT0E2Jpjjq0P/oRz8a7FxVp1aojiV0//33T9NSZyshS8Bf3F6c69oBJ93XKTkz9OCVF/xOK+UjpmdOhi9/UJ11srTdeeede3ridOkrX/lKqjehtZzWrVvXc6Qqr1pvSfrn63/elHpKz89/3nSDjz76aPJP//RPaV7k9A4FX554x0ayqG377e9Vr3pVorL4wepE3mgq/zq7WLycHGWDHcE1Y8aMspdG4zV1T1IHpBxPqmv96UOLL3/5y8nLX/7y9Jj0JuT0q2J/6to81aXLY9FWTn+1uZ/85Cc9B76L7/Q/BrXO/Vn1ofuInFwuPU0PKJ3Mu6eec845wezoHi2b7mRpmjq1D/cBjDuu+3iVoOkh3bXiow9zdO9xx/ytni9uueWWXPF19EeCy+rQMNWNA6KPRHzHsj7i+frXv55x5Pk85fjwQ1kdbVq3XB7q5t/JiW2rtllfTp369uWE9uvky9dXTb17//33Z6b49etb9ibk8C5b723ZAMekbv03cQ+qU8+8E7maZAsBCEAAAhAYOwI4oMaOPSmPNgEcUB3XX5sOqE9/+tOZDpWbb765kdJtscUWqVxNHaVRS3rhlDPhq1/9asahoymM/PDHP/4xvVZfP/tfR6qTSuuauJdXfSltO04lS/F8x4WcYKFw6aWXprIk006z467xOx71BfQ222zTu07ro2jtFHWy+tP16KtoTbEzSsHvXNtqq636sq6XcMddHQOxoOmxXDxtr7jiimBUjR7z42ldMT/4U8Kpg+/YY49NmepLcnUo+tdrvYFYUGeXi/utb30rFm3cHndr6jhGtk0OCsbXKY2OKwp+p5Xykjd9nWT58uvorMuXG/HkHDz/9V//lepNaD2lvfbaKz1/4YUX9uyHY6htbNSUS8/Pf950g/76G5KrthAKvjytXxMLcjD6+Qyt0yQ77eLI
WVBUFy4t+1GD7EHZYEdwaRRaU6HOPem2225LHY1ioi/K3X1HzkF/9GBoxFdZ+9OEzdMow6uuuiozMvcjH/lI75iO+39+nar9uPoWq1ho4v4s2f/2b/+WpqepOeXMU/q6p2rEk0Y0ufxoG1s/zl+jTCMmXFi7dm3PseZkxNqMi2+3uk+4azWNpEtH6x4ecsghvVFV7ry2eWts1dUf5a2MDg1b3Sjf6vx2nORomD59evohgp6RNBrandfWjTC39aHfZXW0Kd1qOv+hMunYoG22qfpuOl+SpxGhrl71jKyPyPRbdaP1DP3zOp73oUJRvbu2KTlN2gCVo0n9HfQe1EQ9804U03KOQwACEIAABLohgAOqG86ksvERwAHVcZ225YDSuhXqEHAviXpB1BQ4TQT3sinZblqVz3zmM+mc/86JJCeRn6Z1CIWcF/o61s93bNotfwqi2FReejF25dcXurGg6d9cPJVD+3JqaZoqF/bYY480js5rjYNRCepM9b8YlwNNI9P0d8wxx/Q6Ax1zxdNURLEg54Vjpa3fIeBfI7ku3nve856MHmgdGZeeOr/VCWCD1pBx12v7m9/8xkbp/ZZ++aNH1PlB2EBA6wg41uJYNG3chivz96xOfeADH+g5a9XmYn/vf//70zpVp1JesPLr6KzS0ZR2blSGG7k0ZcqUND/WoSNntZuiTp1pCr6TVizzphy0+Q/ZKOmu2olfP1ojS9faYOXJSWLDmjVrkv/+7//OyAs51uSY8NOU/Ssb/vznP6fMxCA2LV1InvKma/Qnp3OZKf9CckLHBr0naQSnP8pJjhEb3Bpgyrc61P1Q1v40afOU/rve9a6UpdpbUVD7dOxj60A2eX/2p67V6GalLSe1vy7Qu9/97jRPoY9ItK6My7Obxs8vpz/CObRmmh/X7vvT7Gm0tdKRQ9p/XvFHSOo5LRTq6o9kltGhYasb5VtTiuojIldHsmU2yMnszmub58gro6OS34RutZF/W3b7u0qbbbK+bT7s7yr50rVWX919cqeddko/ItIzpGy8q/uYzZG8vHpv0wY0rb+D3IOarGfeiaxm8xsCEIAABCDQHQEcUN2xJqWNiwAOqI7rsy0H1HnnnZe+/OklUF/ZNxE0DZV7qXRbjYDyO5Y01ZKm17ruuuvSJOUQ07o+7podd9wxPWd39JWsi3f00Ufb072p+dx5bfXVdyj4X/lvu+22oSi9Y3YqElseRVIHop/m1VdfHZU3bCeuv/76TN79cth9fcGaFzQ6ye9Y0IixUPBHMPmj3OTU0xolLl11WoSCna5LoydCwTrEfJ0LxR9vx+xUa3ICNBGq6JSra3/rnECxvFSRX6SzSkM2wqXvppvTdGjumGyTH9yIBHWuSRcV/JFKOp435aDNvzqaXJAt1DoodoSqOrDs9FTuGitPjiW1CZVFzihNcemP1JEjWVOw+R3qTpbWrHDl1nb33Xd3pwq3jou7Xg7OssGNlNW1ZeqsrNxB70mS7zsZNEpH9s0Gf4SsXcuujP1p2uZpujjHX9uie5G+jvc/QAh9NNDk/VnOUt/BqTzKwWQ/bPjud7+blkOMbTjhhBPS8+qkDgVNC6vpM0P1FoqvY/beovzpPmTbih3tF1qXra7+KD9FOjSMdaPRTfoowOnh9ttvH8StaZRdHG3FNBTK6Kiua0q3ms5/qEz+sSpttsn69vMQ2q+SL3f90qVLM3WqetXznm0//lpvsWl/i+q9LRvQdP0Pcg9qspFfnB0AACAASURBVJ5tO+OdyGkrWwhAAAIQgEA3BHBAdcOZVDY+AjigOq7TthxQv/zlLzMviXYKNFdMvYjpS/TYn4vntrYjVPlftWqVOx3d/uEPf0jzo5EIofWd3MVab8V1Wuyyyy7ucLr1p3JSx5pe/kLB/yIw5ijRgvAuLW011V5odJN96Q6N2gnlYRiOqdPUL6NGE2kkg/70Rb8/Aiw0ssKWwZ8SJeTQ0LRV7stofV3rdw4efvjhaV7EWmsHhMK1116bxlPeVU+hoM5UV7ai0Vuh6zf2Y/6oE3GK2YGqHKxOaVottbHYnz/dnfJRNH2dlV9XZ9VR7fRkwcIFveJq646pw9w5lLQegzs+efLkFI3fTuQ8ygs2/9J1jcJ0X4s7+W6rDjx/yjQr28pz19mt7LEcZUuWLLEi0t++Ldb1amtlgtac8kcLWaddngzdX3zH9e9+97u86JXODXpP0lSjPr/QOiU33nhjOnJOce2ad2XsT9M2z1+HS0ytY8fC07pVfjlDuuHrRN37c6g+Qvbbd+xJ/22wowNOPPFEG2Wg33Yko0Zxh54hbDw7hW8T+qMCFOnQsNeNbGeo7ahs+iDE173QGmqKV0ZHFa8N3Woi/8pbXqjSZpus77w86VyVfDlZvr6qbvU8GLJBWkfP1X1odJzkFdV7WzbAl9tE/Yf0sui9qMl65p3IaSdbCEAAAhCAwNgQwAE1NtxJdfQJ4IDquA7bckB97nOfS1/+9BLoj1Dyi2hfJt0Lo7bqNPWdB7rOdoRqfYcy4S1veUuan89+9rPRS/SFq5sqS3n47W9/2xdXTimXTzt1lousziIXR9s5c+a4U5mt/+KmeJpqLxQ03ZYvr2ynbUhW18e+/e1vp3nX1EM2+FMZXXDBBfZ032+/41ALy9vgL/CukXh+8Nex+cEPfuCfyuxrZIfjrY6MWNh5553TeFWmEovJ29iOf/zjH0/5iGeoo2iQMvs6lbeuh5NtO3OL1qPz5Tehs1/72td6HOQAkiNFQQ4fp2Pa3nvvvb0v7N20ovrC300Tpw5qfxSJRl3kBT//smdy3MhZoM4uTRmpr8M14kZtRc7touDLe81rXtMbtaQ8qA3JTrtyaFSJ/RLdyv7mN7+Zxld+HA8bz/6WDXXpaKtRV2WDHDn+tU068Ae9J/ltw67RpS/ztb6V7s8u3xpFa9mWsT9N2zzfqaw1i4qC1jRyZdBoX1sGXd/k/dnWR6zz2e+gdqMS/bJoSknfYStdlexQ/v3rivZ/9atfpTzERaMRQ0Gjrx03ba2Tqgn9UbpFOjSMdePsqbhoFFgsaPSzY+jbXhu/jI7qmqZ0q+n82/LY31XabJP1bfNhf1fJl7vW11fVbejZ+pFHHknrXXFmz57tLs9si+q9LRvQdP1bvSzzXtRkPfNOlFErfkAAAhCAAAQ6J4ADqnPkJLiREMAB1XFFtuWA8tdXUKdTLKjTVQuru04Ct9WaC/60Ue56vyNUI5Wsg8rF87eLFy/OyM8b/aC1HFwetJ0xY4YvqrfvryUTmz7Kdyyp48p2Hjmh/rRa6hiWAywU/C8mla8HHnggFG0oj7k1LpRvdT6HguvEzhuF4a7TdEWujuxIEE1T5NbXsJ2j+vLeXaetpoeLBX/dFelcLEhPncymppmMpTWKxzXqxvGR46Kp4OtU3roeLj2/nWl0nBtt5M7brS+/rs6qw9qtE2Yd1v6IHn2t768b569RZ7/kP//8822WM7/9/Oc5WjMX5fzw5cnJ5Ac7EmP69On+6b59f7H0KqOYJkyYkOqSdGrmzJl9smMHZMedHmqr+05
TYZB7kr3PiKkcDhrJpzW/nL64PGsNk9C9rsj+tGHzPvShD6UsQ2uLWa76SMCVI+QsaPr+7NfHq1/96kTOPBvslFGx9dT0rODy7rayB5q+atDg6/9WW20VFSNdcGlaJ3hT+qPE83RoGOtGo5b9j4Q0wi8WNNWyY2htr39NkY66uE3oVhv5d/mLbcu22abrO5Yfd7xsvlx8bX19tc94Lp7/vKzn79D0lYpbpt6btgFt1L+vl2Xei5quZ96JnOaxhQAEIAABCIwNARxQY8OdVEefAA6ojuuwLQfUK17xivTFXwvbFwV/fSZ1GMSmVHnDG96Qyi07jZL9OnDu3LnR7PjrsigfF154YSaupnfzOz9iXy/760tsueWWGRn+D/8F/Cc/+Yl/KrO///77p+WWs0ZTF45C0Mu26wDSdurUqcFsaxRGmZEYunifffZJZWqUiB/+9Kc/9c6p08FOV+V3JKgOY84uOQHdFH7Ks0aJhMLatWszuqBRNoQNBOR4UT24+peuNxGsTukr5qLgT1+39dZb50a38uvqrD99pp1i0u9M22uvvdIRF+pQ8sO0adNSjuIZW6tJ19j8h0Z3+LKL9q28kFPeH/Gq0VV5wR/VU3YtJo2S8p1gklFlNJ3f0Zg3ojEv37Fzg9yT/A8UXPuwW32hrntcbF25MvanaZsnx4vfpmP3P5+V1hZzZZOO29Dk/Vmy/fr4xS9+YZPr/fZHROpZJeTccxdOmTIlU2aVRfedvDborrVbjQT3+anssbDddtul3LSGlR+a0B/JK9KhYawb37mguli+fLmPJt1XneqjB6d71vamEZMkKdJRF7cJ3Woj/y5/oW2VNtt0fYfy445VyZd/jf/8HRvp43/UJodvLJSt9yZtQBv17+tlmfeiJuuZd6KYdnEcAhCAAAQg0B0BHFDdsSaljYsADqiO67MNB5Q6Bt1Lv7b6orso6Etld02so9p+tRxb6NamZdej0rQasaAOMpcPvejaL+X9l0d1JIVGNumY78CIjYyxnVGzZs2KZSv50pe+lOZrlKZ6sw69JqYO9KfVUeehC1o7y63zEups0kLUrm7tF+VOhrYaXeLiaavpu0JB6xn58ewaHaFrxtMxawc0qqyJYHVq3rx5uWLVHv1O36Lp66z8ujp76qmnpnqiNS/84I+0c7qkUVF2hOP222+fyiiactDmf/78+X6SlfetvFB7sA4yjdAIhSeffDIth8prHW2ha3TMdrj/7Gc/i0UNHvenGxLLpsKg9ySNHHL1rU7QSZMmJYceemhvVObll1+eiF/RVG9l7E/TNk95c/nW/TE2ta7jKz128bW97LLL3Kl02+T92dZHzEEmx5TLl+6tRUFy/DUddW3eiJqYPE0F5tLVNnbP0Acm/jOR1rDzQxP6I3lFOjSMdeOvaab7fayd2JGZsWk3y+ioWDWlW03n39eL0H6VNttkfYfy4h+rki93ndXXmBPYn54yNI225JWtd5d2Uzag6fq3elnmvajJeuadyGkIWwhAAAIQgMDYEcABNXbsSXm0CeCA6rj+2nBAqQj+FEJ2yiZbRH1B53fKaHHcUFAHgotXpvPLyfj617+eXlc0DZg/Pc4222zjRKRb/8vKmKPMX4NI+Y2NjPE7o1Se2LQ+6uTzHVqaqmlUgr/Qccxh55flzjvv7HXCXn311f7hzL4cAk4PtFWntjrstLaXfmsKkpBj8Ic//GF6Xazu9NX0+973vjSeRpu5dXgymUiSxHdWaqo5Qj+B173udSnLIjvQf3X4iK9TefXjrq46fZ0vvwmdVeex01frWPLXwHBxQiOu/PVqiqYc9PMvPqEpyBybMltfnjp8Q+3BLoIe+zJdncWvfOUrUx4//elPC7Ogaz7ykY+k1ygPcjaXDeqkdGy1DfEtK8vGG/Se5K+D9a1vfcuKLfW7jP1p2uZpylnHUnayKJxzzjlpfF2ntVlsaPL+XLY+3vGOd6T5mjx5ss1S8Lc6ev2pplQe256DF3oH/dG7eSMF7ZpldorAJvRH2SrSoWGsm9///vdp3cWc8bIZGjXmdFXbmLOvjI6KVVO61XT+PfUK7lZps03WdzAz3sEq+XKX+fqqtRJDQc9+/nqJ+oAiFMrWu39tEzag6fovq5d+OZqsZ96JfLLsQwACEIAABMaGAA6oseFOqqNPAAdUx3XYlgPqC1/4QvryXzTllXUonHfeeUEKmlrCdSiU6fxyQpxjQtfqy8hYsOsqXHLJJX1RtYaFy8PPf/7zvvOaDsZ3FuV1fPidUXKaxIK/ULLqSw6XUQla48LxKhq5JSeSpmtUfK15Egv2i091LDlGcuTpy+dQ+PznP5/mRfoZCv6UVcpHnr58+ctfTuU1NbonlKdRPua3lyI7oHKqg0nOvFDbchx8ncpbQ8XF1/Q5Tge1XbVqlTsV3Prym9BZp9OaQs4GuzbRZpttlmi6OT9oFKaf/6IpB/38l+HjpxXa9+XFpsyzo8zy6s+3x6E1gWwejjnmmEz5//jHP9ooub/tiMa6I9r8xAa9J/md47Fp4vx0Qvtl7E/TNs93fPz4xz8OZStzzHdevutd78qccz98fcizt2Xuz2XqY+XKlRl9it0vXP78rb2/n3jiif7pwn191OLaspyDsbDDDjuk8T72sY/1RWtCfyS0SIeGsW6+853vpGxiH5Icf/zxaRzxltM7NlKqjI6KVVO61XT++5TDHKjSZpusb5ONvp9V8uUu9vU19iGGRiq5NqZtbJRU2Xp3abttXRvQdP2X0UuXd7dtsp79Z7zQfd/yUp3EnMHj4Z3I1QFbCEAAAhCAQJMEcEA1SRNZ44kADqiOa7stB5Rbi0cvG5ruLG+NBX+EQN4LozpidF5///Ef/1GalOK66z7zmc9Er1OHmounrytD6yypE83F+fOf/5yRpSnH/Kk/FC9vxJXfGaWOwlCQTOXFpRmbTiR07VgfU97dlHjK/29+85vcLJ188slpOfVVZyxIl/wp1Y499th07Zw8Pv5CzRpRYcPChQtTOY63OilCQXlQ23HxtEaXDVo757jjjkv/7HSONv7G+NtOAZZnB7Rml/ty+Ve/+lUQh9UpjSAqCv40d1orIS9Y+XV11l/nYtttt+1L+oILLkh1SLoU6gz316tRnLw17Gz+89pDX2YCB6y8PN4f+MAH0rK85z3vCUj7+yH7BXjIzrqL77nnnsRfT1AjHmIjRd01diuHlWunZUa02evzfg96T/rBD36Q5ilvBJTWqdMUcVYPy9gf5btJmyd5zpkqnvb+pxcfOZAfeuihFJkclo59bI3DJu/PZerDb08hfbj44osTPQuE2qIK5nfeajqtskF67o/+22OPPYKXqsPW2UGx05SzNtTVH8kro0PDVjfK984775zqVGgUjEZR+9MXimHs+Uryyuio4jWhW23kXzLzQpU222R95+VJ56rkS/HVfvxnrunTpweT8EdWhfTDXZRX723ZAKXdtP6W0UtXZrdtsp55J3JU2UIAAhCAAATGjgAOqLFjT8qjTQAHVMf115YDaubMmWkngToALrzwwmDJNJpFa564Tiqt1R
EK69atyzgzjjrqqFC04LEDDzwwla/1NkKd4FdccUUaR3mJjTLwF/v1O5Q1AsB1TPkvtl/84heDebKdUW9/+9uD8Xbaaac0X+rUffzxx4PxhvGg/RI1NhWh8i498DuNVq9enVskjRhzOuO27373uxN1mMeC3/Etx5imfnRBX2SqbtXx53eka+2ZUFi8eHEmfemPDeogcXnT9qKLLrJRNvrfdv2gGAPp9ZZbbpny0rpJoWB1Sl+65wU9jPmdvvqCOi9Y+XV1VuV1OqB1fmyQ082dj42I2HHHHdM4iuvrrZVn86/RQ3VCFXkatejKou2KFSuCSZ9++umZeOrsCwVd70+V9qpXvSq54447QlFzj22++eZpem9729ty41Y5WeeeJOeDY6VyhZxqCxYsSHS/UryXvvSlGZ5l7I/K0qTNkzyN4nP59u9/GrXnRjS4Nmb5aI2rUGjq/mzTiz0j+Os/vfOd7+zLkht9FHOC+x+ZLFmypO/62AHprmOnbWgEmabL1KhLF2+77bYLjtypqz/KYxkdGra6Ub79URLi5NsZTc3ppiv1P9zRhxChYHUmpqM23qC61XT+Q2Wyx6q02abq2+Yh9LtKvnS91VdNrRsK/ujA2CgpW5+23tuyAcpvm/ob00vLqcl65p3I0uU3BCAAAQhAoHsCOKC6Z06KGwcBHFAd12NbDig5edR54jpS1JF22223ZUqnF0jXwebixV4Y7Rojdk2EjGDzQyNR/GnxDjvssEwMLWzsOz/01Xjsq3z/a0N9mS9HlUbguLWjNB2FP0+930nnJ2pfplV+vzNL66z4U2uoE33p0qW+iKHfdy/xrm5D025IT+S80cgGF0/TkBUFjWRz8bXVWjeLFi3KvUxOUP8adZRqAeVp06alX9ZqCj6tzeHiaYRKKKjOXRxt58+f3xfNn2pFcR588MG+OBv7AbUjjfBwrNTeb7nllkyxxU4OWBdH27vuuisTx/2wOlW0Hpp1gMWmXozJr6uzvg3UWgk2qMNZIy30F3IsyTHnf/UtNqF1dJxcy+ekk05ypwbaVpF37rnnZupQI1tDQc4K3yn0pje9KfHtuXRGslxHssos+zdnzpyQuNxjdi2dvJFZuYICJ+vck9Rp7o8O/ehHP9r7SENllD36xje+0bNpsbKXsT/KcpM2T/L8jyt0Tz3ooIMSOYHdfVHHxEXhhhtuyOjDKaec0jtu/zV1fy5bH75TMzTln/u4QU6/efPmpdnVvUplUJ3oL2/a3PQib0ejYd212mqdTH99No0c80dF6z6o9R9Doa7+SGYZHRq2ulG+ZSt9jtJJOco18sWtOThhwoTEnwY6Nq1zWR1tSreazn9IN+yxKm22qfq2eQj9rpIvXe/ra2jkouLoQzDfrsbuQUX13pYNUB6b1N+yemn5N1nPzvarTfJOZEnzGwIQgAAEINANARxQ3XAmlY2PAA6ojuu0LQeUiqHRKP5LpkaX6Oteda750zb4nQmxkUfq6HLx5Gyw66QUYTv44IPT6yVH0xrJwaMRSv50bhptkDeKZuLEiRk5Lk/abr/99okcR/66G+rE0heP1nnkv0w7Geo82WWXXZKf/exnvc4td1xf7edNu1VU9q7Py9Hiz6/vyrHpppv2piTUtIT6Uwecz97F+/Wvf12YZTFy8bW1X7CGBKhj23cu+dc7GRp55R/XNDH6qlT16gfNde/H01e3Gi3jOy7Vse7i5E0F48vdGPf1tfEnPvGJlIXqXKPMPve5z/VGnTlGbqsRIXa9jphOadSb2lIoyDklPXNytVXaao/WGRiTP6jOat0JTZvmp60vhTVll9/pHMq3O6ZOMq2b5cvQvuyMHCt+iOVfa+VpCq+q68YNIk+OMa3B5udX7dTvxHd51mgQ18mn+KoXOfHVAe87o3Xu05/+dG+EpLu2zFbtUM43Pw3JUv7UUX3zzTeXEZMbp+49yV+83Wfm7yv/WvvIhjL2R9c0afMkb++9987Ur59XtTV1rrrg81E8tdUTTjghqItN3J/99GLPCHb9Jz2TaGSiPghRkN77ZdK+7hn6MMa3JVqnzi+rK3Pe1taZZMuxKkeJRo/pfujS1v1E00/mhTr6I7k2P6F7mOINS934LPxOb8fMbcVFNtYfWf/AAw/4l6f7vs7o+piO+vEG1a00UTOdn8u321bJvy8ztl+lzUpGE/Udy4t/vGq+/Gc+PT+EwuzZs9M2JJ56ltBIdI2w94Nfn7be27QBLg9t6G9ML12adttUPfNOZMnyGwIQgAAEINA9ARxQ3TMnxY2DAA6ojuuxTQeUiqI1LDTnuesM8B0O6pTXy8vll1+efrWoaYdCwZ+zfKuttgpFKTym0S52pIVePvXFpEYqhKZRs0L1RfLXv/713kuuK4scRxo546b200uv60xwWzs1hv8y/f73vz/RYtourttqihJ9yZvnELP5G4bfdgFwV56y21mzZhUWY88990x5yaFpHRYxAeqA1GgDPy9yMukLagU7FaOLZ79ED3UgyNnodEAjV/zOeK3ZMZ6DnBOaos2O5nF81ZbkLFG7CE17mKdTchLZILvjZIe21qGbJz90vT1mdVaOJhtHv1VOfaVdJvhf8FtZdk2govxL76uEQeVpbTWbV9n/UFizZk3PUe137Ls2I06ysxqB6NpUSEbsmByANh/+77prYyndJu5JJ554YjrSy91PlE+N0pFjNfahRZH98bk0ZfMkU/ciOWOUR5dfbTV9pL6q94OdOtLxj62FV/f+XKY+lIbLh7/Vc4iC9EZrVfmjpPx40lU9K1RtT5It+yZZciDLCW4drdJ9TQmokVJl72eD6o/yU0WHhqFuehX0f/80WkyOafF0eih2Gjmp4I8yiU3rrHhldbQJ3fq/rPc2TeXflxnbr9JmnYy69e3k5G2r5st/btM0mqFgnSqu7dqpdPPqvU0b4PLcVP2X0UuXZmjbRD3zThQiyzEIQAACEIBAtwRwQHXLm9Q2HgI4oDquy7YdUK44GkGiUUD6mltfxNsvUvVFur487CKoM1zTfmmEgab6Ktsh7OdNnW36alrT5g3SQep3cMl5paBOc3G49tpre/t+euxnCUh/rrrqqt7f2rVrsydL/Fq1alVvqih1NhC6IyA7oHU6NNWYnECyB2pLo+Zk7Y7Yxp+SHpg1pZjssaZnlH5o1Nx4Chp5qfuJpqm1zu6mODRp85TfO++8M1m4cGEyiP3NK1MT9+c8+WXPqR40Bac+ipFDo+69Qs83/gc2cjKJo55F9ExUdZSiX44u9EfpjWXdyD5ohNIll1ySFl1MpYe6h/hOO3/6Yo30GoYw1vkfpM12Ud+D5Kur+mzSBox1/ecxa6KeeSfKI8w5CEAAAhCAQLsEcEC1yxfpGy8BHFAd121XDqiOizXUyenrR/dlprYLFoZHfQ11IcgcBCAAAQhAAAIQaJmARmpohLGel/TxTl6w6+bZ0al517Z1btTz3xaX8SKX+s+vad6J8vlwFgIQgAAEIFBEAAdUESHOQyBMAAdUmEtrR3FAtYY2KthfGF7T/5VdEyYqkBMQgAAEIAABCEBgIySg9
eHcRzt6fsoL/jRsmnJ5kBHqefIHOTfq+R+kzFyzgQD1v4FFaI93ohAVjkEAAhCAAATKE8ABVZ4VMSHgE8AB5dPoYB8HVAeQTRJ/+ctf0s4Urf1EgAAEIAABCEAAAhDIEtDUh875pG3elI8aSfHmN785jW/X3sxK7ubXqOe/G0obbyrUf3Hd8k5UzIgYEIAABCAAgTwCOKDy6HAOAnECOKDibFo5gwOqFay5Qj/5yU+mHSTf+ta3cuNyEgIQgAAEIAABCIxHAs8++2yi51TnhJo9e3YQgzr63/ve96bx9HHPMKwlN+r5D8LmYGkC1H8xKt6JihkRAwIQgAAEIJBHAAdUHh3OQSBOAAdUnE0rZ3BAtYI1KlTT7bm1DNSh8ra3vS3RQtoECEAAAhCAAAQgAIEsgR122CF1LL3uda9LjjjiiOTOO+9M7rvvvkTr6+yxxx7JW9/61jTO5ptvnjzyyCNZIWP4a9TzP4boNoqkqf94NfJOFGfDGQhAAAIQgEBZAjigypIiHgSyBHBAZXm0/gsHVOuIMwnMmzcv7SRxX/S+6EUvSrRuAQECEIAABCAAAQhAYAOBp59+Otlpp536np3cM5Tb6nl21113HSrnk0ox6vnfUBPsDUKA+o9T450ozoYzEIAABCAAgbIEcECVJUU8CGQJ4IDK8mj9Fw6o1hFnEnjiiSeS4447ru9PX/MSIAABCEAAAhCAAAT6CSxbtizRuk6/+93vkp/+9KfJ9ttvn/zyl79MDjzwwGTmzJm560P1S+v+yKjnv3tiG1eK1H9/ffJO1M+EIxCAAAQgAIGqBHBAVSVGfAj8nQAOqI41AQdUx8BJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBADQI4oGrA49JxTQAHVMfVjwOqY+AkBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgBgEcUDXgcem4JoADquPqxwHVMXCSgwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgwAOqBrwuHRcE8AB1XH1v/71r08+9rGPJXJE8QcDdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAHhlsHNt10015f7ne/+92Oe5NJDgKjTQAHVMf1J+fTNttsk3zqU5/iDwboADqADqAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA4MuQ589rOfTT74wQ8mv/jFLzruTSY5CIw2ARxQHdefvmbYeuutO06V5CAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQGAQAkzBNwg1roFAkuCA6lgLcEB1DJzkIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjUIIADqgY8Lh3XBHBAdVz9OKA6Bk5yEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAARqEMABVQMel45rAjigOq5+HFAdAyc5CEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAI1COCAqgGPS8c1ARxQHVc/DqiOgZMcBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEaBHBA1YDHpeOaAA6ojqsfB1THwEkOAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEANAjigasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAjUI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAHnryBgAAIABJREFUAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOq38sHFBPPfVUsnLlymTx4sXJokWLetu77747eeSRR5IXXnihj8Bzzz2X6Jqiv3Xr1iXPP/983/WhA2Vlrl+/PpinkMwmjz344IPJbbfdNiZpN1GOGF/VUV7QzTNWzzpHGB0CMR2w9Vul3RaVXvZDNuWWW27p/a1Zs6bokt75snmtag8U35Z30N8h21iqcCUiyW6uXbs2WbFiRc8233XXXYlskPJfJpTlFyv7008/XSYZ4kAAAhCAAAQgAAEIQAACEIAABCDwfwRwQKEKEBiMAA6owbgNfFVXDih1UN50003J9OnTk1133TX6t9deeyXHH3984jsq5KzKu8Y/t9tuuyUHH3xwcs455yRLliyJcqkqc+rUqcnMmTNbdwqp3BdffHEyYcKEXpnvuOOOaBmG+cTy5cuDdbbnnnsmjz/+eDTrCxYuCF6nOl64cGH0Ok4MH4GqbaxMuy0qpRzZvj2QHSgTqua1jD149NFH03bs52nQfd8mlilTmThy/Fx99dXJPvvsk3Lbfffd033Z01NOOSUR17xQhV+o/Erz2WefzUuCcxCAAAQgAAEIQAACEIAABCAAAQh4BHBAeTDYhUAFAjigKsBqImoXDqiHH344mTJlStqpqQ5IdTiqw3nvvffOHHedk/fee29aPH01f8EFFwzUmXvUUUcloa/r68iUE+2xxx5L89fEjkY33HzzzZmOYLEYVQeUnEznnntuog5sV6duK0deLEhXTjvttL5rzjjjjETnCKNDoE4bi7XbotKfddZZGd2ZPHlyqVGRdfIaswcafeV0volt0w6oe+65J5HD38+b2qtstRzF/nHty5kXG2Fah59LRzIIEIAABCAAAQhAAAIQgAAEIAABCJQjgAOqHCdiQcASwAFlibT8u20H1N/+9reMU2XixInJ7NmzE386NTmb9t1330yHp++Acgj0pb7rrNT27LPP7snR6Co5hDR9lBwV1ulx0kknRaeyszLPPPPM5Jlnnul9jb969ereNF7nnXdeJl2lffjhh2fK4PI4yFaMjjzyyL40lM7SpUsHETk011x66aXBcqmu8oLqzNW16pQwugRsG2ui3YZoaLq4SZMmpXrj9CdkS0LX65jNax17YB1QBx54YLJs2bLetHZyOF977bWZvN566609OyVHjBzPcp65MmjbpANKo0Nli518fQigqQudE0j504hVf2SU4mo0VN5UgJbfrFmzevF1jftTPcn5NWPGjDR9yXZpx+qG4xCAAAQgAAEIQAACEIAABCAAAQhsIIADagML9iBQhQAOqCq0GojbpgNKUyrtv//+aSejHEOxET2arsrvEA11GtvptdS5GQrqOHUdq26r0UWhYGXGpuy64YYb+mSqA7lO0MgsjeyyDjOXZ201ld0oB9W3Xx63r85nOQ5j4corr0yvmzt3biwax0eAgG1jTbTbULFj0zdedtlloejBYzavdeyBdUDJ6eKH6667LtVx
tQut+eaH22+/PXO+KQfUk08+mRn5JLt73333+Umn+5paz7fLyqc4x4LlF6trXS8+vmwcUDGqHIcABCAAAQhAAAIQgAAEIAABCPQTwAHVz4QjEChDAAdUGUoNxmnTAXX55ZdnOlA1GiYv+CONQg4ou85IXufmySefnEn7/PPPDyZtZcY6nHWxnd5LoyMGDY888khmhMO0adN6I8M0sso5abRVh+4oh9haUCqbOuBj4Zprrkk55HV4x67n+PAQsG2siXYbKp2m7ZNeWYeupvosG2xe69gD3wF13HHH9WWhyAGlC5R3Zw+ackBpakwnU9tbbrmlL2/+AZtPjVbVKKZQsPzy6lrXazpOlxccUCGiHIMABCAAAQhAAAIQgAAEIAABCIQJ4IAKc+EoBIoI4IAqItTw+bYcUE888URvnSfXuagv3fXlfV546KGHkqOPPrr3p+nvbNBUdU6etnJYxcK8efMycTXFXShYmXkdzvPnz8/IrNKxbdPW6LD99tuvNwJAzhY3GmjOnDmZNEbdAeWPiPA701V/WmdGehIK/vRksdFroes4NnwEbBtrot3aUvrOHjmKNVWcbyt0vkywea1jD/w8aQSlDdaxY0dAKf5f//rXtBxNOKDk+PYddFoDytkemz/3W84muyZUrE1afnl1Lfn3339/cuqpp/b+Yk4tlw+2EIAABCAAAQhAAAIQgAAEIAABCGwggANqAwv2IFCFAA6oKrQaiNuWA2rhwoVpx6k6gtUpXDdU6dy0I2+0/kooWJl5Hc6aQsvv1NZIgDpBaatD2A/XX399Jo2NyQGl9WHsui8aAREKOKBCVEbzmG1jeU6Jsu3WkrjiiivSdqN106yzOG+0nS/L5rWOPdAUm3Iu6y/kaC3jgJJ9cDKKHEV+OWL7dipRrcdVJthRU7F2a/nl1XWZdIkDAQhAAAIQgAAEIAABCEAAAhCAQJgADqgwF45CoIgADqgiQg2fb8sBdfrpp6cdwnLaNDGNWpXOzVtvvTWTfmgKLKG0MvM6nO16RrFRVc8//3yiNa3cX5Uv+zdmB9Ts2bN7Uwr6Tjztr1ixok+rcUD1IRnZA7aN5TklyrZbH4ba2wEHHNBr7/vss0+i34899lim/WtkZZlg89qEPYilW8YBFbt20OMnnnhihsudd95ZSpT9oECcX3jhhb5rLb9YXcshp78mnGp9meAABCAAAQhAAAIQgAAEIAABCEBgHBDAATUOKpkitkIAB1QrWONC23JAaWon39Gg6fXqhrKdm0rnkksuyaQfW3/KyszrcJYDxS9TrHNVnbp+vKJ1UHwuG7MDStOJKZxxxhkZPhoVJaeBH3BA+TRGe9+2sVi7USnLtlufyLJly1J9uuiii9JThxxySHpc086VWWPI5rUJe5BmyOyMhQNq0qRJKRPZqLJ2edWqVZnrdK1GeNlg+YXq+uGHH05laQo+AgQgAAEIQAACEIAABCAAAQhAAALVCeCAqs6MKyAgAjigOtaDNhxQMoC+A0b71sEwSDHLdG5Krr7MP/zwwzN5WLRoUTBJKzPW4azOVk3j58qlDu0HH3wwKFNpuXjannnmmcF4oYMbswPq8ssv7xVZo1P22GOPDKO5c+dmcOi3YxhbbyZzAT+GloBtYyGnhDJfpd36hVX7crriOzT8afl0vowe2bw2YQ/8vPr7XTugtO6c4+S2ZZxyyrNGKrlr3NZOH6p4ll+orjWloJPh15fPhn0IQAACEIAABCAAAQhAAAIQgAAE8gnggMrnw1kIxAjggIqRael4Gw6otWvXph2M6mjUAvZNhDKdmzK+p512WiZ9jYTQ8VCwMkMdzuvWrUvs1FUXXnhhSFzvGA6oDWi0hpXrbL7sssvSE3PmzEmPOx3x18nxHXFlHAepYHaGjoBtYyGnRNV26wqptjlx4sSeLk2dOtUd7m3vu+++jI5pWtCiYPPahD2Ipdm1A0pTgrq2qK2c6FU+DHCcnYyQ88jyO/XUU5OVK1f2/hRfa1D5ckIyYrw4DgEIQAACEIAABCAAAQhAAAIQgMAGAjigNrBgDwJVCOCAqkKrgbhtOKDU4eg6KbXV+ixNhFDnpr7g1zRSWp9JX9bbkU8aaaMpn2LBypw2bVqyfPnynrz58+f3pgTzpxPcfffdEzlS8jpucUBtoO07oPxpEDWi4qCDDsroycyZM9MLb7zxxvQcDqgUy0ju2DYmp0TddutAqI06W+OmeHTn1EYnT56cnpctKFpzyOa1CXvg8mO3XTugrF0Wtyph3333TVnqWk19aIPl5+omtsUBZQnyGwIQgAAEIAABCEAAAhCAAAQgUI4ADqhynIgFAUsAB5Ql0vLvNhxQ6lT0OxzbckD5aYT25YySAyQvVOkw1ciq0LRTVr6m5pMzzP3JOVY2+CN/VKai/JeVO1bxYg4o5cdfu8fV34oVK3pZ9R0LOKDGqvaaSbdKG5MelGm3LmdHHnlkamtC6xmdddZZ6XnJXrp0qbs0uK2S17L2IJhQkiRdO6DsiDA506sE64C65557+i63/LTmlOy//uz1qg8cUH0IOQABCEAAAhCAAAQgAAEIQAACEChFAAdUKUxEgkAfARxQfUjaPdCGA8pO9aQRRE0E27mpDkx1cE6YMKH3pzWajjnmmOTss8/ujWIqk6aVuc8++yTq1J4xY0ZPttJwf+oYzxtNVSa9ojgbswPqkksu6Su+nS5R3DVyZcGCBSl3HFB92EbqgG1jTbRbAZDDybXNQw89NMjktttuS+Mo7gUXXBCM5w7avLZpD7p2QK1evTrDQjw0hWGZoJFjmrLP8dY2ZAstPzvd4uOPP55oVJmTgwOqDH3iQAACEIAABCAAAQhAAAIQgAAE+gnggOpnwhEIlCGAA6oMpQbjtOGAsp2VchC98MILtXNd1Lk5SAJWpr/my/r16/vWftpvv/0Sf62iQdLMu2ZjdkBdfPHFfUXXemFyIroOaW3F4KabbkqP4YDqwzZSB2wbs06JQQujqTB9vZED2v5p2j0/zv77759ri2xe27QHXTug7Np84rJmzZpS+DXy0+eofU2jaIPlF6rrW265JZWFA8oS5DcEIAABCEAAAhCAAAQgAAEIQKAcARxQ5TgRCwKWAA4oS6Tl3204oJRljRzwOyzLTF1XVNQynZtFMux5K9PvcFZcOaHkdPLLcsYZZ1gxjf3emB1QF110UZDT1VdfneG79957JzfccEN6DAdUENvIHLRtLOSUqFoYjZKz7dJvo3n7yk8s2Ly2aQ+6dkA9++yzfaOYNC1fmeBPpSm2mr4v9FGB5Req66effjo59thje3+haRPL5Ic4EIAABCAAAQhAAAIQgAAEIACB8U4AB9R41wDKPygBHFCDkhvwurYcUJquzu8EXrx48YA53HBZmc7NDbHL7VmZtsNZUvwv9l2Z2lqbaTw6oDRiburUqRl9mTJlSvobB1Q5XR7WWLaNhZwSVfN+1113pfpxyCGHJEoj9jdr1qw0rtrvVVddFU3O5rVNe9C1A0qFnj59eoaFHL1lgtqgs33aHn/88cHLLL8m6jqYEAchAAEIQAACEIA
ABCAAAQhAAALjnAAOqHGuABR/YAI4oAZGN9iFbTmgNErI77C89NJLCzOoL+qffPLJ6LokbXRuWpmhDmfl67DDDsuURw620AiAwkIWRBiPDighWbJkSYavrzs4oAqUZshP2zbWhFPCty9z5szJJWBH78TWi5IQm9c27cFYOKC0Pp7ftrTeXZlgpzucO3du8DLLr4m6DibEQQhAAAIQgAAEIAABCEAAAhCAwDgngANqnCsAxR+YAA6ogdENdmFbDqhly5ZlOjr33XffRCNd8sLy5ct710ycODG4NkkbnZtWZqjDWXleunRppjzqxL311lvzijPQufHqgBKsU045pY+xOOOAGkiVhuYi28bqOiU0hZumgHOOlKJ1jGR37FpQWg8pFGxe27QHY+GACjl6YywcH013qLWzHG9tY8wtv7p17fLAFgIQgAAEIAABCEAAAhCAAAQgAIEsARxQWR78gkBZAjigypJqKF5bDihlT1Nj+Z2WmsouL5x22mlp/Hvvvbcv6gMPPJCel9yzzjqrL07VA1bmzJkzoyKOOeaYTPoHHnhgonVVbFCHrtY7cn/q9C0bND2Yz6zKtWXT6DKe77g7//zzc5PWOmFyPvrl1z4OqFxsQ3/StrG67VYjnpyOHHTQQaXKf9JJJ6XX6Fo5f0LB5rUJexBKR8euueaaTJ660HM5kw444IBMukWjUxctWpSJH3PKqUyWX926jrHjOAQgAAEIQAACEIAABCAAAQhAYLwTwAE13jWA8g9KAAfUoOQGvK5NB5RdO2mvvfZK5GQIBftlfsgB5UZIuc7nE044ISSq0jEr8+STT45ef//992c6YpWPyy+/vC++7bA988wz++LEDiiuK5+2samuYtcP2/HbbrstLU9eZ77L95VXXpnGdxy66Jh36bNtnoBtY3XarRy+/micsrLkZHH6pG1sGj6b1ybsQYyozdO1114bi9rocb9NOibz5s0LpqGRTrLbLp72n3rqqWBcHbT8ytZPVCAnIAABCEAAAhCAAAQgAAEIQAACEAgSwAEVxMJBCBQSwAFViKjZCG06oDT1lTpwXeeltvvss09v6jo3HZ86MzXt3IQJEzLxQg4o22Gr0TKPP/54LSBW5uTJk5NnnnkmKtMfpaXyKN933HFHJv4gDiiVQx3Qu+22W4aDpi68/fbbo+tiZRIewh/nnntuWh6NGNPNMS/o/JQpU9JrxBgHVB6x4T9n21iddqtRdL492XPPPYOjEC0Va4ck46abbrLREpvXJuxBXyJJ0msHGr3ll0XrMTm7GLqmyWMXXnhhJm3ZHTl/NYWe1rZTO1S7U/ldHidNmtSbijQvH5af6rpoir88eZyDAAQgAAEIQAACEIAABCAAAQhAIEwAB1SYC0chUEQAB1QRoYbPt+mAUlbVmRka1SLHjTqPXeem3foOqCeeeCLRl/Q2jn7LQXPZZZdVppInc+rUqdH1nTSCSx2xfl5UFjmmnnzyyV4+yjqgVq5c2ZsOy8rzZfv7Gn0g58z69esrl7frCx599NHk6KOPznBSWQ4++OA+h53Nmxx6frlxQFlCo/E7r41VbbdyVJ944okZvXA6st9++yU33HBDEMpdd92VHHXUUcHr1G7lIFXIy2tde2AzptFG06ZNC+ZJ05bKfnQRtIadP7rJ8ZTTKOQIl72KhTx+e++9dyKHl+4FBAhAAAIQgAAEIAABCEAAAhCAAASaIYADqhmOSBl/BHBAdVznbTugXHE0xd7pp5+eqDPXdXTarTpDjz322N4oBH9kk5wZtkPUv7bsOjAuL9oWydSX/LHgr0Hj8qH8uS/9yzqg7rnnnigLJze0XbduXSxrQ3NcIylCedcxrXNVFJyzQVzvvvvuouicH0ICRW2sSrtdtWpVVJ+kU7IboTB79uzc62RzFIryWsce2HxppFOsbej4GWecYS9p7bfKffbZZyczZszojeZUe/Md4jo+f/78wlFmRfxUrq5Gd7UGC8EQgAAEIAABCEAAAhCAAAQgAIEhIoADaogqg6yMFAEcUB1XV1cOKL9YcqBohNOKFSt6Uz5pVJFGOPCFvE9pfO/rJqpRFXnrzYxvQpQeAs0SkINo9erVvXX6Hn744cLpMptNHWkQgAAEIAABCEAAAhCAAAQgAAEIVCGAA6oKLeJCYAMBHFAbWHSyNxYOqE4KRiIQgAAENiICctxrNJKmG6zyJ2cSAQIQgAAEIAABCEAAAhCAAAQgAIGNiwAOqI2rPilNdwRwQHXHupcSDqiOgZMcBCAAgQEILF26NHfqvti0fl2tKTVAkbgEAhCAAAQgAAEIQAACEIAABCAAgQEJ4IAaEByXjXsCOKA6VgEcUB0DJzkIQAACAxBYtmwZDqgBuHEJBCAAAQhAAAIQgAAEIAABCEBgYySAA2pjrFXK1AUBHFBdUPbSwAHlwWAXAhCAwJAS0Bp5a9asqfy3fv36IS0R2YIABCAAAQhAAAIQgAAEIAABCEBgUAI4oAYlx3XjnQAOqI41AAdUx8BJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBADQI4oGrA49JxTQAHVMfVjwOqY+AkBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgBgEcUDXgcem4JoADquPqxwHVMXCSgwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgwAOqBrwuHRcE8AB1XH144DqGDjJQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCoQQAHVA14XDquCeCA6rj6cUB1DJzkIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQjUIIADqgY8Lh3XBHBAdVz9W265ZbL99tsnixYt4g8G6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAODLkO3HTTTclmm22W7Ljjjh33JpMcBEabAA6ojutPI6C22GKLZJNNNuEPBugAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOoAPoADqADgy5DrzkJS/p1dF2223XcW8yyUFgtAnggOq4/r7yla8k3//+95OJEyfyBwN0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAeGXAcmTZqUfP7zn09++9vfdtybTHIQGG0COKA6rj/WgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBgDWgasDj0nFNAAdUx9WPA6pj4CQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKAGARxQNeBx6bgmgAOq4+rHAdUxcJKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCDAA6oGvC4dFwTwAHVcfXjgOoYOMlBAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEKhBAAdUDXhcOq4J4IDquPpxQHUMnOQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCNQggAOqBjwuHdcEcEB1XP04oDoGTnIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABGoQwAFVAx6XjmsCOKA6rn4cUB0DJzkIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAj
UI4ICqAY9LxzUBHFAdVz8OqI6BkxwEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgRoEcEDVgMel45oADqiOqx8HVMfASQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQA0COKBqwOPScU0AB1TH1Y8DqmPgJAcBCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAoAYBHFA14HHpuCaAA6rj6scB1TFwkoMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIMADqga8Lh0XBPAAdVx9eOA6hg4yUEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEEAB1QNeFw6rgnggOq4+nFAdQyc5CAAAQhAYFwTeOGFF5L777+/9/e3v/1tXLOg8BCAAATqEMCe1qHHtRCAAAQgAAEIjDoBHFCjXoPkf6wI4IDqmPxYOKCeeuqpZOXKlcnixYuTRYsW9bZ333138sgjjyR6kbThueeeS3RN0d+6deuS559/3l4e/F1W5vr164N5CgqteVDle+CBB5Lbb7+9x+Xee+9Nnn766ZpSx+byGF/VUV7QzTNWzzpHGB0CMR2w9TtIGysr26bl/5aMskHt0NmsW2+9NbnrrrsStc+HH3441+ZI3/007X5Ve+e3nyIG4tpkUF5lr2+55Zbe35o1a0qJVz5suQf9HeJlM6F7wNq1a5MVK1b06kx19eCDDyZVeBSxdfkfRHeVX9myXXfdNf2LOaGK9Efny97zLKeiMvq8iuLW4VFWtksjtJWMskHXu7Zc5vmja/3NK0dZVlX1sqzcEHt3rEodqIzj3aaOij1VXdW1qWX1q6reurZS1p4W5aOMzfPvwS59f1v1GbYtG1+lrMp/UXzXzqvWUVm5Tn5oKxlVgmRg47PEuq6HYbTvZfTctwFZgtV/jYqNr2vfy3B17bqq/ahOnSsgMH4J6Pljk002SbbbbrvxC4GSQ2AAAjigBoBW55KuHFB6+L3pppuS6dOnp51ufgec299rr72S448/PvFf8vQi4c4XbXfbbbfk4IMPTs4555xkyZIlUTRVZU6dOjWZOXNmcttttzXmkNLDqTpJzz333GTvvfeOlvGII45I5KAbpbB8+fJgefbcc8/k8ccfjxZlwcIFwetU7wsXLoxex4nhI9BmG6siO2Yz5s2blwtNNkvOlkMOOSSqk5I9adKk5Mgjj0xuuOGGPnnHHXdc7rUhB7PsTCzPp512WppGEQPZwCaDbJCfrzLyH3300WTChAmZ63wZVff9+4Itm1heffXVyT777JOmt/vuu6f7ujeccsoppWxpEVs/35Jb9f5QpsNUH2RIt/y0Yvu6f5xwwgnJFVdc0et4s2xCv4vKeNZZZ6WXFcX181WVRxXZfjr+fpm2PMjzR5f6m8LO2anCqko9VJHrc/f3i+pAxcKmbqjcYbenymlTNrWKflXRW0ezjD1V3KJ8+Davi2fYNm18lbKWYeO39Sp1VJQPX25sv6xtwcb3f8Dp2kgX9TDs9r2Mnpd5rnVMi7bDbuObsu9luPptu4r9KGLMeQhAYAMBHFAbWLAHgSoEcEBVodVA3C4cUBolMGXKlExHmjoG5SiKOV40usAFfTlzwQUXDNSRedRRRwVHEdWRKSfaY4895rI30FajnA488MAMEz2gyUET63RUZ/ioBDmZ5FjTg6b/4Kl9dbDHgnRFnez2mjPOOKM32iR2HceHj0CbbayObKdb11xzTRSaOp5ln1xcbeVImTZtWrL//vtnjrs4vnPICZbDOuR0Vzu/5JJLeh2yLq7b3nzzzb10nFy3nTFjRnLHHXe4aL1RRTG7KIeYOtCaDOqcc3nRdvLkyYWjbzRKyr+m7n7MAXXPPfck+njBly/bo/uOWPvHta9OhryRQ3X0q8z9oUyH6aB5ULlnzZrV04+8+s+TL/1ZunRpenleXMvW/i7iUUe2SyuvLdd5/uhKf1PQBTt1WOXVQx25ZepAxcKmZit3mO2pctqkTa2jX3l664iWsaeKm5cPa/O6eIbNy49rV6FtGRufJ9uWtYhNKA/+sbw6ysuHLyNvP8++K+/Y+L+Pph7LehgF+16k500/Nw+zjW/SvhdxzWvbOpent5JNgAAEyhHAAVWOE7EgYAnggLJEWv7dtgNKUwv5X6RPnDgxmT17dm8KIlc0OZv23XffTCeh74By8fR1u/8gc/bZZ/fk6KsrOYQ0mkiOCuv0OOmkk6KjlqzMM888M3nmmWeSZ599Nlm9enVvBMR5552XSVd5OPzwwzNlcHksu7300kszMtVR6jr81DF63333JeolSgoaAAAgAElEQVRw9surjtS6jq+y+Wsqni2nK4/qKi+ozlxc1SlhdAm02casbHW8a2Sh/+fsgxw+Tqe0jXVqrFq1Ktlvv/3SuHKWX3755T274GpBXw5aR2nIAaX4iiu756ddNJrvySefzDii5eyJTQ1iGRxwwAG5zhVXhipbpR1yjIfstC/XduDL6b5s2bJeWVRH1157bYaLpjfUcXVaydmmcvvcQg4ojXT1+eqjBk2tJhkKkqcvo/37kGRqNJTO5QXLtqn7Q9kOU+VNeuoz0H1POq0/N9XghRde2Pcxhzio3EXBllEOVskOBRu3KR5Ky8puoi3Xff7oQn9DnIuOWVZN1YOV20QdqCzY1GyNDrM9VU7bsqlWv5rS2yr2VOWz+cizeV08w7Zp46uUNcSmqTqy+WjKtmDjN0zlq+eEonfTNuph1Ox7SM+bfm4eZhvfln0PcW3Kfkg2AQIQyCeAAyqfD2chECOAAypGpqXjbTqg5MTxRwvIMeR/we8XSV9P+Z2IoY5NO5xdLzChoE43v8NO+xpVEApWZmz4vabXsjLVeTpo8F9q1VGoqThs0DE7Qqyo49rKGOvfqm/LTb/lXIt1cirPV155ZXrd3Llzx7oYpF+DQJttzMqO2QSX/ZNPPjnVq5ADSg9vdrSmRjGFgpwXco46/Y45oHStncZP9q4oHHbYYansE088MRrdMsjLR1RIwYnY1JiXXXZZ7pW2A19fXfrhuuuuS8sojpa1Roo6vtpaB5Qcdf7IJ91D5LwPBU1B499jJE/lyguWbVP3hyodpjYPMR0XG+mJz0v3XK0rmBesfDnmYsHGbYqH0rOyY+V0eStqy008f7Stv64sVbeWVVP1YOXWrQOVC5vaX7vDak+V0zZtqtWvpvS2ij1VGW0+8mxeF8+wNj+xdjeIjbey88oaYtNUHdl8xMroWkuRfVc8bHzSm/rZv+drP+/dtOl6GEX7HtLzpp+bh9XGt2nfQ1ybsh/OLrCFAATiBGSPWQMqzoczEIgRwAEVI9PS8TYdUBo14D8Yy+mSF/yRRiEHlJ3DOu8Fxn95UR7OP//8YNJWZuxhSRfb4fT6smfQ4LO56qqromJURp+hRnGMUojNo68yqfM5FvwvQos6iWMyOD4cBNpsY1Z2nk0Qjfvvvz9tTyEHlNpilfamUZIuft4LrKb2cPG0daNz8mro6KOPTq85/fTTo1EtgzwbFhVScEJTmSrfdnSppinMC34HvtbDsqHIAaX4/lSI1gGlaT59r
kXTlNr0NPI2NrJMaVdhW+X+UKXD1OYhT8c1ela64jM59NBDc0fEWfkqRyzYuHm6VoVHiHVeORW/qC3791jxGOT5o239jXEuOt5WPVi5detA5cCm9tfmsNpT5bRNm2r1qyn7UcWeqow2H3k2r4tnWJufvHZX1cZb2XllDbFpqo5sPvLKqHwU2XfFwcaLQrV306brYRTtu5hZDnl6/nfK1f4Pq41v075X5Vr1GbFaDRAbAuOPAA6o8VfnlLgZAjigmuFYWkpbDqgnnngi8ReA15fn+vImLzz00EOJOl31p45dGzTVgt+xJodVLGjRWj+uOoBDwcrMewidP39+RmZR52soPXdMnZ4aqaU/TdEVC9dff30mzTpOr1gabR73v7bzO5JVN5pSUHoSCv7UXLHRa6HrODZ8BNpsY1Z2nk1wZDSF2amnnposXrzYHept5RTyR8jIfsX0079Q6zDJZmn6uFgYxAF17LHHpm0/bxrKQRjE8hk67nfC64VRX1D7tlXnY8G/VqNIbbAOITsCSvH/+te/pun5DiiNEPUdYhoJlTeqUrJkd+2aUHn2xbJt6v5QpcPU5qFIx6XHduSs7iOxYOXnlbFK3Kr3Syu7qJwqT6wtN/X80ab+xuqjzHHLKq/OqtSDlVunDlQObGp/bfo6NUz2VDlt26Za/WpKb6vYU5WzSj66eIa1+Slqd1VsvJWdx7wqm7G0Ldj4DbZlrOphVO17SM+L2twG2sV7w2rj27bvIa559qaK3hZTJwYEIIADCh2AwGAEcEANxm3gq9pyQGmqOL+TsuiruzIFsC9SeQ+M9qtFrT0SClZm3sOSpo/yy6Sv59sO1pGmtT5GKfgv75p73K5rNXPmzGBxcEAFsYzkwTbbmJWdZxOK4Gn0jN++Y7pZJCd0fpQdUFdccUXKRevU2ZfGvJGMcq5rpJn+Qs68Mg4ovTQ7Gb6DyU6LKmdEmWC/AM2rZ6tfTd0fqnSY2jyU0XHx8nVZDqnYeldWfl4Zq8Ster+0ssuUM1bfTT1/tKm/sbyXOW5Z5dVZlXqwcuvUgcqBTe2vzWG1p8pp2zbV6ldTelvFnqqcVfLRxTOszU+ZdlfWxlvZecyrshlL24KN32BbxqoeRtW+h/S8TJvbQDx/b1htfNv2PcQ1z95U0dt84pyFAAREAAcUegCBwQjggBqM28BXteWAstMANTGNmn2Ryntg1GgEvwMuNP2ToFmZeQ9Ldi742KgqTZGhNV7cX94UT0UVp/z45bjxxhuLLhmq8/7L++zZs/vm3lfZVqxY0ZdnHFB9SEb2QBttzMGwsmM2Qc6Pxx57rLdmgLvWbtuwWS6NUXVAyZZpcWa103322ac3lZs4+jZJo78GDWUcUDHZdr2jO++8MxY1c9x2XKlcTThnyt4flJkqHaZlddwv5IMPPpipI9WXjoWClZ93D6wStwoP5cvKHta27DOso7++nKr7llVenVWpByu3Th2oTNjUbM0Osz1VTtu2qVa/mtLbKvZU5aySjy6eYW1+Yu3O16ayNt7KzmNelc3Galt8ztj4Z30cmf1Rte8hPS/T5jKFj/wYZhvftn0Pcc2zN1XsRwQ3hyEAAY8ADigPBrsQqEAAB1QFWE1EbcsB5S8Mr84vTa9XN9gXqbwHRq2V5HeSxtZ/sDLzHpbkQPFlxtJXR6gfr2iu8xgXOa4mTZqUytKUYJryYJSC//KuqbQUNJ2Yz0ejovTQ7gccUD6N0d5vo405IlZ2qE1q2jY3HeiiRYvcpX1bO21ZEzbLJTKqDqhly5albfWiiy5yxUkOOeSQ9LimwRvULtXp3PFto+xJ2fpatWpVmndnh2LToFr9auL+IIhVOkxtHkI6nlaMtzNlypRMOeV4CwUrP6+MVeKWvV+6PFnZoXKWbcttPH+4fPrbOvrry6m6b1nl1VmVerBy69SByoRNzdbsMNtT5bRtm2r1qym9rWJPVc4q+ejiGdbmJ9Tuspr0919lbLyVnce8KpuxtC3Y+A0aMVb1MKr2PaTnZdvcBurhvWG28W3b9xDXPHtTRW/DtDkKAQj4BHBA+TTYh0B5AjigyrNqJGYbDij7MqhOPutgGCTz9kUq9sCor9kPP/zwTOdbrOPZyow9LKmDUtP4uQ5LdbrGvihXWi6etoOu22SdaBrWP2rBf3nXgsEKGkGxxx57ZBjNnTs3UzT9dgzz1mjJXMSPoSTQRhtzBbWyQzbBd2bG7IC1WWrfTdgsl89RdUDJdrl2qEXBXfCnGNH5QdvooB34zz77bJovl7+yTjBN4+eucVtN8xcKVr+auD8oHatvSicWbB5COh661n7tOmfOnFC0Sp2xNi9N8VDGrOxQOQdpy6rjJtuyD3FQ/fVlDLJvWTVVD1buoHWgMlkdx6YmvWdBZ3OGyZ6qvrqwqVa/mtJbq2tKJy+UzYdkdPEMa/MTaneh8pSx8VZ2jLmTXzZ+lXciybZyQ2UsY98ly9Y3Nr7cu2mT9WDrYJTse1kOilc18MycXas7Zm+q2o+q9UB8CIxHArLLm2yySbLddtuNx+JTZggMTAAH1MDoBruwDQfU2rVrMx18WvS9iVDmBUbG97TTTsukr6/1dTwUrMzQw5K+urYvenlrMTXhgNLcyHqgd50VKoO//kmoLMN4zH95v+yyy9IsqjPUlU1b6Yi/Rsz111+fnh+0cztNjJ0xJdBGG3MFsrKlSxrt6P5OOeWUTDuKOaCszdLXtU2GUXRAye5NnDix1w6nTp2awXHfffel7VPMNRXLIGHQDnxNb+rbj6qdH65cTobfGeyXw+pXE/cHybedN0onFmweQh13oWu1JpYrn7buAwAb18oPldFdUyZu1ftlTLby7NqxtoO25aaeP1w+/e2g+uvLGGS/rXqwcgetA5UJm5qt2WG2p8ppFzbV6lfI1gxiP6rYU5W1TD5c7XXxDGvz06SNt7JDzF1Zy7IZpI5sPpq0Ldj4XdN7fd67aah+B62HUbbvIQ5l25zfVuz+MNv4Lux7iGvI3gxiPyxrfkMAAv0E9CyEA6qfC0cgUEQAB1QRoYbPt+GAWrlyZfowrIdbrSHSRLAvMKeeempv6idNvaS5hLUorx35pJE2Dz/8cDR5K3PatGnJ8uXLe/Lmz5+faBSSP9WDpvKSIyXvi+q6Dih9ye+PttIaJXp4HMXgv7yrE9EFOdMOOuigjJ7MnDnTnU601pV0R384oFIsI7nTRhtzIKxspzOxbcwBZW2W2l+TYRQdULJ/jqObPtMxkf2bPHlyel52dhAH+aAd+La+lM8qYd99903zrms1bUooWP1q4v6gdKp0mNo8lO0oufjiizNl9KdQ9Mtq5Yc6DFx8G7cpHpJvZTvdi23LtuWmnj8cA387qP76MgbZt6yaqgcrN8beHY/Vgcpk2+h4t6nDbE9D9aU6rhLK2FSrX03pbRV7qjLZfOTZvC6eYW1+mrTxVnZeWUNsmqojmw9nQ2LbKrYFG79rb5rponfTUP3G+LvjsXoYZfse4lC2zenaWBhmG2/rS/VbJZSx75Jn23lT9qNKXokLgfFKAAfUeK15yl2XAA6ougQr
Xt+GA0pfk7uHV22bejmwDzZ+GqF9OaP08pgXqsjUyKrYVE1+GpqaT84w9yfnWNmgTtyjjjoq5ac5mzXaYFRD7OVd5fHnynb1t2LFil5R/Qd5HFCjWvt/z3cbbcwRsbI1UtAfNSEntT+SMPYyXdVmaYRi7C/kLB5FB5Sf59D6SmeddVZqp9R+ly5d6qql9HbQDnw7AksfBlQJ9mVadRkKVr+cnQpty94flE6VDlObh7IdJfoS2s+n7kehYOXndVDauL58u1+Fh/JlZXfVlkNMyh4bVH/Lyo/Fs6wse/93lXqwcgetA+Ubm5qtvWG2p8ppFzbV6pevp3a/it5Wsacqq81Hns3r4hnW5qdJG29l55U1xMbWi/+7Sh3ZfHRpW7ItsdovbHx43dRRtu8hPS/b5vK0Z5htfBf2PcTVtxd2v4r9yOPOOQhA4O8EcEChCRAYjAAOqMG4DXxVGw4oO9S7qems7AuMHmbkoJkwYULvT1/YHnPMMYmmHtIopjLBytRoIz1Ezpgxo29BZjm08kZTlUkvL47WrtJUVu4hTeVasmRJ3iVDf85/eddoMhv0AOrKq624a3TFggUL0uM4oCy10frdZhuzskMvkWXWFag6nYjv1PL1V/v+SD9XU/6LqeKUWa/o2GOPTdvAGWec4UT1bcsw6Luo4IAcTq5chx56aDD2bbfdlsZR3AsuuCAYL+/goJ07q1evzqSt9DWtR5kgJ7+tv5hdt2ybuj9U6TC1eQjpeKjcGlHq6lDbW2+9NRStUmeszUtTPJQxKztUzjJtua3njxC8QfU3JKvKMcuqqXqwcgetA5UFm7qhRofdniqnXdhUq19N6W0Ve6qy2nzkOWW6eIa1+Qm1uw3atGGvjI23svPKGmLTVB3ZfITKWMa+K4/Y+EmZe3uVd9Om6mGU7XtIz0P6uKGlFe8Nu43vwr6HuDZlP4prgBgQgAAOKHQAAoMRwAE1GLeBr2rDAWU7+ORIkXOlbijz4Fw1DSvTfzlbv35939pP++23X2atoqrp5cWXg8Z1GKqDNNZhmCdj2M75L++aEsoGvcTIiejKra3Wf7rpppvSYzigLLXR+t1mG7OyQy+RckxohIx0KzYCytostb88myUHqUZ2+nqrfTmaNALShkEcUHKmO/ldO6A0lYtLW1s59+2fpt3z4+y///65zCwT/R60A992figfa9asCSXRd0yjWP18az/mELT61dT9oUqHqc1DSMf7CpkkyQknnJApp75YDgUr3y+jjZ8Xt+790soOlXOQttzU84dlod+D6m9IVpVjlpVfZ3XqwcodtA5UFmzqhhoddnuqnHZhU61+NaW3VeypypqXjw219ve9Lp5hbX5C7c7mS7/L2Hgr22cekpkXf1htCzZ+16Tsu6mt35CuDXKfHaVnZul9GQ6h9hE7Nuw2vgv7HuLq25s69iPGneMQgMAGAjigNrBgDwJVCOCAqkKrgbhtOKCULX314nfylZm6rqg4TT8wKj0r039Y0nk9MOnB3i9LXmdwURli5+fNm5dJ44YbbohFHanj/st7bA2Sq6++OlP2vffeO1H5HXMcUCNV5X2ZbbONWdmhl2llSC/UcjKoUzQW7LRsRQ4NX7edruolLxQGcUD5U3Hm2ZyyDEL5Ch3TCERr81z5irbKS5UwaAf+s88+2zeKqexUpbbe5JyMORst26buD1U6TG0eYjruc1d5ZEf9+nryySf9KOm+lW/LmEZs+X5p8xErZ5m23Mbzh8/B7Q+qv+76QbeWla2zQZ9brNw6daCyYVOT3ojuYbenqqsubKrVr6b0too9VVmL8uG3S/9+0dYzrM1PrN35+Spr461sy9yXWYbNsNgWbPxg76ZWH2K6VuY+O6r2PaTnMQ62fYR+88y8gYrVL2tvBrUfG1JgDwIQiBHAARUjw3EI5BPAAZXPp/GzbTmgNCWA3/m1ePHi2nm3DzZ1HhhdZqxM+7CkeLfcckumLCqXXkqbClo7RV/wOV6hKbyaSqtrOWVe3uUUmDp1alp+cZgyZUr6GwdU17XWbHpttjEru45NsDbr9ttvzwWhDqCJEyemeqoOkVg4/vjj03jS7yeeeCIWNT3u5+f8889Pj9udqgzkiLvqqqt6Tt6Q4+Wuu+5K86p1GiQ/9jdr1qw0rsoluVVCnQ786dOnZ9Iu67SXPXG2VlvVTSxYtk3dH6p0mNo8lNFxe41GmcaCjRsqo7u2TNxB75dWdplyunzZrd92VMdNPH/YNPS7jv6G5JU9ZlmF6myQerBy69SBymLrYTza1FGxp6qvtm2q1a+m9LaKPVU5y+TDtcUunmFtfsq0O3tNzMbbeCHmrqxl2QyjbcHG79p7ril6N7X6UEbXfP3w90fVvof0vIhD3nPzqNj4tu17iGvI3gxiP3y9Yx8CEAgTwAEV5sJRCBQRwAFVRKjh8205oPTFvt/JV8apos5QfaWtL69CockHZyffygw9LClfhx12WKY8evAOdd46uWW3jz32WOZLdXFrQm7Z9NuOV+blXXnQWle+vvj7OKDarqV25bfZxqzsopdIlVTrDKidz58/P1Nwa7NCU0ZmLjBf+MtpGgvnnntuRr8feOCBWNT0uKa8c+1g9uzZ6XG7U5WBpiF0cjUvvA0+hzlz5tjTmd9++5bM2HpRmYu8H3U68LXWnyuHthplVibYqVLmzp0bvcyyber+UKXD1OahjI5fccUVGTYXXnhhrTK6i21emuIh+VZ2mXKWbctNPH84Bv62jv76cqruW1ZN1YOVW6cOVCbflqiNjkeb6jMYZnuq+mrbplr9akpvq9hTlbNMPlyb9O9xsRFQilvnGdbmp0y7K2vjrewQc1fWsmwGeSey+ShTxph9Vz79diXbgo3/uwOq6N20yXqwdTAq9j2k50X6mPfc7HMYZhvftn0PcQ3Zm0Hsh2QTIACBfAI4oPL5cBYCMQI4oGJkWjrelgNq2bJlmQ4wDdXPm/5KxVu+fHnvGo0qCE1/NciDcxE2KzP0sCQZGqXkd3Zqv+4aTRq270+zdcQRR0QZPf7444lGG5T9yr+o3F2dL/vyrvyccsopfYzFGQdUV7XVTjpttjEru+glUjZozz337OmZnEJ+cPbHtfPJkydH26O7zp+CZNq0ae5w31YOJCdX26KRAE8//XRmVOTChQv7ZLoDVRn4o5a0mLcflK5bL0v5DNlhP7542rWgYtMQ+te5/Tod+KEOv6K0ZXO1VpVfF3lltGybuj9U6TC1eSjScX3U4K+rp/28EXdWfqyMqrOycQe5X1rZReXMa8ttPH84nfW3dfTXl1N137KK1VnVerBy69SByjTebeoo2VPVV9s21epXU3pbxZ6qnGXzobhdPMPa/BS1uyo23sqOMVdZFcrGH2vbgo3/e32NZT2Mqn0P6XlRm4s9N4+SjW/bvoe4xuxNVb39u7bzHwIQyCOAAyqPDucgECeAAyrOppUzbTmglFlN3+R39GnYdV447bTT0vj33ntvX1SNGvDlnXXWWX1xqh6wMmfOnBkVccwxx2TS1wgFzZtvgzpB9aWk+9NDXygsWLAgladOwry
OUPe1ozrPRyn4D5l504ipTFonzJ/SzNU1DqhRqvH+vLbRxlwqVnbRS+Qdd9yRtrlLLrnEielt9VXejBkz0vPSvwULF2Ti+D/smhl5Dig75URRW/DXQFM+Hn74YT/pzH4VBuq099dO0LQiftDXm67dHXTQQf6p6P5JJ52UXqNr1SlfNlxzzTWZa6u0dTmTDjjggMz1RV9B+1+xKq+xl2OXf8u2ifuDZFfpMF25cmWmjHk6rvo9+eSTM/E14isvVCljlbj/n70zAd5jyP9/dhNXKVQUilSSQspRWVEhKXets9yUqwpBOTchbuv47brWWldECIIVLHEfiXVEXFk5xBmxscK6IkgsQUhEJAr9r/f49+j5PD0zPc90z3ee7/Puqu93nmeenk93vz6fPqY/0z2u/aXOm5SdVU5ck1WX8bvv8YfOp3ksY7+mnKKfJStfdinlltVBu7eprdSewgZDt6nSvnzZbZH2FOUsko8qxrAh2/giZS3KpkgbL/NRtm1BXtnGg4JSHaWHVm3fwayIPWaNm1upjQ/dvtu4+mrjI0PnPxIggUwCdEBl4uGPJJBKgA6oVDRhfgjpgJKTrpdffnnkZLCVRD6ZY3NAyaet7rrrLpuoQuekTEzepYW5c+cmJvYwgTlx4sSG6HKSc+zYsQ1x0EmYT+Lfcccd0aoIrIww/7Dy4YknnohXQ2DCtZXCrFmzYmZZA1FdpkmTJsXx9UR4kUlpLYfH+hAIUcd06aRsPKWYFnDzhVWG2q5gazKgvunfcYRj+IsvvpDRou+yfctyQMFZZdZ3vPMtTS6cQpCl85HXzhVhMH369Fgu5KMd0kHmMS9dfR2cPjqvOBbZhk9eiy13igSzfdF5QBltAQ5+9EE6Hj5LB5y8TrL10T8gjSITpuYT+Mh7mo2jLP/4xz/i8iEutoXEE7pZQZYRK1HTgozriwfSk7LTyom4LnVZ1s+y4w8bk7L2a5Ppck6y8qUHKbesDlCWdm1TW7E9hb5CtqnSvnzZbZH2FGWU+chq80weocawIdv4ImW1sfGlI5kPH20L23hoTCnXe1ObfsvqoRXb96Ic0sbNrdjGm+2ZHgu3wpg5MnT+IwESyCSAsVCXLl3UwQcfnBmPP5IACSQJ0AGV5BH8W0gHlO1pbDx9j63r8BsCJs1efvnl2MGiB0Q2B5Sc7MFqGWxNVyZImdh2a9myZakizVVayCsmkvEkthlcHFDyyWld7rwjVmi0UjDffYMVY+aEt60c+B2TpiYHOqBspFrnXIg6pksvZWOCCNuNmX9YkfjJJ58oWXdtzg60S3JFzxVXXBGthNK2i8lv2CTOm3aa5YBCfuWNOrbvwwSKlou058yZk7B/tHHIe1aQDMaMGRM5t+Dgwt/8+fPVRx99pOBwM7fXQ9tlBqzKMsuD1Za4wc4LmKAyr8Pnf//733mXReXGKivzWrzHSfcNuQL+fwS838iUcdFFF0VlxVZCeEIXfKEvtO06HhyLeLI9L0i2PvoHpIk86bzgiLymBb36Vcd/8MEHoxVx0C3sBf0N3kEgV4/eeeeduQ42pCnLiD566dKl1uzIuL542PLhoy5L2ywz/pBAoEMf9ivlunwPpQcpt6wOUJZ2bVNbtT2FzkK1qdK+fLUfRdpTlE/mI6vNq2IMG7KNL1JWGxtfOpL58NW2sI2H1lTD+NZ2b2rTb1k9tGL7buPQzLi5Vdv4UO27jauv9gOyGUiABLIJYCxEB1Q2I/5KAjYCdEDZqAQ8F9IBhWxjAtC2qgWDY/0uFj2xZh5NBxQmk/E0vvm7/oxJ3Lwthmz4smRee+21qe93wjZxmLzU6eOIsmBye/HixVFSeQ4oDNjNJ/FNWXmfMUHbCgHvlpFP46Nso0aNanDYyfKYWyvhGjqgJKHW+B6yjmXJzqtD+ndsgWkLaLPQpuh4+gjHBiaqUN/1OfOY54CCXEzC2K7HTZo8D4dClpOkLAM40BDwEMDdd99tLdNVV12V+t659957L/EOO5MFyiLfsWWyxhOX5iov81psq4M2tEjAQw22NhUMoTdTPvoMbHmUFbLYlu0fkK7LhCkerkC/IvNvlsX2GU5GrMyFozQrFCljkbhmmi79ZZZsW/ls57Lqctnxh1ke/dm3/Wq5eccsVmXsMkuujbftXJoOUKZ2alM7Q3sKnflsU7Psq4zd6vri0p4ibpF8VDGGDdnGFylrUTaaO455bXxWPmztiO1cXtvCNr5j9dAq7XuendtsT57DuLkztPE+2/c8rj7aeLPN4WcSIAE7ATqg7Fx4lgTyCNABlUfI8++hHVA6u9hiD09uYyAiB3T6OyYQsRUdJmnNlU24EcyahHN9V4nOC455MpGHtGDu+azzjvxhpQWCiwMqy/mmZdqOmChuhSBfZGyWZfLkyblF0BPi4IrtSRhaj0DIOpYn27S3tM+4AcsK2AoTDgCs3IMMs87CwXL//fcrOGH0/vtwrroE1A08bWk6sk3ZcHLBAYYXjWeFsgxQLoTPPvsstU1GudEm28KUKVMyr0N7nhbgSE/TC85jRU/RAB7//Oc/o/d4QT9oO0zGWD362muvORwVjgYAACAASURBVK3qymNbpn9AuVwmTG3b6pjM4GiC3WBbVvDE+6xg05gccQlFylgkrkw7r7/Mk22WOe1zXl0uM/6Q5cH3EPZrS0eey2PVrF3myU3jbp7P0wHK0g5tamdpT6Ev2IWPNjXPvpq1W10/XNpTXZ6sewkzH1WMYUO28UWZF42v2eOY1cbnyTXbkLTPLm0L2/iO10Pd23eXNiDNBvV5jJs7SxuPuumjfXfharatZtuR137IuPxOAiSQToAOqHQ2/IUEsgjQAZVFJ8BvVTmgzKx///33CiucsH0QbvLwBB0mzfAUFQMJgAA6UTw56TqZSmokEJIA3qODmzVMGMExZK4uwXZl2OIONlskoL2DTLSDaAOxVV7a1mdF5DLuL9t+ff755xHXL7/8srBuQjN0nTANnY92lM/xRz20zja1HnpwzQVW7te1Ta1je8oxrKtl+Y/HNt4/06IS2b4XJdax8evcvncsGaZOAq1DAOMObsHXOvpiTutDgA6oinXREQ6oiovI5EiABEiABEigNgTqOGFaGzidPCOYHMVKvFdeeaXQHxypDCRAAo0E2J42MuGZjiHA9r1juDNVEiABEmh3AnRAtbsFsPzNEqADqllyTV5HB1ST4HgZCZAACZAACTRBgBOmTUDrJJfgvW56O58ix6LvReskuFgMEsglwPY0FxEjVESA7XtFoJkMCZAACZBAggAdUAkc/EICzgTogHJG5SciHVB+OFIKCZAACZAACbgQ4ISpC6XOGeeDDz6gA6pzqpal6iACbE87CDyTbSDA9r0BCU+QAAmQAAlUQIAOqAogM4lOSYAOqIrVSgdUxcCZHAmQAAmQQFsT4IRp+6of73776quvCv/x/XDtazMseTYBtqfZfPhrdQTYvlfHmimRAAmQAAn8SoAOqF9Z8BMJFCFAB1QRWh7i0gHlASJFkAAJkAAJkIAjAU6YOoJiNBIgARLIIcD2NAcQfyYBEiABEi
ABEujUBOiA6tTqZeECEqADKiBcm2g6oGxUeI4ESIAESIAEwhDghGkYrpRKAiTQfgTYnrafzlliEiABEiABEiCBXwnQAfUrC34igSIE6IAqQstDXDqgPECkCBIgARIgARJwJPDTTz+pGTNmxH/fffed45WMRgIkQAIkYBJge2rS4GcSIAESIAESIIF2I0AHVLtpnOX1RYAOKF8kHeXQAeUIitFIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAYE6ICqgRKYhZYkQAdUxWqjA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP377ruvOuaYY9Tf//53/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBtoARvYfffd1dlnn13xbDKTI4HWJkAHVMX6wwqogQMHqi5duvCPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1toFu3bpGODj744Ipnk5kcCbQ2ATqgKtbfzjvvrI488kg1adIk/pEBbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBuouQ1MnDhRbbPNNuq0006reDaZyZFAaxOgA6pi/fEdUBUDZ3IkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkUIIA3wFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiTQQgQWL16s8LdkyZIWyjWzSgJhCbBehOVL6SRAAiRAAiRAAiRAAvkE6IDKZ8QYJGAjQAeUjUrAcx3lgFq2bJmaPXu2euutt6LjggUL1M8//xywpK0hGhN87777rvrf//7XGhlmLkmABGpJAG3shx9+qP773/+qefPmqUWLFrVVG/vOO++oN998M/pD2esevvnmm0hPaPt/+umnWmUX44QuXbqo3/72t2r+/PlN5y2UTjiecFdJKB2458A9Zp3rBErhq164E2HMOhNgO1Rn7dQzbxyncZxWT8tkrkiABFqNAB1QraYx5rcuBOiAqlgTVTqgMBE6dOhQ1atXL/Wb3/wmmtDCpJb+Q1523313deGFF6offvghJoEJuTXXXDP3b7311lObb765OvbYY9Xo0aPVp59+GsuQH4rKPOKII9QNN9wQTehKWWW+w/GGvO6xxx6qZ8+eCS6rrbaa2nXXXdVVV12lfvzxxzLJVHrtwIEDU3W1ww47WPMCBmk6vvXWW63X8GS9CZx++umpOl177bWtmUfdT7ODCy64IHFN0Tqc1y4UlefaJrjKTSs3zk+YMCFR9rQv77//vjr11FNV7969I2eBblv1cYUVVlDbb7+9+uMf/6ief/75BjEhdZbHYfjw4Q35afbE3LlzE20p2lCX8N1330XssnTh+hva87wwefJktf/++6tVV1016ge7desWHbt27ar69u2rRowYob766qtMMXlcdX51/+hqt2aieqIddoT0mgnN6iQtrWbGE6H0m6eDyy+/PC7GbrvtltrGQVePPvpoHFd/wDhh/fXXT73uuuuu01Ezj83oIBSztIz6qBOQnaeTKupFXh5Mu0CeYdM6X+Zxu+22U5hYSAvPPfec9TrImDRpUnwZ7AjtgClbfl533XXVpptuqvbaay/1l7/8RT322GNq4cKFsYysD0XLmxdf561o2+UqV8u3HV373Dq1Q1m6SfutI/t8zV3rN2+MhjK46lbLdO3vXOXqPNuOrjbDcdov1tgK/RFy6qNPcrWvonabVq95ngRIoP0I0AHVfjpnif0QoAPKD0dnKVU4oDCBceSRRyYmBDERCmfR6quvHjug9CQpjh988EFcBlx/7rnnKlxjxsn7vPzyy0c30LanyZuViSfATzrppISDLM5ogQ+zZs1Shx12mNITjros66yzjlpjjTUayglH1Oeff14ghY6LevHFFytMoOoy6SMmNm677TZrxmbOnKl23nnnhms22mgjNeP1GdZreLLeBJ555hm19dZbN+i0e/fuCjZiC+PHj1ebbbZZwzXbbLNNdBNoXtNsHU5rF5qVl9cmNCtX1xsc7777brPoDZ/xFO2QIUMSbexyyy2n+vfvH0042hz+mHiSIaTOsjhgsvPVV1+V2Wn6OxxOJr9tt93WSRZWXJjXlfmMtj0tYBURHIGmfNhRv379YmeU/g32et9996WJUllctQzbMc9uzQR9OKCa1YmZD3xGeZsdT4TSb5YO8FDNyy+/HBfjlltusfbx0Accw1i1KAPk4zdTD1qnaBunT58uL7F+b0YHoZjJDPqsE5CdpRPNznb0WS+y8iDtAnn++uuv1ZlnntkwLkQ+r7/+eoks/o4VroMHD060J2jz8cAXftMB+fnzn/9ceCyNMek999yTu4K2aHmz4tt0o8/l6ahZuVo+jnl9LtKoWzuk9Vzk2FF9vslafk4bo6Fczeq2DjbDcVrSMuvcHyGnPvukUHabJMpvJEAC7UyADqh21j7LXoYAHVBl6DVxbWgHFG6osepF32AgvZtuuinxNCe2SerTp08cB3FNB5Qu1p133pmIc95556mlS5dGsvB0ESYwccONGw2dHo7Dhg3TIhqOUuY555wTvevi+++/j7bCGzdunPrTn/7UIBOrDMoEPLFs5nGfffZJTDzNmTNH7bfffok4mLBslXDvvfcm8o6y3nXXXZnZf++99xLXoLx850gmstr/iPopHaqY8MgKeK+GeQ3aBnNFpLxW1uGy7YKU56tNkHLhhINz3PzDSkc4mtFmme1D1mQYtpfDZKaOv+KKK6qrr746ahs1q2+//Vade
OKJcRzEtTmgED+0ziSHLbfcMndiU5fD5YitXDfccMNEWTEZ+9lnn+VeLifb4RB66aWXoj4BenrggQcScqdOnRrpD9f961//ilZPaT2kOaDQtpv5w4pgrDJAX6kDvuO8loXjqFGj9M/Wo+Tqy26RmOn4QF9bNJTRiZlW2fFEaP1KHcCZbnsABg9VyIc00pzyZvnHjh2bsAk4+DFWcQnN6iA0M+Q9VJ2AbKmTjqgXMg9pdqH1eO211yb0jPqPOphX90444YT4urPPPluLazhiLGa2LVhdjMlxTF7AYQWH6fnnnx+tlDLjYSyPh6fyQtHyyvi+dCTl+upz694O5elH/l51n192jIb8S93W3WY4TktaXZ37I+Q0VJ8Uym6TdPmNBEigHQnQAdWOWmeZfRCgA8oHxQIyQjugzIlRTMi98MIL1tx9+eWXidVQNgcU3l1g3gz/7W9/s8rCBLcZDw6ptFU0Uia2HLEFOKKkYytvIt0mR58zHVBYCWZztOCm0JyoxCQqOLVKwNOhph4uueSSzKxPmzYtjo/tLVqprJkFa/Mf4Vw17QBPAuYF0/mKlT1ZQdbhsu2ClOerTZBy0/Kpy2o6jLIcUAceeGDMF20UtgtJC1hFoXWR5oDCtSF1Jjlk5SOtHFnn4TDSZTSPY8aMybos+s2cbIeDANsrmeHBBx9MyJbbGJptmM0BBQejucJvlVVWaUhDp4d3JOJ3XQa0/6+//rr+ueEoufqyWyRU1gFVRidmQcuOJ0LrV+rglFNOMbOf+HzMMcfEuoWOb7/99sTvti9wDmh7wDFtXGO7tlkdhGYWsk6Ag9RJR9QLmYcsu0Ce4QAy9aw/H3300TbVxuewuk7HhbMyLcj8pPVFGJciTS0TR2wNnbctqJSfV14Z35eOpNy0cmpOrn1u3dshXZ4ixyr7/DQ91OHeLZTNcJyWtMa69kfIZcg+SdqXr7YuSZffSIAE2pEAHVDtqHWW2QcBOqB8UCwgI6QD6pFHHkncuGZtIYIsX3rppXF8mwMK+0WbN8JpNzGQZd5I4hqsCLAFKTNtMIhr8dSemT7eV9NsMB1QDz/8cKoY5NtM86GHHkqNW7cfPvnkE4WtNHT+sd1ilvPBnJCDw4+hcxA4+OCDYxuALbiEww8/PL4GTpOsIOtw2XZByvPVJki5WflEebEVl647aQ6oJ598Mo6DuNdcc00WqugJejhGEDfL8RNSZ5JDmXbUVljd9mM7UziyNcODDjrIFj1xzpxsR3skQ54DCvHxjh+kaXNAmRPEiINVU1lBprfjjjumRpdcfdktEizrgCqjE11gH+OJ0PqVOsDWwWnh6aefjm0TtoCV1nkBDk9tz2nvU0yT0awOQjMLWSfAQuqkI+qFzEOWXSDPWGWk9SyP0ult6hvb5On4sK+0IPOT1xehr9ByccRKq6wg5eeVV8b3pSMpN6+cLn1uK7RDWbpJ+63KPj9LD7qd0vZW9b1bCJvhOK3R6rSei47TQvdHyGnIPknal6+2rpEwz5AACbQbATqg2k3jLK8vAm6zk75So5xoYmmXXXbxTgJPEOGdP/omAk9NZjkfkAFsPYV3HSE/n376aUOesIWSlodj1k0MtjAy49omE5GAlJk1GHz88ccTMvfdd9+GPLqe+PjjjxWcSfjD1idpwZxQQHmwfL+VArZ2MfWQ5oSEw1GvMMPTpQydh8Chhx4a2wB07BLM1XNZWwlBlqzDZdsFKc9XmyDlZuVTM8K2MocccoiaMmWKPhUfsbWXuXXpqquuGm0VF0dI+YB00cZiO7m0EFJnzXBIy6c8jz5GO0uwhRTe16fbHzjD8/ogc3LD9mCAdAjZJoNHjx4dpSkdUNgqDTrS+Vl//fVl9hu+42aiR48e8TW49pVXXmmIhxOSqy+7hWzNFOnnbQMmM1dWJ5DnazwRUr9FdYAVztguU9sDVsblBdi0jp/1XjApp4wOQjILXSeK6qToGM+1XhSpm8gzVl5qPcsVSJtuumliC2tT1/fff398XZZzW+Ynry/Ctri9e/eOZSNvabsZFGVeNH4RHRUtJ/KS1ee2Sjtk2oTr57r0+R197+bbZjhOa7TAuvZHyGnoPknal89xWiNpniEBEmgnAnRAtZO2WVafBOiA8knTQRZuoEM4oHBzqm+gccTqobJBDtyybprNG3ikv91221mTlzKzBoPm05GQOWDAAKtMnydPPvnkBEe5JZTPtELIwl755qRr9+7dFd5HI8OgQYOicsJB0WpllGXh9ySBkBMbSEnW4bLtgpTnq02QcrPymSRo/4Z33pltbN5T5nYp9rMhdeabg1kC8x1Nb7zxhho/fnyC0YQJE8zoDZ/hFBgxYkT0h3c2yODigMKLqyFj5MiRicsnTpyYyAuc8y7hr3/9a+K6yy67zHqZ5OrLbpGY60S7LWNldQKZvsYTIfWLfBbRAeKb9Qx1Ocu5h3dmaCfASiutlOtMNXVRRgchmYWuE0V1UnSM51ovitqFOX7F+zTxYJbZ1qe9Dy6UAwoc5Xujfv/735smlvhctLxF4hfRkZRbts9tlXYooQzHL2Zb5PtBoSJ6MG0fNl/1vVuRvLqg5TitkVJd+yPkNHSfJO3L5zitkTTPkAAJtBMBOqDaSdssq08CdED5pOkgK5QDCoMq84Y5azsQh2xGUeTALetm8rXXXkuk72MFFCY0zTJhZULIgAkn8+l3rCKzvdA8ZB58yJYv1L788ssTYs2bejx9ytC5CISc2AAp3+2ClJd1g1ikTZBys9ovFwuQjgkfbaxON6TOfHPQecZRv5tjk002iU7jyf3lllsubrex7UuZ4OKASpOPlXxm/zF9+vS0qInzcqu2tIlfydWX3SIzrhPtiYz//y8+dBJiPGHLaxn9Ql4RHSC+XOGMSf60YL4zI28LNCnDhw6kTP29DLPQdQJ5LKKTIu05ZLvWiyJ5gFxzEv6OO+5Qb731VrxCHG0I0rU5K0M6oL744guF99DpNgxOCtvDREWZF41fREeSe9k+t1XaITAtGurS53f0vZtvm+E4rdES69ofIaeh+yRpXz7HaY2keYYESKCdCNAB1U7aZll9EqADyidNB1mhHFADBw6Mb1Rxw5r30mKHrDZMJGTdTN58882J9PHOJVsoMhjEljf65hvHtPRxA4WtdfRf2hPrtvyY5/A+F50ebvafeuop8+eW+YwtDXr27BmXBTan7QEOtS233DL6bY011lDY7oehcxEIObEBUrIOp9VLxHVpF6S8rBtE1zbBNZ9wOvft21etvPLKyra9m2kZW2yxRVynfLWxWn5InUm+WfrS+XE5fvTRR/EE6fDhw+NL9t5775gT3glVxolfZrK9X79+cT6grwULFsR5zPqArWl1P4AjthK0BcnVl90iLdeJdpkvXzoJMZ6QecX3MvrF9UV0gPhyUh8rgdMC3oWn7WDatGlp0RrO+9JBg+D/f6IMs9B1AlksopMi7Tlku9aLInmAXNMBhfeRIMiJUdtDVSEdUMgDVqNoG8TRtjUs4hUtb5H4RXQk5dr6miJ9bqu0Q5HBFPxXlz7fZYxW1MY60mY4
TksaYp37I+Q0dJ8k2ySf47QkaX4jARJoNwJ0QLWbxlleXwTogPJF0lFOKAfUKqusEt+orr766o65yY4mB262m0ktATfnPm+UMWm51157xTIxCWh7TxXSnzx5chwPeSi6qmfhwoXqjDPOiGXA+YT3irRykNu36O3CsL2M1tOYMWNauYjMewqBkBMbSNJ3uyDlpd0gFmkTXPOJCT1dH9COZAVza0s4b32GkDqTfLPa8SJluuKKK2J22KZJBzmhhS1xmg1lJtvNyWq06UUcYWZ/CvuwvctKcvVlt2Bl5t228iKNpy+dmOX3NZ6w5bmMfiHPVQdm2ttvv31st+CMG0gZcA7lhu7x7jBMmrsGXzpIS68MM9OuQtQJ5NlVJ0Xbc8g2859VL1zzoBmbDqi///3v0Wk8nAMHuu4fcJQPKeCdovp3n++A0vnCyjstH0dspWULRcvrGr+ojqRcW19TpM9tlXbIppO8c3Xp8zvy3g2MfNsMx2lJy6tzf4Scmm16iD5J2pfPcVqSNL+RAAm0GwE6oNpN4yyvLwJ0QPki6SgHgy3f74D64YcfEjepG2+8sWNusqPJgZvtZhIScMOOgaO+Ud5tt92il5jbpEuZaYNBc9AMuVdeeaVNXHSuqAMK25g8+eSTCmnvtNNO0RPuOu877rijwpYjrR7w8mbzyTI48N577714i8Gtttqq0IRsq/Nop/yHnNgAR1mHy7YLUp6PNsGWT2ylhifW8YcndIcNGxZPMqP+ZzmgQrWx2i5D6kzyTdOXzovLERPyffr0idp8uUWdfGfIRRdd5CLSGqfZyfZly5bF/RF0W9RhCKeD7hNwnDt3bkP+JFdfdouEzEmZrIl2M1O+dBLa1s08N6tfLcNVBzo+jldffXVCt9KpgDjmeynkFramLPnZlw6kXPN7s8yqqBPIp6tOiozxdPld64VrHrRc0wF144036tMNWzb2798/4bAcN25cbEshHFDnn39+LB/tEJz7tlC0vK7xi+pIyq1zn2tybLZOmTKKfq5Dn9/R925g1ko2UwedFbGzOvdHKEcVfZK0L5/jtCK6YFwSIIHOR4AOqM6nU5aoGgJ0QFXDOU4lhAMKE1TmZNm2224bp1fmgxy46YlLPBX58ccfR5M0p5xySiJtPDGK69KClDl06NAo/pw5cxTe0YEnSnHTqsuDbfUwYZQVijqgHn744Vi+TkcfsVKoMzigwGvChAmJcprvE3jllVeykPK3FiYQ8iYZWGQdLtsuSHk+2gRbPnUdTztmOaBCtbHazELqTPLV+tJpN3PEpL3mqLesMuVgW0P9OxzhzYZmJwalvnr37l0oCwMGDIjzj3LMmjWr4XrJ1ZfdIiHXiXYzU750Itn5Gk+YedWfm9Wvvl7qIG1yScfH0XQ2QLcXXnih+XP0eciQIbH+MTZxDb50kJVes8ykXkPUCeRb6qQj6oXMQ55dmDYxatSoGD8mcM0Vc7CXG264If790Ucfje0khAPK3BYaaY8YMSJO2/xQtLwyvi8dSbnIc9ZfR/a5Jr9m65Qpo+jnjujz63bvBmatZDMdobOidmXGr3N/hHxW0SdJ+/LV1pmc+ZkESKA9CdAB1Z56Z6nLE6ADqjzDQhJCOKAwQWLe5GHfeB9BDtyQxlprrZVYMWSmi5Vdr7/+embSNpmmDPPzSSedFN2cZApUKnKG4UZd/2Xd1ELWpEmTFFYAbbLJJtaywFFz3nnnqaVLl+YlXfvfd9hhh4RtgO8f/vCH2uebGWyeQMibZOTKVofLtAs2eWY7YH52bRNs+cRE4gUXXBD94cnyww8/PFH/s9oN7KNv5iOvjcW2ntjCyfZna1dC6kzy9eGAOv744yMeWPmK9+rIgPbT5GVu0SfjZn1vdmJQrsJab731spJp+G3rrbdO5P+DDz5oiCO5muWVn4vYLRJqxgHlSyehxhMNADvgHVA6D+YKt80220yfjo54h6Lmv+uuuyZ+y/viSwdZ6dS5TiDfdagXMg9FHFDy/aUzZ85MrPCHbUA+wvjx4+N2IoQDCg4nsy3B1sq2ULS8Mr6ZhvxcpO2Scsv0ua3UDtl0kneuyj4fOi0zRkNZpG6lnZjfO8pmOE5LWl2d+yPktNXHaUna/EYCJNBuBOiAajeNs7y+CNAB5Yuko5wQDii8n8Ic/Jd54twshu2GY4UVVlBdu3ZV3bp1i7Zg2mOPPdTJJ5+sXFfUSJnrrLNOtOIJT5zrSR9dlrPOOkstWbLEzJL3z3jCFU9hXXLJJQqrrXTaOOJJqVYPL730UqJMK620Ujx50uplY/7tBEJObCBFWYdRV8q0C1KerzZByrU5XvBODV3nsxxQaId0PBzz2ti11147Ed+81nzCXmswpM5cOOh8uBwXL14ct5X77bef9RLzyVuU3Vw1YL0g5WSzk+3z589P8Id9FglSfwsWLGi4XHL1ZbdIyOwL0T/lBZ86CTWesJWhWf1qWVIHeY4GfR3eE2nWyXnz5umflLmqBe9MdA0+dZCVZrPMqqgTyLfUSUfUC5mHPLswV0CNHDmyAf/pp5+esJdjjz02ioOtnLUdhXBAyS34pk6d2pA3G/O88ko+vnQk5Zbpc1upHbIqJedklX0+bLTMGM1mY3W0GY7TfjW6uvdHyGkVfZJsk3zZ7a+k+YkESKBdCdAB1a6aZ7nLEqADqizBgteHcEAhC+aLV3v16lUwV/bocuBmu5m0X5l+Vso0b5Rx84Abe31DjyNWVeF9RlUEPOUOB42Z/jPPPFNF0kHTMCdT991336BpUXjHEwg5sYHSyTpctl2Q8ny1CVKuLZ8YPHbv3j2q81kOKJR7tdVWi9uGnj17Zioa7wYxt6HTxY8QrQAAIABJREFUbcpee+2lbFt6hdSZC4fMwogf77nnnpjDuuuuqw466KCGv3322SeOg7IXXUmik2x2sh2rWDRzfcSEpkvACjVzu1Ks8sKDCjJIrr7sFukUdUD51kmI8YTkh+/N6lfLytKBjmM7TpkyJWEfY8aMiaMNGjQo+g0PpGASzzX41kFaus0yq6JOIM9ZOik7xnOtF1l5sHE1HVC2LZ/hgNb9hG5PXnjhBfXss8/GdhTCAXXiiSfG8pEuVnjYQtHyZsUvoyMpt2yf2yrtkE0needaqc9HWaRuffV3Um5Zm+E47RfLq3t/hFxW0SdJ+/Jlt3n1m7+TAAl0fgJ0QHV+HbOEYQjQARWGa6rUUA6ozTffPL5RxYQZBnZlgxy42W4MiqYhZZqDQcjCC9CxNZ6+yccRW+tVFW6//fZE2thKotUDnvjSPOmAanVt5ue/mYmNww47LLaRs88+OzMRWYfLtgtSnq82QcpNy+eiRYsUJhgxkMwKso3FRF1WePvttxOODKwaRVq2EFJnrhxs+bKdw0MBuj1xPaJPsq0issk3zzU72Q4Z5mQ18on3FrqE2bNnJ8rXo0cP62WSqy+7lXl3WQHlWyfS1n2MJ2wQy+gX8vJ0YEsT5zDOMO0D9Q8BW2cuv/zykf6xfVGR4FsHaWmXYWaWOUSdQJ7zdFJmjGfmP6te5OVBsjUdUFdddZX8OfoOJ6X
Z3m2xxRbRds76XAgHFNLQ8tGGpvVRRcubF79ZHUm5vvvcurZDVoPJOdlKfT6KInXrq7+Tcn3bDMdp2e9h0+0Ljs2M08r0R7Ars01HHlppnJZTxfkzCZBAJydAB1QnVzCLF4wAHVDB0NoFY7CFiQrf4YADDohvVDGIy3sXk0v6rjcGLrJ0HClT3sQg3nPPPZcoC55Ezpps0LJ9HPE+E3NAPnDgQB9iO1QGHVAdir/yxPGOL23DWMlhW70hM4VVLPqayy67TP6c+C7rcNqEQeKijC9Snq82Qcotm0/Zxs6YMSOjVL/81Lt375hr//79U+OH1FlRDph8fOyxx9TEiRMb8mvu2b/bbrup//znP6l/F154YVx22BYmKoqGMpMbyJ+2aRyxWsElyNUxerstea3k6stukY45KZPX94XQibR1H+MJyQ/fy+gX17vowJYuzh111FGxfYA37B5b7mmbwTaSriGEDtLSLsMsdJ1Anl100uwYz7VeuOTB5OvigPrpp5/UNttsE9sH7OSUU06Jv/t2QMFhb67E7NOnj5nlxOei5XWJ34yOpFzffW5d26GEMhy/1KnPd8my1K2v/k7K9W0zHKeFHaeV6Y9gd6H7JGlfvuzWpc4wDgmQQOcmQAdU59YvSxeOAB1Q4dhaJYdyQOF9SXriBMfbbrvNmn6Rk3LgVvbGAGlLmbbBIOJhpY5ZHmxFUlUwt3DwtZ1hVXm3pUMHlI1K5z138cUXJ+pO3gQ2SOy4447xNXfccUcmHFmHy7YLUp6vNkHKdcnnpEmTovcVTZs2rYEBVoaZbdItt9zSEEeeMFeS/P73v5c/x99D6qwoB7zPT5cTDnkzXHrppfFvee/HMSd1Ie+II44wRTl9LjO5ccEFF8R5RfonnHCCU5pyFezYsWOt10muvuwWiblOtCNuCJ2EGE/YIJbRL+S56sCW9kMPPZSwDzic9t577+jceuut5+S413JD6EDLlscyzELXCeTVVSfNjPFc64VrHjRbs61KWwGFuJjMNp1Cup3E0bcDStrnlVdeqbPbcCxaXtf4RXUk5Zbtc1ulHWpQiMOJOvX5DtkNVq992wzHaWHGBGk2UqY/gszQfZK0L5/jtDQmPE8CJNAeBOiAag89s5T+CdAB5Z9ppsRQDig8iW7eDO+www6Z+cCPn3zyiVpjjTUUntL/8ssvG+LLgZvLzWSDEHFCykwbDM6cOTNRHmwNgC2tmg1YBaL/smQgDl7Wq1luv/32WdFb4jc6oFpCTd4yKSfQX3311UzZeLJ7rbXWim0+771nsg6XbRekPF9tgpTrks9NN9004nDRRRc1MHvzzTdjRmgftt1224Y48sSWW24ZX7PTTjvJn+PvIXVWlAPegaLbP/O9SbATvPMJv7lu1WKuAEPft2zZsrjMLh/KTG7MeH1GXA7keZVVVnFK33wiF9smpm0dKLn6sltwcZ1oD6WTEOMJm77L6BfyXHVgSxt6hR1rW8eKBP09bxWoKS+UDsw0zM9lmIWuE8inq06aGeO51gvXPGiurg4oxD/55JNjm9G2g6NPBxTayQ033DBOB+P0rPeRFS2va/yiOpJyy/a5rdIOaTsqcqxTn++Sb6lbX/2dlFvWZjhOq3acVqY/gt2F7pOkffmyW5c6wzgkQAKdmwAdUJ1bvyxdOAJ0QIVja5UcygGFxPCCe/OGeNasWdY86JOYZNHxP/jgA306Ps6bNy/+HfGwpVLZIGWed955qSLxxLzOH454sb0tfPPNNwr78+s/3IDIsN9++0Wy8FJjc1JVxoNTzkzzjDPOkFFa7vvqq68el2mPPfZoufwzw8UIyK1zbr755kwBkydPju1jueWWU3LVi7xY1uGy7YKU56NNQJ6l3LyJjXfeeSfmYHsRPWSi/pjtQ96WQGbdy3JAhdRZUQ7bbbddVEZMxpvbNz799NNx2V23kcWqI5PXU089Jc0p87v5Im3IAaciYcCAAYn001YzaZnmRDTSy7JtydWX3SIvrhPtIXXiezyhGZvHsvqVOjj//PNN8bmfd91114R9aFvFlnquIaQObHkoyyxknUB+pU46ol7IPOTZBcbKWvfDhg2zYY/PffXVVwrjSB1fH7McUDI/eX3RFVdckZB/ww03xOnbPkj5eeWV8X3pSMrNK6dLn9sK7ZBNJ3nnquzzs/qxvHzq36Vu62wzHKf98t6nKsZpZfsj2FfIPimU3ep6wSMJkED7EqADqn11z5KXI0AHVDl+ha8O6YDCDbC+GcYR2z+lvbR37ty5iUkumwNKTsbhyc+yQco89dRTU0UiT127dk2U6dFHH22Ib06go9znnHNOQxztgMLvI0eObPhdnzj99NMT6eVNMOvr6nrE09n6qW6UHS+1ZujcBH788UfVr1+/2I6xuunbb7+1Fhr2ceCBB8ZxBw8ebI1nnpR1uGy7IOX5aBOQXyk3bzLspJNOijnceuutZpHjz1gdhnqk/373u9+lPpn+1ltvxfEQP8sBFVJnRThgtZwuG/oqM5iT9a46HzVqVCwPcvHenSLhxhtvTFw/bty4IpcrbKtmbpm10korRU/c2oTgZeXmlok9e/bMfFhBcvVlt8ibqwMqpE58jydszMvqV+oA/XeRIO0TNuo6aafTCakDnYZ5LMssZJ1APqVOOqJeyDzk2cWLL74YtzPYEiovoH/Q7aQ+Zjmg3n333UT8tL4I/TGcBVomjptttlnqOF7ns2h5ZXxfOpJy08qp8+3S57ZCO6TLU+RYZZ/v2l9n5V/qts42w3HaL2NUV73LfrDIOK1sfwSbC9knhbLbrLrC30iABNqDAB1Q7aFnltI/ATqg/DPNlBjSAYWn1YcMGZK4ecXkyOzZsxN5glOlR48eiXg2B5T5Qm7cCG+wwQYKN01lgpSJlzpnBaxAMm/G1157bSWfTi7qgIJDBvvrm0/3oxMZMWJEwllz2mmnZWWtJX6bMmVKgh/K7vJOoJYoHDOZSkDegGNbMUyCmQFOqYMPPji2j5VXXlnhieS8IOtw2XZByvPRJqAMUm7aZBgcD9dff33MAe3NAw88YMWANuO4445LxMWKITibzADWAwcOTMTLckDh2lA6c+EAW3j22Wej7Vh1ewsHjA7goc/jCAenS5CTqdjSzrZCNU3WQQcdlEi3mXcBYjWB6YTq3r17VFbz4QzYvfkeNLwHMG/rSsnVl92ChYsDKrROfI8nbDouq1+pA9TFIuH9999P2BdsG090u4bQOrDloywzyAxVJyBb6qTqemHLQ55dmJOoe+65pw174hzGwXiYx2wTsxxQcJybcc2+CCv40f5cd911qn///ol4hx9+eOoDDmaGJPO88sr4vnQk5ZrlNPNbtM/1eV9j5kN/9lGntKwix6r6/LJjNJRJ6rbuNsNxWjXjNF91J1SfFMpui9RzxiUBEuicBOiA6px6ZanCE6ADKjzjRAohHVA6oZtuuinhSMEqIixxx7YE0vGkb4pNB9TSpUujF5tjEk7/ro94Onj8+PE6KedjlsxBgwYp7PVuC3g3FSYMdfo4ogxYxQSZCEUdUFoWVi8cdthh0dZ+cG
zp8zhiohMdS6uGhQsXRozMLcB0+TbaaCN1yy23lHYmtiqbdsn38OHDFbbU03qH8xE2j60s0R6YK+PQLr322muZaLLqcDPtQpa8Mm1Cmly82w31wfzDe4E0H/M4YcKETBZof0x++AynzP777x9NTJq/abl5Digk6FNnaRyQHzhk9J/OnzzCVvAuEmxJJVeiIu4BBxygpk2bZuX03nvvqXPPPdd6Hdrvu+++23qdPvnSSy+pI4880qqboUOHqjfeeENHdTpOnDhRrbnmmgl5yy+/fLQqDZO+pr769OmjkP+0kMW1jN2a6WU5oKrWSdnxhFku/bmsfpvVgU7fPPbt2ze2ixVXXNFpwr9qHSC/ZZmZZcZnn3UC8prVicsYT+c9q140kwc43rHiyZSLtu3YY4+NVnLpdG3HV155JbYbXGNzQMHJjb7CNpaGMx5/st3Fd/RV1157beIhKVseijIvGl+nmaejNLk++9w6tkOaT5ljVX1+M2M0lCtNt7DTMv1dmlyfNsNxWrhxmu/+CLbms09Ks6+ydlumrvNaEiCBzkWADqjOpU+WpjoCdEBVxzpKCTe6Rbd4aSaLmBg85phjFCZXzKe/zZtdOHb23ntvhZdywmGhA242067B9VtttZWO6nzMk3nppZemyrrvvvsabtIxYahfDu/igMKqn7vuuityONkmA1AuTEjiiVPIa/Ugt/8y9Y7PmPjAAJ2hcxOAYxlPKJoTbOZkO1a54MlD1M+8kFeHi7YLefKabRPy5Mq6YPs+derUPBzRhCO2KoFDF+2lyRWOP7S/L7/8sjr00EOj9gurUV2CL52V5YAnnD///POGttfkdfTRR1uLZK4oMOPrzxtvvLH1On3ykEMOyUz3zDPP1FGdjygLtg3CygXtmNU6g/5QXqxogXMhK+RxbdZuzTTN+ipXrHaETsqMJ8xy6c9l9VtGBzoP+oj35Wi7xEoLl9AROijLzFYuX3UCssvoJG+Mp/OeVS+ayQNW02vdyyNWJOWF448/Pr4ebb0Mc+bMiX+X8vEdY06sPF533XWjFZhYdf/YY48lxuNSpvm9KPOi8c20snSUJ9dWdnnOpc+tWztk8inzuao+v+gYDWXK022z/V2eXGkftu8uNgPHMMdp/sdpIfoj2JuvPinPvpq12zL1nNeSAAl0LgJ0QHUufbI01RGgA6o61lFKVTmgzGItXrxYTZ8+Xb3wwgvRSiPcFGvnjRmvnT6DCbbfwQ0ttlr66KOP6JBpJwNos7JiOy289w1tAF64ji3ivvvuuzajEK64ixYtipjOmDFDffbZZwrv8tABDg20MXmODR1fH6kzTcL/ETcNeDfA22+/HW3pWlQ3/nOUlJg30Z6MXe23zjaewIQXHjrBn3T2VUu2Y1Ore50AnbrVCzzEg/Z+/vz5Hau8Nky9s7VDUCH7/LCGzHFaWL6+pbdCn+S7zJRHAiTQOgTogGodXTGn9SJAB1TF+ugIB1TFRWRyJEACJEACJEACTRKo20R7k8XgZSTglQDrhVecFNYkAUw6vfjii9EDbHiIzfXv66+/bjJFXkYCJEACJEACJFAnAnRA1UkbzEsrEaADqmJt0QFVMXAmRwIkQAIkQAItRIAT7S2kLGa1MgKsF5WhZkIZBGbOnJm5taJtuzicmzRpUoZU/kQCJEACJEACJNAqBOiAahVNMZ91I0AHVMUaoQOqYuBMjgRIgARIgARaiAAn2ltIWcxqZQRYLypDzYQyCNABlQGHP5EACZAACZBAGxCgA6oNlMwiBiFAB1QQrOlC6YBKZ8NfSIAESIAESKDdCXCivd0tgOW3EWC9sFHhuaoJ4B2Ps2fPLvy3ZMmSqrPK9EiABEiABEiABAIQoAMqAFSKbAsCdEBVrGY6oCoGzuRIgARIgARIoIUIcKK9hZTFrFZGgPWiMtRMiARIgARIgARIgARIIIUAHVApYHiaBHII0AGVA8j3z3RA+SZKeSRAAiRAAiTQeQhwor3z6JIl8UeA9cIfS0oiARIgARIgARIgARJojgAdUM1x41UkQAdUxTZAB1TFwJkcCZAACZAACbQQgXvvvVeNGTMm+vvuu+9aKOfMKgmEI8B6EY4tJZMACZAACZAACZAACbgRoAPKjRNjkYAkQAeUJBL4Ox1QgQFTPAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAl4JEAHlEeYFNVWBOiAqljddEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvUPGDBA7bnnnmrzzTfnHxnQBmgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtIGa28CWW26pNt54Y3XCCSdUPJvM5EigtQnQAVWx/jbYYAO10047qV69evGPDGgDtAHaAG2ANkAboA3QBmgDtAHaAG2ANkAboA3QBmgDtAHaQM1tYP3111frrLOOOu644yqeTWZyJNDaBOiAqlh/3IKvYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEOAWfCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiUI0AFVAh4vbWsCdEBVrH46oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBOqBKwOOlbU2ADqiK1U8HVMXAmRwJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJlCBAB1QJeLy0rQnQAVWx+umAqhg4kyMBEiABEiABEiABEiABEiABEiABEiABEiABEiCBEgTogCoBj5e2NQE6oCpWPx1QFQNnciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRQggAdUCXg8dK2JkAHVMXqpwOqYuBMjgRIgARIgARIgARIgARIgARIgARIgARIgARIgARKEKADqgQ8XtrWBOiAqlj9dEBVDJzJkQAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkEAJAnRAlYDHS9uaAB1QFaufDqiKgTM5EiABEiABEiABEiABEiABEiABEiABEiABEiABEihBgA6oEvB4aVsToAOqYvXTAVUxcCZHAiRAAiRAAiRQawKLFy9W+FuyZEmt88nMkUAzBGjfzVDjNSRAAiRAAiRAAiRQPwJ0QNVPJ8xRaxCgA6piPXWUA2rZsmVq9uzZ6q233oqOCxYsUD///HPFpa93cmD0/fffR3/1zilzRwL1JPDNN9+oefPmqf/973/qp59+qmcmmStvBNBmfvjhh
+q///1vpPdFixa1Vb/yzjvvqDfffDP6Q9nrHupcPzE26tKli/rtb3+r5s+fX3eUzF8NCdC+a6gUZslKgPdkViy5J+tcx3MzzwiFCXCMyTFmYaPhBSRQEQE6oCoCzWQ6HQE6oCpWaZUOKEwKDh06VPXq1Uv95je/iSZ3MMGj/5CX3XffXV144YXqhx9+iElg8njNNdfM/VtvvfXU5ptvro499lg1evRo9emnn8Yy5IeiMo844gh1ww03RJObUlaI72+//bbq1q1bzObrr78OkUwQmQMHDkzV1Q477GBNE/pK0/Gtt95qvYYn600ATmXUyTS94vy6666rNt10U7XXXnupv/zlL+qxxx5TCxcuLFWwyZMnq/3331+tuuqqUf3R9ahr166qb9++asSIEeqrr75KTWO33XZLzfP666+feh1+QPujy3vWWWelxr3lllvieDq+Ph599NHRdWgv9TnzuN122ykMMtPCc889Z70OMiZNmpS4LFQ76CrXLJf8PGHChERe0768//776tRTT1W9e/eOnAW6P9HHFVZYQW2//fbqj3/8o3r++ecbxJx++umpvNZee+2G+DiBPkrmV3+/4IIL4mvyOAwfPjyOW/bD3LlzE/3qVVddlSvyu+++i7jpvJc59uzZMzc9RChbPyEjj6suhx4TNNN/awcU7Ajp2UJePi6//PL4sjrUZ5u9oU3QvGzHZ599Ni5DkfLiorz4Znp9+vRRAwYMUIcddpi6/vrrM8daeXI1d9p3rLqGD61o3yHHFK42pUHmxde2XbQNcpWr5duOrn1nM/dkHVGnNHPbMaRN2NLDOR99WLuMM13tuc71hGPMX2pCM2NMXFl1m+Gjfoay27Q2hedJwAcBOqB8UKSMdiRAB1TFWq/CAYXBx5FHHpmYHMOkICZrV1999djJoicMcfzggw9iErj+3HPPVbjGjJP3efnll48mtW0rH5qViaehTzrppISDLM6opw8//vij2nrrrRNlbSUH1MUXX6ww2S/1A2fDbbfdZqU0c+ZMtfPOOzdcs9FGG6kZr8+wXsOT9SaAOvbnP/+5cL1dY4011D333FN45QpWKcDRYNod6mu/fv1iZ5T+DW3DfffdZwUIh6c5OaevwUSPnti0XYjJHB0Xx4033tgWLTr373//W+2yyy6J+LhmtdVWUw899FAUB3X+zDPPTDiitXxM0KYFrPgaPHhwQjYc/nD+4zczhGoHm5Wry4fj3XffbWa14TOeRB0yZEiiX1luueVU//79I8en7SEHOJtkeOaZZxraW6TfvXt3hbbMFsaPH68222yzBGNcs80220STU/qaLA5wur766qs6aukjHE4mv2233TZXJp7eNq8p8xmO3qzgq34ijSyuWWUo0n+bbQAmI2whKx94mObll1+OL+vo+rznnnta7Q2TSnDQgY3JDvUH47aPP/44LkOR8uKirPhmWrbPyA/aP4yHZMiSa3KnfUtyv35vVfsONaZwtSlNMCu+zZ71ubw2qFm5Wj6OeX0n0mj2nqzKOqVZZx1RllA2IdP12Ye1yzizWXuuQz3hGDNZA5oZY0JCVW2Gz/oZym6TRPmNBPwSoAPKL09Kax8CdEBVrOvQDihMumDVi745Qno33XRT4gl+bBmEp191HBxNB5RGcueddybinHfeeWrp0qWRLEwQYTIPk6xyImXYsGFaRMNRyjznnHOi9z5g67t3331XjRs3Tv3pT39qkIkn7kOFq6++OlFO8GglBxS43HvvvQ1luOuuuzKRvffee4lr4Ezg+zcykbXEj9C7WbexQgQ3dhgowSGCSdrzzz8/egrfjId2Y9asWU5lnDNnjtpwww3jdLDKEqupzHqD7zhvpjFq1CirfNR7Mx4+T5kyxRpXn4Qsec2XX36pf7YezTxjwnf69OkN8a699toGuWhH0ybFtYATTjghvu7ss8/Wp63HUO2glAuHDh4IMP8wwfz5558rtNMmv6xJNGwvh4lmHX/FFVdUaDfRH+jw7bffqhNPPDGOg7g2BxTi4zo4PrU8HOGYygp4h4p5Dfowc+Wuea3ksOWWWxZ2sJry5GdsX2vaEvIPe/rss89k1MR3OTEAZ+1LL70U9YHQ0QMPPJBgMnXq1Eh3uO5f//pXtHpKM8tyQPmun7oQkqvP/ttlgj4tH3BO2h58QfyOqM9bbLFFrr2dcsopCV2j/UgLkntWeSFDxv+///u/aFIK9Q59AdqyadOmqeOPP77h4ZWslaRSrswH7TtNgyrxkEVeX5LH2UylCvsOOaYoUlabbftqg2Q+fPWdZe/JqqpTpk25fA5pE0g/RB/WTuNMac91ryccYyZrXbNjTEipos0IUT+R91B2m6TLbyTghwAdUH44Ukr7EaADqmKdh3ZAmZOEmKB64YUXrCXERK25GsrmgML7LfRkF45/+9vfrLIwcWjGg0MqbRWNlIltwGwBNwrSsZU3QWmTk3cOqyjwFL+Zf3w2J9LzZNTldzxhaZbjkksuycwaJqB0fKw2yZu8zxTGH2tDQNaxtHoLZyO2n9M2gCNWBGVtl4dCwoFhrkZZZZVVoncA2QDgvXP4XaeBSfrXX3+9ISpkYks3HQ9HcyVAwwVKKawwMOPjc95WOGYdwdZTtgAHnZSL73qrPts1OIdt/vR1Y8eOTYsWnZc68tUOSrlputeZMx1GWQ6oAw88MC4b2mVsuZEWsPWe5pDmgMK1++yzTxwP8fEEZF7Yb7/94muwGistSA5Z+UiTkXUeTiNdRvM4ZsyYrMsSEwNYtYr+xwwPPvhgQq7cwtBss9McUCHqp86j5OrLbiG/iANK5gPOnLTQEfX5jDPOSMtOfB5thGk7999/f/yb/FCkvLhWxs9qB+REMuwKDmpbkHIld3Pii/adJNiZ7DvNnpoZU+TZVJJio237aoNkPtLKqPPj2neWvSerok7pMhU5uvJqxiZC9WHtNM6U+ql7PeEYM1n7mh1jQkroNiNU/UTeQ9ltki6/kYAfAnRA+eFIKe1HgA6oinUe0gH1yCOPJCY0sraNQrEvvfTSOL7NAYWtYswJkqwbMvNmDNfg6XhbkDLTBsW4FiuuzPTxHhCfAYMobJtkpqE/t6ID6pNPPlHY6kyXAQ7GrEntThTiAAAgAElEQVTdY445Jo4Lhx9D5yAg61hWvUWJMTmvbQbHrCfxEd90tCA+VmZkBTmpvuOOO1qj48l7Mx94t1JagF3bHMfm+4Bs1+J9TjqNiRMn2qJEq8B0HHmUDgFTALYx1PGffvpp86eGz1JHvtpBKTdP9x9++GGc5zQH1JNPPhnHQfmuueaahvKYJ/B0PyaxETfL8XPwwQcn5Joy0j4ffvjh8TVwdKUFycF336H7u3XWWSexkvCggw5Ky1J03pwYQPsrg6wrNnvDuyzANs0BFap+Iq+Sqy+7hewiE/QyH9gyOC1gVaeul/Jo46vlhKrPWj6c5WZ+sGI0LRQpL2TI+HntgBwHpbXpUq7kTvtO02Dnsu88eyoypsizKUlUxvfVBkm5eWV06Tt93JNVUackY5fvRXkVsYmQfVi7jDOlfupcTzjGbKxx
zY4xISl0mxGyfoay20bCPEMC5QnQAVWeISW0JwE6oCrWeygHFJwpeOePntDASoYs5wOKjadcd9111+jdKJ9++mkDCWwnpOXhmHVDhskTM65tcg0JSJlZg+LHH388IXPfffdtyGOZE+bWJRdddFEirVZ0QIEFJuBNPaQ5IeFw1CvM8IQmQ+chIOtYVr1FqbG1mVx9lLZyEltlrrrqqrGNrb/++rngMEDr0aNHfA3s85VXXmm4Dg4h03Yvu+yyhjj6xFNPPZWIq69Lc27huoULF8Y2D8cB2kxbMN8tJVeIbbrppontTM3rsYJB5yNtAlfHlzry1Q5KuXm6R36wNcshhxxi3fIQ25qZ27VC97CXvIB08c4tbCmXFg499NCYF9oil2CuYMva5rAZDi7pIw76Ve0swVaWeEeh1jseAMjqd82JgYcffrghSRcH1OjRo6P0bA6okPUTmZVcfdktZGumYJm3RVmRfHREfXapd3BSa7vB8YknnmiwB32iSHltesrLz2mnnZbIS9pqrLx80L61xhqP7WTfRcYUeTYlSRaJX+QeQsrNqzPIV1bf6eueLHSdknxdvxfl5WoTofuwdhlnSv346qul3LL1hGPMxhpXZowJaSHbjND1U9qXL7ttpMwzJFCeAB1Q5RlSQnsSoAOqYr2HckBhwticzMDqobJBDgSyBprmJA/ygZUGtiBlZg0uzCcMIXPAgAE2kU2dw/uP9GohPIkvt+lpVQcU8m06CLp3767wbhYZBg0aFNkLJn7lNlAyLr+3FgFZx7LqrS6Z3Ibp97//vf4pcZQ373krjvTFf/3rXxPtk825hJsuc1UTVnqkBWxxhTYBjnbzXSq43nwvkXn9+PHj4zxg4igtmG0Z3q0GJ73Ztqa9xyqUA6pIO9iM7tM44Dze82eWXa54yLo277dWdUCZ72l64403lGlXYJW1DSRsc8SIEdEf3nkgg4sDCi9+hoyRI0fKy1XI+onEpH357L+rmKCvqj67tLl1ckCdfPLJiXpuezeei/5p3w1VMj7RbvbtOqYo0qa42GAMXCnVkX2nr3uy0HXK5FXks9SbS5vnYhOh+7B2GWdK/fjqq6VcF71n2RXHmI10yowxIS1kmxG6fkr78mW3jZR5hgTKE6ADqjxDSmhPAnRAVaz3UA4odNLmRGHeFlAuxZYDgayB5muvvZZI38cKKEzumWXCU/o+Ap5M3H777SPZ0Me8efMUtuIx02pVBxT4mCu7UKbLL788gc28Mc6aiE9cxC8tQ6BIvdWF+uKLLxTez6TrAByTNsclVp3oODimTVRqufooJ1vTHFzm+xLgTMLTdraAlVdIHyuUsJLEzJNtdRVkYGtBHS8tDuKZDqg77rhDvfXWW/HKKVyPNsO2QiOUA6pIO9iM7m189TnpOPTRr2jZreqA0ja6ySabREXBk92m4xRbpzQbXBxQWbJD109pX1mTA0XsFmWqYoK+qvqcNVbS+pNtYkeugEJ7rNvGLCd+Ef3rcppH2neXiLOt/zA5FeHcEf2Vi327jimKlBWMisQv0gZJuS5lNHUmP4e4J5Np4HvZOmWT6XKuGV4uNhG6D0PZdB+ONq+zjjOlfnz11VJu2XrCMWZjbdP2GWKMidTKtBmh66e0L19220iZZ0igPAE6oMozpIT2JEAHVMV6D+WAGjhwYDyBgEH1V199VbpkciCQNdC8+eabE+lfd9111vSlzKzBxX333ZeQmZY+nF8rrrhi/GdbXWFmBisY9GQL8o0wefLk+Bx+a2UHFCbte/bsGZcHNqftAdsdbLnlltFva6yxRrRU32TDz61PQNaxtHojS2q+Hwl1YMqUKTKK6tevX2xXiLNgwYKGOLYT2O5T1zkcsfrQFvDuODOe7R0tWL2o48DpI8tra3vgdMY70XBdr1691M8//2xLPjpnTuhhr3MEedNlc7CHckC5toPIp2Rh0z3K3rdvX7XyyitHjveogCn/tthii5g12Ol2JCV6odOt6ID66KOPYkft8OHD4/LuvffeMac111xToZ1tJpSZGEB6oeuntC8f/bfmVIUDqqr6bKt3upz6WBcHlHyiGU79tFBE/zYZtO+wDqg62Tf07zKmKGpTReJ3ZN8Z4p4sRJ2yyXQ5J/Xg0ua52EToPgx5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy/TDoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFug/lgFpllVXiCTBMtPoIciBgG2jqdDAhqyeFcbRNXiOulJk2uMAE3l577RXLxIS17T1VkCmdR1mrevDuI7313jbbbBO/BwbvbDHz38oOKDCRW13orbOwBZEu55gxY7T6eOxEBGQdy6q3ZrHNFUKwEdv7e8xJYqySKjLRbrZRkG97V86M12fE9ok4l156qZnF6PNNN90UxcGKLWxHhrDxxhvH12FLTRlefPHF+Pc///nP8ufEd9MB9fe//z36DXuqw7Gg6w6O0jn20EMPxb/7egdUkXYQGXXRPdpmXQ60nVnB3M4TDmufoRUdUFdccUXMDts76SAfwMC2Ms2EMhMDSC90/ZT25aP/1pzMvIdaIVJVfXZpc+vggEI7hffh6fZg8803t7bLWkeu+tfx5ZH2HdYBVSf7hu5dxhRFbco1fkf3neZ4x9c9maxP+F62TtlkupyTenBp8yA3zybMfiDEGBN5aIdxptSPr75ayrXpnWNMlxpkjxN6jIlUy7QZoeuntC9fdmunzbMkUI4AHVDl+PHq9iVAB1TFukfnjZez+ww//PBDPIGAiQRMxvoIciBgG2giHdz04kZBT2Lg3S1YcWALUmba4MIchEHulVdeaRMXnXN1QOGGdMcdd4zyify++eabscxnnnkmzj/Sa3UHFPibTyrB6YaVIz169IjKudVWWxVyHsSg+KH2BGQdS6u3siDnn39+og7o1YE63rJlyxK/F3VI6G3zdDsxd+5cLTo+wm5Np8dOO+0U/6Y/7LvvvlE+tt56a31KnXrqqXHebJM9eCeeThcrJrOC6YC68cYb46j33HNPLAOy+vfvrzD41GHcuHHx774cUEXaQeRD6h5ba2FlFv7wNPiwYcPilWAoQ5YDKlS/onm1mgMKT/X26dMn0rHcQlK+a+Siiy7SxSx0LDMxUEX9lPblo//WgMyJjVAOqKrqM9otrMTO+ttzzz3j9gJ1MeQWfEhLtwO33nqrghN+5513jtOHM3/w4MEKK1Wzgqv+02TQvsM6oKqyb19jCthJUZtyjd+Z+06zfpWpU6acop+lHnzYRBV9GMrZDuNMqR9ffbWUyzHmL226q/1n1bMqxphIv9k2o4r6Ke3Ll91mcedvJNAsATqgmiXH69qdAB1QFVtACAcUJmv05CqO2267rZdSyYGAHmDBkfPxxx9HLzw/5ZRTEmljlQCuSwtS5tChQ6P4c+bMid4ng1UE5vsIsLUetkvICq4OKL1yAoww2W4G6YDCiodWDxMmTEjoxnzHT9Y7cFq93O2ef1nHdL3N43LNNdck7GXEiBGJS2Q707t378TveV8GDBiQkD9r1izrJYcddlgcr1u
3bokn8rFqSq9gNFdHjR07Nr4G9fv9999PyN5www2j39ddd93M7fdwkemAwnadOuDmUL87Tre3N9xwg/5ZPfroo3EeijqgfLSDyIjUvc5n2jHLASX17atf0cBazQFlvidQb3Wly4IjtjXUnOH8byY0OzGAtKS+QtRPaV++7Bb5r8IBVVV91nZQ5BjSAZWXjyOOOMLpoRup/7TJoTTbp32HdUBVZd++xhSwk6I2JeP7aoOk3Lw605F9p1m/ytQpU07Rz5KXD5uoog/T5ezs40ypn3asJxxjamtPHpttM6qon6HsNkmA30jADwE6oPxwpJT2I0AHVMU6D+GAgvPGvFnCvus+ghwIII211lorngA208RnrOx6/fXXM5O2yZRy9PeTTjop05mlE4IzDJPn+s92Y4gn1FdYYYWIE1ZiyO2/0hxQeOLnyy+/zJ201nmp23GHHXZI2AbY/uEPf6hbNpkfjwRkHXOdGIDDSdc9HLGNoxnkKo/11lvP/Dn3M1YsmfKxHaYt3H777Yl4zz33XBzNrKfTp0+Pz2N7TlM2VivpYDqULrjgAn069WjGl++TmjlzZmK1J9pz8EYYP358nIeiDigz7/KzazuIPEjdw2GGMuMPTvfDDz880X7b2koNBnvRm3nJ61cWLlwYvVMOznv5t3TpUi02Prba5MDxxx8f8cDqWbxMXQZzlR24mVv0ybhp35udGIC8KuqntC/TPuTnInaL/FfhgKqqPuMl4no8knYcMmRIon6FdEDhoR7dDpxxxhnqqKOOUhtssEEifTzsY9t21bRVqf8qHVC0b1MTv37uiP7K15gCpShqUzK+bHfM70XaICm3TN8Z6p7sV63/+qlMn/GrlOKfJC8fNlFFHdcl7ezjTKkfs17Izx1VTzjG1Nb4y7GKMSZSarbNqKJ+hrLbJGl+IwE/BOiA8sORUtqPAB1QFes8hAMKzhRzQNns09cShW0gACdO165dFVYmYDuiPfbYQ5188snKdUWNlIn3D2ByBKsjzAkolOess85SS5Yskdkq/B0rtuAc04zw0m0ZzIltxNMroPDuJHz/5z//KS9pie8vvfRSXG6UY6WVVoonzFuiAMxkYQKyjrlODMgt+KZOnZpIG+9b0nUIR7QFRcLaa6+duH7BggXWy+VNjrmd2dlnnx3JWG211Rq2+TQnVNEm6WA61vIc5LjGnNAbOXKkFhMfTz/99EQ5jj322Oi3J598Mj5f1AHlqx100T0mmbUesxxQaHt1PBzz+hWpX/Na88l8DbKVHFCLFy9WmKBHmfbbbz9dhMTRXCGFeObquETEjC/NTgxAZBX1U9qXL7tF/s3+H0/aZgWZjyxHSEfUZ5c2tw7vgJo0aZJaeeWVE/UcW/SlhSLcbTJo32FXQFXVX7nYN/SfN6ZAnKI2JeP7aoOkXFsZXfvOUPdkvuuUTZ7rORdeNllZNlFFH6bz1NnHmVI/dawnHGNqa1SqqjEmUmy2H66ifoay219J8xMJ+CNAB5Q/lpTUXgTogKpY3yEcUCiC+d6UXr16eSmVHAjYbsiKJiRlmhNHGIxiMtecuITjKO19Uq5pjx49OpaJ97b8+9//bvgzt+dD+phMxgSi3u7rkUcecU2udvHMiWG8P4ehcxOQdcy13p544olxPUEdwNOJZvj+++8TvyOOXEloxjc/YwWMuQUkVpFgS7u0YL4vylx5o7fSO+aYYxouxVOcuu3YZJNN4t+xdRzOw2HuEswJa9v2n3Ccde/ePU4Lsl944QX17LPPxueKOqB8tYMuuseAWec/ywEFVnD0aaY9e/bMxId3YJnb0Onr9tprL4UnwmVoJQeU+f4vbON40EEHNfzts88+MSuUfdddd5VFzv3e7MQABFdRP6V9+bJb5L8KB1RV9dmlza2DAwrc33nnHYX3+en6Cj1goskWsvRviy/P0b7DOqDqZN/Qfd6YAnGK2lRW/DL3EFKurQ4X6TtD3JPJ+oTvZeqUTZ7rORdeNllZNlFFH2bmqTOPM6V+fPXVUm7ZesIx5i8WWdUYE6k122ZUUT+lffmyW7Pe8zMJ+CJAB5QvkpTTbgTogKpY46EcUJtvvnk8gYDJXQwUygY5ELANNIumIWWagwvIwovvMXmsJ0NwxBY2ZYK5MsKUW+SzuQ1Ymbx0xLV48k2XlQ6ojtBAtWnKOuZab7fYYovYTtCGYGAlgzlJDJvC9pcuYfbs2bFsXNejR4/My8x3yyEv3377rTJl4AZKBvPpZKQBRxFWUmjHl7mSSl5rfjcdUFdddZX5U/x5zJgxifKAHVYT6HpWxgGFRJptB111v2jRooiPTcdxIZVSsl/BBF9WePvtt2PeYIGVskjLFppxQJnvbcBquLTgyiHtenneXEGrdZx3hN2mrfKT8vX3ZicG9PWh66fk6rP/NvMeagVUVfXZpc2tiwMKtiO3A8RDO7aQp3/bNeY52ndYB1Sd7Bt6dxlTFLWpvPh17Tt93JOZdUl/LluntJyiR6kHlzYPaeTZhNkPoI8NNcZEXjrzOFPqx1dfLeWm6Z1jzGI1qqoxJnJVps0IXT+lffmy22LaYGwScCNAB5QbJ8YiAUmADihJJPB3dN4YaPgOBxxwQDz5iUG7y1ZTeXmQA4G0gWaeHPN3KVMOLhAXzh5zcg9bH+VNSJlpyM/gDSdU1p+5Sghp4/02Zvw33nhDim2Z73RAtYyqvGRU1jGXeouJcu2ogf2nrRbabbfdEnUTq35cwpQpUxLX6W3r0q7FikOzDcAWmXolI/JpewfPJ598krgGE7z/+Mc/4nOuddjFAYVtPbfZZptYNvJqTmaUdUCBSzPtYDO6T9MBzst+ZcaMGVnRo9969+4dc8GK07SAd9FpHUOnWSvitAysPNLXXHbZZfp0w7EoB0xaPvbYY8q2Pau5VQ/s/z//+U/q34UXXhjnD/nEjX6RUGZiAOmErp+Sq8/+25zYyOvvXfKhuXdEfXZpc+vkgDLbSdgt3hNlC0W4266nfXeMA8p3f+Vi365jiqI25RK/jn2nj3uyEHXKJtPlnNSDL5sI3YeZZevM40ypH199tZTroneTufzMMWby/Z2hx5jgX6YfDl0/pX35sltpd/xOAj4I0AHlgyJltCMBOqAq1nooBxTel6Qn5nC87bbbSpdMDgTKDjSRISnTNrhAPKzUMcuDbRtChrR3QIVMsyrZdEBVRboe6cg65lJvH3rooUR9u/LKK62FwYvszXp5wgknWOPJk/KFz2PHjpVREt/l5BXeG6BvVOH4SQtwHOv8YcUT3teD7xtttFHaJQ3nXSascRGcMabTTqeLow8HFNIo2g42o3us3MJ2o9OmTWtgod+5pct2yy23NMSRJ8xVU3i/X1q4+OKLY11Bfp7TAXJ23HHH+Jo77rgjTXRDP5NXB/AOQ11G6dy89NJL49/uvffe1DTxg2k7kHfEEUdkxpc/lpkYgKzQ9VPal8/+uyMdUGDnsz7n2RvSq5MDasKECbGNw24HDx4sTTP67qp/68UlJ74gk/bdSNZsc9JWQOGqqu3bdUxR1KZc43d03xninqxR++Umk23yXM
9JPbi0eS42EbqOm+XrzONMqR9ffbWU66J3jjFV9J7sOowxYf9lxpmh66e0L192a9Z7fiYBXwTogPJFknLajQAdUBVrPJQDCk9l68ENjjvssENuybBiAHv/44n1L7/8siG+HAi4DDQbhIgTUmba4GLmzJmJ8mA7I2zvFCrQARWKLOVWTUDWsbx6u2zZMqXfrYS2A20CXohrCzNen5Gol6ussorC9XnBfGoO27Lhxj8vbLXVVnFa2JZTv48ta+ULHGK6HRwwYIBabrnlou95DMy8uE7o4ZqTTz45Tk+ni6MvB1TRdrCo7lGGTTfdNCqDbYvCN998M1E+vE8rL2y55ZbxNTvttFNqdOmUfPXVV1Pj4gc8xb/WWmvFstFmp4WiHPDuFK0/871mSBPvfMJvrlvqmSvA0N+71A9djjITA5ARun5Krj777452QIGfr/rs0t7UyQGFBw60/eMI57AtuOrfdi3O0b47ZgWU1kdV9l1kTFHUplzjd3TfGeKeTOvRPJatU6asIp+lHvLaPFebCN2HyTJ21nGm1I+vvlrKzdM7eHOMqVRdxpjQR5k2I3T9lPbly25lved3EvBBgA4oHxQpox0J0AFVsdZDOaBQDLzs3ZxEmDVrVmbpMJGr43/wwQcNcefNmxf/jnjYXqhskDLPO++8VJF4elznD0e85N0WvvnmG4V3sug/TJoWDfIJ4K+++qqoiNrGX3311WOOe+yxR23zyYz5ISDrWN4N4hVXXBHbB+oZVsNkBTh2zHqZt5rJdOjgOtd2BA4RMx39+bXXXkvNHlao6HjmMa8tNAUirr522LBh5k8Nn9FOmC8b19flOaCkjny0g8iclJun+3feeScuq+0F9pCJNkOXC8e8rYTM9ibLASW3Sbr55psb+JonJk+eHOcDjkW5UsmMW5TDdtttF8mGk8ncCtB0ErhunWs6QcHrqaeeMrOW+dl8ETWuBaOiIWT9lFx92S3KWMQBJfOBFZJpoSPqs82ZK/P35JNPxvYMXT/++OMySvy9SHlxkYyf1w6YW1siL1ipYAtSbhZ32/W0bzcHVBHOHWHfefZUZExRpKw22/bVBsl85JXRpe/0fU8Wqk7Z5OadK8qriE2E7MNkuTrrOFPqp871hGPMX/qFKsaYsP+y/XDI+hnKbmW953cS8EGADigfFCmjHQnQAVWx1kM6oDDpickD/YetkNJefDt37tzEhI/NASUnjvH0ZNkgZZ566qmpIpGnrl27xuVBuR599NGG+ObEJOKcc845DXHyTjzwwAOJdLA6rDMEPMGPSVVtE3gBMEPnJvDuu+/G+obe0yZSYBvynTWbbbZZapuhqT3//POJredWWmmlaOWF/t08LlmyRJlbsvXs2VOZK0zMuPKzfG8UytK9e3f1448/yqjx9zlz5iTKjmt+97vfxb+7fHjxxRdjGdhuIi/ceuutcXxdz/IcUCHaQeRTyk3TvS7TSSedFOcd5bAFuToUPNNWyL311luxPLDIckBBj/369YvjY3XTt99+a8tCtPrpwAMPjOOmbRGmLy7CASuvtN7QP5th1113jX9z7f9GjRoVXwO5Rx11lCky8/ONN96YuHbcuHGZ8W0/hqyfkquP/luXoYgDSubj9NNP12Iajh1Rn7F1ZV6A417bHY54KjktFCkvZMj4We0Atm0z84F6uHTpUmtWpNws7jYBtG83B1QRzlXYd8gxRZGy2mzbVxsk85FVZ5APl77T9z1ZqDplk5t3LqRNhOzDZLk66zhT2nOd6wnHmL/0C1WMMWH/ZfvhkPUzlN3Kes/vJOCDAB1QPihSRjsSoAOqYq2HdEDhye0hQ4YkJhMwgTZ79uxEKfEEe48ePRLxbA4ouZpggw02yJz8TSSS8kXKzHqfC0TgZdjm5Mjaa6+t8FJ4M/hwQMl3nTzxxBNmEi37Wd5cwRnl8q6Vli0wM64waW3WGXMiBasF8eTuddddp/r375+Id/jhh6c6FiRWPM1qvv8IjqFnn3024bxCOuY7e1ZbbTWVt82amQ6c53rbPV2eY4891oxi/WxugYbrsrbsswkwb8723HNPW5TEOThS4NjVecQxzwEVoh1EpqRcU/dmpuEYvP766xN5hhPeFtCvHHfccYm4WDEEZ5MZMCE1cODARLwsBxSulRMP2KoRcswAp9TBBx8cy1155ZUjGzbjyM8uHCAXNmvaCxykOsiHEuAscwnSqYstJ11X5cqVKM2++zBU/ZRcffTfmmkRB5TMB+wxLXREfUa7lxfkO2LOPffc1EuKlBdCZHxsY2Ou7EPbOn369MQkOtotjA/SVj/Z5GZxtxWG9u3mgJL6y+JchX2HHFMUKavNBn21QTIfvvpOn/dkIeuUTXbWuZA2gXRD9WGyTJ11nCntue71hGPMLtEDWdI+bd/LjDEhz0c/HKp+hrJbG0eeI4GyBOiAKkuQ17crATqgKtZ8SAeULspNN92UWPWCVURYMo1l7tLxpCdNTQcUnn7Fi9cxYax/10csER8/frxOyvmYJXPQoEEK+6XbAt5NhcltnT6OKMPIkSPjp3TLOKCw3/Bpp52mMElopoFtpDBBi86lFcPChQsjRuZ2WLp8G220kbrllltKOxNbkUtnzjNupFEvbPUW9i1tXNvDCiusoK699trEBKULp4kTJ6o111wzUW/gMILTAc4tc+Vdnz591HvvveciNhFn7733TsjPmhzVF2J1jC4bjniiziXAIYEVT+ZEOK6H0ytPxiuvvJJIM80BFaodTJML3aINMP/w3i6Tj/6MbUizAmzL1Ck+wymz//77Rw448zctM88BhfSGDx8ev6sL10EOVllhy1X0W6Zc6CZrC8Y0DpALh6n+0/mTR6SLd1Vg60W5+hZxDzjgADVt2jQrJtg3nAi269Bn3X333dbrcPKll15SRx55pFUvQ4cOVW+88UbqtWk/+KyfWVzL9t86/2a9S3tIokg+Oro+Ywth29af8+fPj1ZpSztB+4yJpc8//1wjicY4aWMxG/csPrBftAfSqa/rAJzoeG+OLWTJteVDyqB9u20xWYRzFfYdckxRpKywp6LxtQ3m3UOkyfXZd5a9J9NlMY8h6pQpP+1zSJuQafrsw6Rs83tnGmem2TPa+ay2ug71hGPMcGNM2LvvNsNn/Qxlt2Y952cS8E2ADijfRCmvXQjQAVWxpqtwQKFImCQ75phjVN++fRMrFfRkA45w7GDgjadj4bDQAQNRc3WDeQ0+46WtRUOeTEyypIX77ruvYWIOk5ILFiyILinjgMIT8LJ8+jvSwE1+Kwa5FZYukz5isgsDPobOQ8C2/ZzWN46YeMTqkXXXXTdamQTH62OPPZao+0VpYLIUW3tg8hLv5UE62mGANgRPXWIlCSb1mwlYqaXLAHloR/LCXUk6DbQAACAASURBVHfdFV+Dlx+7Bqys1GnJI/KRF44//vj4+pdfftkaPVQ7mCdXlsf2ferUqdY8myfhWMOWcnBiQx9a15AH/aPPQdkPPfTQiAVW4LoEPACBpzJNJ4QpGyuT8MRlnv7LcoC9wqZtfPS5o48+2lokcyWCjmseN
954Y+t1OHnIIYdkpnnmmWemXpv1g6/6mce1TP+t82/qPs0BVSQfdajPV155pS5efMT2NaZdyM+TJk2K4xYpLy7Ki6/TAmuM41BfsUL0n//8Z+YDKXlys/SPfNG+3RxQRThXYd8hxxRFyupi21k2mHUPkZcPXWeyji59Z5l7srhBMD6EqlNGEtaPIW3ClqCvPswmW5/rTOPMPHuuez3hGLOLCjHGhK2HaDN81c9QdqvrOI8kEIIAHVAhqFJmOxCgA6piLVflgDKLhfd1YLuVF154IVpphBsI7bwx4/EzCZAACTRLAAMxrBR6++23o20ym3U6menjaVu8lBZ/aZPSZnx8hmNVX1NlO4d0saISKxzaISxatCjaLm/GjBlRufFOMR2ge2y3WNQGsE0Y3k+IvgqrR7Adn+s7w3TaPNoJhKif9pSaO+vigGpOcnNXtVt9bo5Sfa6ifRfTBe27GC+fsXlP1hzNUHWc48zm9BH6Ko4xQxP2Kz9U/fSbS0ojAX8EYPN4OAXbxDOQAAm4E6ADyp2Vl5gd4YDyknEKIQESIAESIAESIIEABOrmgApQRIpsYwK07zZWfo2KjgmzF198MdolA6uyXP++/vrrGpWCWSEBEiABEiCBjiVAB1TH8mfqrUuADqiKdUcHVMXAmRwJkAAJkAAJkECtCXCCvtbqYeZKEqB9lwTIy70QwHvesrYTTPvN3BrUS0YohARIgARIgARamAAdUC2sPGa9QwnQAVUxfjqgKgbO5EiABEiABEiABGpNgBP0tVYPM1eSAO27JEBe7oUAHVBeMFIICZAACZBAmxOgA6rNDYDFb5oAHVBNo2vuQjqgmuPGq0iABEiABEiABDonAU7Qd069slS/EKB90xLqQADvapw9e3bhvyVLltQh+8wDCZAACZAACdSCAB1QtVADM9GCBOiAqlhpdEBVDJzJkQAJkAAJkAAJ1JoAJ+hrrR5mriQB2ndJgLycBEiABEiABEiABGpCgA6omiiC2Wg5AnRAVawyOqAqBs7kSIAESIAESIAEak2AE/S1Vg8zV5IA7bskQF5OAiRAAiRAAiRAAjUhQAdUTRTBbLQcATqgKlYZHVAVA2dyJEACJEACJEACtSZw7733qjFjxkR/3333Xa3zysyRQFECtO+ixBifBEiABEiABEiABOpJgA6oeuqFuao/ATqgKtYRHVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP29evVS++67r1qwYAH/yIA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2QBugDdTcBr744gvVvXt3ddRRR1U8m8zkSKC1CdABVbH+sAJq4MCBqkuXLvwjA9oAbYA2QBugDdAGaAO0AdoAbYA2QBugDdAGaAO0AdoAbYA2UHMb6NatW6Sjgw8+uOLZZCZHAq1NgA6oivW3xx57qEGDBqk//OEP/CMD2gBtgDZAG6AN0AZoA7QB2gBtgDZAG6AN0AZoA7QB2gBtgDZQcxsYPHiw2mmnndSZZ55Z8WwykyOB1iZAB1TF+uM7oCoGzuRIgARIgARIgARIgARIgARIgARIgARIgARIgARIoAQBvgOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbO5EiABEiABEiABEiABEiABEiABEiABEiABEiABEigBAE6oErA46VtTYAOqIrVTwdUxcCZHAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmUIEAHVAl4vLStCdABVbH66YCqGDiTIwESIAESIAESIAESIAESIAESIAESIAESIAESIIESBOiAKgGPl7Y1ATqgKlY/HVAVA2dyJEACJEACJEACJEACJEACJEACJEACJEACJEACJFCCAB1QJeDx0rYmQAdUxeqnA6pi4EyOBEiABEiABEiABEiABEiABEiABEiABEiABEiABEoQoAOqBDxe2tYE6ICqWP10QFUMnMmRAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQQAkCdECVgMdL25oAHVAVq58OqIqBMzkSIAESIAESIAESIAESIAESIAESIAESIAESIAESKEGADqgS8HhpWxOgA6pi9dMBVTFwJkcCJEACJEACJEACJEACJEACJEACJEACJEACJEACJQjQAVUCHi9tawJ0QFWsfjqgKgbeZHI//PCD+u6775q8mpeRAAmQQGsSWLx4scLfkiVLWrMAzDUJkAAJtCgBtr8tqjhmmwRIgARIgARIoG0I0AHVNqpmQT0ToAPKM9A8cR3lgFq2bJmaPXu2euutt6LjggUL1M8//5yX3bb8Hc6nXXbZRXXp0kW98cYbbcmAhe6cBN555x315ptvRn+LFi3yWki0MR9++KH673//q+bNm6cg30cb880330Ty/ve//6mffvrJa57rIgxtDnTz3nvvKZTz+++/77CsoY9C2/fb3/5WzZ8/v6l80M6awhb8ojrZma/ChrI1jpncNRRKB+45cI9Z9/6E7S/7eXdrZkwS6DgCdW9LO44MUyYBEmgHAnRAtYOWWcYQBOiACkE1Q2aVDihMBA8dOlT16tVL/eY3v4kmFTGxqP+Ql913311deOGFChNTOmACdM0118z9W2+99dTmm2+ujj32WDV69Gj16aefahENx6IyjzjiCHXDDTdEE9oNwpo8cdFFF+WWCeVebrnlYkat4IAaOHBgarl22GEHKy3oK03Ht956q/UanmxtAnPnzk20A1dddVXpAr3//vvq1FNPVb17944cFrpt0ccVVlhBbb/99uqPf/yjev75553Tmzx5stp///3VqquuGtXFbt26RceuXbuqvn37qhEjRqivvvoqUx7aP5uNb7fddgqDxrTw3HPPWa+DrEmTJsWX5bVpl19+eRzX9uHhhx9Wp5xyitpqq60SbQ7Yob3+3e9+p4477jg1ceLE+HKsygRrW7mKnuvZs2cs1/ygJ0CRD5SxaKCd/dJ30s5+4ZBmZ0Xtyhbft601M2YKWSdd25jddtsts0149NFHG/DhIaT1118/9brrrruu4RrbiWZ0EJKZLY8++hPIzdOHboP12LiZcSzb3y6K/Xz4ft5WT+S5PHuXY5yOHnMNHz5cFiEas+l6aTs+++yziWuKlDkvrplenz591IABA9Rhhx2mrr/++sx72zy5JvdWbEvzyqe5lWlHE0rlFxIgARIIQIAOqABQKbItCNABVbGaq3BAYUB65JFHJiabMREMZ9Hqq68eO1f0JDGOH3zwQUwC15977rkK15hx8j4vv/zy
6i9/+Yt1lUKzMvEU/kknnZRwkMUZLfjhzDPPLFQelPejjz4qmEr10S+++OLohl3qZ91111W33XabNUMzZ85UO++8cwOPjTbaSM14fYb1Gp5sbQJwOJk2su222zZdIKwOGDJkSKKNgeO2f//+CjeNNof36aefnpseVtzAYWXmE21Av379YmeU/g3tzX333Zcq8+uvv1ao89p5pa/DERMAaQGrtwYPHpzIA8oDZz5+0yGrTYNj/+WXX9ZRE0esbgI7Mz/du3ePHE62vKIMOuCJU/O6Mp+Rli2UnQClnf36kAftrEtU/2x25uOcL1srM2YKWSdd25hbbrlFrbHGGg1tA9pOOP+xMlUGyMZvZn3X7ck222yjpk+fLi+xfm9GByGZmZn02Z9AbpY+NDvbscg41tQHJmqLhmb0kZYG+/lsnbdyP5+mc/N8lr3byt6RY64999xTvfrqq2b2o89wkMMRjDpo1k2M6XCf/PHHHyeuKVLmrLhmWrbPyA/Gdj/++GMifXzJkiu5t2JbmlU+Gyt9rkg72gCVJ0iABEjAMwE6oDwDpbi2IUAHVMWqDu2Awg0AVr3oARvSu+mmmxJP/GMLLjyNpePgaDqgNJI777wzEee8885TS5cujWThxhiDfUzKyoH9sGHDtIiGo5R5zjnnRO8bwaTsu+++q8aNG6f+9Kc/NcjEKouyoRkHFHi2Qrj33nsTuoJO77rrrsysY7sv0wYw8c/3vmQia9kfsRXehhtumNA3bsA/++yzwmXC1nq4Cda2s+KKK6qrr746ahu0sG+//VadeOKJcRzEzXNAzZkzJ5FHrNx87LHHlFkH8R3nddo4jho1SidrPV577bWJ+LgG7WLe5N4JJ5wQX3f22WdbZeOkbNM222wzqxMecTFZsMUWW8RyscoJTw3rgMkIrPw029QsBxQccy+99FLUhmJ7wgceeCCWjXJOnTo1ygvS/de//hWtntLsQjigaGe/Op9oZ7+wSLMzbfPNHn3ZWtkxk5wADFEnXdoYPDiClSO6fuOIh1PywtixYxPXbL311s5bgDargyqYhepPwFPqw+c4towDqll92GyE/XySitR5q/fzydJlfytSdkiqesyFMRVsPytgtbnZNmJ8lxWKlFnG/b//+79orIf7ZThxMdacNm2aOv744xva6LPOOis1G1KuzeZauS2V5fPZjqZC5Q8kQAIk4IkAHVCeQFJM2xGgA6pilYd2QJkTw5j4eeGFF6wl/PLLLxOroWwOKOzrbw7Y//a3v1llPfPMM4l4mDxNW0UjZWLFlC3AEWVOwiIfSKdMMB1Q2GYOW2Dl/bXSO2fwNJ+pr0suuSQTF26IdHxseQCbYOicBOCk0Lo2j2PGjClc4AMPPDCWhTqK7Y3SAp6w1+llOaDgeMHNtY67yiqrJBwzpny8yw6/67hwpL3++utmlMRnrETScc3j0UcfnYgnv2BVgY6PSdq0INs0THSkBTjqtEysesI2WLYAJ5SOl+aAwmSz6byCnAcffDC+DtfLbQ/NOp/mGCgzAUo7SzqgoAPamX2lnc3ui5zzZWtlx0zmBGCoOunaxhxzzDGJ+n/77bfnIsWqTt3W4Jg2drMJalYHoZmF7E/AQerD5ziW7e8vlsZ+/pe+JFSbYqvPaeekvWeNcSCj6jHXGWeckZb1+Lx0tN9///3xb7YPRcos46bdKyMdPBhotrcYh33++ee2LDS0MzburdyWSm4+21ErUJ4kARIgAY8E6IDyCJOi2ooAHVAVqzukA+qRRx5JDGyztv9BsS+99NI4vs0Bha0LzIFy1qBarnbARKstSJlpA05cixVXZvp4V1WZYDqg8lY/lEmno6795JNPFLYl08yw3SK2OkgL5mQVHH4MnZeArp/rrLNO9M4PbSMHHXRQoUI/+eSTsX1BxjXXXJN5PeoZbrARN8sBZTp7EBerdbKCdLTsuOOOqdFnzZqVyLMuO47SQWMKueeee+Lrnn76afOnxGfZpmH70rSAFU86fWwLkxawgkxPRKY5oFB/ZZBcbOXD+2KQhxAOKNpZowOKdhbGAeXD1nyMmcwJwFB10rWNQTul2xccsZo8L6CN0NekvTMyTUazOgjNLGR/AhZSHz7Hsbrdh06KjlOb1YfUL/t5SaRR563ezzeWMP2MtPesskNK1WOurPqnSzVhwoS4nUPdwmr6rFCkzDJu1r0y0sT217rNxTFtvCvl2ri3clsqy5elR9/zAVm6528kQAIk4EKADigXSoxDAo0E6IBqZBL0TCgHFJ74xDt/9KB2tdVWy3Q+oJB46mrXXXdVu+yyi/r0008byo3tubQ8HLMG1RjMm3FtEzFIQMrMGnA+/vjjCZn77rtvQx6LnOjsDiiwuOCCCxLM0pyQcDjqFWZ4Apyh8xKAE1JPap1//vnRO9V0XYXDMstJaVLBakBz685VV1012vrNjGP7jHYDbQy2h7MFbL8JWTpP66+/vi1a4hwGfT169IivwbWvvPJKIo7+glVCWjZWo+jPOG666aaJ7Un1NTjiCVkdN22CAPFc2zTw0844yM2bHMbqBUwoPvTQQ3G2zMkGrN6UwcUBNXr06Khcvh1QtDPamWmPeXZmxi362Yet+RozVVEnXdsYbPmELVF1u4VVpXkBfYKOn/VOPSmnjA5CMgvdn4CDqz4Qt+g4VvfV0EkRB1QZfZi6ZT9v0vj1s6vOW6Wf/7Vk+Z9cy64lVT3myro31XmSzvknnnhC/2Q9FimzjJuXn9NOOy1uc1HP01ZjSbm2++VWbktdyqeVU7Qd1dfxSAIkQAKhCNABFYos5XZ2AnRAVazhUA4obLWnJxFwxNNCZYMcHGYNqs0bDqS/3XbbWZOXMm0Dan0hXpxtlmnAgAH6p6aO7eCAwvsszMl8bPOF1RQyDBo0KGILJ5TcxkvG5ffWJmC+F+iNN95Q48ePT9QrPBnqEvDON7M+2p7GdJEj40ycODEhF05Ul/DXv/41cd1ll11mvcxsm/CuNDjdzXKkvUPKtwMKE4RmushH0YAJ5hEjRkR/eEeHDC4OqPnz50fXjxw5Ul4efW92ApR29qsDinamVJ6dWY3P8aQPW/M1ZqqiThYZNx166KGJdibLiYH3pvTu3TuKv9JKKzk/jAA1ldFBSGah+xOUvYg+io5j2f4mGwH282H6+STl7G9F7B2Sqh5zZd2b6pLVyQF18sknJ9ro6dOn62wmji7cW7ktdSmfBlK0HdXX8UgCJEACoQjQARWKLOV2dgJ0QFWs4VAOKDhyzMnNrC2jXIssB4dZg/zXXnstkb6PFVCYLDfLdMghh/w/9s4FeK/h/v/8SzG/wdTQaY12qqatUTpKxn1Uq+NSasrP/JT+jKKtS11+qmW0aN0vcYuQuF/ifgmKlJIgQdzjmiARQuIWCRJBYnD+837SPdmzz57bc/ac7/N8n9fOfL/nPOfsfnb3tZ/dPWc/Z3eLJt3rrx8MUMq4uwHwqaeemuBhD7xp01fc4CZg9jhZd911WxldsGBBtOyyy8Z1S7Nsijh3IChEG6N4jzjiiDgtqu9pL+NuGt0BhS233NL10vp
tD4ZceeWV0ZQpU+LZf4pPbbJvkDa0AUqJWW+99eK8yvh7//33e9Pc6cUiBqg82Z0OgKJnSwxQ6FmellW7H0LX6nhm8uUqRJ10n8WyPtyxlw5V+6Y9R9KcvYfTAQcckObNez1EGXgFF9jLLi2crtfdnyiOMuVR9jmW9jdZuvTzSR76FaJNaZeafqWMvktK089cWe+mJleuHg3kDCg9q5p3Wz2Ly4jkc2W5+2RU0ZW629Iy+SvbjvpYcA0CEIBASAIYoELSRFY/EcAA1XBp12WAGjJkSPxAqwfbuXPnVs6Z+3CY9ZB/0UUXJeIfPny4N35XZtZAipaDMQ/pOqbFL+OXlp0xf2kzIfrFAKUlaNZYY42YnXTO6IOWB9l4441b91ZdddVIyzfgBi+B119/PVp66aVb5X3GGWfEGd1xxx1j/VhttdUi6UWe22ijjeIwodoYxWkbZST3/fffz0tK676WELXbBy0n6HP2YIj2BpFzX6x9BvM6DFCHHnpoIs2qm2lLB/ryknetymCDkd3JACh6lhx0Q8+MNoU/htK1Op6ZfLkNUSfLPDe99957cZuv9lGzndPc4YcfHrdHDz/8cJq3tuuhyqBN8H8uVGFWd3+iJJYpj6LPsYYF7a8hsfhIP5/koV9V6ke7tPwrZfRd0pp+5kp7N7Rz1i0GKHeGppaFTnNlufvkVNGVutvSMvkr2476WHANAhCAQEgCGKBC0kRWPxHAANVwaevlUvuhhHYrrrhiPJCwyiqrBBHvPhxmPeRrANceDJ4wYYI3Da7MNAOUBsR32GGHWKYGl337VCmS8ePHx/6UhrRZPa4BSoYaTevXjItJT09qzYIoMhDvzViXXdRXz3Z5mOXStDSUuT5q1KguSzXJCU3gtNNOi8tbum6cazDW8np5zl7aUcbLUM4ecNOsoDJ10G73pNda5s519mDIhRde2Lotw6sMb6Yu6PjQQw8lgmrvJXM/xB5QEq6BW7EzcnVUnrUfVFHDWyKRzo8qgw1GlF0evplhxp99RM+Sg27oma0dYc9D6ZrddoR6ZvLlNESdLPrcZOL/yU9+Ercxqs96SXadrinfaoO0756W4yvqQpVBWnxVmNntVx39idJctDzKPMcaFnb6aX8XU7HrqvSVfv6muH6Lh/vsYnQp1LGovpv4mn7myno3NWnqBgOUniO/+c1vxmW3wQYbeHXZpLksdxPOPnZzW1o0f520ozYDziEAAQjUQQADVB1UkdkPBDBANVzKerkMbYD67LPP4gdavYysvfbaQXLlPhymPeRrsE0v+opbf9tuu22kDb59zpWZZoCyBzgkc+jQoT5xrWudGKC0BIK9DJlJu/ZDOPnkkyMx7WUn/vbXazLgTZs2LVp99dVbZbTJJpuUGujvZRb9mnYNKK611lqt8naXp3PXUz/uuOMyMdXVxixatChuN1QHyxq2NHBq6q6Os2bNasuHPRgycuTI+L67VNX666+fGKi95ZZbYtmhDFCK/LHHHvO2PeobzjzzzMwBiTjxKSdVBhuMyLIDoOjZYnLomdGg+o6hdK2u9syX8xB1suhzk4n/7LPPjtsutYu+AWr7S3x3mV4jx3cMVQY+2eZap8ya6E+UxqLlUeY51uSd9teQWHKkn1/CQmed1o+klOK/iuq7kdh0X/izn/0s0soXWX+/+MUvEm1inUvwKS7NoNffpZdeGv31r3+Ntt566zh+rUrwhz/8IdLsvixXlrtPVqe60kRbWjR/nbSjPhZcgwAEIBCSAAaokDSR1U8EMEA1XNp1GKD0laQ9CLv55psHyZX7cGgMUPoa6Y033og0gHHIIYck4tasAoVLc67MAw88sOV/xowZrZlImnVgr4+tZfU0mJLlihqgNAvI5pR1vuuuu6auy52Vlm66d9dddyXya5ZiU75DLvvVTXkmLUsIaNDR6LhZEmzJ3ShaZ5114vsyVma5utoYV64MwGXchhtuGOdBeZ08eXJbcHsw5Pzzz4/vayDVnimg8CNGjIjv33777bHskAYoRaD6Z4yDpozMUUbiO+64I05HmZNOBxvsOMoOgKJni+mhZ7YW1XMeStfcdifUM5Mv1yHqpPvclPbhjonf1kW1K3//+9/Nrfi43377xe2bnr+KulBlkBVfp8zccq2jP1G63fII8RxreND+GhJLjvTzS1jorNP6kZRS/Jer72XanyaeucyzU5ljnQaovHTsueee0QcffJBbAGW5+wR2qitNtKVu/kK2oz4WXIMABCAQkgAGqJA0kdVPBDBANVzadRigNHhgP/BuscUWQXLlPhwqjq9//euRZtPY8Zlzzex6+umnM+P2yTTh3eNBBx2UacwyEckYds4558R/Mkj5nL561gyIY445JtJyDJoFMmfOnEh7SGkW15prrpnI1y9/+ctIy/T1sttqq60SeRLj3//+972cJdJekMD+++/fKnvNTtS+IK47+uijE7phL9Hn+tXScXb9zGtj5s2b19pfTEvduX/2hsvuTCzVwTJu0003TaRr+vTpbcHtwVh3b7pnn302MXtT7bPaKLkxY8bEskMboCRfywhpZqc94Ggz1j5VZV2ngw12PHZ6NAiR59CzxYTQszxNqX4/lK7V9czky2GIOuk+N+UNACsd9qyRH//4x4mk6bnG1PNtttkmcS/vR6gyyIqnU2ZN9CdKt1sedrvtnhd9jjU8TLlIDu3vYir080Y7Fh87rR9JKcV/ufqe1/403Rdut9128fuf/S5on9sGd9WtOg1Q+ojy2GOPbf396U9/in77299G3/ve9+LnScWvjytvvPHGzEIoy90nrFNdaaItdfPntp3277LtqI8F1yAAAQiEJIABKiRNZPUTAQxQDZe2Xi5DL8GngUz7QS1vNkPRLPseDpdbbrnoK1/5SrTMMsu0vuDffvvto4MPPrjwjBpXptbD1sO6vnC0X7yVn7/85S/RJ598UjS5lf1pUGqNNdZIsLSX7KocwQAIePTRRxP5WWGFFeIB9gFIDlE2RGDBggWtF1zVo1/96lfeWO0v2eXPnv3jBlA9LNPGfOMb30j4t8PaX8TOnj074U/tSxnnxuPbR8keDBk2bFib+MMOOyyRhn333bfl5+67746v12GAMgmRcVD71qlNtTnpPG/2p5Fhjp0ONpjwOtrtcN4AKHq2hBx6toRF2tkDDzwQbbzxxql/+mAi7aOPkLpW1zOTL98h6qT73JQ3AKx0qE2x25M333wzTp49u1P7QhZ1IcsgK85OmTXRnyjdbnmEfI6l/W3XDPr5JJNO60en7a+r73ntT9N9oVmdI0kp+asb9oAS///6r/9KtMtaoi/NleXuk9OprjTRlrr5C9mO+lhwDQIQgEBIAhigQtJEVj8RwADVcGnXYYBSFlZaaaX4ofZb3/pWkFy5D4dFHvLzInZl2i8yGuTW4K89aCJjXdp+UnlxdXLfHZQfMmRIJ2K6Koz98r7TTjt1VdpITD0E7P2NvvOd70RaUtL90ww/u67lfQm/8sorx/5lqM1y2j/JXuLPxLPDDjtE9nJPGmw298zRt8G4Ly7NpLKXldRMLy2r5z
p7MMRn0JHR6mtf+1oiHRMnTozGjh0bX6vTAGXSO3Xq1EgzywwHc3zwwQeNl9xjp4MNtuAyA6Do2RJy6NkSFmlnt912W5t+Gz03x48++sgb9cCsdgAAIABJREFUPLSu1fHM5Et4iDqZ9dzki1PXJkyYkGA9atSo2Osee+zRuqev8GVUKupCl0FavJ0ya6I/UZqzyqPqcyztb1Ir6OeTPPSr0/rRafubpe/tqYuipvvCIu+m3WCAEquXX365tdep6e9U32Xs8bmy3H0yOtWVJtrSrPxVbUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQG2wwQbxYIMGYtO+Ii6TXffhsMhDfp58V6ZtgFJYLZO37rrrxnnRQ7qWUWjKiZtmeJmXA81K8A1qN5WeEPHoqzKTHwxQIYh2vwwZbk2ZFz2q3fDNIDK5ddsYvSBmuRdffDFhIFJdmj9/flsQe8BNadWSmkXcq6++msij9k7yOXsw5Mwzz/R5iTQ4a3PaaKONIn2taq41YYBSwmR822STTeJ4Ff9RRx3lTbPvYqeDDbYsuzzyZkChZ0vIoWdLWKSddToAKnmhdc1tz0I8M/nyHaJO5j03+eLVs5Rdl3ffffeWNy2PapZR1nJ6ZVzoMkiLuwozO8919CdKc155VHmOtdNP+xtF9PPttaTT+tFp+5un724Km+4Li7ybdosBSqzc5QAvvvhiF2Hrd1nuPiGd6opk2W1RHW1pXv6qtKM+FlyDAAQgEJIABqiQNJHVTwQwQDVc2nqg00t8aLfLLrskBi3z9mIqEr/7cFjkIT9PrivTNUAp/P3335/Ii77SzXsRz4u3zH3NejIDzzpqn6hedhigern0yqfdXrt92223jZ5//vnUP21Ob+u6XlbTnNvGTJo0Kc1rfF2bwBv566+/fnzdPlEajR8dNfOoiHO/8DdL57lhiwyGfPHFF9Fmm22WSMchhxwS/27KAKW062tYM0AsHlqyrKirMthg4rAHHbLaXfTMEFt8RM+SPHy/tNykZvSl/T3++OPeDz7q0DW3PQvxzOTLc4g6WeS5yRe39h4xbavqtQb0tOSeuaYZ30VdHWWQFncVZnX3J0pzkfLo9DmW9jepFfTzSR761Wn96LT9LaLvdiqb7guLvJt2kwHq8ssvj9tgtcXaJ8rnynL3yehUVySr7ra0SP46bUd9LLgGAQhAICQBDFAhaSKrnwhggGq4tOsyQGm/JDOooONll11WOWfuw2GRh/y8SF2ZPgOUZGimjp2fP/7xj3mig913lybLm+kRLOKaBGGAqglsl4o9+eST47qTt7+HPVCg+rbnnnum5uqII46I5crvJZdckurX3LBnGWivN5/TZs12XT/ggAN83tquXXHFFYlwo0ePbvOjC3Ye02ZAyZ8MavaSfnaamjRAKS3aC8fErxmZRV2VwQYTR9EBUPTMEFt8RM+SPEL+qkPX6nhm8uU5RJ0s+tzkxn/zzTfH7YjaExmcdtxxx9a1Nddc02vsc2WY33WUgZHtHqswq7s/UVqLlkcnz7G0v0ltoJ9P8tCvKvWjXVr+laL6biQ13RcWeTftJgPUXXfdlWiX//CHPxh0iWNZ7onA//lRRVfqbkuL5q+TdtTHgmsQgAAEQhLAABWSJrL6iQAGqIZLuy4DlGY5mAFLHTWAmedmzpzZWotasxR8s3zch8MiD/l5cboy0wxQzz77bCI/Wh5MS3p16rSMnvnLk7HeeuvFcWtvmF53GKB6vQSLp18zebTnk9qAvCX1jFR7lpLap0WLFplbieMLL7wQ1wvJ33zzzRP3fT80e8e0Sz/72c98XqJJT0+K/cjviiuumJoGW4D9daaW90tbPrDoYIhkH3zwwYm0mLSHMEBphoVmv2ofrDTGJn8ywpm4f/jDH5rLuccqgw1GeJEBUPTM0FpyRM+WsAh5Vpeu1fHM5Mt3iDpZ9LnJjV9tovoB05b8/ve/j3+fcsoprvfU33WVQVqEVZjV3Z8ozUXLo5PnWNrfpFbQzyd56FeV+tEuLf9KUX03kpruC4u8m3aTAWro0KFxm6y2+YQTTjDoEsey3BOB//Ojiq7U3ZYWzV8n7aiPBdcgAAEIhCSAASokTWT1EwEMUA2Xdl0GKGVDA5tmoEHHyZMnZ+ZOAxDG//Tp09v8vvnmm/F9+dNyXVWdK/Poo49OFanZGCZ9Ompmks99+OGHrT1ctI+L/jRQ7rqDDjqoJUuzCbKWlZo7d2607LLLxvH++te/dkX13O9VVlklzs/222/fc+knwcUJ2C/ZRZf6tI0dqmf//ve/UyOU/th1Mm/ZKlv30gxQimzDDTdMyE2bzWQSZg9wKD1ZbZPaQZPm008/3YjwHlX/V1pppdi/CZdlgHLbtGOOOcYre/z48bFc7cWQ5cwsBcW/9957Z3lN3Lv22mvjOBRWy5eUdUUGQNGzdqroWTuTEFfq1LXQz0y+/Iaok0XbGF/822yzTaJNMG2altQr6uosA18aqjKrsz9Ret3yCPEcazjQ/hoSydnL0lv6+cVsqtaPJYSLnbn6nvaMY6Q13Rced9xxJurU4913351oB++8885Uv7pRJs+u3zyD2K677ppIi2aq+pwrN4+7T0ZVXamzLXXzF7Id9bHgGgQgAIGQBDBAhaSJrH4igAGq4dKu0wClQVIzuKCjlr9K21h71qxZiQ1GfQYod5BXswOqOlfmoYcemipSaZLByM7T7bff3ubfHtiV3yOPPLLNjzFA6f5f//rXtvvmgh7w7fiK7kdjwnfbUV8u219Ab7TRRt2WRNITkIA92Fi0vp5//vkJnde+IWnu3nvvTfjV7JwFCxZ4vU+ZMiXhN8sApaWh7OXvVlhhhdbMKJ9gLYlpL+23xhprRB9//LHPa+vaI488EqdDS4rkuUsvvTT2b9qCLAOU26Yddthh3ijsdmrrrbeO9ODqc9p03d4DSuVT1I0cOTKR9ltuuaVo0NhfkQFQ9CzGFZ+gZzGKoCd16lroZyZfxkPUyaJtjC9+t31Xm1b04wQjr84yMHHYx6rM6uxPlE63PEI8x5r80/4uJkE/bzSi/Vi1frRLzL7i6nvaM46R0nRfqOWh85w+ajLPczpqZlCWK5Nn12+WAUrLQNvp+PrXvx4tXLjQmxRXbh53n5CqulJnW+rmL2Q76mPBNQhAAAIhCWCACkkTWf1EAANUw6VdpwFKS8ztt99+iYdbDRxoQNN2mrWw+uqrJ/z5DFD2ZtV6YP7e974Xff7557ao0ueuzM022yxThjZntR/Wv/GNb0Tul7v2wK785hmg5OeCCy5I5EX50ouBbfD63e9+l5m2XrjpbuAsY1TWDLBeyBNp9BO48cYbE3VFS0kWcfqq2K5jWs7ON4tQstTGqF7Y/rfYYotIxibbTZ06NRoyZEjCX5YBSmFPO+20hBFKy1/KAGwb0V9++eXopz/9aSx35ZVXjp544gk76rZz+wX8F7/4Rdt994LaAhlq7TxmGaDcNk08fM5tpzT7wl02UBztuHVu598n177mflnbyd55eQOg6JlNfMk5eraERaizunUt9DOTL98h6
mTRNsYX/yuvvJJoy9Su6av4oq7uMvClIwSzuvoTpdctjxDPsYYD7W8U0c8bbfAfQ9QPv2T/VVff055xTOim+0I9E+Y5d8+/v/3tb5lByuTZ9aul5dW3GKdnuCeffDKyP4RUO6z3sbTZTwrrys3jbuKzjyF0pa621M1fyHbUZsA5BCAAgToIYICqgyoy+4EABqiGS7lOA5TJiowr9qwXGVU0jV7LZ7mGJzPIahug9DWWNpzW4K65b476cnbMmDEmqsLHLJl77LFHpP0YfE57U2kg2sSvo/IwbNiw+Ksxd2DXZ4DSF3K2DJ1rE+7dd9+9tbSfDFv2/X333bfUwK8v7QN5bd68eS1G9hJoJn8/+MEPoksuuSRhgBvItBJ3NQLaT0hLy9nGU1PWu+yyS/Twww97I5g2bVqkl3BfONWxa665xhtOF1X/7DZG5zJ47bzzzi0Din3PpCXPACW548aNi1ZbbbVEXdRsIIVdf/31E3GutdZakfKQ5j766KNIM57sAT2lRXVbX15muccffzyRBp8Bqmyb5rZTSotmfak+apNlzeqyuanuzpgxIyuZ8b1HH3002muvvRJpNtwPPPDA6Lnnnov95p3YvGxjNXrmJ4eeLdXSu7J65qe5+GrTulb1mcmXlxB1smwb40uHrq2zzjpx27D88sunzlq1wzddBoo7BDM7DyH7E8nttDyKPMeadNP+0s8bXXCPoeuHK9/9XVbfB7Iv1JLtvmXnZ8+e3foo0X3O1YdW+vjq3XffTWS7TJ6z/Or5a7nllkvMZjfPZDrq4yLtbeRzWXKz3pdtWaF1JWRb2mn+yrSjNgvOIQABCNRBAANUHVSR2Q8EMEA1XMpNGKCUJQ0677PPPq2BB3tpK/sBWIYd7TWir7VksDBOD3lpYRR+k002MV4LH/NkyuCV5q6//vp48MSkXwO1ZvaAO7DrM0Ap/htuuKG1n4rPsCa5GuiWQarXl90TR3f5M8PNHPXypZcAXO8T0Au0KVffMW0PIfsrVV+4tddeOxOOjDJark8GFLUXtvFE+6ip/XnsscdadUryNRuziFN+tBSHXtDNfmxGtuLRV5L6Kl8DpFlOMyV9+dK14cOHZwVt3dt///3j8MqH68q2aTLmnHTSSS1jk90GKU92e/v9738/GjFiRDR//nw3ytTf2qsuLa+6/uc//zk1rHsjbQAUPXNJLf6Nni02QJXVMz/NxVcHQteqPDP58hKiTpZtY3zp0DV7aWHNki/iBqIMQjBz8xaqP5HcKuWR9xxr0k37u/jDDPp5oxFLjnXUjyXS28/K6vtA94VDhw5ty4SWkMt6NnrggQcSYcrkOc+viVd1Wu/NeibW3svaAzRrNZE8uVnvyyYzdehKqLa0Sv6KtqOGA0cIQAACdRHAAFUXWeQOdgIYoBou4aYMUHa2tEeLpv9PnDixNdNIX9Ub443tr5/OZXDT7AkNOmkJLzHBINNPGkBeQxKQsUTL7k2aNCl65513Iu09ZpwMRapjeQYj498+6uFOs5VefPHF1tKbnciw5ZU5V3ugvOgL2jrc3LlzW22Q2mZ9uavBm25og9IGQOtgUFYmelaWWBR1q56Vz0lzIQbbM5MGDvWhjv7sWY3NEe2OmAayPylKgPb3tY6eFYrydf31az/vcuiG33WXRTfkcbCkoRfa0sHCmnxAAALdSQADVHeWC6nqfgIYoBouo4EwQDWcRaKDAAQgAIEeJdDNA6A9ipRkQwACEChEgPa3ECY8QQACEIAABCAAgQEjgAFqwNATcY8TwADVcAFigGoYONFBAAIQgEBhAgyAFkaFRwhAAAJBCdD+BsWJMAhAAAIQgAAEIBCcAAao4EgR2CcEMEA1XNAYoBoGTnQQgAAEIFCYAAOghVHhEQIQgEBQArS/QXEiDAIQgAAEIAABCAQngAEqOFIE9gkBDFANFzQGqIaBEx0EIAABCBQmwABoYVR4hAAEIBCUAO1vUJwIgwAEIAABCEAAAsEJYIAKjhSBfUIAA1TDBY0BqmHgRAcBCEAAAoUJMABaGBUeIQABCAQlQPsbFCfCIAABCEAAAhCAQHACGKCCI0VgnxDAANVwQWOAahg40UEAAhCAQGECDIAWRoVHCEAAAkEJ0P4GxYkwCEAAAhCAAAQgEJwABqjgSBHYJwQwQDVc0BigGgZOdBCAAAQgUJjAddddF40aNar19/HHHxcOh0cIQAACEKhGgPa3Gj9CQwACEIAABCAAgboJYICqmzDyBysBDFANlywGqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/Nttt1209957R3feeSd/MEAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdKDLdeD222+Pttpqq+jwww9veDSZ6CDQ2wQwQDVcfpoBNWTIkGippZbiDwboADqADqAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA50uQ4ss8wyrTLabbfdGh5NJjoI9DYBDFANl9+OO+4Y7bPPPtE111zDHwzQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB3och24+uqro2222Sb6y1/+0vBoMtFBoLcJYIBquPzYA6ph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAfaAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/BigGgZOdBCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEKhDAAFUBHkH7mgAGqIaLHwNUw8CJDgIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIBABQIYoCrAI2hfE8AA1XDxY4BqGDjRQQACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCoQAADVAV4BO1rAhigGi5+DFANAyc6CEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIVCGCAqgCPoH1NAANUw8WPAaph4EQHAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQKACAQxQFeARtK8JYIBquPgxQDUMnOggAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCFQggAGqAjyC9jUBDFANFz8GqIaBEx0EIAABCEAAAhCAAAQgAAEIQAACEIAABCAAgQoEMEBVgEfQviaAAarh4scA1TBwooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgUIEABqgK8Aja1wQwQDVc/ANlgFq0aFH06quvRlOmTGkd33///ejLL79sOPe9Ed0X
X3wRvfbaa9HHH3/cGwkmlRAoSODll1+OXnjhhdbf/PnzC4YavN7q5KE2V+3ISy+9FL355puReIdocz/88MOWvLfffjtSWzUY3WeffRapbKZNmxYpn59++ulgzCZ5ggAEIAABCEAAAhCAAAQgAIEeIoABqocKi6R2FQEMUA0XR5MGKA18HnjggdG3vvWtaOmll46WWmqpxJ/Sst1220V///vfIw34GacBv9VWWy33b80114w22GCDaN99940uvvji6K233jIi2o5lZe65557RiBEjWgO4bcJquKCBzmOPPTbaZJNNoq9+9astTr/5zW9qiCm8yCFDhqSW1VZbbeWNUOWVVsaXXnqpNwwXe5vArFmzEu3AmWeeWThDakt8+rLFFltEegBLc/fff783nGQ98MADrWAy9H77299O9eeLN+3aGmuskZaUtutVeLQJ+8+FV155JTr00ENb+fl//+//Jdpbtb/LLbdc9JOf/CQ6/PDDo4ceeihNTNv18ePHRzvvvHO00kortWQus8wyreNXvvKVaJ111onOOuusaO7cuW3h7At1lqHiyWvjTz31VDs5bee33nprdMghh7Ta4GWXXTbBTv3XD3/4w+h3v/tdNG7cuDjsQOlOnABOIAABCEAAAhCAAAQgAAEIQKBvCGCA6puiJqOBCWCACgw0T1wTBigNyu21116JwWYNfMpYtMoqqyQG9oxRavr06XHSFf5vf/tba7DU3C9ylOHmH//4h/er/E5lahD3oIMOShjI4oQGONHMhN13393LpMxgdoCkdCzihBNOiDQQ7ZbRd77zneiyyy7zyn322Wejrbfeui3MD37wg2jS05O8YbjY2wRkcLJ1ZPPNNy+coQ8++CD685//HBnDhy3nvPPOS5Wj+vWHP/whEa+MCTKM656cZvTY8qqcK31FXRUebhya7bTffvsl2lwZUdZff/1IhnrfBwCHHXaYK6bt9+zZs1sGK5uJ2sT11lsvNkaZe2p/r7/++jYZ5kKdZag4stp4fejw2GOPmaQkjprdJHYmHzp+7WtfaxmcfPomPTRuoHTHxM8RAhCAAAQgAAEIQAACEIAABPqHAAao/ilrchqWAAaosDxzpdVtgNIgo2a9mME8xXfBBRckZiloCa611lor9iO/tgHKZOKqq65K+Dn66KOjhQsXtmTpa/cnnniiNZDsful/+umnGxFtR1fmkUceGS1YsKC1xNLUqVOjW265JTrqqKMiV6ZmFYR211xzTbTCCivEeZSRTgOh9957b6S0KF294q677ro4H6bsr7766szka9aX8aujZmZ88sknmWG42ZsEtPTb97///UR5yyjyzjvvlMrQueeem5AhvVEbo/Ygyx1wwAFxuCOOOCLh1TUiyLjy6KOPtuqflpi78cYb47CK78EHH2wZuRXuvvvua802Mnpc1AAViocyoqX1ZGAxaVh++eWjs88+u9VWmox+9NFH0R//+MfYj/zmGaBmzJiRKDPNZL3jjjsitfHG6beum7h1PP/8881t77GOMrQjctv4H//4x96PEhRGZbjRRhvF6dcsJ83UMu7zzz9vzYS1+4MsA1QTumPSxhECEIAABCAAAQhAAAIQgAAE+osABqj+Km9yG44ABqhwLAtJqtsAZQ+EajB24sSJ3nTNmTMnMRvKZ4DSHhz2wOaJJ57olSWDje1Pg4Vps2hcmZox5XMyRNmDjpKveEI5DRDbad57771bg6Gh5A+EHM16s/N00kknZSbj4Ycfjv1rSTPpBG5wEpBBx9YNcz5q1KhSGdYsFhPWPqr+ZLlLLrkkDjd69OiEV9sApZl8tgFCHm+66aY4rOJ0l66z9bioASoUD6Xvv//7v+P0qc3ScnlpTkvvGW5ZBigZXmS4MX5XXHHFNi4mDu3tp/vGrwyLTz/9tLnddqyjDO1I3DZey+qlObsd1qwn7U3oc1qO1eQvzQDVlO740sc1CEAAAhCAAAQgAAEIQAACEBj8BDBADf4yJof1EMAAVQ/XVKl1GqD++c9/xoN0GqzLWhpLCTz55JNj/z4DlPZIMYN+OqYZoCTL/bpfA4s+58pMM0AprGZc2fFrr6oQzh7MlPyLLroohNgBlzFz5sx4/yrlS8stalmsNLfPPvvEfGXwww1eAqZ+fvOb32zttWTq1a677loq05MnT451xsgwR9cwZAu+9tpr43D33HOPfSuxBJ900nV5Bij533bbbVvyixqgQvG4++6743yJwznnnOMmP/FbM8WURvnNMkDZBjv51UyvLOcy+ulPf5rqvY4ytCNz23gt55rmNOPJ6I/2/UtzmkGmvlN+0wxQTelOWhq5DgEIQAACEIAABCAAAQhAAAKDmwAGqMFdvuSuPgIYoOpj65VclwFKX8xrzx8zmLfyyitnGh+UuHfffTfaZpttop///OfRW2+91ZZeLc9l5OmYZYDSUlC2X99goCJwZWYZoO68886EzJ122qktjWUvaGaAGQBWerXc32Byxx57bIJZmhFSBkczw0yz5nCDl4CMkGbw/phjjmntqWbqqvYNyjJSulQ0O8mE1awnc67jj370o8RSn3bYG264IfbrGlPsGVC33nqrHax17hpXfIauiy++uCW/iAEqFA8tD2gvZbrSSisVWrZT7ajaXC0t6HPaE0myDNvvfve7Pm+Ja3oIXn311eMwCvv4448n/JgfdZShka1j0TZe/Mq0xVdccUXrQ4ebb745jq5p3Ykj5gQCEIAABCAAAQhAAAIQgAAE+o4ABqi+K3IyHIgABqhAIIuKqcsApaX2zICljpo9VNW5A4lZBih7UFPxb7HFFt7oXZlZBqjXXnstkacNN9zQK7PoRRnp1llnnVjmpptuGunaYHLaH8YevNayVpo94Lo99tijxUFGKHfJM9cvv3ubgL2H0nPPPReNGTMmrgOqq3fddVfhDNr1XPuOyYBttztp+w9lGaC0r9xZZ53V+tN+Sq4rYoCaPXt2K/ywYcPc4G2/Q/HQHnh23rNm+rQlIuPCuHHjEnJlVC7ijj/++ES4U045xRusjjK0IyraxssQaPOTLpV1TetO2fThHwIQgAAEIAABCEAAAhCAAAQGDwEMUIOnLMlJswQwQDXLuzUTQV+/h3Yy5NiDee4yV53E5w4kZhmgnnrqqUT8IWZAabDcztOvf/3rTrIRh9Eyc7a8EIxi4V10cu655ybyeeqppyZSZxsrjzzyyMQ9fgw+AmZfuHXXXbeVuQULFkTLLrtsrCNajq6os40XV155ZTRlypR4Jp3qlgzsWmbOdVkGKNev+7uIAcoNk/U7FA/X4BOqPTniiCPishHTJ598Mis78T3Fb7dvW265ZXzPPqm7DN1+I+sjg/XWWy9Os4zh999/v53UyuehdadyghAAAQhAAAIQgAAEIAABCEAAAj1LAANUzxYdCR9gAhigGi6AumZADRkyJB7I0yDk3LlzK+fMHUjMMkBpHyV78HP48OHe+F2ZWYOT119/fUJmWvwyfi2//PLxX9qX/zajDTbYIPryyy+9aez1i1rCa4011ojZSeeMPmjZq4033rh1b9VVV23
tv9Pr+SX96QRef/31aOmll26V9xlnnBF73HHHHWP9WG211SLpRRFnGy+0T5GcazDxGZ+7xQAVksdGG20UMwzV5oqnbZSR3Pfff79I0bSWVLXbYC2v6HN1l2GZNv7QQw9NMFRblbZ0oC8vedcwQOUR4j4EIAABCEAAAhCAAAQgAAEIFCWAAaooKfxBIEkAA1SSR+2/6jJArbjiivFA3iqrrBIkH+5AYpoBSJFp0Nke/JwwYYI3Da7MNAOUBsR32GGHWKYGU337VCmS8ePHx/6UBt+snmnTpiX8nHPOOa30KT1jx46NRo4cGR1++OGR9si59NJLvbM4vBnq0otXX311Ir9meTAtm2bKadSoUV2aepIVisBpp50Wl7eWtDQ3zq9uAAAgAElEQVTONRhrObkizjZeXHjhha0g2odHRiyjVzq6+zRp3x5z390DKi/ekEaEkDzspS5lzA3l1EcYVpoVVNQ4qPjtfkAyfPt71V2GRdt4pVcGQbEz+dVRedbefEUNb1ncQ+pOVjzcgwAEIAABCEAAAhCAAAQgAIHBTwAD1OAvY3JYDwEMUPVwTZVahwHqs88+Swzgrb322qnxl7nhDiSmGaA0EK1BQzOIuO2226bureTKTDNA2QPFkjt06NDUpBcxQMmoZNKno/a9+b//+7/oK1/5SuK68aPrWsquV532trJnUsiAJyPc6quv3srvJptsUmpgu1c59HO6NcNvrbXWapW3uxybu7/acccdVwiVbbyQ0da4a6+9NlGP1l9//UgPZsbZy18OlAEqJI+62txFixYlOJY1bH33u99NhJ81a5YpgvhYdxkWbeNNgh577LHEkpCmDVZfeeaZZ3qNaCZs3hEDVB4h7kMAAhCAAAQgAAEIQAACEIBAUQIYoIqSwh8EkgQwQCV51P6rDgOU9lwxg3Y6br755kHy4Q4kGgOUvsh/4403onHjxkWHHHJIIm7NhFC4NOfKPPDAA1v+Z8yY0drrRDMlNFhu8qOl9c4+++w0ca3rRQxQe+65ZyxTspdZZpnWbxlmtCfMzjvvnDDYmPhHjx6dGXc335SRzeRDR7MUm85DLnPVzQz6OW2ahWTK3yyXZ/NYZ5114vsyVhZxtvHi/PPPj4PIuPOTn/wklqd4R4wYEd+//fbb43sDZYAKyaOuNteV++1vfztmWORkww03jDmrDCZPntwWrO4ydNv4tI8M7ISpPTLGUqOz5iij+R133GF7L3yOAaowKjxCAAIQgAAEIAABCEAAAhCAQA4BDFA5gLgNgRQCGKBSwNR1uQ4DlIw3ZrBOxy222CJI8t2BRMn++te/HsloY8dnzn/+859HTz/9dGbcPpkmvHs86KCDMo1ZJiIZw7SknvmTQcp1MsrZ8pUHzXCaP39+wqsGypdbbrnY77LLLhs9//zzCT+99GOrrbaK82Ly//vf/76XskBaOySw//77t8pesxPfe++9NilHH310QjfsJfraPP/ngm28cPd5e/bZZxMzIdXWqb7LjRkzJo5roAxQIXlo6ThTn3TMa3PnzZvX2m9NyxW6fwsXLoxxuzPT1lxzzfhekZNNN900ka7p06e3Bau7DN02vogBSonUcoGa6Sq9sdmac+01VtZhgCpLDP8QgAAEIAABCEAAAhCAAAQgkEYAA1QaGa5DIJsABqhsPsHv1mGA0sCdGaTTsehshrzMuQOJki3jjJan0wwifbG+/fbbRwcffHDhGTWuzG9+85utGU/6ct8dePzLX/4SffLJJ3nJLHTf/rpeA/IvvPBCarjzzjsvwVPLQPWqe/TRRxN5WWGFFWKjQK/miXTnE1iwYEGk2YOqs7/61a+8AewZQfJnz1jyBoiiyDZeDBs2rM3bYYcdltC3fffdt+Xn7rvvjq8PhAEqNA+1S2Jm/vLa3G984xuxXxPGHO2ZZLNnz074U3tbxrnx+PZRqrsM3Ta+qAHK5FPGUu3jZ2apGk465s2GNTLMEQOUIcERAhCAAAQgAAEIQAACEIAABKoSwABVlSDh+5UABqiGS74OA5SysNJKK8UDl9/61reC5ModSDRL8FUR7sq0Byc1qKsBa3vAUbOqtJ9RVWcbtzbeeONMcR999FEkQ41Jx29+85tM/91+0x6U3mmnnbo9uaQvAAF7T6bvfOc70a677tr298tf/jLWcen6NttskxuzbbzwGQNk8Pja176WkDtx4sRo7Nix8bWBMEDVwWPllVeO87TGGmtkstMeWPaSh6Zt2WGHHSLNYDXu008/jWUaP/rAoIjTTCp7mU0Z2rU0ouvqLsOsNt5NS9bvqVOntmaWGQ7m+OCDD2YFS9zDAJXAwQ8IQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBou/roMUBtssEE8cKmBRw1kVnXuQGLdBiil97PPPovWXXfdOC8adNTSelWdbYTZdtttc8VtsskmcRo0e6qXnWaZmcFbDFC9XJLF0y7DrSnzoke1G74ZM3astvEibWbgqFGjEnFvtNFG0QMPPBBfGwgDVB083DY3b7bmiy++mDAQaYaPuwSoWNvGcpWdlhgt4l599dWYscJp7ySfq7sM3X7D/sjAl56sazK+2W2x8nXUUUdlBUncwwCVwMEPCEAAAhCAAAQgAAEIQAACEKhAAANUBXgE7WsCGKAaLv66DFC77LJLYvAxby+mItl2BxKbMEApXffff38iL1pK7O233y6S5FQ/2qPFDMRrQDzP7bbbbrF/zSrwzSTIk9Et9zFAdUtJNJMOex8hGVu1h1na39///vdYz1U/NGCf5YoYL7744otos802S8g95JBD4t9NG6Dq4uG2uZMmTcpC17r37W9/O+aw/vrre/2rzExbpaNmjxVxEyZMSIQzyx+6YesuQ7ffqGKAUtq1LKG972DeDFY7vxigbBqcQwACEIAABCAAAQhAAAIQgEAVAhigqtAjbD8TwADVcOnXZYDSfkn2oOVll11WOWfuQGJTBiglXDN17Pz88Y9/rJSfvfbaK5an2VB5TvvmmPi1pFgvOwxQvVx65dN+8sknx7p73XXXZQqwjRHS9z333LOw/7QZUBIgY4y9HJypSzo2bYCqi8cRRxwRc1a+Lrnkkkx2umnPmtpyyy29/o899tiE3AMOOMDrz714xRVXJMKNHj3a9dL6bZd5HWXo9htVDVBK9FZbbRXnTXsQFnUYoIqSwh8EIAABCEAAAhCAAAQgAAEI5BHAAJVHiPsQ8BPAAOXnUtvVugxQmuFgD/JqwC7PzZw5M1p11VUjfZU/Z86cNu/uQGKTBqhnn302kR8tD6YlrDp19qCuZOUtl/XjH/84jn/IkCGdRtsV4TBAdUUxNJIIzT7Snk9qC4osqadE2bNy1D4tWrQoNa1FjRcScPDBB8d1yG6bmjRA1cnjhRdeSORv8803T+Vmbmj2jmHxs5/9zFxOHCc9PSn2I78rrrhiZpmYwPbMKS3vl7acYt1l6PYbaQYozdLV0ojaBytL55Q/GeEMtx/+8Icmy7lHDFC5iPAAAQhAAAIQgAAEIAABCEAAAgUJYIAqCApvEHAIYIBygNT9sy
4DlNKtgTwzSKfj5MmTM7NzyimnxP6nT5/e5vfNN9+M70ueluuq6lyZRx99dKpIzcaw8/PLX/7S6/fDDz+MtO+M+dPAsOsefvjhhKwbb7zR9RL/fv3111uD9ybu4447Lr7XiyerrLJKnPftt9++F7NAmgsSuOeee+Ky1uB+EWcP7kvn//3vf6cGU5ti6sXpp5+e6k835s6dG6200kqxfxOurAHq2muvTcjQEp1FXd08VJ9MvnTMW/rUrotpBijlbcMNN0zITZvNZDjYRiWlI6utrrsM3Tb+mGOOMclMHMePHx/n8bbbbkvcc3/suOOOsd+9997bvZ36u4rupArlBgQgAAEIQAACEIAABCAAAQj0JQEMUH1Z7GQ6AAEMUAEglhFRpwFKA7v2YKiWe/r000+9yZs1a1Zis3ufAcod1NSMhqrOlXnooYemilSatNySnafbb7+9zb89kCm/Rx55ZJsfXbBnCGj5q88//9zrT+FNnJpF8vLLL3v99cJFzQBRHkx+iux/1Qv5Io1+Attss01c1kXr6/nnnx+HkZ789re/9QuPouiRRx6J/WpWYZ679NJLY/9GB8saoEaOHJmQccstt+RFG9+vm8e9996bSJtm5yxYsCCO3z6ZMmVKwm+WAeqhhx5KLGG4wgorRJoZ5XOazWkv7bfGGmtEH3/8sc9r61rdZei28Ycddpg3LXa7vfXWW0d6kPe5V199NbEHlPS1qKuiO0XjwB8EIAABCEAAAhCAAAQgAAEI9AcBDFD9Uc7kMjwBDFDhmWZKrNMA9eWXX0b77bdfYpBTA7AawLOdvtJfffXVE/58BijtH2MGjXX83ve+l2q0seVnnbsyN9tssyzv0Z/+9KdEGrR/02uvvZYIYw9kKp1pBigN6tr52WOPPRJLP8lYc/zxxyf8nHrqqYm4eu3HhAkTEvmRMertt9/utWyQ3gIENKvP1u/11luvQKioNVvGDqfl23yzCCXMHtD/xS9+kStfRl4ZPW35ZQ1Qu+66ayJ80f3gmuChNvd3v/tdIn1bbLFFJGOT7aZOnRppKU+bQ5YBSmFPO+20hBFKe9GNHTs28VGBjOM//elPY7krr7xy9MQTT9hRt53XXYZuGy8ePue225rB6y4bKI62/ug87aMKXxyd6o5PFtcgAAEIQAACEIAABCAAAQhAoL8JYIDq7/In950TwADVObuOQtZpgDIJuuCCCxKzXjSLSEs6abko1/BkBkRtA9TChQujk08+OdJgprlvjlrWa8yYMSaqwscsmTIEaQ8rn9PeVBp4NfHrqDwMGzYskkw5dyAzzQAlv5pBpT1VjDzNLNhuu+1afyobc33ppZduDcynzZLypbWbrs2bN6/FyF7yy+TtBz/4QXTJJZdUNiZ2U377OS3aP0fL4bmzBVXeu+yyS6TlJ31u2rRp0d/+9jdvONWxa665Jg720UcfRZrxZNcRyd93330jzXjJco8//nhcrxSmqAHq0Ucfjfbaa69EWKPDBx54YPTcc895o22Chxux2iN7pqHOZQDceeedWwYU+57JQ54BSnGMGzcuWm211RIMvvrVr0YKu/766yfiXGuttSKVaZqruwzLtvFuuy0uanfVPu20006tWV02N7VlM2bMSMte4nqnupMQwg8IQAACEIAABCAAAQhAAAIQgIBFAAOUBYNTCJQggAGqBKwQXpswQCmdGnTeZ599onXWWSfxFb0Z/NRRhh3traFN4mWwME5GHw0E2n7t80022cR4LXzMkymDV5q7/vrr29KigUnztbw7kJllgFIcb7zxRiSjl5aqsvOlcw3uipuWqepl5y735eZTs1yMAa+X80nao+jdd99t02O7vNP2zLFnwtj+zfnaa68d49WsQ3PdPQ4fPjz2l3ay//77x+Efe+yxNG+J67/+9a/jMG6c+v3nP/854d/8aIKHics+yrCm5QtlQFH7aRtPll122Va7orzvvvvurXxpdmoRp/xoqVLN/pEc5d3IVjyaRarZXjK8Zbm6y7BsG6+ZmCeddFLL2GR/7KA82f3P97///WjEiBHR/Pnzs7KXuNep7iSE8AMCEIAABCAAAQhAAAIQgAAEIGARwABlweAUAiUIYIAqASuE16YMUHZatSfJk08+GU2cOLE100hfkRvjje2vH88/+OCDSIPCWrZKhqm8Qdx+ZESeIVCVgIyd77zzTjR79uyqonoivIwlWnZv0qRJrXxreU/j1MaovemkrdHDrmacvfjii62lSDuRYdJR9lh3Gc6dO7c1g0t91eTJk1v5w0hetpTwDwEIQAACEIAABCAAAQhAAAJ1EcAAVRdZ5A52AhigGi7hgTBANZxFooMABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgMGgIYIAaNEVJRhomgAGqYeAYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPw77bRTtOeee0ZHHnkkfzBAB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9ABdAAdQAfQAXSgy3XgqKOOirbddtvoiCOOaHg0megg0NsEMEA1XH6aATVkyJBoqaWW4g8G6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOdLkOLLPMMq0y2m233RoeTSY6CPQ2AQxQDZffj370o+h//ud/otdee40/GKAD6AA6gA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOtDlOjBt2rRorbXWivbff/+GR5OJDgK9TQADVMPlxx5QDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQiwB1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAA
QhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPwYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAgUJfPnll9GsWbNaf2+//XbBUHiDAAQgAAEIhCFAPxSGI1IgAAEIQAACdRDAAFUHVWT2AwEMUA2X8kAYoD7++OPorbfeil588cXo+eefbx1fe+216P3334/0kuO6zz//PFKYvL9PP/00+uKLL9zg3t9FZS5cuNCbJq/QEhfVSeTlJ+t+iagGxGsaX5VRlsvionu43iGQpgPS688++6xURrL0oowgtQ8ffvhhNGPGjFYbNG3atGj27NmR6nlRJx3Oqptl2iE3zjzZZdtHu75llYfyU4aBm273t9Kptv25555r/c2dO9f14v2tNGSxLXPPx8oXaVWdyONq0txpXyLd/8c//hH/pRmhstIxEPWtKleVVV59oK5FreeTTuqa+DZd36rqRJaOm3qmY6d1zdc+dHqt0zYwLz7lr8zzcx1lnFcOitM4u1x852nPdVl135Zv4vEdOy2DOpj50meuVa0XkpNXJoa98la0bzTp07FIP5SVhl7tg5T3LF0UV/qhzvuhpuuayrNqfcvSc1PPdOy0rtn1Lus8Lx2K33Zp/qW/WS7r/ctuv9PkGyZuehRnlmyFs+XLf5k48vyadJUtqzy5bj71246rynmRtruqfhfhbPKgvBVJU5Z+cS8cAdWXpZZaKtptt93CCUUSBPqAAAaohgu5KQOUOuxnnnkmGjFiRDyQZg+qmfNTTz01uuqqq1oP9AaFXrbN/bzjcccdF51//vnRP//5z2jq1KlGRNuxrMxzzz03uuWWW6LJkycH6Wwff/zxwnly83ziiSe25afbLrz66qve/J1yyinR/PnzU5M76elJ3nBi8PTTT6eG40b3EUjTAZXlzTffXCrB8u/WA/0+4YQTCsn55JNPogcffDAaOnRoLEdhjUy1G9dff30kQ3iWk5H8pJNOisOZ8L7jaaedFl199dXRfffd1xowzJJr7o0aNSpTtvLhOrVLvvh17cYbb4y957V5ajNDOXG001RE9gcffBAdf/zxiXC2jLLneS/VoXQij6udbulZ2b5ELxS2jDQDVLfUt1BcqWvFamMndU2Sm6xvoXSi7rpWjHgxX52Wi096p8/PdZVxXjnceuutcTbOOOOMRPtlt2U6f+SRR2K/5kTpzupnx4wZY7xmHjspg7qY+RIaql5Idl6Z2Nzr6ocGWx8krvRDPs1tv9btdU0pDlXf6q5r7XT9V/LSYbfDkpBWP0O9l+elx/ceoPd6u21yzydNmpTIfF4cdp7z/LpxqV0cNmxYNHr06Myxnjy5dhroTxLFx4+aCWCAqhkw4gctAQxQDRdtEwaoOXPmtDp1u7PX4K8MRRqkta+b8zfeeCMmoS8t/vWvf3U0OHnZZZe1HjpjYf85qSJTRrR58+a5Ikv9njBhgjffJv9Zx+HDh5eKayA8y8h0++23R3qgc/OiAfM0J13RoLkbRgYI3cP1DoEsHZARNc9AYHKqumobi4xuyJgko1Kee/311yMZtk04Hc2Lhl687Os610uSviDzuU7bDcV3xx13tL6C88k112Tg9hnplc577rmn9fWf8WuOzz77bKQ2wc3HyJEjo5deesl4a8Wd1o5eeumlrZfT2HPFE72A2ek5/fTTU5maqDRLyg5T9TxLv7pBJ5S/In1JUQNUN9S3buBKXcuva6pzTdW3btCJonXNtEUhjp20gb54qzw/11XGWf2g+pJXXnklzor6aLf/NW27+tp333039mtOJF/3fEYotZkaSC3iOimDupi56Q1ZLyQ7q0wM77RjqH5osPVBVbjSD+X3Q03VNZVjyPpWd11z24q031npcNthyciqnyHey/PS42u333vvvei6667zPvvro0Ddt11eHHbfk+U3rS20r1944YXRggUL7Ohb51lyXe5N6XhI/VYms/JoM/KdF+lP2qByIQgBDFBBMCKkDwlggGq40Os2QOlrbXvWgQaeZXxRI2mcjE3uV5K2Acr404us3dnddtttLTn6OlQGIS2rJUOFHvxtf9dee23qrCVXpr58WbRoUWuJML0YawmpO++8MyFPsi+++OJEHkwaix7vvffeNpl2mrPOH3vssaLRDLi/tHyqrLKcyswwKDtbJksu95onkKYDRWe0TZw4MdYFoxM65umQcqpZkGpzTDgZvLVclR6u5bR0gGZm2m2U/OrFJ2tZgYceeiiWKf9qi9QO6c8s8XfXXXe1GdgVv+LLcvpK006z5Oex0kuSPVgng4+7DISJ023zzj777FzjkAlb5Kh47bQY9r423Zbnvqidc8450fTp0+NlTFw9eOGFF1plpLKUoU15NnHpmGaAqksnXK6h+pKiBijDcqDqW11cqWumZNuPndY1SWqivtWlE3XVtXbCnV2pUi52jFWfn+suY7cczjrrLO9HEhoUtNtmnesDpTz36KOPJsLpQ4u0dt2V1WkZ1M1M6ayrXki2WyYD0Q8Ntj5IXOmH3Bq25Hc31zWlsq76VlddW0K22JmbjrR22EhLq59571RF38vd9OgdQ+9GWU7vXHYfcc0112R5b2vnsvLspkfGNr1n6dlafxo/0ruGb/xIBqU058r1pYH+JI0e1+sgIH1mCb46yCJzsBPAANVwCddpgNKa3+qQzUOFDEP2F/l2VjVN2R509Q1WulP8NaPA5zTAa+I0R80S8DlXpm+KuML5lszTgGinTi/fJm16wJFRyeyX4h4ffvjh2K9mQuhhv1ecytvk0z5qdkbWA+kDDzwQh9MgBK53CaTpwJVXXpmbKRmBfLN7pEtZ+iPBMsrYX16rfZk5c6Y3Ti2pYLc/kq/lINOc226ktUUaLNOLlK37agfffPPNNNGt6xdccEEijNrHPHfRRRfFYbJe3ty028v05cVR5H7aMppjx47NDO6+qOmLPttpqSabo2aL2W7KlCmJ+76ByiZ1IlRfUtYANRD1rUmu1LUlWt9pXZOEuutbkzoRqq4tIVvtrEq5mJhDPD/XXcZuX6JBxDTnLi97//33p3mNr8+aNSvRpmsguajrtAzqZlZnvRAbt0xC1Y0y/dBg64N8XOmHltTEbq1rSmGd9a2uuraEbLEzNx1Z7bAkptXPUO/lbnpuuumm3Iy4Hxvkja+4cWTl2fWbVneVSNfQrHcO9UM+58r1pYH+xEeOa3URwABVF1nkDnYCGKAaLuE6DVDjxo1LvDzqq5ssZ8808hmg3HV3sx4i3CndaWvGuzLT
XtaUbnc5D31Z2KnTA5kebLQfSN5Auv21kpbh6iWXtua08u5b+9/kzX4I1MsNrncJpOmADDF5S1nqizzb6GDOi+z9ZBt5FU6G3SznGjg0KzPN2Ou2G1ltkZbzM/XdpF9LO6Qt86c0yiht/OpoZmxlpf/yyy+Pw2S98Llpz2rzsuJLu6dlT5VmdyaqllzNcvaLmgYrXeeWj2uAkn/FYbj5DFBN6kQW1zJ9SZmBPzEYiPrWJFfq2pKa0Wldk4S661uTOhGqri0hW+2sSrmYmEM8P9ddxm5fonYtzbmD1L7BOjesHUYfomTNSnbDdloGdTOrs16IgVsmoepGmX5osPVBPq70Q0tqXLfWNaWwzvpWV11bQrbYmZuOrHZYEtPqp56dQ7yXu+nJaoNMDrXXk3l21/HJJ580t7xHN46sPLt+s+quItPqC3Za0sYgXLm+NNCfeIuPizURwABVE1jEDnoCGKAaLuK6DFAfffRRYt8WzS7Ql0hZTmv9ahBVf7514bUcif1QIINVmtPDi+03bRq1KzPrQempp55KyMwbUE1Lm65fddVVLVma6ZPlNE3c7FOjAd0iMyGy5DV9z/5CyB4cVtkoX9ITn7OX20qbveYLx7XuI2DrgDuzR7P7spyWSjD12H4p0BJvWU4bRzhV8/gAACAASURBVNsGEM2EyjP0ythk6pqJM0333HYjqy1SOmVAcve7y1pKsxMDlGaUmXRnLVtZNu1ZnN179suWXsY0yGjSpKPupzk7rGacuq6IAUpf1Zv4XANU0zoRqi8pM/AnZk3Xt6a5UtcW1wy7vpSta5Jghw9d35rWiVB1zW1zOvltc+2kXBRnqOdnOy2hy1jpdPuSrHJQH2j3yepr8/rkG264IW7P854V7LKy8122DOywoZnVXS/KlkmZd5oy/dBg64N8XOmHFtc4u750U11T6uqub2XavzJ1bTHZ4v/LpENS7fpZx3u5m568uqI02R8b6BlevLKcG0dW3+P6zUuP+96S1ve4cn1psOsH/UlWiXIvBAEMUCEoIqMfCWCAarjU6zJAac8SMxCoox5Mqzq3s896iHC/8NHgtc+5Mn0PECacloSy86QZEp067TMl41LeC7i9xnDopbI6TXuZcPaDrvKiKf42w7SNTzFAlaHc3X5tHdBX3fb+QNKHNKcBK7Msnl6S7FlEeQYoPejbeqY9moo492vJNP10242stsjEa8/qU9pkkEr7ortXDVD33XdfzF37frgv3VlfV6o9FCP9+QzTRQxQGnAwMty2tWmdCNWXlBn4k641Xd+a5kpdW9yiVKlrklBnfWtaJ0LVNdNWVzlWLRfFHer5uc4yVjrdfjCrHOTf7dey9hzRBwSa6ay+Uoar+fPnFy6WKmVQJ7O660XZMinzTlOmHxpsfZCPK/3Q4urYrXVNqau7vpVp/8rUtcVki/8vkw5JtetnHe/lbnqK1JVuMkBdffXV8XuM+h8tWehzbj59/R/9iY8c1+oigAGqLrLIHewEMEA1XMJ1GaDswWJ14GlTmMtk1+3ssx5qtEG94jV/viWdFLcr0/cAYdLorpucNqtKS2tpppL5S1vGy8hNO2oPgNNPPz3Og7snSlq4brpuP+hOmDAh8eBrysY3CIEBqptKsVpabB0YP35821KWqoM+Z68JrnP7q7Q8A5S779LLL7/si6LtmjvwN3ToUK+RyG03stoiE8ns2bPjumx0X9d8zh2oK7IE30DPgFK7p82GlTdx028tsWjyqqNmt3bqihigsmQ3rRMh+hLlp8zAn/w3Xd+a5kpdi1p1q866Jj2qUt+a1olQdS2r/ShyL1QbWMfzsy/9VcpY8tx+MKsc5N/ez1T9gT5ISXN2Xyx9KupClUFafFWY1V0vlOYyZVL0nUZyy/RDg60P8nGlH6q/H6pS11Rmdde3uuqa0l7GlUmH5Nr1s473cjc9RepKtxig9OGaPi6231vSVu9x85nX//nKtIqO163fSm+ZPJbpT3wsuFadAAao6gyR0J8EMEA1XO51GaC05JXdgWt5varO7QizHmq0V5Idf9r+U67MrAcIPajZMtPi12C37S9vveE0Llqiy8i5+OKL07x19XX7QddsOq3lwUy+dNQsGA0a2A4DlE2jt89dHXBnJ6bVzREjRrT0RLOgZICxH7bzDFD2LCvpWNH255133knopsLqCzbXue1GWlvghhs2bFhCvgbZfK4XDVDTp0+P83b33XfH2bKXXdSX7EWMaXFg66TKi5rENK0TIfoSpbvMwJ/8N13fmuZKXYuiuuua9KhKfWtaJ0LVNeW7igtVLnU8P/vyVaWMJc/tB7PKQf7djzDUN6Q5s0y1+mDffn9p4UKVQZr8KszqrhdKc5kyKfpOI7ll+qHB1gf5uNIP1d8PValrKrO661tddU1pL+PKpENy3fqpayHfy930FKkr3WKActtEfdiX5tx85vV/PjlVdLxu/VZ6y+TRZVek3H1MuNY5AQxQnbMjZH8TwADVcPnXYYByX1T0AukaGDrJptsRpnVuWtZKBhvFa/6ef/55b5SuzLQHCA1C23vQaCA1bfaC4jLx6jh69Ghv3FkX9RWO+cJZMp577rks7117z37QNV+7albEySefnGCkGS62s2e/pO3DY/vnvHsJuDqgtuCss86Ky//MM89sax/eeOON+L5ZBs8ekNKDd5rTzEG7/um8qNFD9c4Nq2XdXOe2G2ltkRvONqIpnrS1xXvRAKV2zrCbNWtWnHV7iRbd77Q+V3lRGwidCNGXCKLbn0r3slyT9W0guFLXotYzRZ11TfrVaX0bCJ0IVdey6lWReyHaQLe+q5xDPD/70t9pGRtZbj+YVg7Gv47nnntu3E8ob76l9bQEq9kvSkvVusup2vLc8xBl4Mq0f3fKrIl6oXQWLZMy7zSS6+plVj802PogH1f6ofr7oU7rmsqrifpWV11T+su4oukwMt36qesh38vd9BSpKwNtgJK+aE9us+yr+iZ9MJj17ujms0j/Z8rAHDvV8Sb0W2ksmsey/YnJP8ewBNRPL7XUUtFuu+0WVjDSIDDICWCAariA6zBAffjhh4kXTG02HMK5HaHvoUaNr/ZKMoMzOuorS133OVem7wFCa9G7A8d33XWXT1zrWggD1DPPPBPnQYP1ZV7AUxM2ADfsB92xY8fGKXCXYpGO2Pu+2LO/Oh2wjiPjZEAJ2DpgZjvpaNdRzYqynfZsMvfNEo32EnNZBigtfWnC6qiBrDIDeGbfKSPDNqaYNLrthq8tMn7to50vyTdGWduPznvNAKU20nDTAKPtZs6cmSgPLS/Viev0RU1xDYROhOhLlPYyA3/y32R9Gwiu1LX665r0qNP6NhA6EaquKd+dulBtYF3Pz758dVrGRpbbD/rKwfg1R3d1At/y3Pbz37/+9S8TNPcYqgyyIuqUWRP1QukuUiZl32kkt0w/NNj6IB9X+qH6+6FO65rKq4n6VlddU/rLuCLpsOXZ9bOO93I3Pfp4V+MyWX/m/cG8d2n/2CznxpHV97h+tcSe3kH1pxVq9G6pDx1M3DrecMMNLR0KlYY0OZ3
qeBP6rTS77HycO+lP0nhwvRoB9dMYoKoxJHR/EsAA1XC512GAeuuttxIduWbyhHBuR6gHBH2douW1tPasNp93Zz5pps2cOXNSo3dlDh8+PNJguOTpAUgvy/ZyKPo6Rg9sWQPaVQ1QmsF1/vnnxwy1SWivOvtB1xgflBcZ1M4777w4j3rgMzNddP+JJ56I72GA6tXSX5xunw68++67cfmq7GWYMc4eRFI9ME77B5kXhCwDlNv+KEwZ567/rSV9XOe2G0UHI/7973/HeVC67KXq7Dh6zQClttKUjVlq0+RHbaW9l53a5E4M6p2+qCkdA6ETIfoSpb3MwJ/8N1nfBoIrda3+uiY96rS+DYROhKprynenLlQb6PIL9fzsy1enZWxkuf2gb3DK+DVHu31Sn+H7IMHu/958800TNPcYqgyyIuqUmVuudTyXKN1umYSqG2X6IbuMzXN/Xc98A8WVfqj+fqjTuqZ60IRe1FXXstof3z03HXntsK9+Sm6o93I3PebdoMxRbXmWc+PIyrPrNy8dMkYV2XPblZuVhrS8dKrjTei30uzmMVR/ksaD69UIYICqxo/Q/UsAA1TDZV+HAUozBuwOPtQLtNsR2nH4zmWM0oNWlisjU1/v+JbjcuVraT4Zw8yfjFllnL2Ro9n/pkz4bvKb9qCrNNpr9ZvyM7Nd7IEEDFDdVKLl02LrgG1wufDCC+N2QkYJLSkg9/jjj8fX7aUZ7UGpLAOUO+NGRuMyzjVA+V5E3Haj6GCEZk4aXddRbYTP2XmVv6xlIEx4e4aY1nNPc52mPU2ertvp9e23deuttyby/corr2SJ897r9EVNwgZCJ+xyds+L9iVKe5mBP/lvsr4NBFfq2qVxXaqrrkmPOq1vA6ETbv2yf5epa8p3py5UG1jX87MvX52WsZHl9iVFBuA0yGl/1KXZ7/YHCXrGNuVnf4Bi4sw6hiqDrDg6ZdZEvVC63TIxLH3HMnWjTD802PogH1f6ofr7oU7rmsqrifpWV13Lan9899x05LXDdv00BmIjN8R7uZueSy65JB4PMeMi7lFtkd1G1WmA0qo4yrf+xowZ0/oIQuNUdvw6v/322xN9k2Fkjm4+87ibcPaxUx1vQr+VTjePLiP7d5n+xGbAeTgCGKDCsURSfxHAANVweddhgHKnButlM4TzdYQaiD7++ONbf5rmfcUVV7RmU7hLeqXF78ocOnRoazB15MiRbRuYyqCVNZsqLY6y1/WwZjr1oi85ZeNoyr/9oKvZZK5zHzrFXTMmJk2aFDPAAOVS663ftg7YBih7ny/pu9mnTS8H+i3DkW14sQeXsgxQ7pe2kqVZVUWcBsLM3hOmDvrqvNtuFK2nmuVn5Or4wgsveJNl51X+bA7eAFHUWkbCyG7SAKVBcBOvjIo+p03kjR8dyyyrZOR1+qKm8AOhE6H6kjIDf8prk/VtILhS1xbvbVlnXZMedVrfBkInQtQ1zbo2z5K+o55jffsViVXINrCu52el03WdlrGR4/aDRQfg7H2a1B+YD48kVwOTpq9I2yPRxG8fQ5aBLdc975RZE/VCaXXLJETdkNwy/dBg64N8XOmH6u+HOq1rKq8m6lvoutZpH+SmI68dtutnHe/lbnqK1JWB3gNKOjNlypR4KXHTB9krs8iP7dx85nG3w5rzTnW8Cf1WGt08hupPTP45hiWAASosT6T1DwEMUA2XdR0GKHcQVy/zWlauqnM7wiIPNXlxujLtB4iFCxe27f105plnJvYqypNf9r79YKgHIM2m6mVn50fLj7lO+x3ImGAe9nTU+v/2HlgYoFxqvfXb1gHbAKU9v9Q2mLLXPmv219/ug79tlMkyQLl7aEj+3LlzC0Gzv7426fIZf9x2o2hbdPXVV8f5lXzf/lJKqJ1X+fOlwc2QjO8mzU0aoLQkqYlXR30I4P5phpvtR/vale0TOn1RE6eB0IlQfUmZgT/ltcn6NhBcqWuLB/7qrGvSo07r20DoRIi6Zs+8tdsq+zxtBnzINrCu52e3v6hSxkaW2w/a5WD8+I7uMtX2PiQjRoxo9RX6ECTN4OeTGbIMfPLNtW6uF0pjVplUeacp0w8Ntj7Ix5V+qP5+qNO6pvLqxX6o0z4oq86bdss+2vWzjvdyNz1F6ko3GKDESEu+uu8rmm3kc24+i/Z/tqxOdbwJ/VY6s/JYpT+xGXAejgAGqHAskdRfBDBANVzedRiglAV9JVHkxb1Mdt2OsMhDTZ58V6b7AKEOVkYnOy9Zg7t58eXdHzVqVByXBqt73dkPurbxwc6X9riy+Wr9ZftBHAOUTav3zrN0QEYnU/YyRl177bXxb/uraOXaNspkGaC0lJ87iyntBcKlaadV6dIsLJ+hxG03irRFkuNudLtgwQI3Ca3fdl6VjiIGqMsuuyxml9VGdZJ2byKjqDVb0W0fTXnmHZWOMq7TFzXFMRA6EaovKTPwp7zaOuy2uaHr20Bwpa4tGfjLq2Pmftm6Jj3qtL4NhE6EqGv2M4fh5h59BijN2A7dBtbx/OxrazstYyPL7UvccjD+3OMnn3yS+PhEM9/l7K+61VYVdXWUQVrcnTJrol4ozXll0uk7TZl+aLD1QT6u9EP190Od1jWVVxP1LXRd67QPykuH25Zl1U/jt8p7uZueInWlWwxQyr/9nKxnAHeZQsPIzWfR/s+E17FTHW9Cv5W+vDx22p/YDDgPRwADVDiWSOovAhigGi7vugxQWq7Ofnl/8cUXK+fM7QiLPNTkRerK9D1APPfcc4m8KF96gAvt9OWNzayTfVJCp6mqvCIPuvri99xzz03kfdiwYfFvDFBVS2Fgw2fpgJags3XenPv2frCNMlkGKOXWfEVt5OnFroiTrpkwOl511VXeYG67UaQtcsNk5UHx2unQbLE8Z7e5Wtc8zbnpyEu7jF/jx49vGYVdY9y0adPidGrpRMlO+7vjjjtiv8qbZJZxnb6omTia1olQfUmZgT/lten61jTXPH0VA1fHqWvl6poYVqlvTetEiLo2b968Vt1R/fH96fnM5+poA+22XG1liOdnX9qrlLHkufXMVw6+eHXN3rNQeRT/cePGxX2Elm0t6uoog7S4qzCru14ozUXKpJN3mjL90GDrg3xc6Yf+EfHM93bcXqkN87V/Zepap31QkTpvt2dZ9dP4q/Je7qanSF3pJgOU3k1UnubvuuuuM1gSRzefvvJPBPD86Nf+xIOCSwEIYIAKABERfUkAA1TDxV6XAUpf4JvOW8e0L0js7GpwUzMC0vZrcTv7Ig81tnzfuSvT9wChdF100UWJ/GiAwB2M9ckvc83eD0kD8KHll0lLKL9FHnQV19SpUxN8bd3BABWqNAZGTpYO6GFJm5Db5a1zPZS7rowB6rbbbkvIVNgizl3GR/tU+ZzbbhRpi+67775Emu666y6f6NY1bX5rM0kb/LQFaNk7E2bChAn2rcR52bTbyyXpC3Xb2e183n4dth4onWl72Njy7fMqL2qS07ROhOpLygz8KZ82Z3cGVB31rWmu1LV/RHXXNelRlfrWtE6Eqmt2e1P0vI420Japtj
LE87MvP1XKWPLcvsRXDr54dc2NW/t+mj5M+22prSrqbF511w033WUMZXXXC/EqUiadvNOU6YcGWx/k40o/VH8/VKWuqczqrm911bWi7Z7xVyQdxq+OWfXT9tfpe7mbniJ1pZsMUNoGwLxL6Zi2Go2bzzL9n+FcRcfr1m+lsUgeO+lPTP45hiWgfnqppZaKdtttt7CCkQaBQU4AA1TDBVyXAWr69OmJDvyMM86I9EVNlnv11VdbYU488UTvni1uR1jkoSYrPt1zZaY9QGg2kv1AonPN3gjltIGyvWzYU089FUr0gMop+qCrRF5//fVtjMUZA9SAFmHlyPN0QHXOrlta9s635FwZA5TvxUlrZmc5LeOjvYnstKTtHeW2G3ltkb5s1CwMI1vnWbOaZEAyfnXU5rhZzl3S6Omnn071Xjbt9sylDz74IJarOFVWJp1prEwAtf/u2up5ZWLC6ljlRU3hm9aJUH1JmYE/5bPp+tY0V+pa/p52Veta1frWtE6EqmvKdxlXVxtYx/OzL19V21S3L0krB1/c6i9M36Hj6aefHv/OmsHryqqrDNx4zO8qzOquF0pj0TIp+05Tph8abH2Qjyv9UP39UJW6pjKru77VVddMW1P0WDQdRl5e/TT+dOzkvdxNT15dUTzdZIByPwDUb59z81mm/zPyquh43fqtNBbNY9n+xOSfY1gCGKDC8kRa/xDAANVwWddlgFI2ND3ffsHUVPQsZ88AeuONN9q8ukvU3XrrrW1+yl5wZd5yyy2pIq644opEfvS1ptbhdZ0GVfXlufnTQ0Kes79k0Yu4OpHB4OyHkrxBBe2vIOOjrTM6xwDV25qQpwP2y5DKO60O2ssSyfCR5WRMOvvssxO6lPcVuT3TR+nIepl46623ErKzXrA0GKwlHGy9tjdd9+XDXbYjr+64a8fPmTPHJ7Z1zW3z8tJu70diGwb1pbnJ03nnnZcan33D3uNLYfXyVdQ99NBDcXwKW7ZdqFsnXK5peqz8Fu1L5LfMwJ/8N13f6uZKXYtaM56arGvSoyr1rW6dqKuuKd9lXJ1tYOjnZ1++qpSx5JUpB1/8mulv9No+zpo1y+fde63OMvBFWIVZ3fVC6S1TJnX1Q4OtDxJX+qHm+6EqdU1lVnd9q6uu+dqdrGtl0iE5efXTjquT93I3PVnvUiYuzYC1+4Ann3zS3PIe3Tiynrddv3nvPG6/lLYShis3Kw3eTHT5c5bSXCaPZfqTNB5cr0YAA1Q1foTuXwIYoBou+zoNUO4gqpbV8G3irCy7X3L4DFBmhpR5SEmbFl0GoSszba1fydRLsYnbHLVmvevcgezRo0e7XhK/ZbA6/vjjY9k+mYkAPfRDy5MYVkUezh544IHYvwlXdqC5h/D0RVLzdEDT983SOyrzGTNmeLnY+4LJX96MSjteo0tpLzX6Glvtk/Gnc9vY4ibINZpplpDPScbll18ey5V85UNfbWc5Gbbt2VhqHzRL0ucUx/Dhw+M48tpFt81LS7viEi/DREdjGHfTlxenSbeMgLa8MsvwuWEnTpxoxBY+1qkTLtcQfYkyVtYAZefR1+bWUd/sOE35UteiyNWJXqlr0ruq9a1OnXC5hqprhRuS/2xyb7fRodvA0M/PvrxVLWO3HPS1fBnnLnmrtsO3/2OazF7sh+qsF+LklkmoulGmH7LzOBj6IHHlmS/5TBq6vfPV8artk2Tauhj62aSuuuZjkXXNTUdeO2wz8dVPN66y7+Vuem666SZXZNtvPc+b8tExbylVN46sPLt+057D1J9cc801iXRotYq09zVXblYa2jL8nwtVddwuS8Mv1LO3kujmMVR/ksaD69UIYICqxo/Q/UsAA1TDZV+nAcr35b++ptfSdWbwWIOnWm/XNsCoE/UZoNyOWrNl5s+fX4mYK1OzjxYtWpQq056lpXQq3S+99FLCf1kDlGZKmQcHyauap0RiBviHPZVdRgYzgJ2WLN13DQ0YoNJo9cb1Ijowe/bs1gu+XvJ9TjN63DZCG4/nOe2zZOqWjlrmUi9TWlZAA/HSN+mXvfyPXjj0hWCWc/dz0guW0igjkQxoagO0L4U7o++qq67KNGzZcWoZPTvtWsZUg5KmDqkNVVx2fVF8M2fOtMW0nbtt3qhRo1rpVtr1p7J4/fXXW5zsJfbE3zjNyLLTpn289PKW5/TyYofT+TPPPJMXrJVnzbKyw2pJRtOP5AqwPNSlEy7XEH2Jkq3ytvMt3c1yA1Xf6uJKXWu2rhmdC1Hf6tKJuupaVr1y79XdBoZ+fnbTr3alahm75aDn+4ULF7pRpf5WP2O3bTrXrIeiru4ycNMRgplk1lUvJNstk4HohwZbHySu9EPN9kOh6lqd9a2uuqY0l3FuOvLa4SL1045fZWG/Z6idznovd9Nz5pln5r4fXHnllYm+QIagLOfGkZVn1+8NN9wQaT9bfRCtjw81y0erSOiDOLc/0vhUmnPlZqXBJyOUjvdif+LjwbXqBKRT7AFVnSMS+o8ABqiGy7xOA5SyokFe39czGszUoKXb2ZvftgFKe6Xoaytzzz5qUDZvOSsf0iyZ5557bur+TnpgsfdyUVqUFxmmFixY0IqqjAHq008/Tcgr8jWSLz/ddk17xbgzP8RKX7e6Bjs37bpvl3HWg64blt/dQ6CKDti50JdwtoHI6Ib2E9KDd56Twdue3WTCy1hj77um62pPtNRKmpNxWHXdDWdkph1lyNHMRi0JUtSp7dQLjmt4Uxzi4V5XfrIMZ1ltXlq67eunnXZay3jmfiFo/OglUy9xPidj4WWXXZao1yac8qEX4jSnr/nsGV4mnI5apkrtbVkXUieyuFbtS5QvvVDYeU4zQHVDfQvJlbr2cdvXuEYP6qpr0rfQ9S2kTtRd14q0I/poqqk2MMTzsy9PVcu403Jw06L+0O7b1a8W+QCryTIwaa7KzMgxx5D1QjI7LZMi7zSSX6QfGmx9kPJNP9Rce2fqRui6Jrkh61vddc1wyDuWTUeV+lnkvTwrPXqG9+1lq+dZe39f84yjo5Z0c593s+Jwn7ez/Nrx+M71cUbaahxZct00pJVhaB0Pqd9Kc6d5LNqfpHHhejUCGKCq8SN0/xLAANVw2ddtgDLZ0RJ7miWgztnX2euaBon1FYwGXe2XUD00ZQ346kGhrMuTqTSkOXvNeZMXpU9L6cmVMUCpszYydMwa/E5LTzde10OjnS/7fPz48blJNgM84po2KyZXCB4GlEBVHTCJ982aMfqkfZ6KONV37bM2cuTIluFGemUbknX9qaeeyv1Kz7cMp0mLjjI0ybCudOmlSmuf68VAA2adOnHULCU7vbbxXl/dyQg/b968zCjy2jw7H75zzWB85513Uuu1wqj99rkJEyZkhlPbn+bSXk5NGjXTrBMXSifyuFbpS5SvIgN/8tct9S0UV+pa83VNelRHfQulE3XXtSLtyEC0gVWen315qlrGVcrBTY+9/2nRZb0GogyqMnPzrd+h6oWRlfWe1EQ/NNj6IHGlH2q+H6qjrpk6EuI9oEr7lzd+oHQWdWXTUbV+5r2X56XH9
7Gwu/2Cea43R/eD1bw47HYuz6+JQ+9Wmv2kdwnNdNRKE1mrK+TJtdOQVpZ16LjSFUK/leYqeQyp42n8uO4ngAHKz4WrEMgjgAEqj1Dg+00ZoOxka9aPZjjp6xI9EMkIowFafe3Zb05fgGrg2Pz1W/7T8qtOVF/gVBm4T5PN9f4moBcLs/yCls2TrvWCU/uolwK1m2oztVRemeWOeiGPA5XGbtaJogaogWKXFW83c81KN3Uti061e72qE9VyHS70YHt+1odb2mdCf+rf+tV1e73o1X6o27lm6Tv9UBadavd6WS+q5bzz0LyXd86u6ZDod9PEuyM+1VGW4OuOsiAVvUUAA1TD5TUQBqiGs0h0EIAABCAAgZ4k0KsDfz0Jm0RDAAIQgEAbAfqhNiRcgAAEIAABCHQNAQxQXVMUJKTHCGCAarjAMEA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJMPBXEBTeIAABCECgFgL0Q7VgRSgEIAABCEAgCAEMUEEwIqQPCWCAarjQMUA1DJzoIAABCEAAAgUJfPHFF9GkSZPiv48//rhgSLxBAAIQgAAEqhOgH6rOEAkQgAAEIACBughggKqLLHIHOwEMUA2XMAaohoETHQQgAAEIQAACEIAA/xnRBAAAIABJREFUBCAAAQhAAAIQgAAEIACBCgQwQFWAR9C+JoABquHixwDVMHCigwAEIAABCEAAAhCAAAQgAAEIQAACEIAABCBQgQAGqArwCNrXBDBANVz8GKAaBk50EIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQqEMAAVQEeQfuaAAaohosfA1TDwIkOAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgEAFAhigKsAjaF8TwADVcPGvuuqq0aabbhotv/zy/MEAHUAH0AF0AB1AB9ABdAAdQAfQAXQAHUAH0AF0AB1AB9CBLteBFVdcsVVG//u//9vwaDLRQaC3CWCAarj8ttxyy2iXXXaJttlmG/5ggA6gA+gAOoAOoAPoADqADqAD6AA6gA6gA+gAOoAOoAPoQJfrwPbbbx8NGTIkOuSQQxoeTSY6CPQ2AQxQDZcfS/A1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIMASfBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPwYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGix8DVMPAiQ4CEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAQAUCGKAqwCNoXxPAANVw8WOAahg40UEAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQqEAAA1QFeATtawIYoBoufgxQDQMnOghAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACFQhggKoAj6B9TQADVMPFjwGqYeBEBwEIQAACEIAABCAAAQhAAAIQgAAEIAABCECgAgEMUBXgEbSvCWCAarj4MUA1DJzoIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhUIIABqgI8gvY1AQxQDRc/BqiGgRMdBCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEKBDBAVYBH0L4mgAGq4eLHANUwcKKDAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIFCBAAaoCvAI2tcEMEA1XPwYoBoGTnQQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCoQwABVAR5B+5oABqiGi38gDFAff/xx9NZbb0Uvvvhi9Pzzz7eOr732WvT+++9HX375ZRuBzz//PFKYvL9PP/00+uKLL9rC+y4Ulblw4UJvmnwyq15T3ufOndvi8cILL0RvvvlmpM6kF10aX5VRllN+08q5V1lk5bef75VtB7JYqd5/+OGH0YwZM1pty7Rp06LZs2dHqr9FXZrOpumje/2TTz4pGlWrTVHb99xzz7X+VO9DuE6YipGbl05/+9pvN19Vy6poOTXZdrt55DcEIAABCEAAAhCAAAQgAAEIQKBuAhig6iaM/MFKAANUwyXblAFKg4bPPPNMNGLEiOgf//hH6t+pp54aXXXVVZFtqJCxKiuMfe+4446Lzj///Oif//xnNHXq1FSaZWWee+650S233BJNnjw5qEFKA7aPPvpodOWVV0annHJKWz6PP/74aOTIkdG9994blRngTs14QzdeffXVtryonJTH+fPnp6Zi0tOTvOEU9umnn04Nx43eIFClHfDlUHXiwQcfjIYOHRrrzQknnBCfqz24/vrrIxm481yZNsFuc8y54v3ss8/yomndV3pMOB3VXnXqqjD94IMPIrUxdlqqnNvttpufUGVVppxU/nW13W7++A0BCEAAAhCAAAQgAAEIQAACEGiSAAaoJmkT12AigAGq4dJswgA1Z86caNiwYYlBTg3WylB02mmnJa6bwc833ngjJqGv8f/1r391NFB62WWXeQ03VWTKiDZv3rw4fZ2eaED22muvTeRfPC6++OLorLPOSlwXl9NPPz169913O42u0XAyMt1+++2RBoBNmZqjDHlpTrpy4403toW5+eabI93D9S6Bqu2Am/PXX389ksHa6JWO0je1NT5jrow8WTMkq7QJJg2SUcTdeuutiXSrbmelLU1mVaaaeWXSHuKYZoAKWVZVyilU251WHlyHAAQgAAEIQAACEIAABCAAAQg0RQADVFOkiWewEcAA1XCJ1m2AevvttxOzE0488cRowoQJiaXlZGw644wzEgOhtgHKINFMB3uQ9LbbbmvJ0QwAGYS0/JYMFa7RQ0aetKWhXJmjR4+OFi1a1JrJIGOPlsi68847E/EqDTISVVkSTuk955xzYrknnXRS9NhjjyUGobUk4YUXXhj7Ubxnn312pXgNy6aOmrlll5k5V1llOdswpzLF9TaBkO2ASGh2o9oSo08y3GpJO2MAUn3XjEt7ZpT8ajZUWltgCLttwh133NEKo3DmT8u7yaii2YkmDTqa+I0s31FhVd/tcDr3tXm+8OZaCKauAUpt0vTp01tLFyqvEydOTKRTS4PquvL50ksvtYzidj58Bqi6ysotp6babsOfIwQgAAEIQAACEIAA
BCAAAQhAYCAJYIAaSPrE3csEMEA1XHp1GqC0HJU9k0eGIQ1a+pyWgrIHlH2Dse6yVRoY9jkNPNuDojp/9tlnfV5bS3PZftOWwnr88cfbZGpwtlM3ZsyYhDwZn3xOA73nnXdewm8vLUWn8rb5mnMN3MtwmOYeeOCBOJyWKMT1LoHQ7cCCBQsSM5/UbsycOdMLSMu12e2K9E/LPGa5ou2MZMiAY8svYoBKW2Zy7NixWclK3AvF1DVAyahmu0ceeSSuh2KnJUhtN2XKlMR91wBVZ1m55dRU223nn3MIQAACEIAABCAAAQhAAAIQgMBAEcAANVDkibfXCWCAargE6zRAjRs3LjE4qdkwWc6eaeQzQLl7f6QZoBTHddddl4hbBh+fc2WmDWIqrLtslr6479TZS4RpGbGs5bfcL/2z8t1peuoKl7YXlAazNbid5h566KG4/PIMBmkyuN4dBEK3A1ra0RgyddQsxSznGlE021KzkNKc2ybk1TctKWnSU8QApWVB5d+dqaklSYu6UExtA9SoUaPaonfZuQYoBVC6Tf5dA1SdZeWWU1NtdxskLkAAAhCAAAQgAAEIQAACEIAABAaAAAaoAYBOlIOCAAaohouxLgPURx99FGmfJzMwqVkC+ho+y7333nvR5Zdf3vrz7XWkJaeMPB1lsEpzTz75ZMLvpZde6vXqyswaxHzqqacSMssMGNuRi4Odj+HDh9u32861JJbt/5prrmnz060X7FkK9kC18iMjnPTE5+ylv9Jmr/nCca27CIRuB7QspW24kfE2ayadaMjYZBt8pXtZOuW2CVntjOTPmjUruuGGG1p/WYYt+bUNPjJoa0lAu27rfp4LydROj2Z5uq6IAer++++P82AboOouK7ecmmi7XT78hgAEIAABCEAAAhCAAAQgAAEIDBQBDFADRZ54e50ABqiGS7AuA5SWibMHVjXYWtW5A45ZA8PuzBvtbeJzrsysQUwtT2XnSTMpOnHa/8mWo32espwGuG3/N954Y5b3rrpnG6A0k8vdM0ezR3wOA5SPSu9dC90OuEthah+4Is6diZOmd5LltglZ7UyRuG0/9913X1yXX3nllcg1amfNCjRyQjL95JNPIs021J/PGFzEACVDk5FhGwPrLiu3nJpou00ZcIQABCAAAQhAAAIQgAAEIAABCAw0AQxQA10CxN+rBDBANVxydRmgbrrppnigVcaTEMuouQOOWQPDL7zwQiJ+3/JSQu3KzBrEdPczSptVpeX0tKeV+XNnRej+8ccfH6fvtNNOy5zF4e4Zk7eUYcMqlBmdbYCaMGFC255b0o0ZM2a0ycAA1YakJy+Ebgc0+882xr788suFuLhGm6FDh0ZffvmlN6zbJqS1MzLY6M82ungF/uei6v3ZZ5/dSr/i12/XGK0ZoHkuNNOs+IoYoNLC111WbjmFaLvT8sJ1CEAAAhCAAAQgAAEIQAACEIBAtxHAANVtJUJ6eoUABqiGS6ouA5SWxrIHirW8XlXnDjimDQwrnnvuuScRf5rRxpWZNYgpA4qdp7T4NShu+/PtIXPllVcm/EyaNMmLR4PUV111VcKv9j7pFWcboLRUl9zNN9+cyI9mRSmftsMAZdPo3fPQ7cBJJ52U0J2i7co777yTCKf6qdk/Pue2Cb56PmfOnFieZigWcfZSmnfffXcc5IILLohlaXnBvH2kQjONE+I5qWKAqrus3HIK0XZ7EHAJAhCAAAQgAAEIQAACEIAABCDQlQQwQHVlsZCoHiCAAarhQqrDAKUG0DbA6Nw1MHSSTXfA0TcwLLma2XDxxRcn0vD88897o3Rlpg1iarBay/iZfGmgePbs2V6Zisv403H06NFt/twlAjWo7M7mUD7GjBmTK6tNeBddsA1Q48aNa6VMsz5OPvnkRL4effTRRKr12zDM2q8nEYgfXUUgdDvw2WefxTphdCPPWGOAaJaSCWOOWjrO59w2wdfOaMk5I6eoAUrtgC+MvSyf7mfpe2imvvzb1zo1QDVRVm45hWi77bxzDgEIQAACEIAABCAAAQhAAAIQ6GYCGKC6uXRIWzcTwADVcOnUYYD68MMP44FWDaiecsopQXLlDjj6BobV+GqPJDPQq6NmGOi6z7kyfYOYn376aeQuJ3XXXXf5xLWuFTFAyaNtZDHpveSSSyKl4YYbbohOP/30OB8nnHBC9PDDD6cuG5aamAG+YRugxo4dG6dGeTF51lE6Yu9B89hjj8X3swbkY4GcdB2B0O2AlrS0dUZG4DKG7RNPPDERPs1w5LYJqouadag/hdHeRrasNDl2gagNMWHOPfdc+1Y0c+bMRLq0xF6aC800LR5zvVMDVBNl5ZZTiLbb5JsjBCAAAQhAAAIQgAAEIAABCECg2wlggOr2EiJ93UoAA1TDJVOHAUoDtfZAsfY9CeHcAUcNDGsGhJbh0v5MmpXgznzSTBstl5XmXJnDhw+PNDtJ8p566qnWUn72klcyBMmQkjXwXdQApTRp/yObVdq58qpZBb3mbAOUvQyiZqScd955ibzfcsstcfaeeOKJ+B4GqBhLT52EbgdceaorZdwZZ5wR65TCakk8n3PbhLQ6aa4XMUCpLTH+zVKUJm61JbaxWW1W2r5SLoNQbatJi3vs1AD1/9u7E5g5ijvv4+TlCkKAQIDYiCAIIkGQIAIogSRKYFlxLAli2WjJEhBHljMQ2GxCLkKCOQw+sTE2AQzG4ANjG4NjzGGDjY8YsA0Ym9tgsHEwl8H4RJh+9Zvd6lTXU93TM3083c98W3o08/R0V1d9qrr6mfo/1e3mU2VvZUlTV2495dF3t5JHtkUAAQQQQAABBBBAAAEEEECgOwUIQHWnPseuswABqJJrr4gAlAZkzWCrXvMaJHUHHO1j+N4rGKUASNLSSpqaWRV32y77GLo1n4Jh5kfBLN+iY99yyy2hlWZ0/OUvf2k8I0l51+92uQYNGhRoBkSdlrgAlMpgPxPHlFMBOS32gD0BqDrV+D/ymnc/4M4UUjC4lcUNarz55pve3d0+Qc8yUh+mHzcNtds0Aajhw4eH57LvuVX33Xdf+LnSfO2117x5y9vUexBrZbsBqDLqyq0n04f4XtP23VbReYsAAggggAACCCCAAAIIIIBApQUIQFW6eshchQUIQJVcOUUEoNzbL2kGUR6Lb8BRg8O9evVq/OgZTSNGjAgmTZrUmMWU5phumn379g00WDxs2LBAaduDmQoKJc2mSnM8s82aNWsisx769+/fmA1lPterZnfZA9fKi27fFTc7wt63Ku/tANQjjzzSJVvu7RLlrhkhCxcuDO0JQHVhq8WKvPuBVatWhW3CnJe6tV2aReeMG9CNO5fdPsG91afOXc22MXloFoBSwMlsqwCzb1myZEm4jbZ98MEHfZsFeZt6D2KtbDcAVUZdufVUVt9t8fAWAQQQQAABBBBAAAEEEEAAgW4TIADVbfQcuOYCBKBKrsAiAlDuYK8CRJ9//nnmkrkDju7AcDsHcNO0nyOycePGLs9+UqDIflZRO8fUPnfddVdkwNnM/HHTUxBKQSczgK1XzQ6qy2I
…)
![image.png](data:image/png;base64,…)
FQBFQBBQBRUARUAQUAUVAEfBFBFQA8UVwtWlFQBFQBBQBRUARUAQUAUVAEXBGQAUQZzx0TxFQBBQBRUARUAQUAUVAEVAEfBEBFUB8EVxtWhFQBBQBRUARUAQUAUVAEVAEnBFQAcQZD91TBBQBRUARUAQUAUVAEVAEFAFfREAFEF8EV5tWBBQBRUARUAQUAUVAEVAEFAFnBFQAccZD9xQBRUARUAQUAUVAEVAEFAFFwBcRUAHEF8HVphUBRUARUAQUAUVAEVAEFAFFwBkBFUCc8dA9RUARUAQUAUVAEVAEFAFFQBHwRQRUAPFFcLVpRUARUAQUAUVAEVAEFAFFQBFwRkAFEGc8dE8RUAQUAUVAEVAEFAFFQBFQBHwRARVAfBFcbVoR+K8g8Pz5Czx99szpdjw83uDFy5fWMW6/9vCw9rnx7PlzeLx5Yx17/uKFUx3rhA9vPH/+HC9efOjbmzdv8ezZcx++yq/R3Nu3b7+5o+/fv//mup+ryHHg2DTH15eu5d37+FJ7n+vf95x7/fr191TXuoqAIqAI+AkEVADxE49Zb1IR+DYE9u47gOIlSqFY8ZIoVKgoFv6zGNdv3MCly1dw6PARlCxVFqVLl0GVylVRo1YdFCxYBOXKlseNm7flgj169Ebu3HlRrWp1VKlSDdVr1ELVajWRKWNm9Ojey1udGjFiNEaPGffFOu47dsGtTXsUK1ZS+lW1clVUq14TFSpURrYs2XDg4KEvtvFfKnD33n3UruuC7DlyYuOmzV99axcuXsbjx0++uvzXFnzt8Qadu/ZA0aLFZVwULlwUtWrXw/0HDz/bxN17D+Di0hCFCxXBrt17vSy7Zu06xIufEHv27AUFliqVq4Ht79y1x8vy06fPRLx4CbBv/0Hr/NVr13Hs+Elr37sbY8aOx8BBQ71bTcsrAoqAIuCnEFABxE89br1ZReDrEWjRohXChYuADh27CEO3d98+rFq9FvnyF0KXbj1BLfbOnbsQL3ZcBA7wP+zbfwCrVq7CH4ECI17cBDh4+AguXb6MceMnwp8/f8iUIZMILSyXMkVKOdZ/wKCv6tD0GbOkfNcu3T5ZnppnCkRt23fEmTNn4b5jJ6JGjiL15s5bgEOHDiNvnrwIGiQoxk+Y/Ml2/msn3r17hz59+gkOY8aM/arbW7p8JZYuX/1VZb1b6N379zh3/gK6du0mfapRoxaOHDnqZBHxqs23b9+hSuUqUoeCsFd0+vQZpE2bDpvsghaFYY69FSu9vpelS5chTZq0OOgglNZzaYBp02d61fxXHbt1+zZq1q6HAYOGfFV5LaQIKAKKgF9EQAUQv/jU9Z4VgS8gMHLUGGHcBgz8WEDYvGUb2nXsYrVQrGgJxIsb39rv0bOP1B01aowcu379JgIHDITq1WpaZdZv3Cxl8ubJZx178eIFVq5cjcOHj1rHuHH4yHEUK1ZKyk+aPMXpnONOl249UKp0OTx9+sFVLGvmLIgcMTIuXLoiRd8BiBk9Bvz/5h8rVq1xrI75CxaiV68+uHPnrhyn29aCBf9g8ZKlOH7iFB4/eYqdu3ZjxYpV2LdvPx49foKZM2fJsTt372H5ipU4euw4Xr32wLp167F27Tqn9h13Tp85K+2eOn1GDru77wCZYUMUoPr3H4hLl6+aQ/J79ux5DB06HKtWrZH22eeHj55g69ZtWLFyFegwdfXqNcybNx9bt223sNi4cZPgN336DKs93t+QIcMwa9Yc6xiFODLrKZKnQuFChbFg4SK8sbtw3b//QASZhf8ssspzY8uWrZg7bz7okufh4YEJEyeL4OlUyIudLVu2SZ9oMSDRRW7W7DlYvcaG284dO7Fk6XKnmj169ESUSFHgvnMPDhw4iMWLl+Llq1dS5u7du1i0eAn27T9kWW46d+6KBPESgONt2fIVWLZ8pdXelavXsHrNWpw4eVqO8T7bt++AAP5+R+3adZ3G4br1G9CvX3/s2LHLqn/p0hXMmTMP27bvkGP37t2T9un6RZey1GnSYcrU6VZ53VAEFAFFQBH4gIAKIB+w0C1FQBEAQJedzJmy4H/+A+Dc+YteYkIXmDdv3uDN23fIl68A4saOZ5XLkT2nMJZkXkn79h1AoAD/Q40ata0yHTt2ljLz5i+0jtFNhtrqAvkLWcdOnjqNfQcOYdHipfjNnz9MmDDJOue4cfbsOWTImAXnzp23Dr987YH06TLgz0hRsNPBZadLF5vmvUKFilbZPv0GIHeefMiVKw+yZ8uJGzdu4t2793Cp30j6RBezFy9foW69+rJfrVp1EUDoUjZp8lRcv3kb6dJlQOhQodGjV1/UrlsfAf0HxOQp06xrOG5s3LwF/v35R4UKlXDh8lUULlwMoUKGxtt373Hx0mWUKl0WefLkF0365Ss2IeTwkWNImiQ5SpQohdSpUuPPyH9iyLARePj4Ccw9UXN/+sw5aYvXP3josFx20aLF0m8y+CQKPm5ubVG7Tj0kS5IM1arVwKvXr/H6tQfquTTE7/78IWvmrOjew+Ym9/DRIxQpUhw5cuRGtuw5MXHyVOt2+vTtj/8F+B+6dO2ORUuWI1zYCHB1bWGd/9TGP4uWSJ/69O0nRejuRRw4BurXbyhuWqFChELffgOtJmjJiRg+Atp36IR2HboIxhQy+Kw4bjl2WH/BAtu46tGztwicbdp2QNfuvRA+bAQMsFvdKIBkzZIdQQP/gTNnz0msUras2RHYfwAUK1YCGzdvletSGE+ZMg1oqcmSJRvmzFsgx/lupEyRWq5HFy4KS7//FhC0HpF69+6L9Bkyg/FISoqAIqAIKALOCKgA4oyH7ikCPx0C1EqfOn0Wx06cwvGTZ3Dy9FnZ56/j36kz53D67DmcOHUGZ89flLLU3FMr7R1as3a9MKBJEyfFxcs2y8Gn6j9/+QplypRFhLDh4NamnTBukcJHwoRJUyztO12uokSMjITxEqBWrTpwbdYcbq3bOWmT2T4tJUOGDse6dRvkctQiHzl+Srbp20/G0jDQnvtDZi9B/IR48vSpderl6zfIkC69CCC79+63jo8aPU7aKl+2vBybM3ee7DOmhUSLCZlNQ+FCh0H6tOll15StUd1mzXEMdG/WrLm0M2DAQBw+dgLhQodF1ao1TDNOvxTcMmfMjEjhI2LG7PmihXd334kzZ88jVozYaNOmnZRPlSIVKpavKNYXPo909n706dMX4cKGw5Gjx6Xc0RMnETJYCLjUc5H90qXLIoC/33D4iM2aRCsO8Zs5c7acnz5jJv6MEk22x44ZK+dovSEtXbpc9ufP/0f2mViAjH2cWHFkf9euPQgcKIhYh3jg9t17SJk8pbjdMb7m4sXL2O5uswpIhU/8o1DJPvXt198qsXWbuxwrX64CHj55iuRJkiJ1ilTW+SFDhsv5Rg0b4+KVa0icMDEyZsiMp89fSJm1a9fL+Tlz5sp+/4GDZb9dWxueLvXqI8Bv/nH1+g05P3DgIDlPyxaJFg32iRYs0rXrN2S/tVtr2a9TqzYiRYgMxrGQhg4ZJucHDx2OGzdviQWE8VGkI0ePIWXK1Fi/Ya
Pse/cf2zlw6CjOnL2Ak6fO4MSp0+A7zj++96fPnsfxk6et9/3QkWMSm+Xd62h5RUARUAT+DQRUAPk3UNdrKgLeQICMTa5ceZE4YRIkTpgIsWLElLiLuLHjwvEvftz41n7C+AkRPmx4JE+WAjdv3fLG1YDFS5YJU5UgXnycv3jps3WfPnuOIoWLIVu27JgxYxamTJkqwpFjpX0HDiJUsBCoXas2Ro22MbsF8hdwLOLldvv2HRE3dhwUK1IUmTJkRKRw4UEmfO/+Ax+VL1qkKKJH+1usFOYkXaEypssgAsWuPfvMYQwdPlLur1KFSnKsTu06CPibf0tgKl68pGBHSwSpa9fuCB0yNNas24CXL1+haJHi0o8jx07i/IUP+NSv3wABfw+A/QcP4dKVqwgTKgyKFinmlAXMdOLZ85diMaGA8eDhh+DrLVu3S9/y5cmLho2aSD9q16yN46fOyPGyZcpKE1OmTJP9gQMHy/6evfulbBu3NrJfqlQZhAwaHMeOn5B9I4DMmGGLbaBQev3GTUybORvR/oqGKBEjWcKMYcLHjZsodY8eOyHXqmkXymg5+J//gCherIScv3jpCmLHjINaNT9YuOTEF/7RfcqzALLEPvbm2y1jcWLGlsQBpqkePXpJHQqCrzzeIGH8BEiZIhVu370vRRYuXCTnFy60CU+0zlDgmGx33eOzDBQwEJYus1kp+vcfgN/9/Yb99iD00aPHiqVtid0djrFHPD92rC35Qft2HfFHoD/EpYsXpCDo/7ffsWnTFtNF6/f8hYuIGyceunXvaR3zzkbdui4IFCAQ+F7zjwJgvDjxEDdOXGmX2zwWO2ZspEmVBtH/io42bdp75xJaVhFQBBSBfw0BFUD+Nej1worA1yFw7fpN/B01mjBWMaNFF5cZMh1kPsigyV+sOIgrDEpcEU5ix4iFCGHCiS8/GU3v0M3bd5A8aXK5Hi0qXtHbd4ymACiAZM+WA2XKlPOqmBwjc0xGc9jQYbJvtNLV7VaET1Vs1ao1QgUPIfdLYShK+AiIEDY86L7kmQrkK4C//4qGF/Z4AJ6nlpoCCGMGdjhkQWrYsLH0p7VbW2mGWbJCBA0GxkSQihYtgeB/BMOBgzb3pVNnzooLWVm7xeTsufOIHSsOWrm1xZ17NsaX9Ro0aCTM7oYNm8SSQfchuvS8fPVxWtYnT58hcaLEqGgXguTCEk9hi4uoU7uuxJYcO2YTIO7cvY8qVaojSaLEmDt3HkqXKoO8efNbmaM2b90uQlLbNrZ7IrbBgvwhWni2bQSQaQ4xIBReBgwcLIIDhSWT+YlM/O+//Y4x9oxjtGDx+bVs4SbdJB5/BAqCksVLyj7HSPx4CSx3LXMvX/o1FhDjgsXyRgAZN84WFxI/bjxxBTNt9eptiy8aO3Y8Hjx6gmRJkiLG39FBIYg0d+586euChTYXLBFA/H24ly6duyJIoCCglY/Ut08/sfbRTZA0ZqzNOrZ48RLZHztuogiVU+2xHG1at0Xg/wXGytVr5Xyvnr3kvLEeyUH7PwqwxKVjpw/xUo7nv7RNASRo4CDyXvPdlvc9Vmzbex47rggefC8olEcMG07umxZGJUVAEVAEfgUEVAD5FZ6S9tFPI3Dz5m0kTpBIGIxJk6fh1es34vZC15fbd+x/3L57D/cePATTlVKI4D4ZZO9+P4FgM9sVmc6yXggWS5etAF1lDOXNmw8lS5Y2ux/9so9sq3vX7tY5luex0WNsjCZP8JshDJw2gdl0U+IfiYHRtvJeZ3Fq0dJNAuFpoXAkZt6KGC4CbtkDy9k224kbNz4uX70mRQ1Ta4LXKTiUKF4SRshigDmPlSpRymqacSUZ02ew9rlBtyC2TTckBoaHDhESBQt8iGdxLPzuPZAkcVJUqljZ8TBobWDsBuNoDF27ZnMX4v7I0WNRuVIVMC7Bkdx37kbg/wXCgP4D5DA14qGDh8S1G7a6G+1B/yYNr7FE0To1Ythw0fI/ePhI6jKWgfcxc9YcweDe/QdImCARcubIJeeNOxxjX0gUjpImTobuPZw1/RcuXvpkulzW27lzt1xnwsQPcT3u7rvk2OxZNlexODFjgcKlIQbgs2+0tpGSJKJVMPEHQWzzVjm/epUt61WXrj1kf/jwEVKezyjYH8Fw/4HtXkeMsFnDrtjjbCZPnirlN9vjP44et1l/hg6xpdWtUqkKggcNjidPbXEdQwYPEYsJXbU8ExMNpEufUVzaPJ/7mn0+j1u378r7LO/53Xsyjvl+M97l5q07Ms6ev3wtgixxoRugkiKgCCgCvwICKoD8Ck9J++inEaALViK7AOI5A5FvAkNNeOxYcVGiZBnJftSta3fMnjMP3Xr0xtlz58S1i8HkEcJFQKQIkcCUutTsOxJjNipXripMHX35R44aKx+ce/r8Jf6MElWOuzZvCTJb5y5clP1ChQo7NiHbzGBEBmvosOEfneOB/QcOIXmyVJbwsm2bO5q3aCXCBwOqm7m2wOAhQxH97xjiLma0/az7+MkT1K3XAKXLlJNvlbDM/gMfvgvBMjVr1UWDho2ta5ctVwGtHdxdaKVKkyad9LFTl25yn+wv7/HgIeesXmyEQfF/BA6KUCFCWtp4HmeK2mHDR0k7+fPmx8BBQ7BshY2Z7tdvgGjTGa+wefMWLF22HPxmBYkZuqhtpzvV2AmTRWDg9fndFLqiNW3qKm2WKV1WPgRJVyOer13HBa7NW8l2K7c2cn0KDrTwpE6VFjvswfsnTp6SgGvGxjBwmwKfIZNmOU3qtJbViOfoBsZrDB5is3yZ8rQ0zVuwUIRWnmfCAzL+/A6Ii0sDqVOtWk3Mmj0PoYKFFAsD75dUtmwFOV+rZh3MX7hEXMH8+/tdLFzMwlWxYiU5zxgafhhz0OBhqFGrNrp164nWrduK4MnAdNLNW7dFqGIfmjWzBc1v3LRF6lcoXxH37j8UAZjnUqVMg7ZtOyBRoiTo3ccWNM/6OXLkkvLNm7f8yNJFjKNHj4nbduFXLupL/+gex/vgc1ZSBBQBReBXQEAFkF/hKWkf/TQCFECMBYTBwz+Sbty6jQ4dOknGpCZNmmHWbFtwL/vAbzm4urbEiBGjMHjwUHTu0s3SRJs+jhg5WoLOmeq1b9/+4g/PjEokZg7q2bMXGjVqKvEIZBh79uyNZctWmOrW77nz58VdyGR1sk44bNSoWRt16tqCsKnxpwsLs2bRfYaxA26t26KXnfl0qGZtkrGsXq0GmG3KM5HRd/yA4d37D8T6ZMqRaadbELGYMXMWZsyYjZEjR4GpiE18gSnL39179mLU6DHo1q2HxNw4nuM2rUyNGjWWzFLmnMl0xTgTMpv8a9y4mRUQTcHEza0N5sydj8NHj6P/gIGYv+AfEU4mTZqCUaNGSzyLsXSwb4yNYDAzP8hHSxAzm5Eo0BEvE1DNY3Qz4gclB3n6yN6Chf+gV6/eGDhgMLa7f0hTu2nzFtSr54KsWbM7xcF4vHmD0WPHg8Irx0W3bt0xaNAQUIijRWLM6LGYMnUaxk+cLP3q3r0nVq1eIymGmeVr2LAR8kznzlsIM
vlDhw7D0aPHxYLGLFnDho8Azz189BiMV7Hdz0FJLDB9us1ywmMcvwMHDgHHKMcmv5dC4YjCENsxH9NkWQpy9V0aigAuDdrrM43x0GEjpLxn4Ttjxsygu9+PoMr276OoAPIj0NZrKAKKgE8goAKIT6CobSgCvogAGaF/SwDxxdvy8aavXruGvPkKYeKkT38rxMcv+oMa5Dc4unbvieHDR2L37j3yUUW6W2XKnE0C3n9QN7x9mdVrN2DEqDHf5Abo7Yv9JBVevnyJDh07o0nTH+cOxTTRagH5SQaAdkMRUAS+CgEVQL4KJi2kCPx7CDgJIN/xheZ/7w5+3JWZ8WvYiNFwd/hg3I+7uu9dae++/UiQMIlYoPg9ElqRGKNB9ymmaf4ZaeE/SzDDwWL2M/bRN/rEwHXXFq1+qNBVpYpdAGnSzDduSdtUBBQBRcDHEVABxMch1QYVAZ9FwEkA+cEuWD57Jz+utfsPHvy4i/2AK9E9iG5J2bLlAF3NChQsjA4du/xQJte7t3nPIUOYd+v+yuWvXbsucU4/8h6YmEAsICqA/EjY9VqKgCLwHQioAPId4GlVReBHIKACyI9A+de4BgPKHz1+jIcPH0lMxK/Ra+2lbyNgxYCoAOLbUGv7ioAi4EMIqADiQ0BqM4qAbyGgAohvIavtKgL/DQQsFyzNgvXfeKB6F4qAH0BABRA/8JD1Fn9tBP7NLFi/NnLae0XAbyBgCSBqAfEbD1zvUhH4DyCgAsh/4CHqLfy3EVALyH/7+erdKQLfi4AKIN+LoNZXBBSBH42ACiA/GnG9niLgTQTUAuJNwLS4IuDHELAEEHXB8mNPXm9XEfh1EVAB5Nd9dtpzP4KAkwVE0/D6kaeut6kIfD0CKoB8PVZaUhFQBH4OBFQA+Tmeg/ZCEfgkAiqAfBIaPaEIKAIAVADRYaAIKAK/GgIqgPxqT0z76+cQUBcsP/fI9YYVAW8hoGl4vQWXFlYEFIGfAAEVQH6Ch6BdUAQ+h4AKIJ9DR88pAoqAfohQx4AioAj8agioAPKrPTHtr59DQASQhInlS8fTp8/wc/evN6wIKAKfR8AIIM2auX6+oJ5VBBQBReAnQUAFkJ/kQWg3FIFPIUABJGG8BDYBZMasTxXT44qAIuBHEbBcsDQLlh8dAXrbisCvh4AKILwhfPUAACAASURBVL/eM9Me+zEErly9hnBhwokAMnr0WD9293q7ioAi8CUEqlWpJvNDUxVAvgSVnlcEFIGfBAEVQH6SB6HdUAQ+hcD9+w/QvkNnVKteC1u3uX+qmB5XBBQBP4pAieIlRQCpU7uuH0VAb1sRUAR+NQRUAPnVnpj2VxFQBBQBRUARcECgUeOmCBkqNNq0ae9wVDcVAUVAEfh5EVAB5Od9NtozRUARUAQUAUXgiwg8ePAQFy9dwb37979YVgsoAoqAIvAzIKACyM/wFLQPioAioAgoAoqAIqAIKAKKgB9BQAUQP/Kg9TYVAUVAEVAEFAFFQBFQBBSBnwEBFUB+hqegfVAEFAFFQBFQBBQBRUARUAT8CAIqgPiRB623qQgoAoqAIqAIKAKKgCKgCPwMCKgA8jM8Be2DIqAIKAKKgCKgCCgCioAi4EcQ+M8KIC9fv8at27f9yGP8+DbfvX+P6zdufHzCG0cePHwEZlf5Gej6jZt4//79V3Xl2vXvu++vuogWUgS8iYDHm7e4eeuWN2t9XPze/Yd4/OTJxycAnDt/AXv37ffy3L918MbNW3j79u13Xf7mrdt44nDPt27fgccbj+9q07uVfeI+vvaaP/JaX9unH1WO8/fr169/1OV8/TovX73G7Tt3fP065gK7d+/F6TNnzK7+KgI/LQLeFkD27T+AoUOHYdmy5Zg0eQpGjxmLUaPGYMDAQTh67PhPcaNcgDNnyY5Ro7z/1eiNmzajZ8/emDhpio/cy6lTZzB23ASsWrUas2bNxoiRozByxChcvnLVR9r3qpGr164jQ4ZM6N69p9PpV69eOe1/bmfuvPlIly4Ttm3/dz98R8alYaMmKFiw8BcFkNevPVCtag2UKVPuc7fm4+f27T/o5dh/6Q28v7ZTd+/dx9p1G3Dv3r2vrfLJchMmTMKgQUPQr98APH5sY2jZ/ps3b7ys49P38+rVa/B6P5LuP3jgxMj+qGtTMEiTJj2GDBnmdEnvvJOsOGfuPKRIngrHj59wasfsLF2+Av0HDjG7/+rvu3fv0apVaxQoUOibBZA3b9+iU+cuyJevIG7ctCmU2rXvKPseHj9GAHn46BFq1qyNqtVqwqffAc8PiAqf6tVronqNOiDj+q109+59TJ4yDUuWLMPOnbsxa9YcjBo9BuPHT8Su3Xu/qtm7d+9h4T+LsHnzFjx79vyr6nxPoafPnqFBg0YoWbocqPzyLeKYunPnrm8179Tuzl27kSVLdowbP9HpuE/u3Lt33+n59O7bH/MX/OOTl9C2FAFfQcDbAsjGTVuQN19BJE2aHLVq10Wbtu3h6toCtWvXhVubDqCm6t+mVavXIk6c+ML4e6cvO3fvRbcevVG/QSOkTp0OxYqVxNPvnHgPHjqC6jVqI2zosChVsgy4eBYqXAxp0mbA6jXrvNO9ry576vRZRIwYCZ07drbqXL1+A48eP7b2v7QxaPBQxI+fCDt27v5SUV8/T+Ejc6bMXl6Hi8nFy1fk3IuXr5A5YxYUyF/Qy7KfO7hh0xacOv1tWqO0adMjb94CVvMPHz3Ghk1bvygwWRW8sTFm7Hj54vGateu9Ucu5KIXK3HnyoWChomjSpBlcXBqiV6++aN++I5YtX+Uls3jl2g28ePHCuaHv3Lt24xa2u+/6zla8V33Hrr24cPGS9yr5QOn9Bw4hfPgIGDhgoNXalavXvS0MDR48FJEjRsGpU6etdn7mjTKlyyJLpix49+7dN3ezVq06yJUrj8WUVqxYGWlSp8WbN99nVfnaDlEoKFCwMCpWrIJnPvwOeO7DvfsP5F4rVqqCV6++XcCihahd+04IFyYsggQMhBIlS6NT565Inz4jggQKgmnTZnq+9Ef7N27dQfJkKRAxQiQcP+n7441CB9fcUqXL4tHjpx/1x6cOPH76DBs3b/Op5j7bztJlK5AgQSJM8iGFplcX27zN/bu9HbxqV48pAr6NgLcFEHbo0uWrqFXHBfcePHDq35mz53Dt2vVPMl6OZng603zOoYbaM0NfWryo+fZM3Xv2wfARo50OO17f6YR9Z9v2HXj23KbpefToMWLFiivWHVOWzO6X+mLKOv4+f/5CJvJNm7dahwsVLIxof/2N1x4fa5t5Hc/kuNh6vg/P+6xL60eL5i2lGT4X1+atQI3Wp8iz1vvZsxdwadgEO3Z+nkH0rBH0rJX07DZF1zA+W0ccX39Bk3n02DGULFkGb71gYqZNn4leffpbt7V27TqQQSHxWl6RZ3zZbvWadTB79tyPinuFrek77433QuaSlkES9xs3cUW16rVk3/H+PV/3o4t94QCf0cWLl0VwffDw4SctFZ9rZsvW7QgUIBDKl6/oVIyWv5w582LGrDlOx7lDk377jp1B
i8WX6JUX7yLreH7Gly5dRstWbXHg0JEvNSmYfqqQ43vhuczbt85M77z5C9C+Q2dQUPUOcRQ5zkemruPz9DxOPO+zDgW8zp27SvWDBw+jddv2eO6JoeV1PjFsrXemUqUq2LJli+nGV/2aMfs1hT1j6jAVO1X3/EydTtp3Dh48hFKlyuLFy5fWab6Xjvh8qZ1r16+jYRNXHD9xUtqg9adE8VKW1c407PkeHd89U+ZTv16tIY5lt7vvQJNmLUDlgmf61PN69Qk3ok8dN+1u3eYO1xZuePr0+5nwbJmzisJi/YbN0vzzl68QL1YcObZ563ZzSevX84zp5tYGMaLHxIkvCLyeMXDE3vFZWxeyb3h4Wv+o8KrfsMlHFopPjRGv1k/P13Dcp+silaar1nxZgePdtnkdDy+sx1269QQVR45EnB3nD8dzXI8+dc7z8cmTp6J3n/548/bdJ/kuts132vMzcrzmp9ZKxzK6rQj4NALfJIDs3LVHBBBHs+zTp8+kb1z0aapOmjQZli1bIccaNGyEXLny4ey5C7K//8BBVK5SXbQxffr0k2MvXrxE/wGDQEbo6LETyJY9p2W2XL1mPdKnd3Yp2rNnHyZMnAz3HTtlgWvQsAk8PD4w7q1at8OoMR9csE6eOo0yZSsgXboM2LjRNhnLhT/zr3uPXujYqYuUoHameMkyGDlqzGdqeH3q4sVLSJkyDXbv2WcVKFe2PCJFjOwkgFBQadCoKVKkTO1kvTl95izKV6yCmbPnisaUbis1atZBvXr1pb0TJ0+JBr5v/w/a1ZYt3dC9Ww85X6VKNQT/IyiyZ8+NJUuXW30wG+cvXESNWnUwafJUXLx0WQ7fuXsPzVu2BoWmHj17I3ee/Nh/8JCpgidPn6F2HRfkyZMfjRo3sxZm9jFNmnSYPNnmwsbnmy59RgwZOlzqUnNP9zhjWVm3fgPy5C2AgoWKgDgZovtB3XoNUL9BY0ybPgN16rrAM0N5/uIlxI0dD5EjRESVqjVB5nfduvWgxvTqtRuoU7c+SpcqC7pQGJo9Zx4yZc6K4iVKW1rk/gMHI2yoMEidKi3GjHM2lR8/cQoVKlRGrtx5MHTYCGmGbeTOnQ+Hjx6X8VqnTl1s2mxjCAcMGoLgQYIiQdx4qFi5mmVVcd+5G4WLFEee3Hnh7r7TdEcWpvz5CmD5ilVYu34j8uTJh8GDh8i4oBUqS5ZsWLBwkZS/c+ce2rfrgC5duoJMDJmh4sVLolTJUtjuvhPduvcSdxdue0W379xFkkRJ8Lu/3+T5eS7z4sUrrFq9zsm3nsx60aLFETpESBQoWATuO3bh1q1boCb+8JFjKFiwCKZMmSpNzZv/j4yTVm5trKbJFNKdsWSpssiWLQf27rW9A9TwBgsSDLly5bVcyrp264FLl6/gn0VLZW6gSyfnFY71nDnzOLkDcu7pP2AIMmbKgvETJ1vXo5AxceIkmWvKlauIqlWqyTlaPSJFiIxIESKhVu16oI+9I5HJ7dO3P86du4AJEydZ7z1d0ziOUqZKg5kzZ1tVDh46jAoVq2LRoqV49uyZtFexUlW0bNFKyuzesxd58ubH8OEjrTqNGzfFsGG294BMOTHl816/cZOUYZxTlarVxVVr8ZKlVj1uDB85BoULF8OChYvRoH5D7Nr1sWWSmAwfOQoz7ULkmTNn0b17D3B+GDxkODJmzAJ39x1O7XKHgi3fS7rs7NixS7TQrdu0l3L8unaOHHnQq1cfqx6ZS84VfIeyZ8+JrVtt2uTDh4+A7xK1771698OixUtEMC9VqozUvf/gIcpXqITixUvhwkXbPHPsxCl5vvnyF8TJU84WSNfmLcX9cuasuTLHcGyQtm/bjpIlS8v2shUrkTx5Sri4NJB3ggfbtusoc5NnYYHjn3MZXSZZvl072z1yLipeohQqV6mGZ88/WPmoXClbriJ69u6LiZOmgusBGTm6lZUtW8Gy9rdv3+mjeab/gMHIkDEz6tatbylC+C5wPi1arKQ85xcOLppc/6j579tvAMZPmAy3Nu3w/MUHoU1u1v7v3oOHqFK1BlKkSIUOHTqBdb0iMrlZM2cVhQO15IYSJ0goAsiuvR9ihRifVK1aTSRLnhKzZn9QQjR3bYlECRLj1Jmzprr88l4WLVoic+Ldew/QtFlzlC9fyVrP3Fq3RYkSpeXeHz1+glKly6FVqzbgNonzCp9Tjhy5MXXaDKvtFStWoXbdBk4xE3PnLZR5hZg4rgHzFvwj9Wk1MWODrs3Enm7Ybdt1gOOayIu0btMOgQL8Tyxos2bPFSG2d+++4kI7Zsx4tLWPiREjR8uckz9/IaxYuUr6R1enjh274NjxE/Ic06bNAPbXEOerHDnzYPqMWfJVes4LpFat2zqt52fPnUfJUmWQI2dueY9Mff5euXYdteu6SDvLlq+0TnH+qufSUNy5mjVrLsc5BwUPGgJRo/wpSi8+QwpY3Xr0kvXAVN68ZavwB1yr12+wzTU8x3Vs9JhxoNWtdp16qFa95lcpmUy7+qsIfC8C3ySA7DtwSDTGc+bMEwFg06bNoIbFBEZu2LgJEcJHRLVqNaR/HTp2xtDhI2TSIbObr0ARWSTXrVsnkyjdtg4dPopo0aLjr6jRMHb8JIkxSZUyDZo0ccWNW7cxevQYJEmSHFeuXhNJnwuAP3/+ZELhYszFuXoNm9aZF23WvBVGjbYJIKwzYdJUnDt3HqtWr0GBQkW/GKh59eo1jBw9ThgStkeGj8IRBRnvEpmd5MlSomOnzjhw8JAIWFyEpkydZjVF5mHS1BnC1B06dFgWKTLnJOLBibVX777iwkUNydSp05AgfiIRSKhF5cRatnwlPHli05q1aN4K7draFtj58xfYGdl/cPWqc+wJGbxlK9fIBM7+ZM+RR/DlNerUcUGBAoXlGdet6yILlOkwBT7G/+zcsVNc1uLGTSCMx42bN1GkSFHUsD+L/fsPIGSIUMIcs+6tW7exeMky3L//QASaaTNm49Lly8KIV6hU1TSPDp26YvacuWKByZQxM4oWKWqdMxv0GXZr1RqJEiYWwYN9JmMUIVwE9OjVF/S/zZQxCxYstPnDjhkzFgkTJsH69evRrVsP5MtXQJgv+kTHiRUXPXv0Apk2R6ILXrduPWWsNbdblM6dO4fBQ4aBDNXpM+fwvwABUbJEKanGyf5//gOgfLkKwuwR34kTJyNO3PhyLzWr10CoEKGsBXPRkmXSdtQoUbFw8TJxZQz42+8oUaIUlixbKYJM7JixRSPG+6tYoZKNebD7cVMg4XuQO1ce0I0sRNBgKFa0hOMtWNtccFm2SJFin7QC0ILlqEmm9nLa1OmIGyceZsychfMXLqFokeII6D+gCEzU6g8ZPBRbt+8QofLihYto3KQZOnayafpnz5mPgoWLg5pwWuTIlJCB3bN3nyzw7dp1EOa9TZv20jcukmQQu3XrjiJFi6OlWxts2boNdWrXtcYfMR09bqIs1tSGk0mcv2ChaAFTJE8p7fD85s1b5fnT5YzCfdeuPcRtlM/IM9NGATlM6HDo3rM3Ro+ZIMoNCnzTZsw
RIXLfvv0oVry0MOh8DuMnTpG5iG4tPXv1kedD4SJlitR47fEatKA2adpcLGFGu96wYWMYhcucOXMRP258DBs2QpgtCv6co8icUKlSrERpcB4gjZswGX36DRRGv1GjJogXJx4u25lx6+ECWLdhEyJGjILeffrK4U6dugoW5cpVEOtB5UpVQKbeM1FYL1GyDKL++Rc6dOyCLVu2In/+gqhatYbMvUuXLEWyZCkwf/5CqTp5ynQUKFRMnmlzVzKeFUW7SvfS8OEjYsSosSLwkNmhYF7TPheQiSLDs3LlamF0aF2dPnMObt++AwqOpcuUB+cPUucu3eVd2L17DzjX5M6d12L4t27ZhmLFSsg4pcU9Xtz4TvFuFN5oGfVsySFTyfE/YuQYsWzTGkWBevXaDTIvUgClIsZYuyiocp7gnE0BxaxnVHJkyZrDYvK2bN2K+PESggIYqWevvujeoze4LpYuXRYVKlaR4ytWrsayFavkWr169xFB2GilKWTTak3LWKFCRUVw9myNYCNHjtqEfvr4jx49GhHCRwLdor2it+/eI0f2HAgUICAGDBgkCQoWL12ObFmzo0uXblYVvgt0c63v0kD6HC1aDKxevUbO0706YYJEHwkgZFq55tKdi4qordu2y7zSpElTqcf9WLHiyBjm+z506HBZdykEMv6ieYvW2Lxlm1wvU+ZslnBPhp4CP3kJBqIvX7kGx46fFOVUo8ZN0byFm7TP+DUqJQ8cOID69RuidJlyMh7Gjp+I3/z9huYtWmHu/IUYPnK0k1Xi0JGjyF+gkMwntMIOHTZSxsSAAYNlzBCXaTNmShzKtm3b0aNHLxQqXFQsdpMmTZWyGTNkwu69+0URRHdqjpeTp06JUHj5yhVRztSr39gKpG/p1hbsF+nK1auoVLkaOEZprc+SObsINTzHuayZaytZN4h/ugyZZZ7kudJlKqBb9544fPgwOMctXbpc5hjOn8SBSlWOlx49++CPIMGwwa5k3b59h8yPGzZswuLFS5EseSosX74Srzw8kD1HLgQN8geaNmsBzolUhrRs1Vr6qf8UgR+BwDcJICdPn5HBmjevTfudMVNWdO3ey6m/l69cE8aQL4QjnTh5GitW2iY3BmbTMsBYCJouKdlnzZrDCqiiL3+5shWkOrXb+QsWwcJFi2Wf0juDu8xCvHbteoQNE94KzqQJmxo9EhcDSvicUObNWwB//n7DaLtwIgW8+MfJi8yhTxAzYGTJnFW0dnnzFRALwIuXzu4sM2bOlgV42dJlWLNmrV2Aqy6XpxZ85sxZsm3cqChIUIi5bc/0Rdca1xat8dAevMeFw2hjiREnaK+IDGfz5m6y0PL8JXtwPLWHGTNmloQDPL5hw0bxTeY2NUDUaBkNz+MnTxE/fkKMtZuZ6fdKBsoQnyFdzkgMkDdEC0ebtu2k7a5dusH/b/6xZ98BsXxVrmzTXLNsixatkCN7TlPN6XfGjJmioTUHaflJnCiJlW2ITFFvu5Vt5oxZkgyAZcng0gXu5i1bdpIsWXII42Xa8fwbLWo01K1TTw7T/cnQ/YePkDZNeos5IXMcLEhQsB+GVq5cJZrwp89eoEzpMrKI7bFnKjp99hz+jPwnChey4WOEhKaNm0j1qpWrIFzosHj4yKY5HNB/oFgw6BZFWr9ho7Q3cMAg2U+WOKloPWXH0z8u2mTAaLHyirnxVNzaPXnyJKjtu263GlCrnyhhEnABN9SuQyf07dNPhKx69VzkOpwnqKXjO09q3IiMZD5Li1m4aAnLusMxRSGHAg2JDHnkyH/K+8r9efPmiXWH2wwkJ1M6adJkYcrTpk4r13vxygNdOndB6lRp8Nge70QhgEwGacnSZWjf0WbRlAMO/ygQlylTHtTcGiLDWqlSVRkzZJpDhwqD5q4tQE1jlszZsGTJEilq3sljx44Lw2ncjWhNat22g6XJpgDStauN8btw4SLSpctoJaNo0tRV6m7cuFE0y0GDBBPmh1rV/AUKg9pXEpUSZHaPn/g4CJ3vY4dOXUDLGYkMP8fmjBm2uWPBP4tQrXrtj4QvCplTpkyXxBXm/WzVohUSJ0oq7fBfzlx54NbKNqbXrd9oMaQUiGgFkwxfN2+B85uju8nGjZtQqFAR3L5zT1wVHZMOzJk7H1RsbNi4Ef/8s1jm5ZGjRoMuozlz57OE9KVLlyFnrry4cMFmIWVQNIVoY4F337FbXOvoskRml9ZZr+je/fsy/ubPn2+dHjhoCKhY2LZ9OwYNGizjaMu27WKVcpzD+vbtL26gpmKDhk2dGP9y5SviypUrwjwygJz9IJ08eVqYYW6PHjMebq3bgO5cffv1l2vR+sKkErSuGyLTSwWbZ3cbni9dugyCBg5iiooV1DE7mHWClq1375EnV16EChYMOXLkQuyYcRAyeEjs3X/QsZho7Dkv9OjRU+6J28Z9lwoezu2eLSC0RNDCxVhLMsOkkSNHo0D+QlbbXHO225OYML6ucbOWoNKIFrNixW0KGxZmgDatKSRaG2rVdhGmntZHWkhHjBiJzVu2oHQp29zJ95/jioIgiUH7VELRnYtY8H2ZMOGDJdtRocLy9NAwFnkqxLjOMRmHoalTp+OQXZgcMGAg/or6tyiaGEOZNEkyCZRn2Tt37yNL1pzyLlIQ5hzJoH3S+fMX8PLlSxkHzZq3xBQ7L9KipZtY9s21mBwgSpSocj1avzgPGKI1nZZU9p9WYdLRo0eRMmVqDBsxSvZnzpqNosU+YEn8UqdJL0IzC5QqXR69e9u8TLjPtTR+vATSL85pceMlECGQ56hIoeJLSRH4UQh8kwCya/ceWWi44JLoIjRn7sKPFrb27WxaTc+m8MuXL2P4iFFiemcwoTELduna3enlpKm+hV3jQctJ3nyF8I99oqJEX7psBcvszoDOVKnSWi9TU9cWlnmzSOGiMnm1adsBLvUbykvIPn+K6ApBLaxPESeNIkWKC+N15+5dRIkc1ck1g9dp2LARkidLLqZsTqgNGzS0XNhmz54ti1XKFKlw1579iNp9CoBknEiLFi9FS7d21qLMRZWLB4lWCJY1jJIcdPi3/8ABhAsbHgkTJgY1jiROrkw2YDRhixcttgKtqdGnAOLol0uXBgo9pJs3b6FI0ZIgo0KiFpxuViT6cV+/fkPcopIlTQ66otWv3wjVqlXH0MFDQVcBMmZ9+36YNClAGAFGGnH4R60zEwaYjDG8b8dFkIwqGW5De/fuF8GyevVaotmju8ijR08ks9Aiu3Bryjr+9us3UIJ/2X+6Zhjh6/qNW+ICwiw5pBs3biJ40OAgE+5IJ06cQo9efcB7jhguvGgyeZ7uiOHChEN1u7Vw2HCbRm7kSNsCU6RQEVD4Ma4LdL/y7+930AWRxKxIZBim2d0Y4seJKxrOV558q1nWJnz7Axn2x97wL3d3d0eypCksbRwZ+axZsoMMHenh48fg+0ZXPy6wrs1biMuOEVBWrVyNgYOHIk+evJKhjIwVBTUymYZZ5eKeLm0GzJ1rYw7pRhYrZhzs2mWLQSITQr9/krgc5smPsmXLo269+oL1WL
u7Zbt2HWVMSUFA5hgmliDRNYmKCa9iQLjI0+VwqYPbA8dmurTprHeyccPGIgCwLVrTiHu2LNktX30qDvieGx9/Kkko8Bg/dzLrJjPdgQMHZcyRwSAVK1ocmTJkQpNmzUWYbde2nTCD1MJTi26IDCsZphNeCCAs06//IPTpO0CKk9mjNp1jjDR+wiRQ6PPsfsZzdM2km5yx1rg2cxX3Kp4TTXqO3Fb8Co/xmQ4eOlwUUaVLlxNmmQJAvfqNQC2zIbq0RYkYGX9Hi2G54JpzU6fPBBl3Ml20dLdq6SbKjX79+qNY8ZKmmFieixQtAc7LJAogtMIZay+Z4cJFSsi7xxSuc+YtsOp63qC1Zo1D8g9mDKpQsbIICU2buoqAyDbq1XWR527q0y2vUmWbhZYWm8pVa4Axg6RHDx+BbmZkmOn+x0xOnoljgIowjlm6KDZzbYFuXbtL8DBdRh0tU+PGjReriaO7kWmvSKHCiBr5T7z0pMDi/N+mTTuxbHTv3ssWiwlIAoAwIUNj245dwuhzzBYtUsw0J7/UoocKHgKuri2lb66uzcWiwZN8JgkogHiRoIPWrZy581tuu3TTM65xfDa0ou2xj29amlybu0mMJT0JOtpjoZw6YlcSUAChAPfgwQNkz5odlStXESt7rZq1xYL4xD530YrINZ1u1VRQvX1nc8Fu3KyFCHWe2+Y+rT1ly1cEeQ1DdP9dseKDuxOP799/0L5O1ETc2HHFM4HHKciNHGlzw6areKJESUEBmeS+cxdChQqDtGnSWXMMnyHjL41bJBUCZPQNmaQD02fMFqs93cq9Is6XtJa2aNlahCCWJ9ElnO7RXMdI+/YdQNZsOXH06HGJmYwTOy4WL/6gSCXfkCF9JinLuYcuYxTCSK1bt0Ule/ykHNB/ioAvI/BNAgjdkAoVKS6TnOf+kbkkHT5yVNxuuOA2buoq/vk8Th9w+vuOHz9BytHthhMSiT7gNOcaon87g+BIXDS5yBh/abrx0PxpMvPQhStlqrQwQlG7Dp2tVHTlKlTGsWPOGkMGmHkOvDbXZayGWYh5zLMGxZT72l8KXFwwTYA2tTfxEyQW7Zhpg4HL9RvYtN7mGAP/tu/YCWbtIOY1atSU+AVqW6hBpVBhAvzIQLRt38nyYaYmuZM9CxbdR8gYmQA5o53jdYxwSLcSCiwxYsQSJoDnSpUpj5WrVkt3Vq5YKbEA3GGWsRQp04BxIoYKFykGahMNtW7TVtzFLl+9LpMxzccM/jNEC07GTNmcmBVzrk7deqC53RDdIAoW+KBZM8f5y5TQtIQZ6tylm1NZMsQMPCbRSkY3Dz4PkxaVzAQ1pgkTJMa6tZ/OSkZfdS7exNEIO2yTC0ia1OkspoPjNPD/AqGZgyaLTCO1+fRlphUiyP8CyfVZny4VdBmjewyJ2kBeh3EgpBJFiyNWjJjiXsT9UICDSwAAIABJREFUXj17I8Bv/mWB4f6y5Suk/PTptqw28WLHRd48+aSu53/sa+ZMWaT8mXPnPZ+WRZ8aRMfxwULUYlLIY3Y1Et2dsmbNLtpM7tOtgoGj/HUkuj2RCaf2mu4F1CoyxSaJrm2MBaLFh0TBmtnEzH3wHaSm38Q6MNjSuLJwPFew4yWV7YkxyHCTmSpjvwbPUQAdYE9LO3bceBEyTB3H+ySj0KJVG3FjMucZ+0N3Gkci00Fm6vHT53I/dInLTeH+3j0cOHjQekdYh8+7S3db3AD3aRXrb8+CxXmQaXmp7SfRF57xCZ6J8U9MQWuITC+ZmNNeMIQs02/AIFHucJtCHa0PdHcj0S2JbpqOcQ5yAsDqNWtRrnwlKyi+Tes2Et9lztM1bsgQm2WFluVcufPh1OnTomk1z5QJPOq6NBQB0dSjsEgXMGqvU6VK42QBZfAs++SZaGFLnyGz9Z7R+koBxDBZxI7WKjP3sT7jjPju03LGtedTRAGELmYkCgUtW9Ei4RwzxeNNmzYHBStDtO4y7oLErFjNW7bB8ZOnZJ9rUMkSpUUgp8KGsUCeiW02c2350bW4ztAyxvhEQ8NHjLSuZY6Z32JFiiFwwEB48uxDrArPzVuwUDwIOMdyvjPfQsqZPScC/h4AR+zKQsabcX4h9oZoMaP12auMdBSWmAnLxFiYOvylJSdXnvyWhZPpvE0CEGJClx6zHvN9JmZcg7iGU7B2JJM9k14KDPYnXbp0Cdmz57LGgSnP93bM2AlIny6juGat37gZmbNkM6fRsHEzbNnidZYrWifpUjVg4GCrPF3JOMYM0RpDC82Jkydx9+5dmYeMay7nPbqTkWjdSpEitTWeeIxCOAVKujJftXsTUAlBaxGJihta7wwdoktVyjS4f/++KF9at25nTskvlXy0pnAu4txGolWXygTSoEFDxeJj+BX5BEHWHNb6EjlSFLRp88GqO2/+QqRKnU7q0oqeI1deXL12TfY7deoCKuaUFIEfhYC3BRD6ZQoDmiKVmEHp68w/LsDU6tCfnswGswCRSSbRDaZosRKiserRozf+/PMvCU5jKklqxqgdoFaDPrbUmlAwILNOVwoTRMpFhYGg02fYFixaQOgisGzZSlkQGKzMWBNavtlWzdp1rcBJ+kPWqFVXJjS6ZlB7TNcXRwaE/aTmnnEjvAcyM3fu3JG0pCYwm8yqiXP52gdExmbr1u1InSq1aCJN3n8uGFxkqY0jo0Y3qXwFCompln3cu+8AGHxPF5YixUriin0yq1GrnvSP/aB5+siRo6JxZWBk0KDBLe1fjeo1LezOnDmDbNlzgYwb40V4PUPUCLVu21FceXiMWmAuLIxvIN4T7L6r1OzR/My+0beagZ+VKlUW1xAypgyepxuNIT6nuPESYZN9IVi7dq0sfMbaxXLU3tCNjMIIXccWL7UlLdi1aw/+FzAQli9fIcxShfK2uAcz6Zpr8JcMPWNhGIN09PgpVK1aHSmSpxArBTN7UONYq1ZdYVYYtE9cSFwsw4QOK8+EzhL0KSeDRU2Tmcwdr0OtOa11HJPPHII+GaQbOWJkFC5URIrTykQLRYXyFcTthIJu9Wo15d5nzV2AalWryzbHBInMDxmC3DlziUWJQjj3O7bvIBqsjOnTI5D/gBbz1axJMzlvhPZRdr/2Pn36St/DhQqDBPHiWxYTx3vgNjPY0QSfKkVq0CrDAH1aMsgo0uRvxrpjvQMHDiFBgsQSF3Hy9FlRFCRKmMhJK0oXR2pxuWBTE7x9xy6cu3AJmTNlteKBGBBNQY/MBplHKhDo2sJAfyYvSJwwseU+xDkmSOA/sNi+cFPwoisixy/fTQZAk+l89OiR/M2YOQd37j1Andp1bCla376VOaRggcIizNLqsnr1WpkHqIyge9hLh2xYtFwxVSnbNK4vBw4eRvYcuUHLGMc9k29s2LhFBJCSpcrhod0Vr1rNOjh58hSuXb0mwgLnPbbdoH4jhA0TTvz+OS9VoLbf7lrHuY/uKwz45Ty0dv0GYeoZN8BrrVu/SdJLU/kRM2YcNGncVOZPzk8cHwwY9py5hti4tmglVigqFJjUgSlA6epKYrBt5szZPkpDLDEtE
yaJQGQ+XlqhXHmx1pEBItPGOADjgsXgZz5vEl0naTnl3EgM+D7TpcjQvHnzxSLHfVoYwoeLgH3794tShzElOXLmlXmDjBt9/cmocvzky19ILKoUovi9KcZZ0ZLJeyazyHfRxIuwbVoIgwUNjhYtPo5xMX3hnJY+XQbMnPlB6KFihPEvjPljH6gsYDA83wO6sowbP0EUFNQOB/AfQBhfj7dvxZLGOA7Ow9Omz5Jn0rRJM5k76HLDhCoUVNiOsUAx/oeWY3MtvvschxwvVJ5NnDRZnjEtyYH/FxiDh46w4lHMPTDonnEX5cuVl28Bbdq8TdYKc97xlwoiWkQ5XvgtKhKtSHz/eWzQwMEy5zDBSrw48ZEoQSLRhnMOZNwkYxuYYIMxfF5lq6NrHzNFMmaJRAGb8RHkEfh+07JA1zpaBPkNEr7PVLDwA8HZsuUUly26s3Kd5btGYpwZlTy37K6xDG5nID/fNeK5eMlymT/oHWEYcgpwTC5Bd2QqQTg30I3KK+I90QWLGFOZsGjJcrGemKQprEM3JCbeIFHx8UfgoLIm0g2SyqC2dkUa1yl6DlDoZN+olJW57d17iTWkqxnnAQZ389tipB07diJqlL9EScB779tvoLhN8hzX56RJU0qsBnmqy5ev4vmLV2K5Dho0hAiVz58/R6JEScQaSYyJb4YMmeVbL1Qs0EWTz4TffyFNmzYdEcJFlDmAfaTAPdLufs57Y+ymcTdr2KCRvKtGUcp14WsyH8qF9J8i8A0IeFsAoX8/M88wuI3BnzVr1ZWMVrRq8LsgZNToFkGttHFFYKBm4EBBwEAvxgtwwqC2k9ppBv7xZad2idoaTkxMa0rNf4J4CUGNDf1PqbGgnyfdgjg50hWGQk6/AYPFB5nChyFmMqKWMFmylKC7GIkWAjI8jDGhmdNRe2bqtWvXSSYO3hMDxfhth6nTZoJCAxeaQoWLW5mQTJ0v/e7YvUcWuAzpM8jiP9fBPYCBctGjx7I09BRC2Edqmx1jZ5g5ha4N7Pus2fPkkmSaGQzKyWfgoKEYPXa8ZHbhgseUsMxExW+1GG0grRPEbs9e2+Jv+k1mjgwmNS8MZvtn8VKJD2AwKs23ZP45qdLFKvrfMdDb7t5B/3K6gpChcWvT/qMPyjFovllzNxw5ckwuxX1qtmgCdqShQ0fIdflNCuPSwPO0evwdLbowHcwyw6BuClueiVr98hUqo0GDxti2Yzf43QEmM2BWH1rFiAMZJu4z+1TK5KlkQeAYyZ4th2TZYZsMeOe3WXbt2feRYGquSbdB+mc7EjM/MQg2tVjfbAIYg/Njx4qLWXPmCcO0ctUaMePTxYFBkVzoa1SvJdeh8M0gc2bH4rdDiHOs2HHQyq0tZs6ZLxpspsFkPAKz4tA97+/oMSTpAL/NQZeHJImTyKI5eNhIceWJGSOWFf/k2FezzS9p07rId5SxNoUKFZOxxAXZLD6mLH+pEKCrA5nLLdt2oGXL1hI/07atTSPHMnS5aNi4qQSWkwEg3iRaT3LlzCOMOV3peH9URpAYHM7EEgsXLZExzOfNAPb7Dx7Jwszn2LSJKy5evioadAZfj7ELxBxPZOb4jpKh4IJN5iN7tpyIHSuOzB8MjGfAJi1UzDJGomabLgpnz19wes5kRGPHjivBxoxDMkShgxYIui3QjcwQ74/WWn6fgtZYEoWFUWPGIVOmrJICnO50vB/GVTC1KjXJnPcYaE5ihr08+Qpa+2RIqVRg4oRx420aTpZbtWqNCNlkumiRqVChknxYzgQwS2N0x5u3QK5N5nzl6rWiTKF/eY+evUBXQVoNKEiQCXG04jG7HZ8Z32/OjSdOnRHc+DHTTVu2Y/Xa9UibOp0E2NN19cSJk8iZM7ckcWB/OHdQgOd7SAaUz4OxHlS+MJA9QoRIEifAFLBJEiVF0qQpJKsU+z1/4SLJkMiPFfbs1c9Kg06BmNpmZqbq3LWHBKRzLD17/lIy4jFg3li2zP1PmjwNIz/z8VnOgRxT1NIzzTiJ6wDvgVY6Mr7MqGbWBo4ZuvXSOu3Wup2MXZNum0G7/CAkkyrwWfF95hxCIuPGoHI+R65r5+yKOI4PxkrS7YXvEi1Jxrq+ZOkKUfhwjuS7TzcuarM9P2O2zzWEczPnXmY3Y0yEZ7p2/Sbq1KuP1ClTI1PGTEibJgM4D5GYjZJZKsOGDodxdk06LcIZ0mcUdybizfeZAhvniSh//mUpBsx1zp49L+5ICeMnBMf5nXv3Bde//opmpfLesGmzCPDMdsVkAxRajeKEyoDEiZOiaNESMsdT6UOBie9TksRJxXWRgjGVXUywkj9fQckiSAUWMaPLFJPUlC1XAUOHj0TlSlVlDHLs8VtQzF7FOcIrIq7Jk6eS58akJZyrae0yromMp8ySNbtkKus7YLC4jfLdY4a6lMlTokaN2mAmMsZhRI8eQ95HWoiYtYrJE5ixj7EYIthPnCJrA+M2jCWSSXp473TnMoKh6SfXmHjxEkgmOo4DCkzkPxibxgQX5K3o+sg5xihjOZ758d2Hj59KvGOCeAkk1sm4LNLNNW6c+ChSpASYhIBEIZtZ8chTMSnDzdt35TtlMaLHEgUzhRmXBo2dMneaPuqvIuBTCHhbAKGG8+zZc7JQ8KWjJs/8mU7RkkBzuXGPonaMzIEx9bEcmUm6EpGoETp2/Lho0+kSRDcQajYZ38BJkMw/M2dQU0NLAH3w16zbIFootn306DGnoNqbt26JRu7KlWti2jT9Yr+ZjYdaBK+I/eHf+fPnRZtH8zEXURIZMy681JR6h6i5uXDxoghe1BCaoHHTBrVfzM5lgoK5eFETQpwdifdJv3FjQTHn2KeLlz6kr+Vx+s4SfwoO1HoY4nU837thOPmsqKkzmWOYrYN16R9KjRhjV8hUmCBVtslJinjyeX8P8R5sWH9Io8z2iM2pU6c+0vR6vhbN+iY2hmOFY43aJWqS2Wfeg8GTCQHoP8/nysQGZgyyTS5YZsx6vobZNwyD2aflhtpXXoeaW0N0geAYNkTmy5wn80d8qfkmtsSRbRBvMtL076ZbGPtGVyVu8/lTG02LBctzn0wCF03mgOc+yzMYmAKGuV9zfa9+yQgTe74/xq/aq3LmGHF8/+69jCFibNw8zHkyEXyO/HPEkffNccL3lsSMMcb6SIGZGlP+cczyuTF4k1iwHp8X5wNej9Ylo6FnOyzDecS4R3AsUyDls+B7J9ade/dlfBIfEq0EnAc80/Xr16Uc0/Fy3DgS75txVJ4ZPbqbMlOVeYdMHc5lxmJpjvHL9bw/Pi8zDniO75P59hD3+Qz5njuW4XHOfYcPHbZcpEy7jr+cI9lXMh68X+JFpo7zAWOWiCHvgWPPMNmsTysD5zWW5x/vn33l+OL4JpYcH3zefBZS59Fjp2dKaxqZWD4T3gM1p3xXiBH7QC03id8UIf4meQiP8T1nBinO247EfnF8OmLBNokZx4VhGFmHQjLjAB1T2zq2xe0bN27IOsJ79fzMGOPAsWSSeJi6nNtoQfYqJS77y36bsWzq8JfzLMcM780zcb2k6yXf
dUdiv86cPftRLKVjGbPNOYHvlIlFM8fNL9+hc+fPy7rz9r0tqYN5B0wZjhOuvYY4HxIDM3Y5r3IN5tjj+HUkWosY48V5i2OK7zvfXVl37JnMWJ6xm3QX9EqQYn94PZYhcW1jFimOF/bLcYwSL44FR+K1WN8QhVbiR56BSkrPa50px1+OUSYx4DW4Tl6/ftNpzuI44PXMu8nnePrMGZmLec42Rz2Q++e68db+/Q9aSTifGuL4532yP3wPDfFdpcum5/WE59kex5XjO8LjnDfN3MVnd83uOkUri3lmdOXiM+G8apI0sC4zTTr2i+8W+TeWZZ/pFcHnzPeKz5q48H3mc1ZSBHwLAW8LIL7VEe+2yxSbDChVUgQUAUVAEfDbCJBRZLpyMoZKioAioAgoAj8/Ar+kAELNLc39TIvJ/OZKioAioAgoAn4PAQbP8wN4/GYT3erosqL0eQSo/d7i8GHCz5fWs4qAIqAI+A4Cv6QAwvgDfj2VqTn5ASslReC/igDdmui3SxclJUVAEXBGgDFlTD/LmAKlr0OASVayZMkJfqtLSRFQBBSBfwuBX1IA+bfA0usqAj8aAab6ZNCu+bLtj76+Xk8RUAT+WwjwY3TMgsXkJUqKgCKgCPxbCKgA8m8hr9dVBL4CgU6dugqzUL9+w68orUUUAUVAEfg0ArR6JEuSTOaUbFmz4/7DD4kyPl1LzygCioAi4PMIqADi85hqi4qAjyDAbD/8qu7v/vyBKWiPOnxnxUcuoI0oAoqAn0KAHyoM6O93/B0lCvz784dRo8f5qfvXm1UEFIGfBwEVQH6eZ6E9UQScEOD3CSh8xPwrmmgsmzh8Xd2poO4oAoqAIvAFBPjB2GRJkyFU0GCIEzMWQgT5Q77bcvfeh/SwX2hCTysCioAi4GMIqADiY1BqQ4qAzyHAL6ynTJEKoYMHR+wYMRExbFjE+DuGl18k9rmrakuKgCLwX0Wge4/eosiIHT2GCCB//xkVAfz9jomTp/5Xb1nvSxFQBH5iBFQA+YkfjnbN7yLArzzT+hHjr2iI9Xd0xI4eXfb5FXIlRUARUAS8gwA/9sevXkcMExZxYsSUOYXzSuhgwZE2TXr5wJ532tOyioAioAh8LwIqgHwvglpfEfBhBK5dv4n48RIgXMhQoLZS/mLERKRw4RAlYhQwM5aSIqAIKAJfg8BrDw/Uq1dfrB9/BPwfaPlIHD8hokSIiAD+/Mnxjh27fE1TWkYRUAQUAR9DQAUQH4NSG1IEvh+BN2/eoFGjJsIUMEg0asRISBQvPqJGioxAv/0ux4sXL4nnL158/8W0BUVAEfjPI3D6zDlkzZoDObLnRIkSpZAkYSJEChsOaVKlRtmy5ZEubXoULlIcDx89/s9joTeoCCgCPw8CKoD8PM9Ce6II4NbtOyhfoRLKla2Aps2aI2P6DAgTIgQypEsv+5UqVkHxEqVx8dJlRUsRUAQUgS8i8OzZM1y9eh2vPN7g9Zt3KFa0uCgy2rXvKHWfPH2G6zdu4sWLl19sSwsoAoqAIuBTCKgA4lNIajuKgA8gwC+eP3v+gRFo0KChMAsNGjSyWn/12gOvXumX0S1AdEMRUAS+GoFSJUvLnNK9e8+vrqMFFQFFQBHwaQRUAPFpRLU9RcAHEajv0kCYhYYN9EOEPgirNqUI+FkEihezWUA6dersZzHQG1cEFIF/HwEVQP79Z6A9UAQ+iUDdOvVEAHFxqf/JMnpCEVAEFIGvRaBkiVIyp3TqpIHnX4uZllMEFAGfR0AFEJ/HVFtUBHwMgTp16gqzQEuIkiKgCCgC34sAk1j48+cPnTqqBeR7sdT6ioAi8O0IqADy7dhpTUXA1xGoU9smgNSr6+Lr19ILKAKKwH8fgeLFSogA0rFjp//+zeodKgKKwE+LgAogP+2j0Y4pAoCxgKgAoqNBEVAEfAIBkwVLXbB8Ak1tQxFQBL4VARVAvhU5racI/AAETAyICiA/AGy9hCLgBxAwFhAVQPzAw9ZbVAR+YgRUAPmJH452TRFwqeci7hL16moQuo4GRUAR+H4E8uTKI3NKq1Zu39+YtqAIKAKKwDcioALINwKn1RSBH4FAhfIVhVmoWKHSj7icXkMRUAT+4wiYIPT2HTQG5D/+qPX2FIGfGgEVQH7qx6Od8+sINGnqiuAhQqFFC9VW+vWxoPevCPgEAjt378XceQtx+Ohxn2hO21AEFAFF4JsQUAHkm2DTSorAj0Hg/IWL2O6+C5cuX/0xF9SrKAKKgCKgCCgCioAi4MsIqADiywBr84qAIqAIKAI/PwJ3795Dx87d0KpVawwZPBTt2raHa/OWYAKIHTt2+cgN3L5zDw8ePPhiW5MmT0G/fgPw9u27L5Y1BS5euoSFi5aYXfk9duIk6tarj/r1G6JWrTrYu2+/HL956zbev3/vVPZn3GEXr1y9bnVt+fKV6NixM548fWod4128fu1h7euGIqAI/BoIqADyazwn7aUioAgoAoqALyJApnbR4qUoUqioxF21bOmGGTNnIUqkKAgdOgy2bnP/7qv37jsQZ8+d/2I7LvXqI07seHj46MkXy5oChw4fReeuPcwuevToheTJU6Fnrz6YMmUqxo+fiOUrVqNkqTK/jEvnrj37MHzkGOueWru1xV9R/wYFKEMdO3fFiRMnza7+KgKKwC+CgAogv8iD0m76HAIHDx3G9u3ucHffgZcvX1oNv3r1GmfPnsONG7ecjlsFfHDjzZu3ePjo0Xe1eOv2bdEOOmoDv6tBLyq/9rBpFl+8fIlbt+9+pJF9+/atF7X0kCLw6yIwadJkEUB27totN3Hw0BHZz5QxCx49eYrLl6/gwqXLeGc3INz4P3vnAF5H04bhfvg/1bZt27ZtM2Wq1EyNJLWt1LZt225Tpm1S9/mv5z1ntydpipQnyTvXlZzF7OzsPbuz82r29h08euxpXvDJk6ewdet2PH/+wtz2+LEnunbphnjxEmDZ8hXw8vI29x04cBA8xkgeHh44cfI0zpy7gNevX+P+/QcitLy2WkPYRz2zOZ7HsQ+YPccdt27dlmKGD3eVOo8e/X7wzh1v375Fly7dUK16LSnbOKen5xNs2rwFr16/lk0874WLl3DvgcVa43HvHi5fuWZk9/G7ffsOXL/x3krhY6fNytZt23H7zh3ZwnpcunwFdz3uyfrdux44c/a8D0vG8eMnkClDJnACjhs3b+GxpyeOHT+J8xcvyzHsu3v16oN//vc3Zs+eI4zOnj0nbUGeL168kH58567duHLlqk1NdFEJKAF7IKACiD20gtbhpxDggKFunXpIkjgZatepj3r1G2Hm7HlYsmQZHj58BC/v56hUqQrChQ2PSZOn/tA6bdu2A1GjRMeSpcu/+jyz57ojUsTIoKb2R6RjJ07JS59lL1y4WAY0s2bPNU91684drFu/yVzXBSUQGAgMHjxU7vUDBw/L5ezavVfWixcrIUJFnz79ECJ4SDg798Pzl6+RKlVadOrcVfJOmjQZ6dNlQNnyFdHUoQXu3PWQ7es3bELI/0IgQpiw0vdQaGEaNswFWbNmR+48+bF
th8XCsmDhYsSLGx9NmzSTPAcOHhJrSMkSpTB91lwUK1YS+fLmx/Wb7wf9FACaNGsh+Rk3xnPFix0XfjlZsR9cuWotXlmVC6wLry1p0uRo0KARHj56jFevXqNmzTpIEC8Bhgwdjj7OAxA3TjzpF23dwlq1aoPkyVKiUKGi2LN3n5zfr3+du3RD+vSZUKVqddy8dVsEoeYtWyNGtOjo138gBg4ahhjRYqB1a0fz8PoNGwv3pImSYNHiZThz7iKKFimOiuUr4unTZ9i+czfChg6L0MGDo1Gjppg4ZToK5C+EiBEiokaN2iJ8nDl7DukzZMbESVPMcnVBCSgB+yCgAoh9tIPW4gcTePP2HfLnK4iI4SNiz+494v9MH2gOMoLLYKK/1GDsuAny0nNzG/FDa/TY8wkGDx6G3bu/3rf84uUr+O/vf1G1arXvXtdRY8Zh7PhJZrnUVHbq1NlHfbv37IPlK9eYeXRBCQQGAm5uI/HHb7+jTZu2GDlyNMKHDY/YseLg8OGjcnm0DPz95/+QNXNWPPV6juEurtiwYSOu37glfUf/fv3x+s0bZMuaHSVLlpZjaF0tUrgokiVJJgNwbnR3n4/fg/0h2n32QzFjxMLp02dw994DRIoQCXFjxREBgnEQBfJZvt0xfcZsjBw1Vs7Tp28/EzeP37x1u6xPmDBR9pcoXgpeNlYYM7PNAq0K2bLlQO2atWVr0iTJ4NimrSxPmTpNyilTugx27T2ARPETIGGCRPC2ltmpUxckiJcQb98CHTp0ROJESfDWV1wJrRQ1a9RCwYJFpMwG9Rsib578srxk6TIpP13adDh24jRyZc+B5MmS46HVmrR67XoE/+c/NG7UGEaxBfLml2M2bdkm+QoXKoKI4SPg9JmzUua69Rtlf9u27WTd29sb+/YfFGuIbNB/SkAJ2A0BFUDspim0Ij+KAN0KGlq1aStXr/3gNLRCuLsvkO1z57qLVm3uvPlYu3YdunbtgZevLG4JxoHr1m2QgcUkG63a3HnuaNuuPQ4eOoyzZ8+juUMLGC4c02fMgoNDC+zYuVuKePX6DUaNHguei9pAI50+cw4tWrZB7Vp1sGXLVmOzuHo4OXVB3br1QXcQI9EPOn7c+HJtxjbfvxcuXUatWnXgaiNQTZkyDa3bOIKuCfQbb9G8FfbtP2AeOmXaTNF80v3B1W0kLl25Bl7rPPcFYiliRmo+Q/wbHOXLV8SSpSswfvwkce0YM3a8WJJOnT6Dho2aYMbM2Wa5uqAEAgIBV1c3/PX7n2jUqAn6DRgIp46dcPzEexepe/cfIEqkKKhcqYqPy+nbtz/++vMvrF23XrbzGz4h/g0BKhuYypQuh8QJE+OFNWCa3+GIFjmqCPbU/AcLFgy9eveRvOnTpkeGdBlMC0bZsuUROUIkPPZ8ClpTmLdjByfJy3/1GzQGXZaYqDzh/rJlK/gQQGbPmQdHx7aggMKPEG7Zug07d+8Ra0mVSlXg6jYKESNEQlXrN4eWLluJP377Ay4urlJuxvSZED9OPLD/4l/ihEmQNnUaUFmRO1cexI0TH7dsYjN40IWLlxH8n3+RJVNmjJ84GalSpEbE8JGkPLrAsp5OTp1lvWTxktLvnDt/Qda379iNEP+FQMsWrWSd/ypXrCz9ziFrP0jLTbjQYXD9xg0zT4vmLfH3//7C/QePZNszLy9zny4oASVgPwRUALGfttCa/CACfAlWSEbSAAAgAElEQVT+HiwYChYshM/NKcNBdrhQYVCzZm0MGTYCoUKEQq2atfHW6vC9ZMlSZEifEb169kKxYiUwdux4qbXbiFHyMqXW0n3hUpQuVQaRI0WWGVtmz5uPf/78C9mz5RAfZ2rzOLjhy3em1aVpzdp1yJIluwz4S5UohRQpUoNxFw8ePESpUmXRu7czGjdqhGhRY5gDDbpdxIkVR8ryC93atetRoEBhdO/RE9Wq18SUqTMk2zCrfziDaydMni71ihXzfWBn/4GDpW6pUqTCjBmz4HH/AahpZH3nz18oZZQqVQbhQodFh45O2LFrD9xGjpb9qVOmlv30RS9WrBRGjhrjV9V0mxKwWwKDB1nuf7rv+JX43IUNFQZNmzT1sbty5WoIHzYcdltdkahIoIXy6jXL4LhQwcJImjipGb/RqpUjQv0XAu3atsPECZOwaNES3LhxE5evXpN8mTJkNssvVrQ4okWOgtt3PbB8xSp51jjQZqK1ZdCQ4bh23TJV97LlK2V/mlSpffR3W7ftkGedz3Ga1Glx9Nhx0N2LVoauXbtj2rQZmDdvPk6dOiPl0tryv9//xNAhw8TVLEumLIgSMbLErzzzfoHIEaOgXNlymOc+H9Onz8D2HTulzzIrDYDCROgQoVCxfAVRuLD8vfv2S5alS5dLPdtbXUhLFC8hAf8HDh6R/RS0yK9JoyZmkeXKlEOo4CFB91CmfHkLIHyYsDh77ryZh65sdEFj7IghqJg7dUEJKAG7IaACiN00hVbkRxHYsm0H/vrjf6D5/3OJMSF//va7BIx6v3wtWsiokaKYAgjdKuh6Qd/q+vXqywv06rXruH7rtrycUyZLAWrcOF0kX/SMOWHiYIQDflo5mKgljR4tBpYus8SAlCheEvnzF5R9/Ld9xy4JCGXAa/bsuWTWlw0bLe4FHCgw3bh12yKANGxsHme7ULFCJXFP8H7xAkOGWPzaDx8+gqde3ogZLQaSJEwMzydPMWa0xaVjlzXodtfuPZa6121gFufcb6BsmzfPXbYNHeqCKFGi4dARi1sKN+bKmRsZM2QSCwjXn7/QqTFNgLoQYAhMnDhJ7nW6WvmVaMGgW1Y7R4urkpGHs0z9Hux305pYqEBByff8hSUYnRaT5ElTwAgm79mzt5xn/4FDRhHyy2Dv1KnSIFuWbOb2cuUqiNWFcWrsG9i3dLN+yXzOvPmwLePN27egcMI8K1f5dJGc6z5ftjs6tpeyafn847c/0dfZ4oJqnpBxX4uXSr/p5mpxR82ZI5f0G9TFsP9LmSIVsmTOansIXlqv1djI7xfRikIlju/EPpB1pJWZiYoXxplcsn7zaP/BQwgVIiTa2MSFVKtSTbadPW+ZSax4seKiMOIkAAxsf2e1GVWqWFnK3rrt22cu811vXVcCSuD7EFAB5Ptw1FLsmAA19H8E+w1ly5T7bC0ZZP1bsGCYM8cSbE0Xp7Sp08pxnk+fiV83gyKr16iF8uUqoED+guJ/fPrseYQMHhIVylWQvCPcLBaRqVOny3qlipVEWDBcOeh+FS1aDGzctFlemcmTpUDjxj41qkZl+bKfNccd9M/+939/YZF1rn8RQGLHlcBRI6/tb4XyFRE5fATUqFELVatUF2GKQsaVa9cRNmRolCll8U/v1rUbfg/2Gyh4MK1atVpe3pUrV4XxGQIOEjhYWLhgkeRhUCkD4G0HOAcOHZE8/fsNwBMvb1y1amRt66TLSsBeCTBegW6JFStWkvu4Tx9nXPZj9iRaAfkspEuTzsfsSh4e98Qlska16pi/YBGSJk3hY2DPQTiPW2V1Az1+4gTChAmLCOEiYM7ceVizbg
OOnzwNBp0zX5iQoSSuhIHs8eLEk20bNm2Bs/MAWWb/Q3cw5/6D/AwAZ0B5gviJJACbMSpr1qxFhQqVkCxZCoweM85shg4dnPBbsN8k3mXnzt0yGxVjOWg5ZT04JfDefQcQJmRoWd+02eIeSgsr99erWx8bN26WWIvr19+7QhknGD3WElfXrKkD6L5qxKoMHjJMji9SuJhYKpIlTirr8xculkPZT7Evpiuo1/PnePrMC4wX4Tnp6snENuI643boKmp824RutTly5BaLklEP/VUCSsC+CKgAYl/tobX5AQTog02Xg3//+geHjx738wyGn/Ccue74I9jvGDfO8oJLlSIlaNVgovaRmsk0qdLA4959H+Ws27BJ3DJKFCsh23k8X4wjRoyS9cqVKsvg5Ow5i38zY0xixoyNTZu2yH5aVdKmsQg6tgXTvYLxI737OItQxODYRYuXSJZr12/IzDHNmjnYHmIulyldFtEjR8XtOx5WvaBlFwM4I4WPgEIFC8mGXj17yzUbrhEcYPzvjz9QqVJVsyx+/IvXs8g6OOjffyCiRokm/uhmJkB8wWPFiIkVq9aAAaCalEBAIXD/wUN0cOosMVOMO6hevRYWLVnmo/oc4FL4pjBRsWJlLFux0sd+xj4xLooxG5OnWJQPRoblK1aidOmyGD9hsrEJm7dsA62fZctVwHDXEfKcbt6yVcqgQoIzcO0/eFg+IshZsSZMmoJu3XvK+RlDsnvfAbAfePnypVmm7QIVKvzuB2PgataqI0oO2ymDjby0xuTOlRedOnczZ77jeapVq4FBg4fCxW0UGjZoKOUwps1ICxYuQr58BWRGwZ2f+FgjJ/fIn68A6tRriJs3b8nhAwYOEQtx69ZtZMKLzp27gv0k8xrJqWNn1KlTD7du3wUtza1aO6JS5SrgTGVM3N6gYWO5LtvpyClAUSlkWJ+M8vRXCSgB+yGgAoj9tIXW5AcS4ICYA2gKD5xy10h0s3AbOUZiLbiNPtHMN2G85SXIGIsEceMb2SU2hPs3WgUHb+/n8HzyBLv27JPA1fx580lew61p9BjLPPwUTOiCdeHSFdk/f8FCcc9YsXKVrNNFi+Xu3mvxj6ZLBT9YtmXrdtk+ZsxYLFu2QpY5kGFiEDq1p9RQ+pXq12sg+WltYeL0nJwt5uTps+JbnSNbDtne06rpPHbMEsR64NBhiVkpVqQYNm7eKu5n/awuWAsWWGJAxowZhwjhI2LpshU+Tm34n8+aPcfHdl1RAkpACfwsApzw4+5dyzdGftY59TxKQAn4j4AKIP7jpbkDMAFqE+nHnCZ1OtF0UiPYvEVrbNhg+ZYFtWVVq9WQQTsDRIe7jESEsOERLkw4TLR+F4RBohkzZkLoUKHBeeyXr1iNp8+ewW3ESDmuQL4C2LVnP6pUqS7uA9R4rt2wGZkzZRE3p6nTZsiUktRuUuBwcGgpbgNnzpxFsiTJZbrPZg6tMGSYm3zc69SZsyJk5MyZG0OGuyFRgkQyJSctNhJsGSIUGEC+78DBD1rm5KnTSJEiJUKHDC0uY1279ZSPfw13cZNzc6adUWMniLaWdalVu658G8DL2xtVqlRD8P9CyMfNnnk/lymM39cXOHHyFMKFCS8fNKNrmpGOHT8h3wyw/VaAsU9/lYASUAI/igA/ztiuXUfMd5+PfQcO4aJV2fOjzqflKgEl8G0EVAD5Nn56dAAj8OLlS3Flmj1nLsaOGy/+1sYl8MvFFEboB83pafll4N2790rQ55497z+y9fCRJxYuXCRTzBpfHubX1WkV4ReMaWVgPAW/cLx58xYcP3ESBw8dkfJOnTotAge/xL5+4ybx3Tb8lvkF9vnzF4DT5N62fqiMdePxdMvgtzjok7548VI8e+Yl/s3U9K1ctRoXPxIwS//xVavXiM80v6DMxG+P8KvE9DVnDAo/IMY/lkM+TE+ePpN1ztjFr52vX79Rrmn3nn2mnzXZbNmyzYebA+u3bfsuKUP/KQEloAR+FgFOl164cDHEihXXz9idn1UPPY8SUAJfRkAFkC/jpLmUgBL4BAEKT9RA0k+bwpYmJaAElMDPJkAFCq2zj60fM/zZ59fzKQEl8OUEVAD5claaUwkogY8QKFCgEGLHiosT1m8IfCSbblYCSkAJKAEloASUAFQA0ZtACSiBbyYwdsw4+VL79RuWGW6+uUAtQAkoASWgBJSAEgi0BFQACbRNqxemBJSAElACSkAJKAEloATsj4AKIPbXJlojJaAElIASUAJKQAkoASUQaAmoABJom1YvTAkoASWgBJSAElACSkAJ2B8BFUDsr020RkpACSgBJaAElIASUAJKINASUAEk0DatXpgSUAJKQAkoASWgBJSAErA/AiqA2F+baI2UgBJQAkpACSgBJaAElECgJaACSKBtWr0wJaAElIASUAJKQAkoASVgfwRUALG/NtEaKQEloASUgBJQAkpACSiBQEtABZBA27R6YUpACSgBJaAElIASUAJKwP4IqABif22iNVICSkAJKAEloASUgBJQAoGWgAoggbZp9cKUgBJQAkpACSgBJaAElID9EVABxP7aRGukBJSAElACSkAJKAEloAQCLQEVQAJt0+qFKQEloASUgBJQAkpACSgB+yOgAoj9tYnWSAkoASWgBJSAElACSkAJBFoCKoAE2qbVC1MCSkAJKAEloASUgBJQAvZHQAUQ+2sTrZESUAJKQAkoASWgBJSAEgi0BFQACbRNqxemBJSAElACSkAJKAEloATsj4AKIPbXJlojJaAElIASUAJKQAkoASUQaAmoABJom1YvTAkoASWgBJSAElACSkAJ2B8BFUDsr020RkpACSgBJaAElIASUAJKINASUAEk0DatXpgSUAJKQAkoASWgBJSAErA/AiqA2F+baI2UgBJQAkpACSgBJaAElECgJaACSKBtWr0wJaAElIASUAJKQAkoASVgfwRUALG/NtEaKQEloASUgBJQAkpACSiBQEtABZBA27R6YUpACSgBJaAElIASUAJKwP4IqABif22iNVICSkAJKAEloASUgBJQAoGWgAoggbZp9cKUgBJQAkpACSgBJaAElID9EVABxP7aRGukBJSAElACSkAJKAEloAQCLQEVQAJt0+qFKQEloASUgBJQAkpACSgB+yOgAoj9tYnWSAkoASWgBJSAElACSkAJBFoCKoAE2qbVC1MCSkAJKAEloASUgBJQAvZHQAUQ+2sTrZESUAJKQAkoASWgBJSAEgi0BFQACbRNqxemBJSAElACSkAJKAEloATsj4AKIPbXJlojJaAElIASUAJKQAkoASUQaAmoABJom1YvTAkoASWgBJSAElACSkAJ2B8BFUDsr020RkpACSgBJaAElIASUAJKINASUAEk0DatXpgSUAJKQAkoASWgBJSAErA/AiqA2F+baI2UgBJQAkpACSgBJaAElECgJaACSKBtWr0wJaAElIASUAJKQAkoASVgfwRUALG/NtEaKQEloASUgBJQAkpACSiBQEtABZBA27R6YUpACSgBJaAElIASUAJKwP4IqABif22iN
VICSkAJKAEloASUgBJQAoGWgAoggbZp9cKUgBJQAkpACSgBJaAElID9EVABxP7aRGukBJSAEvjpBJ55eeHhw0d48fKlee63797h6dNn5jr3eT55Yq5zwdvb20ee58+f4/FjTx95fsSKbT1/RPk/ssxXr179yOK1bCWgBJSA3RNQAcTum0grqASUgBL4cQTuP3yEBvUbokCBQihduhxcXEfg7l0PXL5yFTdv3UbzFq1QonhJ1K5ZG7Xr1EPNWnWQI3tOTJ02Qyq1ecs2lC5THlUqVUGNajVQo0ZtNG7igLx58km5Xt7PP1v5Pn2ckSxJMuTOmRspU6TCqNFjP3nMokVLMX7iZB95Xr95g+kz52D3nr0+tv/olUmTpiB79py4cvXaZ0918tQZPPZ8ggMHD2HC5Kmfza8ZlIASUAKBlYAKIIG1ZfW6lIASUAKfIUAhIlzY8EiTOi3Wrl2P48eP49TpM2jQsAlSp06Hux73cOrUKWTKmBnBggXDpEmTcfTYcaRLk07WJ06cAlpOdu7ajbChQiP4P/9i6bIVOHvuHByaNZc81avXwuvXbz5aEwooTp26oGjhIsibOy9SJEuJZctXfjT/cJcRqFylBi5evOQjz5mz5/C/P/5G1249fGz/0Stz57mjeLGSuH79xidPdfDwUUyYPE3yeD3zQp16DdG2XcdPHqM7lYASUAKBlYAKIIG1ZfW6lIASUAKfIHDk6HGEDhEaUSNHxfUbt3zkPHDgIBInSY6DBw/J9tq16+L3YMFw6PARWT9w6IgIFxXKVzSPS5M6DRInSoJHnk9lm9fzF4gaKQoihA0Pz6deZr4tW7dh0+Yt5vrEiZMxdJgrzl24aG772MLKVWuQv0BhXLl69YMs02fOljplTJ8R127c/GD/6zdvMXvOXIwfPwHbd+zEnDnz5NfIyG3Offth3fqNxiY8ePBQ8q1ctRrPX7zErl27sWLlanM/LUSbNm/F8ROnQNczJrqxjRgxEi4urth/4IBsO3LkGHLnyousWbJhxoxZsu2uhwcKFCyK5Svelyc79J8SUAJKIAgQUAEkCDSyXqISUAJKwDeBPn37y4C9e49e5q43b95bKjZu2oIbN2/izdu3qFy5Gv787XdTAOneo6cc27x5Kzn21es3SJk8BRInTIzbd+/JtmMnTuHff/6Dg0MLvLOegflChwqLsGHCy5Y3b96iSJFiUlao4CFRp049XLx0xZr7w5+CBYvA1XXEhzsAbNqyHbly5ZGy5s6b7yMPBQkKUenTZ0SLlq0RMVwExIkdF2vWrpd8tGIkTpQUdWrXRbHiJTF+wiTZ/uDhI7N+rVu3RZeuPRA2dFiMGz9R9t+8eRtZMmcTIY55Hz16hHr1G6Fq1Rpo3LgpypavBMbR0NL0R7DfkCFdBnTt3hPez1/I8c59naVODx4+lHX9pwSUgBIIKgRUAAkqLa3XqQS+kQDdbJo1bY727TrAsU1bn3+ObeFo/Bn7bNeNZePXyMNfY5vxa2yzzWMsG3n46zuf7T5j2TjOyPux7b73G/k+9Wtb9seOt91u5DfK9L1ubDd+jf02v225r42j/HG5RfOW4up0+sxZf7XuY8+nyJ+vgAzW3d0XyLFv377F27fvwN93796a5dFFioJBmJChUK9+AzRv3hJhw4RDg4aNcfHSZcn31MsbWTNlQYSwYVGlanU0adIUTZo4YMnSFWI5MApjnMasWXMwb567bHr56hUWL1mK+fMXYOiw4VIfWmUOHjpsHGL+njh5GsmSpcBmG+uJsfPyFUv8xbZtO6QMChm2aerU6bJ96bLlsjla5KioWKGSmSVOrLgokK+ArI8dMxZ//PYnTp85J+tLly6XY9u3a4/bHvcRL05cESSMgxs3aox///oHb96+w8mTpyXvrt17ZPfZc+fx7t07cVsLHSIUevbsbRwmv3PmzJX8Gzdt9rHdvyvPX7xAn97OaFi/oeX+8OseMrbJPWTz3PH+MvYZ95qx7tevbX6/9hvbjLL8+jXy+PfXr7J8b/OrTCOPsY/rxrLt75fm4zHWvG3btEVbx3by165tezRv0RK79+zzbxNqfiUQ5AioABLkmlwvWAl8HYFx4ybIYImuOIwH0D/7YbBq9Rp/Neq9Bw+R0RrXMXPmbDmWA2Um/r5+/dos75n3c1SqWBkxo0XHcBc3cWPasnW7uZ8LT728kCpFKuTIlh3u7vORNHESsZhcv3nbR77PrWzavA2RI0ZGkybNPshKq0PokKGxb7/Frck2w9p1G8Qti4JLjKjRES50WJw+e97MQqsJ79c9ey0DwwRx40kcCzPs2rMXoUKEQuvWjpJ/wYKFkneGlcs89/myvskqJESNHAUF8xc0y6YAQouKx737ss3BobnUs2ULi3WIGxmoHzJ4SHTo4GQexwUKX9y+fPkKH9v9u+L59CmiRIqqz6Sd9EuTp1hiffzbjppfCQQlAiqABKXW1mtVAt9AgG4qdMNJFD8BevbqC7eRo+HcbwCc+w1Ev/5+/3Gfsd/3L49xtvmTdWt+bjfKNMowthnrvveb69ZjjXzya1Oej/NY627kNfYZZfm17tc+33Uzrsv38bLdlon1+o0yWQ9j2TjWqD9/+w8YhH79B2HgoKGyTHeejp264PwXxE/YNj1ljc6du8mAtXGjpuYuulsZgoin5xOJa3jx8hVKlSqDSBEi4dr1D2MrePAzL2/Eix0XpUuWlrJokfjrz78QN3Zc3L5z1yz/SxY6deqCihUqf5DVzW0k/vvnP5lBynbn4SPHUKlKNVSqXBVNHVpInAWFjQEDh5jZ9u7djyRJkqFpUwcsWbIUEcJFQNeu3WT/osVLETpkGHTr3lPW3ee5C5dp1lm+GCvC8pYsWYJXb96B1pNCBQuZZTds0FAEkPMX3gfFt27VRo4pW6a88Dxx6rSc09GxnXkcFxYvWSYCCOv0LYlTEg8d7irB/JZ75P195Pvetr3HjH3GNuPXxz3Ie5T3rPFrPKPGNpt72Dj+g7zWY41yjf3GunGvG79yPptnwVj3/Wsc7+O8NvXxXZ5xXuO3n69zGOUZx8mvTd2N81j2DxAm/QcOxjAXN/Tu2w8Z0qXHP3/+hYULF39Lc+qxSiBIEFABJEg0s16kEvh2AnPnWgZmKZKlwMPHPr8F8e2lawk/mwCn2Y0XJ55lcL3MpwZ+587dGDh4GJ5Yv/lRtVoNhAoREtd8Bavb1pllGW5M3O7qNlLKLlSoiG02cU8xLBHPn7/Aho2bZcpfI9P0GbMwwdcUu9y3a/deJEyYGLut7k1G/m07duPS5Su4eeuWBIAzMDxrlqwI+V8IH/W943EffZz7ixBiO0DktLixYsRGvbr1pUg3F1epN4P0mVauXC3ra6xWpiiRoqBk8ZLG6dG0SVNEDBdR1m/dui1xM1yhRYRB+Tdv3oTn02eIHSsOnJw6m8dxge5v//v9f1hvE/juI4OuBCgCLVpaBE/fMUgB6iK0skrgJxFQAeQngdbTKIGATmDOXIsmmN9r8Lj/IKBfjtYfAKeuLVW6LCJGiAxq5wcMGISx4yagj/MAbN++
EwxKp7Y3WpRoMgiv36Axzp2/4IPdnn37xQrDKXhjRo+Jjk6dcfz4CcnTsEFjOS5//oI4fuKkbAsTOhwihI8kyy9evJTpfhmU3r17T7iNGI0r1z4+nW3OnLkxZMhQOfby5atgUHrlylV9WEVOnDyFtNZpgpMnTykzbrHOPXv1xqLFS2S6YVocDh85KuXw36BBQ8RaQxepvPkKoK9zf9nn+eQpypYtL9fQo0dvTJo8VWYDC/FfcOzcuQvPX75EtqzZZf/MWXNw9PgJtGnbEa5uI9C7d1907dZTPtLIaYhz5cyDGNFjYvPWHeZ5XVzdEDVKNFz7zBS+5gG6YNcEWrRoJfcCrcWalIAS+DQBFUA+zUf3KgElYCUw1yqAJE2UBNf9mOZUQQVcAlOnTUeHjk7o0KETBg0ehqfPLNPmPnv2DH379oOLixsYyN2tW08cOXrMx4Vu3roNHTp2wtSp08ApdVu0aG1+DPDJUy+MHz9RZsLavdvygcAJEyZh8uQpZhnbtm1Hjx690K59RwwZOhxPbL68bmayLlAAyJwlO7yfP8edO3fRvn1HdO3aHXSjMtLyFaswcOBgzJs3H507d8HqNWvBemS1CgrB//lPBonhwoTHzl2WYHEey5iPRo2bgoKEkWjFGDx4KBj/xG+TjB07Xr6FwvJpuXny7JnMcMU8U6Za/P737N0PJ6cuIojR8mEkCmCs6/GTFkGMQkmO7LnQq3dfI4v+BmACnLahSVMHFUACcBtq1X8uARVAfi5vPZsSCLAEDBcsFUACbBMGiop369ELDRo2Ba0nX5oWLVoMusfs3bdfphLmxxaLFy2BGjVqfmkR3zXfixcv0NqxPSpUrAwvb+/vWrYW9msIqADya7jrWQMuARVAAm7bac2VwE8lYLhgcYYjtYD8VPR6Ml8ExoybgAmTpvra+vFVWixSpU4LTlf88OFDPHjwQFzL6Cb1K9LW7TsxcPBw8+OFv6IOes7vS4BzyHGSA05YQOubJiWgBD5NQAWQT/PRvUpACVgJGAJIssRJcf2mzy9nKyQl8LMJ3Lnr4a9Tuo0YiXz5CqB27XqoVLka5s1f6K/jv2fmx489v2dxWpYdELC1gKgAYgcNolWwewIqgNh9E2kFlYB9EFABxD7aQWvx9QS8vLzx5MlTPPZUAeDrKeqRfhFQC4hfVHSbEvg4ARVAPs5G9ygBJWBDwIwBUQuIDRVdVAJKQAkAthYQnQVL7wgl8HkCKoB8npHmUAJKAIAKIHobKAEloAT8JqACiN9cdKsS+BgBFUA+Rka3KwEl4IOACiA+cOiKElACSsAkoC5YJgpdUAJfREAFkC/CpJmUgBIwBRD9DojeDEpACSgBHwRsLSAahO4Dja4oAT8JqADiJxbdqASUgG8Cc+a6yxSTMg3vzZu+d+u6ElACSiDIElALSJBter3wrySgAshXgtPDlEBQI0CtHue4T6pB6EGt6fV6lYAS+AwBtYB8BpDuVgK+CKgA4guIrioBJeA3genTZ4oAEjNqdFy9dt3vTLpVCSgBJRAECYgFpFlz64cIFwRBAnrJSsB/BFQA8R8vza0EgiyB6TNmycs1YYLEuHL1WpDloBeuBJSAEvBN4O07oFKlKtJHTp4y1fduXVcCSsAXARVAfAHRVSWgBPwmcP3GDSxbvhKbNm+Fl5eX35l0qxJQAkogCBJ4/eYtunXvhWTJUmLFylVBkIBeshLwHwEVQPzHS3MrASWgBJSAElACSsAHgXfv3uHtu3egIMJfTUpACXyagAogn+aje5WAElACSkAJKAEloASUgBL4jgRUAPmOMLUoJaAElIASUAJKQAkoASWgBD5NwN8CiKfnE9y964GXr17j3r37uHHzJm7eugX6h795w4nofn168uQphg13xbIVK/1dmRcvX+Lipct48vSZv4/lAa9fv8atW7eFz9NnXvD29hZ/ee/nz/Hi5St4eNyDYZz19PTE7Tt38PDRI7thZ1y0x7376IjddGAAACAASURBVOM8AAcOHjI2fbdfMlqwcDFc3UaBZuuAlPxb35MnT6FXrz64d+/eZy/zwIGD6NWrL3j//sz09u1b9OrTD7t37/3upyWvlatWY/CQYWbZFy5eglPnbvIsmBt/0sLLly/RvUdvHDh0+IMzens/x7jxEzFn7rwP9umGoENgwsQpmDhpil1eMN+33Xv0wtlz58369ertjK3bdpjrH1uw7btWrlyN/gOHfCzrL93OGfZ69+2PU6fPfLYefJ579OqLg348z9euXceAgUNw6NCRz5ajGZSAEvj5BPwtgCxfsQq58+RHqVJlUL1GbdSr3xDVqtdC8RKlMHioCzjQ/tVpwcIlCBEiNNxGjPRXVSgcTJ0+C6XLlEfhwsWwbfvnO3XfJ3j06DFq162PDOkyonChIrh0+SoOHz2OLFmyIWuWbBg0ZLg56J43fyGKFi2OFi3bgMKKPSW244EDh3DXw+O7V+vCxctIliwVypQp/93L/pEFPnvmhQcPH/nrFLVr10OE8BFx/8FDP4975uWFl69eyb4yZcohRrQYeP78hZ95P7bx+o2buHHzlp+7X795g3MXLn6yTA5M9u0/IMKwn4V8w0Yv7+dIkSIVcubIZZZCwZvCDgf8Pzq9evUaO3btMa+fSpK9+/b7KfysXb8JESNGQbfuPX90tX5a+S9evMS27Tvxwp/31NdU8N79B2bf9jXH28sxJ0+dBv8+laiE27Fz96ey+Gvf7j37cfv2nc8e4+TUBX//9Q9OnXk/OOezSyXgpxIVhPsPvFcmXbly1c9B+6fK8L3vzl0PXL5y1ffmb15/+vQZ9u8/iAcP/e4zbU9A5cm+fQf8fJ6paIgcOTo2bNxse4hdLD9//hwcK2hSAkGZgL8FEGpn+UXkwkWKYf2Gjbh67RouXLiISxcvYdaceXYxPeeDBw/h0Lw1Jkyc7K+2XbVmHXbv2Sed+XAXVxEkaPHxT2LwGS0oFDYqV6wkh3KA2bBBQyRPlgJ37903iztx4pTMmnHw4OFA8eI2L+wzCxwUubiOQMtWjuALJCAkvmwXLl7m76ru3bsP+fIVBLVxvhOFkgWLl5qbly1bIYLvo0f+E3JoSVqxcrVZju2C55On6NKtl+2mn7rM52HkyFGoWMHyLPzUkwM4cfI0evbp98WnbduuIwYNHvrF+e09I6+/Ry/nH17N5SvX4NYXDKB/eEV+0gkWL10Ot5Gjv8vZaBl36tIdjx57frY8vm/z5i2Affv2fzavkYE25tlz5+Pe/c8P6I1jvuR39lx3TJk280uy/pI8l69cQa069bBu/cZfcv6PndTzyRMsXOL/d8nHytPtSiCgEvC3AMILPXv+Aho3bY7Hnh92mF7e3nB1GwEXFzfThDp//gIMHDTE1NJSk+zqNhKdO3Xx4eJz+sxZeL94iecvXqBz566mFmrf/oOyvmvXLpMzO1UO5qlBpovLkqXLzX1c4HR4I0eOMbdxoNt/wCB07tzlo9roly8tmmge9PDhYwwdPgJ3PSyuM9TK9HXujxVf6NY1a/ZclCpdDs+8vKUOt27fRsqUabD
SZnq+U6dPi5uTWUkAM2fORnOHFjh2/ITtZh/LZ86eFxevdes2oE1rR1y+fMXcf/DwEWFy9NgxrFjxfipA8m7XroOpZWP9Bg0ajPkLFpp1nDhpMgYNspjlX716JcIYXdKMdOrUaXTs2Ena19h28uRpDBg4GFet34U4cPAw+vcfiMNHjhpZ4OY2AnPnusv6o8cWrc/ipcvQrr0T3rx9i5Wr1mDkyNFYZB2M07WvV+++2LjpQ83V27fvcO78RTx+/BjTps8w75EdO3bCoVlz0LXANl27cVNc3kaMGIUxY8fb7pLloUOHo1OnLh9oyU6fPQfey7t27cbw4a6oVasOsmXLLq5E56zuD7xf+vbthx49eoEvFdu0avUa4XDp8hU0aeKAMzYaS+bjfVu3bn2kT5ceAwYMEusDr7tRo6Z4/foN5rkvwPBhLuCzYiRqmPnMDB3qYrbZxk1bkDZNOhQvVgILFi2RY438HNC0bdcBiRIkQtduPXDu/AXZRfeN+/fvY/LkqaAmlGnP3v24e/eucShOnz6DNm3aYsrUabKNFpZhw1wwZsxYnDlzVrbx2O7de2L7jp3mceTctm17H4oI3ocVbAQQz6dP5XwURI20Zu06ODi0wMZPaCuvX7+Orl27C1db10C6apDnq9ev0aNnbxyyug3u2rMXefLkF+vL5CnTxAWS5+N2uj4aidfTs1cfHD9+ErPmuPtwF/Py8pbzubmN9DGzjfF8UgnD9rd9Tmhx4r2zefMWzJ+/UE7z7NkzdOvWQ54vW6fDy5evoqNTZ/C54LNAaxSfPTe3Udi1e49RRfm9evUqunTpJgKSrVvJqdNnxYLK+7FHj944ZHVHYbvkypUHOXLkwtRpM0BrGNOYMeNNlx26sPiVqIhh+8+cNfuD3XyO2A60vDEtXbYCCeInRPXqNbF0mc9+2Dj4/IVLcGLf4eomswQZ20+cOIlOnbqKqyrv+xcvPrT+kRf77p49e4PPk22iO18bx3bSj9lu57K44AwYhA4dnMx7ltvZpsazQCXVqNFjYMuB76FDh9+77cyeMw/8M9L8BYuQNm16eeZ47UxkT8H1/IWLss429CutWr0WrVq1wfLlFvfga9dvokLFykiePAUGDBiMGzf8tmSwXN6jfA4bN26GTTZ9oyjNrBZQWqzbt+9oumg9fuyJenXrI0G8BOjdx9m87ivXrmO/jXvt0WOW9w3r17dv/w8s3zNmzpF+kspGpsNHjiFL5mzImzuv3FsfU9Txexi83t17fLp3Xr95W8rZunWb3EsUwox0+85d7Nq914dyilae9u06YMKESUY283fnrj3wuPveUs93Ul/nfnKv0LV0zdr1Zl7bBY5j6HmwZs06c9rc+/cfWN5xrm62WWWZ7c53KC2otmn1mrVo1rQ5tmzZZm6mZ/Edj3vSDzg79xOXY+7k892sWXOkTpkazv0G4LjNe97VdYS0HfteI7FP4DuVv+vWrQefFyb2JexfmH6GJVlOpP+UwHcm8FUCCB/4Ro0dcPOWpRNhnfgSpHWEA32+UIMFCyYvbu4rU7Y8ihYtAXYsHMh3cOqCRo2boVrV6siRPaccw04/adIUKFumHCZPmY7adeqjVKmymDtvPrbv3I3ixUsie/acoEsFH8bWrdsiYYJE4g9Ls3TGTFkx193y9VG+sNq07WAOOF+9foMZs+bI4KRSxcpo3aYd7j948FGUjx89xtoNm8wBCzM+9nyCVq3bYo7Ni+ijBQCiDcySLSeWWDUdHAiTSaOGjeQwaoYPHT7q44XKDq5zl+6oW6eeaMIPHnpvMjfOtXbdBkSMEBHly1XErNnzUKhgEdGwM9aE++LGiQ/3+YvQrUdvce3icctWrJIBfY3qNWXbw0ePMX78RAQL9hsaNW5quqdQwOJgigPdEiVKiSuat9V1g1adtu07ysske45cwoJlczAbJXI06RC5zmuKHy8hmjRpJlV2d18gA4CuXXugfYfOOHP2nGyfMnUGnDp3leWx4yYIm85dusk6X5o1a9XFxMk+P+bEdmeexImTYsy4iahRqy7mzluA4ydOoku3HmjW1EG0g3yhMPXrPxCxY8WBY9v2IgBnzZpdfIK5jwMOxiVwwM9Beq5ceTF4sEX4osk+Tux46NajF/oPGCz3LgWQBHHjo3qNWqAA8ubNG9H+devWUwbXvKc56GTau/8g2jh2QIeOndCwYWMULVoMfLHZJsYY1aldFxHChRdXRg7mOKjNlCET+vTtj7HjJsqgjgNHprNnz6FsuYqoU7c+SpYsjSZNm8t2Ct5RIkZGoQKFMHX6TBmEG+ehhaVDx86IFSMmmjZtDg4yaHlKkTwlRo+dIM/wuAmTJV4qcZJkYi3gsRzcN2rigMZNmiJHjtyYO89dBPFECRMjUcIk8hwzH/3Oc+XOb7oqDhvuJqzLli2P8hUq4cJFy2Bl7tx58oEuHsOBSokSpVGocFEZtHEbBc/GTRxQs2Zt5M1XwE+N5fnzF1C/YRMR5qpWrY6ixUryUBlQRY0SDSWKlcDSFatRoUJl4fPw4SMZxJBVimQp5RppiZw0aTKYn/cM0/kLFyQehfcA3SPz5y+EUaPHyj7yW7dhswzIypWrIC6m3DF69FiECxMObdt2ALXAWbJkl7pzH+//BPETiXDt3H+QWPk4aB4zdpwID6VLlUHrNm2l/PPnz4u7F/tL9kl8JpjOnDuPkaPHokfPPmjWvKUMMDw8PIRR48ZNZaBPhkznzl2Q6ylRvCSWr1qL8uUronjxUuAzThdSusqmTJ4Sg4cMl36TyotBg4ehfYdOcl1+uczSzZb9LwdKRYoWl2vkud68fYely9mXOKNggUJyHvbnCxctQbw4cVGsaHHMnD1X6mX7b+/e/ahTtwFatXKUfs2IPeDgq0vX7nCyXv/0GbMlhs72WLrhNG7SDJUrV5PrLla8lKnI4mCsVu16qFOnHjp26ioDZCoLmBjDRgVSp87dULVKdRQoUAgXL16SfXzmI0eMjKHDXDHcdQR4Xy9ctFj2HT9xCuHDRcQQqxWMz+WUaTNEuOAzt2fvPixdvgK5cuVGxgyZhA3fedt37JK+wqFFaxHw/LLsTp8+E7Xr1APbsHiJ0uB7lIIA++DMmbLA0ZGC+4eWUgoffF86tu0gfVXuXHlw7bolH90FEyZKKs8a+x/2hxTCu3TrKUo4vqOpYIkfN764vFLwOn3mHNKkSS8Df140lVMhgodA+w5OmOu+EEmTJhelHvdRcKZQzn6waVMHcbfmfXvk6HFkz5odGdKlh+uIUX66E02aPFWut0GDRtLuHKgzMc4qVao0aNGitbzrCxUuhuYtWss+urimS5cRtWrVA5VNTLTitXFsj+bNWyJjxixyfbIDkP4mefJUoCDKREVP+QqV0aZNO/TrPwiFChURzkZ+45fvWr6nevbqK+9Kumvx/hg2zFXGL8WKlpDnz8i/bv0m9O7TD1WqVJP6cbzCtHr1Wjh16ir9fKFCRbFrt8Utj30Dxye8vymsZ86SzYxJrVevPkKHCIly5Svi2LHjUg7vRbqz831RslQZ7NxlKYfHs6
3Wb9yCpg4t5Tk+dPiYKK34vm7YuJkPJa5RX/1VAgGBwFcJIBQ2Spcuh9Kly8pLuHSZcnBxHenj5TFuwiRkSJ9RHqS7Hu/djg4fPobmLdsIGwoTmTJnw87de3D9+g3kz18A0aPFMH1Vc+bMLR0IM1NDkjtvQdFUc/DOlzZ96xctXiJlcdAQLWoMU8NPAWS8VVsyb8EiOPcfLPkeP3mKaNFiYuq06bLu+x8H3+zAYkSP6UPL5Dvfl6w3bNhIXo7Ma9S5cJHiePrMEuA+buIUU/NGf9C69RqBAy0mxo/ky5v/g9PwZR47ZmxULF/R3Jc8RWpMnTpdLE7p02cUIYMvMmrl6VbGID0jxYuX0LRgjBg5Bp07Wwb93P/Eqm2nsFWrdl3pELmdg0YO+GbOmiPFsP5sJ2phmJz79kOLFi1lmf9oAWvd2lHWKTRu3bZdlqnV5b3DNHXaTFOI4Xq5chWxwKotpkbnuY12XA6QAdBbGRxGjRxVNJKcb511paDgbj22d+++iBcnPqiBokWHAw1jULJr1x5EiBBZtE5Xrl5FufLv3YJ4P0SMEBnXrt8Qq0qypMnlhWsw2blrFyh48ZxM/QYMkj8uX71+A9Gjx8LmLVvkRVyuQmVTOKfWN0miJKArlO+0f/8BZMqUFU+tVjIyiRwximmFcnEbIUIfj5s0aQrKWmNmDh8+ghjRY5nasLx582PePIuFyfc5GNdE4d/QULZs2Rr//f2v6bLF6x0xcjQKFiqCO3fuSv3r1m9kCgEcxGbOkl2Kpd97jZp1cOrkKVmnNvbiZYsFZdKUaTJAMs7PQbDhyjRv3nxTAKFFx8GhpQhyzHvi5CnUrF3P1KZTaGTf4jtNmTpdhCJu5/3BAczatevkuORJkiFenHimcFuyVDnMmTdfiqCGmoNpI1ELHC1KNNMljgM1txEWVxoO6ig80IrD1KlzV0ybbnExoUXv33/+kwHH5i1bESpEKNSv31Dy8dmOEye+WDc5YE2bOq0oV3gPU/lB11QOhJnoyhc6VFiw7RctWowiRYrLdgp9VLZw4Dpy9DgYAxwOMDlpAy0LzRxaSV4qepInTwkO6Nh+KZIlFwHA0OqXLlsB7gsWSV7b62d9+BxTmcDEQGujL5IN1n8NGzXBCGv8HDX/JUqWkT0cOLbv2NnMynths1XrS8H4Y25BAwcORosWlrofPX5CYgh5TaNHj0F56zPIATyVRIaVxjgJNeQUgplevX6LhAmTYNUqi5WzZcs2cGhu6XdoBYsQLiLGjBkneanI4aDZSIyvqlixsqy6DHeV9jMsorSudO/ZW/aRTdo06THSev20RBrW9SFDh2PrVktfNnDQUBlc8qCTp86ItZvLN2/ewrRpM8x+XQq1/qNgvtj6vmJZ1arVlD1Xr91Am7YdYSh7bI/hM8cB+mjrdS1fvgJJEicz23D8+EkoUKAwaGHnO6R1mw5i8aUShy6utAqy78lfsIgIxyz72PGTUsagQRZXw8VLliJ82HBigeR+9wULUa9+Y6kG+zSj/bmBbc5+lYnW8j59/XZvZHvmL1BEJqph3rFjxyNypKjyfuOHTPl+pRLNSClTpgat8k+ePkWNmrXNvoQ88xcoDFpmmBigzvc+rVZMfKdQmUHLD60LterUN+vE906qVGlBK7HvROGPfTytrmRE6xsVn32c+0tWxgCFCB4Sx44dk/u6XgMLD+5s7dhehCguUym5YeMmOaZZMwekS5telvv1H4BIESOZ/TItyZEiRgHdwyk4U8iikoBpyJBhyJO3gCzzH4WO6NFiitJn+YqVSJIkOYa5jJD4QyozHNs54dYdi7W6VWtHifEyD9YFJRCACHyVAHLk6DHky1cAw4YOl0E6O/AhQ10+MJ9XKFceoUOF8YGDmmMGptLFhSZ7xkUYQWLO/Qaivs2DXqZ0WTFJsgC6+BQuUsIcaLKDq1K1pjmjFDXrHDxstwaOt2zdVlxMeGy1ajVQsGBh0aJUqVIdSZMkl4GEj4pZV/hyZ3Chk1Mn/O/Pv8zBs195P7eNQgHdrm7cvG36R1epUsOcYYWdIIUwpvUbNongU616TbBToZaLGh+/Uu3adX3M1EMXnw4dOkpWaslsY1/YKWbPlkPcFKh9o7Zv+gzLoIruRaVKWQZ77LzZuTM4jon1adLMcn5qKjNkzOwj4LBdu45ImiSZ5O3du49YOWQFEFcJQwBZvGQZEiZMjNq16hi75de3ALJ6zTpUtb6Q7969JxpFHwdYV27dvgsOkA03qBu3bokWlJpvatGKFSshGnbqztjZUxjwsM5ARXe4bFlzYPas2aJ1N7Ruxnk4GDQG8rRozJj53v1k+bLlSJMmnZjVmZ8CYs7sOdGgYWOUKllGLBez57gLZ2rojURtMoUwv4JEOYBOmSK1OXAeN24CMmXMIs8Hj+cMLnXqNZQXJJ8bDjBOnDqNWrXqImb0mKKFZL6cOfNg3LgP3cu4j25zeXLnM039ixcvRb78hWQmO6OOHBQ3dWiFOx53sW3bdlStWsPYJb98tnh+JmqbDYHuytVr4jrD7RTm6CJgJGpeOXhgotWQmkMj8dnl88nEwSndw4xEjboxkDa28ZeB+RQ2ec527TsiXJiwoIaVifc4taxMHPDnzVcQg62uhHS9qlSpqqkc2bNvPzJnyioCCIWh6jXqmAMUPouVqlQ3r4/XSs00NfB0IYseNboI+jwPlSO2Lo4ctNNtkImWD9tZlAYNGSZCVYMGjVGnTn1xXeFAlm47dGVNkjgptmzZKsfyH90VQ4YMgz5938dusF9i+xvXHzb0++tnP9GwYRM5ntdPTeyQIZbBJYWGylUs9yOf8TZtHPHP3/+IVtw8oa8FCqscgFEgoHtf5cpVJEfjRk1MrtxAjTuVHBTAKBzbupfaFklXO7YpLbF8xsiUCiUqeypWrioWOU54wWTcZ8bxPJauhBTyO3bsLPe94WJWv34jcZdhXgrP2bO/n+iA98Dcue+tMbTEZs6aAyyProa0IBmTP1CBwYlAjMT7kW69TLTwxo4dF+XLVYCnzcyIvAdp0WEiBwo4fNf55SIkmaxWmaPHjmPCpCmilHBwsFgx6ULFAa5h1TDy85euVjly5BEXQ67TQsABN5UpTI8fP0Gz5q1lEgmus88KGzYCxlkFFm7j+zpHzjxgH2ukmrXroo9NbBS9DSgUM02ZMh0VK1UVC8TadevF6mIcx77CUGbQHZAuSX4lavM5GYuRqLhJnz4T5sy2KLHKV6wibqbGflpoDWF0zbr1cGxneZ9t3LgR2bLl9DH5R506dZExfSY5lPdGk2YtxMJ85MhRZMmSw3T/5GQXVGyxD/YrUUk5y1of7u/Zu69YyjguqVy5qggTtP5QYdKubXuzCPYTfBZPnj4jyo1KlauKIJI/f0FwfMF05tw5GZ8YE4pcvnoNsWLEFqUJ3fs4MYfhDcB4UcMqymNv3Loj1pmt1j6hRs26Pqw4W7fvQJQo0dHSRulnVk4XlEAAIvBVAgj9OalJo8+6kU5ZTZKGLzQHsj179kGK5KnEncDIxxcMYwaaObQUF47KVaph9WpLx
qrl2/AVmz5rDp/+jq4eBQDFOmzsDhI8dEkORzS7cePi9ubr2RJ29+6UulQlE+PJfnNFfOPGjQsDFo3WQ6eeqUvA+nvM6Abrl58xVE8eKlkDhBIhQrXgpr1m4wqjCX1Lg2bdYSjg7FUNSxOG75KEKmTZ8h1smz5y/IudgXWPO+c/eu9A+9+/QXTTMFentBic8IteAJEyREXdcG5vHbd+5G1Wo15b2g2zATn5PmLVojVszY4i7NPLrMkiXdQyho00WYbidG4vtSoWIlZMiQSe6hkT969Bj07GWxLvD9Cxk8JPLkzodhw0eBz0fa1GnRoEFjUynAb2/9Bk1MjkY9XPJ7xv4xZ8488p2j9ZjpyrXroHU2aeIkcCxaAkuWWt5H62Np3aX1d/qMTy1n7MM5TjAEeD6DqVOnRb8Bg8H+KXTwEFi4yFOqu3T5MnLkzINtO3bKtr3gXreuK8KGDgtaOO2T+/xF8m20z6ebZY2ate2zZZtC0chRY5EvX0HR2F+7ZhF09x84iG7de8qz3KZNOxQq5IgDVoIVFW90ba1brwFmz/VA4ybNwfgY+8SxEwf49EYpXNgBVJgYiYPRLdt2YNqMWSLA0I2Owh77R7ojHT5yXN5T9pGGUqdHj16YMHGSUYXZ9xgZ+/btR6nSTqZQa+Tze792/SZj02ZJBp279rC5PqPAzp275JmgsoguZRQU+G2gt0jZss7iVeKbcMbj+R0qWqykjTI8ZfKUKF26rFG9LCtXqSYhA9aZtDZUrlzVzBo7dhwqV65mbn9thX2vfRo2YgyW+4wdGRdZq0598z21LutSuRomTpwsWcYza+zff/DwF13AOOYl/6xZs5sxXBzvnT5zFs1btEG2bDkwf+EiozosXOgJ1/qNTNc0T8/F4lVhFPDyOiv9x4iRY3Dp0mVR7hr7/Hr5QwIIB6jVa9TGuHHjsWLlKol5WLZ8pfkQUJpLliwFKlasJB+SkaPHYt68+fKA8wNZtFhprF23AUeOHAE/1tQiHz12AjGix0LUKNEwc9Zc8CHnILlW7boSQ7Fo4SKkTZtRbgxfIHaU7IwMfz2ey6lceVPr0qpte0zwucEcII6fNFVMp/wYFSzkAD709omDe770FG54rj59B5gvJTt0+oZy4PKlRH/Z4sVLgwHJ1un6jVvIlSsP8ubJL4MO+iHmyp1PXmJez+Rps3D46HE8evRIhIAhw0bI4WPGTZSOcOXK1eBAgenRkycYO2GyDCwvX7kiHfb8BR8fOJahabZZ81YIHjQY+GAtWrwUe/cdgLvHArjPWyBa+mbNWsjAheX54eWghx+IosVKYc26DaDFiIOPBT6DMX4ws2bLgREjRko7li9fYb7o1FRQm0/3PAosvKfuHgtFwOFHji+MvVaLg9LBw0aKxmXq9BkoVKSYDBTd3efBgR9vHzPqhPETRRCSk36AvCx9+w0Q8yXNnHR5o5RPyb19xy4YO26iDCQbNmyCxImTipmdjNk5Ll+xWjpoDqIaN20uVdIFauu2HThz5hxq1nbF1avXQYHauZwzunfvIZqNj7p9iJZt0aLF4lZDIZIfMD5jHHC71msg7SIH46NNYTZjxiy4dOmSmGLpj3nmzFnRqjdq0kw+HLdvW/yrpUG+/DM+vrly5DYFf4Pnhk1bEI0CSJx45sCKFo6yZcoh3N9hMWv2HMzzmI9IESIhTuw4praFg+tcOXPJexQ7Zmy0bdveRtAymtGjZ2+EDhlatI8nTp3G7DlzRVA+e9biTsgPPIVnpsNHjiBv/sKgq6aXl5cIXLSUUeNLJrVq1jGqlffA03MpOAipWbMONmzYhPMXLsClcnXs9sVnmG6NHLicPu2F9u07ihBoVuazQsE8WZLkCBYkGMZNmIxTp71QIH9BicNgET6LFJSPHj0mVlM+y+nSZTQtSnQDYQfPDzffBQow7u7zcerUafAjRjM7B2/0EabWk4N4DgDYNyRMkASbN28BBbbkKVJjw4aN0irG8dCv3zcBpG//gQgeNAQ6dOqCZStWgf0TPxgcxJLv/v0HkDBBYnFzZWW85xRA1vgIIHyv6RJEixs1vMVLlPZV40b3hChRomOHz6CrqGMxue9UFvBZ5ICZz4l9onBAqzCtm4ZlgYOl2LHiYMFCT/kApkqV1lRK8B1bsGiJuBXQb71c+Yp4+MjWIkQLVtt2HUUJwEEHFSMR/42IBg2bSJ8h9W7ZhhKlnHDk6FGsWrVKPra0nFC4jRwpChIkSCS8GLyaMlUasN/lgH3w0JFyblrlHIuWxLUbN8UHPGTwEHIvqckdMngocuXKK+V4vXQ9HDJ0GCiktWzZWr4PtMZR0xsxYmT07N0Pc93nY85cd1Ee0OWR95TtsU4cPNWt11DOx+sqXLAIhg0dLt9AWsBChQglgsPxEydRpUpVUDHHxOeioksVGXTRNY31V61a/ZP+ks/ZHHcP+T6w3+FzSNeRLt16gn0prQK5cuZG69ZtpV4q3dKkTY85cyzucyVKlMaggRblEAcrQf4KilYtLRpgPgMcfFFZw2sLGyYcOFBhv5coQWKUKVVG6uT3j5Y/9rG0JFPgypkjtwzypQAg3/aVq1abCgkjn0quWrVdxTWa7zC1zUWLlhDNLAdh9L8vUbK0KObs4xTYfzsUdkDY0H+b3wWjXvslFW6RI0eFm1tPnDzlhdy58iBI4MASD8eyVA5QgPpcopuRcW/YB/C9YrzMvHkeaN++E2bMnCWWqM2bt5oCD/v4tOky4LIvkzPQeki3ulu3bomwzneb7zHfPVqdK7pUln6qRfOWMt4x2tWhY1cRfC9cuCjKQY6pfBPoKVTQtejdhw8iIFOwNBJdfEePHS9Kn5at2olCl/eBlu+/AgXCvPkLceTIUVHm7d1riSGpUrU6Ro78KJgadRlLvnMUqJjogksL0LJly+UZoPDCuEN+w7nkM2qk6tVriVLT2DaWVIhFCP+vWFgpHLM/Dvt3GPTtN1C+VRxQM4bRt8SYGCobGedD6xDHOjlz5rJROvK4hg0bo5yTM5YsXSbjGQo41arWxGErJdaYMeNQ5TsEEPv2cHw1drxFqOC+K1euolLlaqDnAS3WPPfKlatAZW3+/IVs3NhorWOMChVbX4rvo7KLimx+1+iOy+8MhQb2zWPHT7KMsc9fEAUJFV9MizyXyDFGzN2iRZ4oV87iuUKhc8LkqfIO0iWRVunPWS7tr/dXbP+QAELNrMVSUFK0GLRuuPX8+NCzYQ8ePkaa1GnR3MrExXx2sLSgMM2eNUe0FBRGmFq3boeChYqYkjU14dWr15R9fGn4QTEG2gzGpc+9YSbctn0nIv4bWQYWPKBp81aY5BMDwsERX6ply1eIZM+Od8KEjxK+nMDuH82edMGZNm2G3Z4vb1LTX7p0OZyxE0AuX70qJmIGBDPxA8LBFDsiDjr4cRg3drwEGKdOlQZZs2SXcnxQGjZoKIN048wc8LFDW7x4Cai9/idMOLRtY7EOGWW4PHb8pAySjAAomsr58WjarDmoDa9apRqiRIoih9B/tnSpMjh29BjOX7iEY8ctsQ/FipXAooUWzR
EL0vJAwZNp+fLl8rGUDQDt2ndEUx+NN13S0qXPjAH9B8ju+w8ffvJB5UeaGh5OakATs+HGdvLkabjWbyxWHh7MD2sZH0sLLUXUXhlBYgw6K+ETUEXTfeTI0bB+veV5YsdYvGQZMW1z0FPO2QU9uruJwFzeuQLCh4uAV95v0Lt3XxEmLvtY6IxO09W1vgjOxvVZLzmA5WDUSBRgaC1iZ87Up3dfUJPGxE5u1SqLlpyDvCyZs4HaLyYG6HGw9KV0/ORpRPo3IoIHC+6r7zUtIBRAEsSND2rhmV6/eYvy5Svi37D/iKWLMSrO5cqDM99YpzlzPcTvlcI8LUa+JQ7aqOkeb2Vp4/tPHWeEIgAAIABJREFUgYWJFjj6zTJxIGNoLznY5YeO7x0TPy6Nm7TAaZ8ZOLhtpGrVaqJ6tRryoaTw7JugT7M0By5M1NhxgGefOGBisCWVF4aJevmyFWJ94TaVDHx3DAsCj69WvaYE8HOdriOGJpDacj4zRmKnzz7GECxKly0Hd4/5snvr1u0S5M8NDlwjRYwskwBwmwM6sYD4ElR84+ZtlCxd1hTyWZ7aY2vWtOjwI2tYRZ3KOpvPOANQ69VrIJZj9i28j9T22yf6RDds1AyrVq2RXXQ3oUKE7yATn0M3q4GL9fHDR4zGoMEWCwTz2QcYHzBusx9eutTiTlCpcnV07NhZ3Fn40f83QiTTKmddJ9vYomVryeLgsmSJUuagj5nsR42A26FDhiJjpixmPBYHMlQ4GYmxdbTG8r1t1aa9TIhCYYDCKC1f9JvPlj2naZHn/c2cOZs8pxygFXYoZlpDHz95ilix4khfzPr53WAQq5HoCkxh0jfXL1pO1q6zCJ0nTpwQTWSr1u3kUA6KU6RMLcotZnCWp7JlLG4iHAxVcKlsnAIDBgyUQalxv80dgEz0wm+bkapWqyH9p7HNYNtkyVKafScFalqMevWhBf2jUoxcqC3v3q27HMoJWygcGKl7dzczhpLa8ipVqhu70KZdR9Cia6RtO3aJtYUCIJUJn3PhovcABWTebyYqqaJFiyGCB7epFKQg5lu6c+8BsmTOihhRo311oo+2bdrJe2AIvnSz4XthCO38zjkWcfTtNFi7br0EexuzBFHxQP/5unXro27deijrVE7iIJhHBaVhpaJQGTNGbEya9GncKb/T9ICgOyUFXAr7tBjyWeM7SOUQE8txzMNEF1t6khjWDPZX6TNklmdZClj9oxtkpy4W6ywHuFR2Ge0aNGSYuBfze8P3nwoFplGjRiN//oLmYNO1XkOz/+czT2HQt8R7t3HzRzcwPgculaqiceNmoGsoXXto9WagMy1w1m6YVCbTq8I+8fvC66byiYnPYqZMWU03RvZvTRo3tT9MtmmdZH9OYTZVqjTmeNG+MJUmyZMmF0Urx5S0QPbq29+mGAUQa4uIzc6vbNB1mFYoa9dovnu08FNh1KxZS7FAt2zZRizw9FyhoslIjG2qV68hYsaIiYjhI+L4iVPGLnPJ+1y+QmUbzxnGhXISBD5ffCY5EQ2F4RjRYoiQw4PXrF1nI4B4UgBxcpZ6eT/JvpdbL7x85S3ulIa3gHliP1z5IQGEA1pqqI0ZKm7duoPFS1fYPGxsc7eu3eTF5/Rx1omDCPqhM3CIGpuNmyxTAbKjpzbKSKVKlpEXltsUXKhx9Fxs+dCxo3Au72L6TVLzQPelrVstL0eLlm1lIMBj6YpSp3ZdUGverl0HmbWLnd/XUr/+A9C3n+1D+rVjqEEqUbKMjc8kj6EWI3+BwuZgjIMyWkOYJk6chGRJk6Nb1+5o1aoNOnfqjAU+Fo3Tp0/j3wgR8XfoMNi12+L2Ur1adZQuVVpcTagJ5mwgfADtEx9ium/xBWfiB50D/p5uvdC1a3dxf7P2i+3cxXK/OLMIP9L8oNPfmPEORqKmmh85pqVLltlooRmAz8GJkTZv2YJQwUOJRnqvLxptlqMWPmb0mBJcbfj1rlu3HtVq1DYHR1u3bkM5n8E+20VTNgd5TA0bNDa16nxx6jdoiIGDh8g+Bso5FLUEW+3bu0/O0b59B7Ru1Qb8SJH76zdvxH85Z/ac8qxypigjubhUwbjxH7eNfC63bdsOx6LFTasfBWAKkUZsDWeuYodsJA746Fdbv0EjJE6YWJ4H7qO7giEoG2Xtl9SG8gNK32sjUSA3OjtafxjPYbGAWKwSL197w9GhqORbx4UYx/MDxfbRHY9BpSVKlJRzcEIJ+0QNW4lSZbFw0WJzFz8uhrmaAghdRozEQe2EiVPEbStN6jTYuPnjLDUMrjV8XydP/SjcUwtco6YlUPJLs6zQvL10+SqUdy4PZ59O1DivsRw7ZhwKF3bEG584HGrHOaMOg/qZaIkxBiPc5gdv8JDhso8aT0MA5QQLrq4NJN/4V7JEabjPtWiU2R9RE2wkWno44KQWiz7kS3x8fCmA8CP5xvujNtA4hlbFBo2aYtcuy7tNIbqIQ3FzUMJye/fslYBFYzBCbTotLUwMDKW2lq6WnEGFs7scPfbpzH306+Z5jHsobbJyU6C1lH++pd59B8gECMY+9tHUlDHRdY4zfFEo433Pli27DIg5KyL7pilTppl9tHE8l7SqcIIOI5EZB+LWif0dtaAc/DGAlAoVJg6WOnTsIuucNYnCmcd8yyCOCilajOLHS4AdPlZu+ljTT5zunkZyqVwVo8aMw6BBg8Rly8jnku9bn94WhRoFGsOlgvtoJeF7/7mZj/gcDxw0BHSTpOa9S7ceUjWfjfwFipiTkFDpwUEaU8UKLqbVgtt0BSEP3xI1oFQiGbGIRYo4yow6RtlDhw4jb4HCYtE18ipVqirvNq0V1okDoy6dLRy7dnNDtx49rXeb60OHDhdrFTMoYDRq3Bwe8z9a3Okew28ereB0RV1g1U+YlQCo5FJZXGqs8yjMDfbpr6mkoaLJ0NRal+NzVrxocYQIGgwnTlmUENb7rddbtGiJYIGD4MoVyzT8dHVl/7lli+WdyZA+IwoVKGR9iKzT6tKseWtRCH6yE5DneLb7fJsgdKMclRO0EnftarnfRr6xZB85dsIkscbSDZWJLuwlSpYVFyxuc9bCUj6WpnHjJ0mcreGPT68Mjh/oLmOfKBRTycr4Qya6+U6eYhGE2vm41NKt3bVBY4lFZZnx4yaAz4WR6J7OiU6Y2K9/TgCh+zxdi3xLR4+dxJJltrFp1uX4Hk/1RaHLd5buo6dOW6aGJSsKqoYiiO9j82YtrKsy1+mW2KBxM9mmJYBuq9Yu10ZBTlzA8ZyRGLvk4OAoAqARAzNmzFiZ4MAoYyz5TtMVnApNCqEUiOgybiTeI7r/2U/MwGeCHhX0jrBOVHo6lavoqxKcLqYcC3ISGfvEqc8LFSpiCmbW++cv9BTLaKdOXdCpc1f07tXHtPhx/EYXLEMg9/T0hFM5iwDCOujJwj4zTpx4YAjBf5l+SACh4MEBiWGSNxpM/3sjXoBafUJkUBQHvYY2kmbL/PkLm9Jf+YqVTQGE05AZbkask1o2Q8tKabJU6XLY6vMRoZaVQhADgZj4gmXJlhNnz
lg0wN3deovJi/voLmav+aXJiw/WlxL9W+lL+j2Jgg0149YzZfB4ds4OjiVEM8htCl38eDBxWlV2RNbp0pWr8kAzuPnhoycYOWKkaO04Kxf9cBf4+LMax/DDYMSHGHnUANZr0ASGZp9uIm69+xm7zSXjLoyAYQYo0b2BmhomDsIpEBiJvsZ0rWDasnmLWIk+7qsh95rb1AjRXMwXvWOHjhKrwhfSOpEJ3RioDaLWL2mS5BIHQoYtWrUzrT6cq57Txhm+ujt37RZfe2orGjZqahPQyGumD3jLFq1FI0DBgIkDvNx5C+CRz3SizHv67AW4ny6FHKTTNJktaw4ZkHI/XW5odmcy3J1kA5ABID8WhmsW3R3KOlUwZ9WgqwNd3Jg2bdmGylVq4OLFizIhAU3MV69aBgNNmrUyB4W+mT459SE/njlz5AKFCiPRP5YfDaaz5y8iWuSoSBg/AW7d+WhVoIaVFq43bz4N2GYnzXrZqTKdu3BJtqklsk8UEvPkLSAmYmOfi0tVMx6KwaPGc0W3K8bF0MLGxI8H3TGMRB99CiFUYtDlzeDKjxAT3T7Ix7cptJnHuCdyonmfgoRviW4vvI+0+DFt274dhQo7mgoSl0pVsNtqykf6J7Nemr6tg745sOesY0bivgwZspgWEMYE0F2IiVYs+pyv9rlW8lq40BJHQgHa+mNv1MclB9DNWrYBg5qZOJjjR7RzF4tLG/PYP7D9xmCkapXqphaMrkyGUCEV+AS7GuvGkn1z85ZtxVWReRTAKBQZibE/3Xv0MjZtlt3deoFutEZq2bqtKYDww+zoWAJ79uyR7wE/uPaJ7kwfzDfFspcfyVatLK5CzKEwYj2zFvt38iU7WvPYPxqKFH5POnexCMpUOpCXMbg0XBn5DNASQIscrZ+cAn3tWks/RusIv1+MNaI7BBU81vEYufPkx4T/sXcW0FEkWxjGJciiiy/u7rK4u7s83J2FxR2Cu7u7uzvB3d2DOwlJsP+d/870pDMEFlhmidw6J5mW6urqr7ur6krdnmIRLKntN/oc1pOacQ6WjEGL+Vo5eKD2uGMni9WD/R/vDxOtoLyHfOaZOKhvYJ3wygh15kmzDOFOi4g5TKgcxMHC+o1o2doyr4rbqPVsaGK+Y+dOcTUz3icK1Gx/2rRuI/ebWncjMTrOwIHOskrXFbobmhP7KL5BHIxyEi8ThXPeY7p+MRltFgdkf3XoJINT4z5JBtM/WsbixY0vQis3U6jgfTGiVbq4uIjLiukQH4ucE8o2q2pVb6ukkYHjAEPA6tK5q+R7/tziImdEADRcicuXq4gSxUsYh9p+Fy9bIW2psYHjG078NxLf1ZFjxvt4Vox9tNjFjhlHgrAY2/jLeQJUEnD+BhOtzLQYGqlshcrYs9cSUYwhtmtYLUAbNm4WgcOYSE+Fa5asOXy1gPCeUFDnRHYmHpsyZRrxSDDuN/snvv90MWeaPNmnAELrlzGopjWa837s0/qNm7F2nffcWOPeG/no1TFn3kJj9bNfWh44F8o+0d2Zgqhh9aaQUrZ8JZsAwjlDtMj5lui637xlG1vbzrYwQYJEn33ugM8v57yYExVEIpju2SubKQDS/c4+3bv/QLjRksB60GPDsPIzL719DO8N87FUyLRs3Q483j7RUmxYvuz3TZ0+C9NnzLbfLK6+fEeNd8+cgUpMzh8zJ8PDgO8xx1NG4Au2/bzHTEb/y2Uqpml5+t5wyOZzfu/ydwsgnNnPlyhN6nRiMqTUyT9qhjgxjn5wHOxx8iC1eUycp5ErV26R3KihYyNErSoHIXxYKPHeuXsX1WvUFO02Xzpq+1KnSgNG8qF/M/OmTJFatGR88KnNSpc2g7hkUSDiAIJ+zkwUVjgxnS5dbEAoDNWr31jmlVBzxQaZ2lAzfB7HOrEhuXL1qlwjNWTGhwM5wD937sJn0XDkhNZ/FGio6aZ5ja4ORoPA3Tt27sIffyQUrTDX+bDT/5MRt2hqJ7uOf3eRDvPAocNi4uakLpqk6W7Gjp5aZnZAbFTYCWzeskU06RxEc8BvDFBYPpnRl53zTMwT6qnp4iRPCmrkRp9+Rkfo2KmzDCooDNAKw3jfTJxcx3kWFGJYXriw4UANEgdt1ATzHhkRX/oPHITixUuKv/y+/QfRoGFTHDx4WF5eNn7GIEsKBkQbw0lajFLCzohaNPp8cwJ8hUrVJAIVBwYMh8yGgoNxJk4unTlzjviF8jkwhFAKfcOGjxaLAl0cOE/D7CrBia4ctNM9gqZ1Wu0oMNNFgcIv04RJU2TCP5dpNaPFhANPe+GO4QYLFiwsUXEoGPE7HEmTpRDLCDvt2rVqg372NEGzfA5AKeQxXnnokKHFZYTnoCWhceMmuHbjps3iIxUBcOrMWURwCi/XTksFhTXO+aEQmiZ1WlSpWl0GAJy7Qz4hgwaX54zn5PPNyaHcTqHf0J4bZXNQxlCW1avVELcnDpYSJkzsQ9tr5GUUrcyZs4AuaeTFuVVFi5WU+jCiDucH8DnlfaCwET9+IhEkOdjiHJxx4yaI77FRHsMfsj0wJ7ob0I+Z7KgZZlQb+1S0SHG0bNFKnld2FIx2Yh9Nh8fMmjUb0aNGl2vhPApaVo3BNfPzmeWgx/z+16vfUOZS0bJkJGqc0qfLIGZ9tm/0keVcDcNETQssTftPnjyTjoZukxSq6Y5Hf/9BzoOlE50xYyboVnn7rk+rK8/P54gD06nTpttcrGhhpAsX2xJOmB0xcqytQ6LfPes0bZrF15j+6JyEycmpbId5bkajsU+0HhQtWkLuBduyiRMmIknipFJfCicUpvk8GS5Z5uPpAla5chVx67l+8xbKli1nsei8ey8CNa0Ng62WYk6Y51wdcqdGc89eF5lLZi6Py/TpLlu6nLjj7t6zHyVLlJT22hhwUdPIUO+ci8PJwwniJ8KqVWukHaLihu6F1JCyj+H5p06ZKt/NaN6qnXTQFDKoCDpx8iRevXGT4BjUDpIR3U0NiypZ0KpFzSK1f/SXplWS7QYVBnTvomXGuOd8fmhdY9tiRJUxro33IEKESFi5cjUYmTFPnnwSQINtJwde1DBSk81ElxLeR87roMKFgQ/IhPeXbS/bCFpM2e6bE63YbHfWrFknGnQOFpIkTSHPMwNuDBw42BaOllbxpSZLBec9cJB87doNuR6en64Y7CMZDSpt2gzo1r0HGKjh6rVrMq+JZ2/Xth2yZcluURYB6N6jj7xPVGJQecXE6wjnFEEGveb6mpfJi1b/ShWriCslvyVDAY/vDPt0DnoZRpVR4+hqa58okHGiN9s0tkXsyxm4hO5F7dp3tCkZGUCCefhdiOs3b0ugCK7TEsJENyCGQDUS3S7p1sznqGK58sifN58ofDjB2dxGsJ6ck2kMlI3j+cs5EKnpj2/n7UHFKwXp4cNGiGsarU60gF+7dk0+Csr5U4Y7ON2VU6VIhfsPHggP3i+6SfOZZXAKXgPdZYwxiXF+uhgyyAzdQA33tiyZsorrD/NQqcHolJyH6eJyUCz+bF8zps8o/RoFK7bHdJ3ic07vCMMawuPZ19Nr
gG00w9bSm2P9xk3YsdNnO8Nnk6Hdv5RoNTVbIY18dL1LmiQZXA4cEIGNLnMpU1rcCHk9BfIXFPdwszsXj+Uzx2eIAh3nfxjvSp3adWVOCZWpHFeSX/58BdCwYSMJSsP3hBbb9Okzyhwk9u/sHxnkKGuWbPJccWzE+0xFRuvWbW2CtlFn/nK8RWUo+18+i/TS4TtHF3LWm3Wi1YpKDuMjnjyGynJGbKNAzneP4wcqZzn2ZJ/D957CnG+J47+KlavJXEv2SRQueC4eS6UXBSNeLwNKbNy8TdpIzo+hUpb1YxozeowEFaEShccyoi2Dk7DtbdW6vQiwrBcVCd8SldK3en7rtu8WQGgC5gQaugDRL5QRZerVbyQmM0Zv4iC8Vas2opVyc7eEtuRk3N+jx5DBFxt8Ttzk/IdOnbti8tTpokWnD1/5chXA+MRHjp2wWDSyZhe/dUYOoR9yrpy5RFJ2dX2A8xcvitaME6z4YnGAZiRGMOIH8jjQMW4k3VTKlKkgE2mNmPZGfuOX/vAsq2ixEtJYmcMVPn36XCwwDMv6pURTG/3Jad7NX6AQVltdl2RyYuOmyJgho4SzpEWD7mDUrtIVgYlcqM3n/AC+qEa3wwaW/p2MCmZ2UaDvIwd95Ej3EcNtxKgbfctZXhYZODazxVZnY8Jz5syVB/UbNLG5pfB7BoxWRX9OCiNGhDNq0LiNJtG58xeJCxsHeOz8aVLlYG78BAsTuvVwIja1m0wUHizzeoqJidKom/FLLR0FVt5bulVxgqmRGLOdAxDGaecLzsbRsBpwgEONCRtJ8Q9t0178gtlZ9OvvLM8fJ+LRikMN4XGrdpll0zWCJupatevi3j2LPyx9K6kB5KCa81gMQW7hwsWgNtQc8cmoH59zPrNdunTH5as3ZK4T3Y2oZWH0FM6d4YRxDgY5aOHEUkZHoxm+das24trGsqiJp+Z8zbr1nzVyjJCSNXNWeQfo2kYhle4n9F/NkjmrDPI4yZIRh8qVKYcihYvI4G/Hrr0yIC5VsjT4R60JJ6rbJw5eOPDiwJONOIV83xJN7hQyGPWGfrwcAFLgZaKbC2Oqc1C0cvVaEXwp5NK/eNiI0TJHjO8Uw9UaiY02Y9ebQxVTC8gQjsxLv1bfrJOcBMxOg65Cs+cukHrQldM+8WNifDYZbYnP1cBBQ2wdMydPpkyRSqKuPDJ9VZgaQpbLIBDmRGUG72XOXLlBf2pzovmfUeEYgIODeLrIcWDD+Q19+/aXAQAjmtCKS4FxpNXFwSiDwSS6du+FnDlySQANnstIdEuhgoUWx23bvV3Y+K6lTp1W2l3DpM5vEVC4oBadkarMgyaWx4673wBneR6rV68Bl4OHxeUndarUYqk6ceqMzAegm5M57LNRF3aqnN/E62Q0KD57tD6xDdq6dbsoJDjvzdC4UXHDgTv9n48f9/3L6+w8GXWKlqO58xciR7acIgRROcTEdofubxzEU3jk/AEO3hi5jy611N7Twsr+iANpWqs4N5HuVmwnef7Vay2RlGSA2qETRo4aKxZduvkY8f95LrZltHbSd96wrHA7Az+wbA7uGCDESCJwV6spwpexjb+UFagkYZvMyGnUKLMv5FxCWpI5kKIVjwJ940ZNkCpVapvfPeei8Lj27Tuhb39nmcdCRkY/YJyH65z/xL7XeEfWbdgkAwyGBWffZ/QFbCt4XiZaMRn5L1mSZDKf58ChIyhTqozMU2EbzETrJOd6MYId7yETlSZ0hc6YPhPmW+eIUWDiJH4+U+Y+csnS5bY5UXKwL/9oEedzyuhl5neXChi6+xYqUAjFipeUwbQvh8smvndsc6pWrSGKHSoCxFpjEtYGOlu+mcS+jlHmhg8babtfrvfvi4WAAj4To0TRmyJnthzIkS0HGEWJ993ekkMhiYKRveafZfD542Ry3xI13VmzZpf2e8q0GWjUsIkoaOlmV7hQYVGqcG4GJ2NnzJBJnlOWQ2GU7TMHj0OHj5R2kRZ5Q5PNPPxYJd2sMmXIjMaNm9usM5yLxEE0E58ttu98b+gCzKAVdEmiUMm+kX0DI0KxDd+9d59cIyM/GpGnOIZim0llVYe/OonbNT8pYG6vpC5e73womuTk1n8cOI8YPc7WDpv30W0tebLkImSxLnxP+Ldm7XowzC6jyXFcROWKOTF4Bdt35qUF58lT75DljPjGa2rctLn0WRS0qbSgopr9aN68+aV98Xpv8Q5gwBzOzSlbppzkp6DbpFkL6W84b9S3xKirnFdDhUSDRk0sY78ixaVP5BiCln7WjXWk8paJc/E4nsqXJ598n+b02XPg+JZjObZZtGIwcAYFgy8lae+LlZRxKkP+Go89PyrM6HxsByjY8zkVj5CiJWSqA9sGKq+bNW8h/REVakynTp0RzxWx6C+zWPTZjjZt3losvl+qx8/Y/t0CCH1PDTcFamQZf57aVFoPjMSXlA8vB7tM1DCwgzZHDaF5k5I3E030fNk4UKf2m3HCeTwno/KPy5TE6FLBBowdBs3p1HrwWJ7fnGh5YMNIc6NZk0IfT0rEvjUgPJ7Xxuuh5Gs04ka5HCxRq8XzfSmx3rxGSuLUJBqaeT6M1P6w/hwMUJvO+rHxoBRvJHYujJxgtpxwH+tCLRr5mBM1Idzu2zckKMGyruTOc5gHeyyDkjbraAy2jXLZMVMYMieus16GhsHYx/vH6CXma+C6mRFN0ObO3jjW/Ms6+KZV4nNmf2/ZKdCaxEnoNN1SiidzxsDnc0YtGbWmjJPNXz5XHKwazyzPS4GA33SxT2TC58ucuG5v/TD203LF/cL6Nb+R8kEm/zF2Oy06/DPK473lNX6yugWZv5/AZ9S3c/B6jMaFy3z++Gd2/+B7YbjPsV48H7Ut1NZZTyXL5vfAqD9/+QzevXvXJnCa9xnLtBbRlEwNCp8Zc1lc5h81Z8b7zOPYHhj5uN2sSWGAhhcvfX+PfHsOjHrwlxN9DYsbn0c+A/bJ2XkQOnexuL4Ic1MG451g3ezfJ2azH7wbh/L5f//B53vBfdRw8n4bic+bocXn+8/7QQ0Xz2Xmw/xsx/jucB/rZf8M8B7au7n6rL+3Sx6fCT7XxrmN+vCX75dxHl432w3Wie81f1l/1oFtk/ndNZfB94ptFtsaKic4CDLaabbZvC+GlYDHkbu5TzCXZSyzTNaN7fHbt55SFz7n5sRyDKsUfd1573luDr5ZX9bfcn43eHp6f8eHxxmJwi19+6mUYTthHjQbefjL9998L8mM7zCZGYMxI7+5zTO2Gb+0xJrvG+8LubINYH3ZJrKdtLTN3pG0qMCxb++MMs2/bBPsteDs6/iMmhPbBaNteOvx1nYNfC55LrYPrI/xnvJYsiE7o+8SDS3vvbUfMcrn9RjzUOQ4Ly8JvGIIRUa+L/2yrizbSHyW+K6wj+Q9MrcXRh7zL/PxWSBnvie+JfYL9goFIx8FaWqMqS1mMlyTjP3f87ts+QqZ32NYg3w7lhp2tg1GstT7qbTvfL753BljHvM4yRgT2I9HjHL4/jA/y+e9tm9DmI/fNKKygm295Zl4Lu8q33/
j3NJ3ubtLH8pjZs+Zj569+1o19Jb+jO8rw93zOvj8fGtiOOsGjZra5izaH8dnmc8X2y4+V7wWy7P5Vt5xyxjQ08fzxjL4vLBN4DPH67Ift7BNpDWJzwn7CrZxxjqPMyf2I9Lf8rttjx/LM8i89u+Z+Ri2W6wvz81+kWMOHmOMk9jHs63iuYx3jM88x6G8PraZvO/cxmPJ1XzvzeeyX2Y7wXbFvr9ifbnd6Be9+5Z3wo/n4j3ndvM0AdaDigU+T0xso8iP+RyZvlsAcWRlvqfsnTv3yMdVvucYzev/CVBTaf6gEK+IYXMpOFILZrjaGFdKFw1qRs2dnbFPf7+NALUo7EDoEvgz0rSZc2Sy6s8oy7cyOJ+oeXNLyGrf9uu2wEWAH4qt37CJDFgC15X/d1fLARgt6PaDwP+uBt9/ppmz5kpgBsM15ftLsMzp4Jw3I4Ttj5ThV4+h6yoZ/ZtEy06Hjl1swYP+TVl6bMAj4C8FEJqH6MvPEI90ZdIUeAgwtCvddOhaQN9MfjuAHR+tRtSqtWjZRkzz9KOuW7e+RGSxt/4EHlr//kqppeccnKhRoglbe7P7t56B38bgxwP79XMWt78vafS+tbwv5aMWMkOGTPIBNsOs7NieAAAgAElEQVSd50t5dXvAJ0CXA7peMNwrXQNfvfJp4Qz4BBx7hZwLQ5cqRm26a3VpdewZf27pdE8yf4X6e0unJcX4HtL3Husf8tM9798o7zhn0D74jH+4bq3jf0PAXwog1Mjyoy5LFi+1RTb5b3DpWfwCAYZxZqQYfqOFfoy0fpjTnDlz5SujDLNLc6emHydA8zbD+DFCC/147U3X31oyI9SMHj1GPsj0rcf8SD6anzlZmdG0/s3A4kfOrcf4PQJ0keMgmd9Y4Bwzexcvv1dj/1Ujfihz+PCR4BxCTUpACSiB7yHgLwWQ77lAzasElIASUAJKQAkoASWgBJSA3yGgAojfuRdaEyWgBJSAElACSkAJKAElEOAJqAAS4G+xXmBgJHDlyjWJahEYr12vWQkoASXwNQKMVsS5pJqUgBL4dQRUAPl17PXMSsBhBLp07SHx4R12Ai1YCSgBJeBPCfBbCvzGjiYloAR+HQEVQH4dez2zEnAIgSfPniNl8lRI8EcC28e3HHIiLVQJKAEl4A8J8KOWoUOHwYGDvn981R9eklZZCfg7AiqA+LtbphVWAl8nwNjtwYMEQYggQeWL41/PrXuVgBJQAoGHAL8C/3vU6AgSJAjatm0feC5cr1QJ+DECKoD4sRui1VEC/4aAu4cHsmbJhigRI8pfpoyZ8fo7vlr7b86txyoBJaAE/DoBfhfGKWQoxIgSFbFixMKJU2f9epW1fkogQBJQASRA3la9qMBKgB9l/C1cBMSLGQvx48RF+LBOGDN2fGDFodetBJSAErAR2LPPBfHixEX0SJGQ6I/4Iog0b97Ktl8XlIAS+O8IqADy37HWMykBhxJ4//ETsmfLKZ1qiqTJkDxxEoQNEQI5cuSC21sPh55bC1cCSkAJ+HUCtWv/T9xTkyZKhCQJEiJ6pMiIHTM2zl+87NerrvVTAgGOgAogAe6W6gUFVgITJk5G5Ii/IUHceNK5Jo6fAH/Eji0WkTHjJgRWLHrdSkAJKAHs2r0X8eLEQ6zovyNZosTSRiaMGw/hQ4eFWkH0AVEC/z0BFUD+e+Z6RiXw0wm43n+ApImSyMTKWNGiI2nCREiWOAliRI0m25ImToJbd+7+9PNqgUpACSgBv07g7VsPlC5VRtpCul8lSZgIqVKkROzolsnoYUKGwo5de/z6ZWj9lECAIqACSIC6nXoxgZXAiFFjET9eAuTPmx85suVAwnh/yF+O7DlRIH9BxI4VByNHjQmsePS6lYASCMQENmzcgmRJUyBn9pzIlSOXWEDix46DLJmyoEjhImIZadq0ZSAmpJeuBP57AiqA/PfM9YxK4KcTuHrtOk6fPQ93D0+sXrsev4ULjxjRomP7zt1w9/DC2XMXcEW//PvTuWuBSkAJ+H0Ct27fwbETp/DqtRuOnTiNlMmSI1iQIJg+YxbeenrhwsUruHjpMj59+uT3L0ZrqAQCCAEVQALIjdTLUAIGgcNHjsEpVBiJdU/BRJMSUAJKQAlYCLx6/QYpk6UUd6z1GzYqFiWgBH4RARVAfhF4Pa0ScBQBTrZ0Ck0BJBpOnT7jqNNouUpACSgBf0fg7j1XpEiaXASQpcuW+7v6a4WVQEAhoAJIQLmTeh1KwEpg1+49FgtIlGg4eeq0clECSkAJKAErgTt379kEkCVLlykXJaAEfhEBFUB+EXg9rRJwFAFDAIkeJRpOnDzlqNNouUpACSgBf0fgzl1XpEiWQiwgS5aqBcTf3UCtcIAhoAJIgLmVeiFKwEJg5y5aQEKLC9YJtYDoY6EElIASsBFQAcSGQheUwC8loALIL8WvJ1cCP5+ACiA/n6mWqASUQMAgIAJIUsMCoi5YAeOu6lX4RwIqgPjHu6Z1VgJfIWC4YHESulpAvgJKdykBJRDoCKgFJNDdcr1gP0pABRA/emO0WkrgRwls275D/JudQobG0eMnfrQYPU4JKAElEOAI3Lp9F3FjxZE2cv6CRQHu+vSClIB/IaACiH+5U1pPJfCNBLZt3wmnME6IHu13HDumAsg3YtNsSkAJBAICN2/dlq+iBwkSFPMXqgASCG65XqIfJaACiB+9MVotJfCjBB48eIhFS5Zj+co1ePr02Y8Wo8cpASWgBAIcgdev32Dt+o2Yv3AJrl+/EeCuTy9ICfgXAiqA+Jc7pfVUAkpACSgBJeBHCUyfPhP9+g1Anz79cOPGTanlx0+fcP/BQ8yaPQdLliyDq+t9h9b+4cNHmDN3Hq5eu/7D59m9Zy9GjR6Drdt2wNPT84fL+dKB7m898PHjR9n97NlzrFm7Hqy3ffpkv0HXlUAAI6ACSAC7oXo5XyfADnHc+Eno1r0Hpk2bIZ1lx45/o1u3Hnjw8PHXD/6Ove/evftqbu4fPHgo5syZ99V89jvfenji+o1bePfuvW3XocNH0KBBI3T4qxM6d+mGI0eP2fbhk//oxszV3LxlK8aOHQ9PLy/bdXz8+MnXTtqWQReUgBL4JQT27z+A0qXLIV++gvjrr05gezptxix0+rsLli5bgSdPn6JsmfIy52LZ8pUOrePKVavlPBSGfjRt2bYdIYOHRKwYsfHs+csfLcbX4169foOBzkPg9c7Stq3bsAmhQzlh5crVtvwbNm7Grt17beu6oAQCKgEVQALqndXr8pUAh+Or1qxFmlRppKNq164DJk2eihjRYyBL5qw4d+GSr8d968ar129g+IjR/5j9/YcPcs7SpUr/Y15zhqnTZ2L2nPmy6eMnoHfvvkiYMDEaN26KKVOnYeasOZg5ex4KFSqCtWvXmw/1s8u8pq3bttvqRw1qwoRJcPeeRVvq9e4dlq1Yjec/eTBgO6EuKAEl8EME9uzdh7BhwqJAgUJ4/sJ7sH7vnisaNWmGLl17SLkrlq+U9nb79p1fPY+bu7uP/Xz3zcmslOB2toFG8vJ6h9t37oJz4J
48eWpslt9Xr17j/fsPPrZx5c2bN3jr4fHZ9qpVqiF2jFh49cbts33GBg8PTx9KEp7fnKg0Mad379+jVas2yJwpC15Kfd6L0mv/gUNS30+fPuH4iZOIGzsexowZh7dv34LnMJJhNXnj5ib7jO36qwT8KwEVQPzrndN6/ysC3bv3RIigwXHg4GEpZ936jdJB5smdV4wG7LCemOZPuN5/gNemzujuPVccNVsaAHByY4niJZEpQ2ZcvHQZ9DU20qXLV3Dn7j1jFbfv3MGRoyfEPYEbHz56hHsm94Rr13z3TaZmkZ0UU6NGTaXOQ4eOsJXLBXaoOXP+ie49evnY/vzFC5w+fca2jfXjdbx+Y+n0799/4GMQYcsI4MSJUz46Q/M+Y5l8Dh85anMvYIf6+MkTvHjxSrKws7/n+sDILvVcv2ETfo8WAwMHOuPRo8fgAOTmrTu4dPmquD+8eeOGIUNHoGDBIjh2/IQMMDjQefHyFV68tAx46NZx7tx5uL99aytbF5SAEnAsgXfvP6BcuQrSBt28dfezk3H4PX/BEtk+Z85chAkRCgsXLUW/fgORI0cuLLX7Cvn2HbuQM2duNGzUBLT0Mk2eOgO5/8yD2bPngtaCPHnyoXWrNtJOLViwCFmzZMfUaTMk74sXL9G2XQc0a9YCJ0+ekm38R4GkSNESyJ4tJ/r26QdDLDh46IgISQXyF0TzZi3hZbIqlytbHokTJv6iAMJQ57ly5UbRIsVx9tx5Odf8BYvB/mP06HFYu34TihcvifbtOtjqMXbcRGEVL3Yc9O3vjJu372Ho0OFo1bI13n/4KG1a1qw55COyPD/bvXr1G6JggYIoUaIUTp85K2VVqFgF7dt3tJWrC0rAvxJQAcS/3jmt978i0LRJc4QL44RDRyzuShs3bZHOoUC+AiKAjBo9FnHjxgM7RdcHDxEndjw0btRUznn02AmULlMemTJmQZOmzcGOmKlHz94IESQYUiRNhnoNGuOC1ZqyfuNmZMueE8WKl8L5ixYLy8hRY5E9ew6MHDFKjp0wcTJi/B4T9erWw+KlK6SzbN68FdzcvQfV+10OYNXqdZL/0pVrUt9SJX23oLiLq5bFD5sHnDl7HnXrNkDGjFkwdZrFPeH6jZvImjU7UiZLLpaTDh06IXOmrNjnckDOwX8Uwvr1d0bGjFnRomVreNpp+YyMDx4+Qo0atZE6TXqw3kaqU7su4sWLj4HOgzFuwmSkTJEaHDgw3bp9BxkyZIJTyFAiMPG6T5w8LR1uu7bt5T4cOHQEsWLEQvw48VCz1v8wfuIUlC9fCb/HiAEKkUwLFi5C6jQZsN/loKzrPyWgBBxPgO3Eb+EjImXylDZFiv1ZDVfUWbNmI1jQYKhYsTJ279mPSBEiolQJ77Zr8pTp+DN3Phw/fgJdu3ZHpcpVpahTp89IO+cUOgyWrVyNnj17y3qhQoWxYfNWFC1SFGFDhxVFCg8YMMBZ9s+fv0COp0tW3rwF4HLgIAYOcAYH+HTDPXbsOIIFDYHlK1bKX4hgITFm7Hhb9cuVKScCiFnpZOyki2iKFKmxd+8+TJkyFZmzZBPlzJVr1xE+bDiECRESCxYvQ+1adRAiWAjs2ecih1JQSRg/IbJmyoIrV6+CFpFGDRtLfTdv3YYPHz6iV6/eCBk8BCZPnirXtHPXbkQMHwGhQ4bGlavXpJy1azfgiLXfMuqkv0rAPxJQAcQ/3jWt878m0KFDR9E08Xfa9BmIFTM2cuX80yYgbNiwSTqGsuUqYM/+gxgyZBjYqdGSkSB+IkyYMFEmVMaOFQd/d+4q9eFg/Y+48VCmVBmbdp4dScqUqfHo0RMsXLQYqVOlxbPnz/H8+QuECBoM6dNlkGMvXrqCqJGiyDnXbtgsFoEgQYJg9559tmsdNHioaMy4YcTIMZK3g0nDZstot0DrTYzoMeE8cBA4AZJCx8yZsyRX/foNpZw6deri8NETiBguPKpYO39mqFChEnLnyiOCR9269WWuiV3x0qHT5at27TqyK3/+gmjSxCKs9enbX8ovkL8ADhw6hlw5ciJyxEh49PiJ5B0/boLsHzxkGOjC8PTZC/wR9w/Ejxcfbm89xQryR+y4KJi/IKjh9PD0wqhRY+WY+vUaSBlubm4iKBpl2tdP15WAEvj5BDjZm20U282H1vf5S2eZPn2G5B01yuKemiFtOhQuWNiWPWP6jKBlgEqWunXqSl4qKJjoCkXLAhPnR/CcNapVl3UO2Ll+8tRpy/4NFkv2NqtLJ9uRYkWKyT7+M9ykOJin++3du66YO3e+lNG8eUtbPlogEiVIiJcmKzZ30lKRPVt2RIscBdt27BQLB89vWLdTpUyNlMlTwMPrHaZOnY4QwYJjxszZUu6Hj5+QLk065M2dx3YeZ+dBcu4RIy1cqJBieXv37bfloWWF24x5LfZuaLaMuqAE/BkBFUD82Q3T6v4cAm3btpcOj5MmOVmyc+eu4pdrlE7XrN/C/4Ya1WuKpsrY3qHDX9IZ0IWKqVKlKogdM7bNXStRgkSoWKGikR1FChdFRKdwIuT8z9qx7t69R/YnTpAI+fLkk2XX+w8t2rHMWWR9xoyZcp4NGzfJOjvOjZu34Zo1ugutEuyUzCb+t289xNLQp09fzJs7H3379sPJ02dw5OhxRI0cBdWqVsPEiZMRKngIJE2cVMqlYMVy+PFCpmiRo6JCuQqyTE1hpgyZkD5tOsydvwC5c+VGyOChcNXOPez02fNImCCRDCjou5wiWQqEDhkGz1++xqrVa6X8Af0HSJnkwS+0G65YixYtkf2TJ0+R/a9euyF71hziJ+1mdalKnjSZcJQM1n/ly1VEtCjRJNoN3bGePX9h3q3LSkAJOJgArQpOYcKKgPDw0dcDeHAwznaGc9Q+fAJSJU+JPH/mkWVWM1OGjEiWKIlYWzkHbPjwEeLC+vjxU0SPEg0VK1SSq5k9xyL0/NXB4oLUpnVbqcP5Cxdl/6JFlsE6XaSYfosQETWr15Rl3/7NW7AYNWvWQbiwYdGpU2dblnJly4HtM9sjc6JlmdvTp00L50FDxFozatQYcSelAopuWxRQvN59kEAavOZhw0dKEU+fvxBrUc7sOWxF9u83QLiMHjNOtjEYCo9Zu857/h6ty0kSJUGC+Amx3+WQ5KN7qyYl4N8JqADi3++g1v+HCNA8njxpcps/sH0hW7ZuQ8TwETF7zlwfu5o0bopgQYLC6HDrN2gkHcax45Z5GQnixRcLiHFQsSJFZVA/YOAgjBs7HsuWrQDnNfBbHTGjx0Ahqxbw2o1biBcnHrJlySaHjhs7Tspdv2GjrPO3V+9+RrFYvWa97G/erIVtGzVj9IcuWrS47CtduiwuX7mKPXtdpBMvX64C2FlOmzYdBw9aOjL6ErPD27Jlm3S2FFSqWi0gbm89RADJ/WduTJw0BbNnz8GmTVvw4oXPwf6WbTsQJ1YcGVCMGDFKNHV0U/D09BK3BpbvPNBZ6kl/6zgxY9vmu8yYMUvOP9KqAaQwQW1o9mw5xNrBg5InSYb8efPDErjScrl79x+Q45o2aYrXbu54+
coyz8QGQxeUgBJwOIGWLdvIe7ja6hpqPiHncxlWDEMwoADBlDZVGlvbx3UO3HPlyGU+XJZpWYgRLToqVaws64uXLJfztWppcfPs1rUbwoUNZ7NArF69RvYzMiBTsiTJpD2RFdM/TlZPkzotevXqgwOHjyLKb5FFmDCyVK9WHYkTJoLXe+9og9xHNymWmTVzViOr7ZeW5hRJkyOrtQ2nexbbPip9mOiqmzplKuS1WnO4jVZp5mEgDqY+ffvJuovVndSYeD5ylMXi/XeX7pJP/ymBgEBABZCAcBf1Gr6bQMuWrUFrxZfSnr37ESFcBFBDb07ssNhhMLQkEyOaMKIWrQ+MehIlUhTUrlVb9tGCUKpEKZns/uSZz0H7nbuuiB45KsqXtVgbaAFhJ5zN2rHNsLosHD12XMratHkr5sy1+DVzA+di5MuTX+qyecs2yWP8o5DAOi5ZslQ2sQyu97CblE4d2t9/d5V9u61hH6ltrFe3vhzHzi9h/AQoXMjbVYI73O0i1XDSOAUXCgn2afjI0VL+IKsAUrpUGRG8Xr+xTNCfOGGS7OfAhFFqGB3mz1x/ymRToyz6mJcsXlJWDZ9yduZZMmVBpIi/4fSZc0ZW/VUCSuA/JMA5asWKFkesmHGweu063LlzF7du3waDaHTp1kuCV7A6/D4I2yBaN6i8oQtT/Lh/gO0eE10wub9z5y645+qKdes3SVtABQq3p0uTVoKAjB5jcb+sVbOWuJNWqlBJ9u/aZbF4jB9vcekca7UorFjJsLxB8XenziKkUHHx4eNHUbgYbeKY8ZbJ4bVq1haFFNvyjOkyIGzI0Lhzz/UzmiwzaJBg6ND+L3HDvXDxEhit68Kly3AKHRa/R42O2/dcpb3lObp06WYrg21Z3Fix8fCRxQWVlhzLdVvceBcvtliEJ4yfiNt378FoJ2/fuYfkyVNh1qw5trJ0QQn4dwIqgPj3O6j1/24CnAxIS0PIYMHBgS87TftEVwF2DAxv++r1a9tuRroqWKioaMsWLFiIMKHCYvoMy3wK+genTpVGtFwHDx+Bh6cn9u0/gFAhQ4nvLycULuLHuO4/gDG5Mkb03/H02TMcOnxUzhcz+u+gSxMnYfP8EydOEmvBxCnTbXUwFuh2QAEofrwEmDRpikTlontXt+49kTxFKqxbt0GyPn32XPKxPLpI0b947ryF4rddtmw5Oc/IUaNx+KhFUEmWmCFwLRG7WrZoJfv79u2P/S4uGDJsJA77MgGyW9fuko8aUUarolDAwQnn2PC8DRs0kvk1FGi4TgsTE10NuF6lcjXsczmEK9dugAwiOIXH5SuWSZecaM+5NQzFa/5g1/DhI1GwYGG1fhgPhP4qgV9EgBbevPkKiDtT+QqVMHbcJEt0qE+fpP2qXqM2cuXMhVq16kgwisqVqiBDugyYbw1IwWqz3aJVomSpsli1Zh0+fPiAWbPn4s9cuZE3T15s3rpDvp2UKlVq9OrdF0uWrkCRIsWkLecg/8XL12jX/i9kypgZpUuVxUPrd51ovU2RPBXKla8kEbHovsQAHCWKlwItsstXrkbjRo0llPCjJ09B9y3OzStYoBCWLl/lK9HpM2dJW1+6TDls2LQFLHPx0uUSTINupqPGjEez5i2RPn0GtGzVFu7WYCLOzoORJnU6rFy1Rsrt3LkbUqVKg4YNG8s6I3Hxm06MYnj85Gkp16gAmRiRsIxt+qsE/DMBFUD8893Tun83AVolnAcNRYXyFdGsWXMUKVocS+zCQbIzYafFiYg1atTCqdOW8IfGyeiT26hxU+TIngvjxk80NssvIzEVLlwMkyZNtW3nPAhq/kuULCNzIlg+vz1SvVoNlCldVoSPjZs2o2TJUuDclKHDRskHBbm/ffu/4HLwMJav8L0j5EmmTJkmnWfTZi3Azr//AGfxQbZVABBXLPpRM2xkg4ZNQKGEYX9r1qgtkz7ZoTM0JAUOhhKmlo+JoexZh+LFSqJixSpSD8MtwFw+lxmVKkf2nGjZuh1u3LwlnSctRhRyBg8ehtFjJ6BWrdqoUb0GhgyzhA6mFaZv3wHgnI7HT56Ky0bNmrWF11qrAMWySpYsLe5cZGekTZu3YOgwi3+1sU1/lYAS+DUEOAeCc+Nu3rqFVyaXSIbHNpQ4DP39/PlzqSADSphDlXMjlRbXr3uHIGc4dLZBdC998/qN/NINitbmp0+fgfPE2CIwQAVdPhnqm4mBPmjJMBJdNOXr7N7Nh+x6+dLbdZMfCfTystRJvlb+6ZO4yxpl2P8y//UbN2BYZd3c3GWS+odPnyTICK+bp2M93ptcuRhimIEzmMiAk9PtQ4g/evzYFs6c+Rh6nO0jvx+lSQkEFAIqgASUO6nX8U0EOH6lZs2c2KH5TD57qS8NuH0e889r5g9hmQfSnz76PP+Pno+d3Ld0UF8q31ynj58xgY+vr3/tas3Xac7nW5nm/d+7zA8V8qvwmpSAElACAZEAP9B66NAR+UCuMc8wIF6nXlPgJKACSOC873rVSsBfEqA2cdy4CdiydTsuXbnqL69BK60ElIAS+BYCdG2jiyq/3WRvLfqW4zWPEvDLBFQA8ct3R+umBJSADwK08tCHOnPmbOJG5mOnrigBJaAEAhABTkLnV+APHzkagK5KL0UJWAioAKJPghJQAv6KwK1bt3Hi5Cl/VWetrBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFmoUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFmoUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFmoUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFmoUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFmoUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFmoUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFm
oUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFmoUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFmoUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkpACSgBJaAElICDCagA4mDAWrwSUAJKQAkoASWgBJSAElAC3gRUAPFmoUtKQAkoASWgBJSAElACSkAJOJiACiAOBqzFKwEloASUgBJQAkpACSgBJeBNQAUQbxa6pASUgBJQAkrAB4GPHz/h46dPPrZ9+PDBx7quKAEloASUwPcRUAHk+3hp7n9J4NnzF7h+/Qbu3r3no6THj5/gyZOnsu2thydu3ryFly9f2fK8f/8et27dxus3brLt1avXkufxk6fwOTSwHfKPC+/f//Mg4t27d3KeJ08tdfvw8SNcXV1x+86dfyw/IGZwd3eHu4enn7k0Ly8vuLreB58ZpgcPH9meo69V0v3tW7i9fStZPtkNLo3j+Fx9tK7wut3cv57fOO5n/XKQ++z5859VnJbznQSGDx+JkiXLoHbt/+F/9Rpg5649cLc+A1euXkPFilVQvnxFHD16/DtL/r7su3fvQYEChXD48JHvO9CUe978hShatDhmzZ5r2vrzFk+dPgu2yUyzZs1BuvQZcPr0GdsJ+F5euHjJtq4LSkAJKAEVQPQZ+H1xFN8AACAASURBVE8IeHi9w8CBg5AqZRqUr1AJ9eo3wvkLF3Hi5Ck8f/kK8xcsQtYs2VGsSFGUL1cBDRs3RcYMmaSDf/n6DV69fo0mzVqiSJFiKFKoiOSp36AxSpUqi5QpUmHN2nXffB1e796hbv1G2LZ9x1ePuXP3HgY6D0G79p1QvUYtFC5YGKVKlka9+g2lMy9RvCRu3gpcgsiJk6eRL18B4fDs2bcNjl+/doPLwcNfZf2jO+/cdUXrNh2QL29+lCxeEjlz/omJk6b8Y3EHDh5Gtmw5ULpMOVy5et3X/DVr1EasGLFB4ffcuQtIny4jSpQojWvXb/iav3HjZsiZ40+8N2nHr9+8hW/lZF+oh4eHPHsrV62236XrDibQoGFjxIwRC/PmL8CBAwdx4OAhDHQejKRJU+DsuQvw8PBEzRq1ECRIECxfvtKhtTlz5izSpcuAzZu3/vB5tm3fKXX93//q/3AZXzpw7/6DWLrC+xldvXo1kidPgWPHvAWzfgMHY8PGzV8qQrcrASUQCAmoABIIb/p/fcnnzl9EiuQp4BTGCTNnzsKDBw9Fa71k2UqECxcR02fMxNu3Hug/wFk6yUoVKoEWh6FDh8t6wQKF4P7WA7R21K9XX7b16dsftJrs3LUbv0WIiLChwuDQ4aP/eGnUfDds2AShQ4bG3n0uX8x/9tx5FCpSHGvXrseLly+xfMVKOW/qlKlx9foNXLlyFVkzZ0XYsE6YM3fBF8sJaDs8vd7hz5y5EDJYCDx89PibLm/s+Mk4ccpbG/pNB31jJlqxHjx8iNw5c8n9mbdgodyvfzrczc0duXLkQoRw4b8ogMyeNQd1/1cPFFh5njy588o5rt246WvxkydPRYcOf+Hd+/ey/9btOyheovQPCyAs5MKFiyhStCQWLFri6zl1488lQMGiTZt2cp+3bN3mo3B3N3e079BJLCHcMXnKVIQKHhJHjh7DiRMncOz4SR/5uXL3nivGjB4rVlRj5759+7F5yzZ8/AS43r+P+QsWwuud5Zk5ePAQNm3eCi+vd5L95auXYD3Yhr585W0RpjVu6tTpWLZ8hVGs7XfGjJlYsXI13n8w7HfAPdf7SJsmPdq3/8uWz37h3bv3mDZtOijwMNEyuH79BmzdZlHUXL16TYQtmyUQwL79B5AjWy4RxsiB17Nr9x5cumIR6lnmgAHO+C3Cb+jWtTuOnziF7dt3iEB38NARuLm74+HDR1iydBn27ttvXyVdVwJKIAATUAEkAN9cv3JpxYoWlw7dfhBFz5caNWujVas2UtW16zZIvvbtO9iqniNbDoQMGhzXb96WbQOtQsqiJctteTq07/hZ+TT/j58wUYQdIyMHF4ePnkDGDBkRLVIUnDxt6WiN/cYvhZ+Spcpi6rQZxiZQIAkTMhRKlChlGyxcvHRFzhsrZmycOXvelvfFi5fo3r0nBgx0tm27ceMWhg0bgfnzF+LCxcviKrRw0WLMmDkLN27eBAerw0eMln0cAEyYOAmnz5zD/QcPMXHiJLgcOGgry35h48ZNGDduAnhepsGDh/rIP236DHTu0g1P7SwWs2fPRZ8+fbFs2XJMnDgZq1avkXpNmjxFBK43bu5Yu249Ro0Zi4OHvC0YtWrWQtzYcfHk6TNbVfa7HEDLlq2xZu1627bHT56hT5/+iB8vPho1aozdu/fa9q1asxYtWrTyUc8XL1/JdcyZM0987o+fOIGxY8fjjdXtznawLwu1a9ZGuDBOOG918+C5yP/o0WN49foNhg8fgW3WgZRxeK1adZArZ25cuXYDixYtFmHT2Ldjxy7MnbcAvMeGi1bTJs2QNlVaHDh0VOq1fcdOIztOnjqD1WvX4dp1i3By7vwFZMqYGWGCh0S37j1x+co1W97JU6ahdeu2PlxqaFki9917LIw4SJ0wcbIcw+chW/ZcOHb8hK0MXXAMgR07d8s7TaXHB0oIviTDDWv4sJEIFzqMWEZatW4n78SmTVtsR/D5oAtX4UJFUKFiFZvA3qhxMzlH6VJlQCVMmlRpxLJLoWT4qLGyr0vX7lLOi1evpM2JFDES1m/cJNvo/tWmbQfUq9cQv0ePgZKlysgzShcovm81a9WRZ69qtRqgwoXp0pWrSJEsJTp06Cjr9v9u3b6Ljh07I2++AmKhvm59jpu3aCX1qVC+IiZOno4/4v6BqlVr4OPHj+L62rpNe9mfKEEizJ47H5evXkeaVGkRI9rvInR5eHohQ/qMcAoZCnzf1m/cAuP6y5YpD08vL5w5ew7JkqeG86Ch9tXSdSWgBAIwARVAAvDN9QuXxsGaU+gwyJYlGzy9LFo+DuiMQR3nghw+YrFczJ23UDqztq3bStV37Nxl69xeWP2Lu3frLttmzJxju7wC+QsiQ4ZMePT4iW1bu3YdJN9Gk9mfA3+3tx7o3OlvhHcKj0uXr9jymxfGjh2HggUKmzfJQDlc6LAymLh3/6Hso4axUqUqcp4pU6fJNg6iOdioXLmauGnVb9BItlMDmSVzNsk7f/4iPHn2HEkSJ5P1hYuWwPX+A1SpWlMGsrTkUGP4R9x4GDV2IgrkL4QY0WPgyJFjPupkrAwZMkzKGTpsJFauWYeYMeOgVavWsnv8hEmoUKESqlevibr1GsLT00u2d+/eC8mTJUe79n8hcsR
IyJw5q7hIUEghT7qW0EWO7j9cLlqkuHE6VK1SFfHixMNzq8BDDW7lKtXQomUrxIkdByNGjJK8Dx8/Re7ceREsSBBx29qydbtsp4CRPGly/O9/9VC0aAmbRvmNmxvKlCkv5xs3YRIWLFqKiBEii5bVdvIvLFSpVBnhncJh916LFpVa2OjRfke4sE4YNGS4DH5o9aIrChMHl3Xr1kei+AkxZNhI1KhRW6xoK1etkf07xLL2G6JHiSbuf9zYuFFTxIoRE+MmTEaXLt0Q6bfI2Gq9JgoQ0aJEQ5KEiXHX9QHWbdiE36NGQ9xYsfFXx79lAEgrX7v2HWVQ2qBBI2RInwnbrfU5euyEWAhTJEuBqzduoX37jggbNrzN3atWzTpwhPuMXKz+sxFYumy5PH+jx4y3bfvSAp9zvht8/y5euYbwYZ1QtkxZyc6J6ylTpEbJEiVlnc966lRp4OHpiSNHjyN0yFBImjgJnr14ifFjx0k5tLww5c6VG3Fix8X9B49k3ajT+g0bZZ1uqAULWtqnc+fOg8IK29N9+1ykHLaVK1eskuX9VsXF1Ws3xFWV7aJvqUzpckifNr3s6tmjFxLETyiWPw8PL0SO8Js890+fv8Bf7TsgVIhQ4DLT6jXr5DxNmjSzFUtFBLkYLlgDBw6W9sywED1++gzBgwYHFRlGOn/hEnRiv0FDf5VA4CCgAkjguM+/7Cr79B0gnZG5s6H2TDRodpN/Z89dIIM2dsAd/+6KhAkSIV++gtJhGxdAjX3woEHFHaZa1RooW7Y8xk+cDLrUmNP58xewZNlycdPids7VOGJ1kWjTuq1FADFppY1jGe2mdOlyaNyoibFJfukHTkGqcOGicH1gEUCoH6VLBjtbWimYOH8lVsw4svz8xSuEChkanMzKdPL0GcnLuTBMLawd9fz5FhcuYyK11/sPKFmiNMKGDI2jx09imXUw4ew8WI6z/0cNftrUaZEubXqZa0HLxD1XV3F3Y90uXLgkAwYu01/98tVrUo9RI0dLUTxXpoyZZMDBDRSmwoQKg9NWV4zE8ROheFFvAaRK5aqiCX39+o0cX7lyVZQvV1GWM6RNh8yZMssy/3Xu0lXOtdpqGaE7CevRtUs3ydOrZ2+Ec4qAi5cuyzon+oYMHgId/uokQgKFILrs/VOqVqUaIjiFtwkgzN/O5kqzHbfv3pPzjhkzTorifW7YqAmCWpkcOXZSzsuBopHq1qmL38JHBC1BTBxkse579+6TdQ7YKEQYri6cgxL5t0g4e/4iPN99QO4/8yBposRGcTIBmMefOnVKtmXMkBm0DhqpRLESiBg+ApatXCPacvJ/bh3o0d0mU6ascLUKv8Yx+vtzCVA45j0aNPiftfFDhloE/2vXruPVG3eEDhEKdWrVkQrduHUbWTJnRcb0GdC0aQvkyplL2pybt+/g7Nnzcg66JDF1aG9RlqxavVbWy5YuhxjRotsUJJyHEi5sONAqxhQzRkw0qN9Qls3/Xr9+Le8RLc2pUqQUK4QhkF+5dh3Jk/tuAeHzmyVTFiRLnAR9+g1A8aIlEDJ4KHEfoyvYb+EjoHmzFnKqhvUbIFrkqLhhtUgvWbocIYIGR31TfSiAhAoewjYJnRbAiOEjYvcey3vDgsaNnyBCCIUmzhEzu5eZr0mXlYASCLgEVAAJuPfWT1xZ9x69pLOtWN4yQGWlDOGDv+ZElydqsemas2XLVrE6GIM7I1/Xrt0QKlgIUBs4atRoKbtHj17Gbl9/GbWpSJHiiBopMrJnzYbkSZIgcvgIKFK4KB7YzWN48eqNDK6bNPEpgNAFySlUGBQqWMR2DGvfpElzqQNdeJhiRo8hk9W5TAEkWZJkSJIosS2MZ+VKVRA7Vmy8fPUaj588Qbas2dGgQUOcOX/J5mr08PET5M2dTzSkHz4BS5datLL9+w/09foOHz2GeHH/EBcocwa6DAUPEgScTE2LTLLEScWtrHuP3lJn+l0zlS1TTtYvWi1Cw4aPAF0+qF1lSvBHApQvV16W+a9KpSr4I048seIYG+li1KRpC4QJFQqVK1U2Nsu95CDfcGdr0ayFnGuF1dJA9zAO+NassQQRWLhwscwv4Zyb70lVK1dF+LDhsNPqwsRja1SrIQLEu4+fRBvL89AVy0i0gFDAuOf6AEePn0CwIEFRv14DY7dcR6zfY9qiX9Wr10CEnJu3bkkeRkCKHTM2OLhjKlGsOGJE/x13Xe/j3YeP4gZDC4sRHah585ZyDrrbMZF7wvgJcfuuq6znzZMXaVKlhiHYyUbrv9mz5yBMyDDYvmOXebMu/2QCu/bsleeRwiWFyK8lw/JINz/XB4/gFDos6tSqLYfQ4pA4YWLkzJ4DO3bsxKFDh8W9kTspjHMOVcvmlkH93506yznnL1gsbk0lS5RCnFhxQFcrppmz5iCcU3i4uByQ9Zi/xwDnyfmWFi1eIkEYWrdqLcKwMc/t0uWrSJE8Jdr6YgHxfPceGdJlkLld+11cxCJtWJNZB74jta2CVcMGDRE1UhRxFeX5V6xcJYJ73Xrek9spHIUOERKnTp2WKvbq3RdOoZ2wdZvFAsqNFKQjhosgQvqS5atsVkbfrkm3KQElEDAJqAASMO+rn7mqU2fOytyJ6NGi4/rNzyNGURNtDMimTp8pHTHnSnwp/d3pb9FaGxMh6VfMgeXXIh9Rg82J51kzZ0GhAoWQLUtWRP0tEkoUL4WHj7zdtnjOV2/ckChhYjRp3NRHFQ4ePCwWkEKFiuDe/QeyjxaLdKnTyvnpVsFE7SAHokz3Hz6WslKlSAW6ZDDx2ljfKVOnyzonfUb+LTLmLVwsghk30kUhb978SJQgIZ48ewFqGXlMt64Wq4EcaPrncuCQuExwHoc5tWzeUo5jhDGGMH5nDTt84NBhVKhQUeo5YcIkZMmSDaOtlgEez2g/USJFweXLV6W4JImSoFyZcraiq1aphnhx49nmlFy7fl0mt27ctEUEPLO1pHPnrggWNCimT58pxzezWhEMd6zx4ydIHdet2yD76c4VIVwEm0BiO+k/LFSpWBkRnMJhjymwQJ3a/5N5IRTojh47Lufh/BgjMbwqXbQ4v4dub8GDBEXN6jWN3d4CiJvFj54CSLgwYXH5ioULBZBkSZPj7r37ckyxIsVk4Hjrzl28+/ABmTNmRoJ48eFmPZ5aZE5avv/A8vxQAOYg1bCo5cyeE3n+zGM7v3mBc4dolTK4mffp8s8jwAhmxrwGTjI3J4YF37Nvv81SOGPGLHmmGJCCKUzI0GjerLksc1I550vw/psTwzm7HDosVoM2VjfJHt17SDl0Z2Li3Ai6QD18bAnywEAYtCBwThRTyuQp5bkxZqicPXsOT58+A99rthOcGL7SGjSD1lGmBw8fSxS3nj17y7r9P1pQkydNZtv81sNDBGfO4aAFpHFji0KmSaPGiBY5GgyX2D179yNypMjiTunpaQmFTUUJlQ6MIsjENi9ShEgS8ZDrhuKpb7/+Pq7bdnJdUAJKIFAQUAEkUNzmX3uRvXv3k46GYWuNTpM1un//Abr16I0DBy3x7ek3zw502L
DhX6xwf2unxfCrTNRex44dR447cMg7Tj4nAdOd4skTnwIGj2nXtoNEwTJPDDZOyPqVLVsB1FabEycXs27FipWwbeZgltvom2/MV+XE0sSJkkgeutBwPycdG4mTtCNHioJJkyyDG0aEYZ5pVoGE+RiymHH/48W2uHIZk/MHfcEFixFy4v+RAGPHTTBOI78ULlh2l85dbduNeRtnzl6QSZ/0Caf/tTnRZ5vHGVpQup4Zml3m4xyQuLEsdeM63YhYV8/3H5EmZSqUKGbxe+e+IcNGiGvKmLEW1yeGPmbZdF9hatigkQxgjEHc5i1bESJYCGzZ4jMCEV01qP03Bi9ysOlfrRo1ESxIMFy7YbFOcFejBo0QIkgweeYuXrxk4WwKLNCoUVPRRN++c1cmibNedev8z1Yq3WmiRYlqW+dcGuZhpB+mpEmSoVTJMrb9RQsXQczfY4rgyY358uRDvNhxQQ0zQ/nOnTdfjjfczX6PFh1lSlvmDDA/Ay4wMpdvacL4iciQIbPN+uZbHt32cwi89fSyzPGJEAnt2/2F6TNmYdCgIRgzbgJoufvw4aNE5OM8MT4PEyZOkYABXOZ7Ycxpo5KB29KnyyAKkoWLloprndFuFClUFOcuXEK5spZ5TwxMwHDVsWLEkuMYfYrnoiDNcoZaFTOLFi+V9Vw5/8SIkaPACfA3b92WyFrMV6liFfTs1U9cnNq36yDPPyNrcR8DI/gWvW7+wsXy3mXPnhPDR4yE8+BhIkAwshuP45ytTVu2oWiRorI+bvxEgU1lDINMRIkcFXfuuYrLYKGCFi79rBZbBttgGZ27dJdJ8cZcDwaF+F/dBup+9XMeWy1FCfg7AiqA+Ltb5j8rzM44dsw4yJUrt0SI6tatJwYNGYY5c+fLBS1bvhKZM2WVjoqTNdkhmhM7TefBQ8VVgJ0ZJysbA3tOAA4ZPCTChHbCyJFj5DC6ajGfb7Hn6U7AfSesLgLm83B50qTJoH++keYtWIzyFSqLVo+dbc9efdG0aXMw+lXvPv1sA3Xm5yT0atVrSf7s2XJIpBqjHP7ScpMvfyGstvp7c/BbtFhJrFixypaNWm5aATiplfNi6tSpK/XlNyh8GzyMGGlxRcuVKw/OWN2mWBitS5wAzWutWq06xk+YLG5fDGdMC8uYMWMlxOeC+QtkDgu/O8FETWrYME4yn2XxspUyEZsuIBxY0XWM94dlcl4CU7VqNWS9a/deqFatplhPlluvh9GxKBjkz18IHNgxLViwCMmTpZAJ/Llz57dFwuJHJitUqCxllS9fyaZBZTSt2LHiyXZGKTInTtQdNHiYaJvpd16seEkcOnxEIuvEjB5Tjuk/YBCGWxnxntAlisJVksRJZf+o0ePQpatFC80IPvwuDV2yaJnidTK0L+8bB1283yyvYsXKMkfJCB165NgJqQPzjxlrEQRHjhojx7fv0BHUKDOcb9v2HVG8eCkJCkDrict+i1vNkWPHbQPPWbM+/1gc59nUrGlx7zFfvy47jgCjwzVs0FgmeTNowL793mG7r9+4Ke0YQ+wuWbpChAO+TwxOsM46WZw145yrZk2bo227v2wuVZMmTUHvPn0xe8487Ny9V9pACiV8n9Zv3CyT2jlPjN+qoZWViodevfqAwSqMRKsIBRZadilcM3G+EAV9ln3qzHlw8nrv3n1FaGfdu3XvgT59+uGmdf6GUZbxy7aSwSsYOOOy1R2Tc2EGDx4ifwz8wTr07NlLovTZjtuwCXS1ZYhuWlr//rsLRowYabOqMow53xsqSBiRzkhXr13HVWu0LWOb/ioBJRB4CKgAEnju9S+/0hs3b4kPPrWJ7NjMoWsZ7pWWD4Y/HU9No+nDVqz4kyfPpAObNWu25HF2HmQTXrj/+ImT0knyK7xMFy9dwoyZs23uLrLR+o8D1CVLlonbgnm7scyPHubNWxAc2DMxMhLdCCgYrFq1GqPHjJWO3RxW1jiWv6/fvEG/fv1ljop5u7FMdy1+Td1IHNQbk425jeFWab1Zv34j6Na0bNkKrFy5WmL7MyyvfWK0sDlz52HmzNkSucp+Pz/SSD9zI54/9xctbNFk0m2Eg2b+MWKWEZ2Mgx9G+aHQwfqOGTsex4+fkMhX5MHBE+vFRCGK4YTpRvXsxSv5EjItGUbifJ3Va9b6+EAfBzu0vhw75h1alhNReQ0sl1pnwzWPPDkBt1r1Ghg9eqxRrPxSIKP7HV2Utm3bjgEDBsp3GRiEgPNO1q1bj7lz52Pr9h3yTQNqdMmaLiuc3EuuDH/KOScUAikIP3v+AidOnJTvPHAASe0xI3QZieGKGbGIYZKNRFdDCl0UqI3wvB5eXjJg4/wNc6I7Gq1SnIxsJJZFrvwmjhGJy9jHb0NkypxNw/AaQPQ3QBDYtXsvtm7zDmUdIC5KL0IJKIFvJqACyDej0oyBicCVK9dQuGgJGfQHtOumsEYXC85lYIQpDsbbtG2PqtVq4YNdYAC/cu30zZ8yfTZ2WyNQ+ZV6OboevFfFSpQWgdnR59LylYCjCXBuDL+lxDl2nIjOj8lqUgJKIHASUAEkcN53vepvIHDnzl307ecsXz3/huz+JsusOfOQI+efYlW4dOmyWDYGDx0pMf394kW8efMGCxYu+aYv3fvF+v9onfjdkL86dvbVjfBHy9TjlMCvJEC3UE6yp7stg0NoUgJKIPASUAEk8N57vfJvIMBJoMbciG/I7i+ycBIoQ4hynkW9+g1RtlxFMJqNX028B0aEHb9aR0fUi/fpW74C74hza5lKwJEE+Amo9+8sH6Z15Hm0bCWgBPwuARVA/O690ZopASWgBJSAElACSkAJKIEAR0AFkAB3S/WClIASUAJKQAkoASWgBJSA3yWgAojfvTdaMyWgBJSAElACSkAJKAElEOAIqAAS4G6pXpASUAJKQAkoASWgBJSAEvC7BFQA8bv3RmumBJSAElACSkAJKAEloAQCHAEVQALcLdULUgJKQAkoASWgBJSAElACfpeACiB+995ozZSAElACSkAJfDOB5y9eolv3XihXriJOnT77zcdpRiWgBJTAf01ABZD/mrieTwkoASWgBJSAAwjcueeKVCnTIEiQINiwYZMDzqBFKgEloAR+DgEVQH4ORy1FCSgBJaAElMAvJfDg0WMUKVwMIYMGw86du39pXfTkSkAJKIGvEVAB5Gt0dJ8SUAJKQAkoAX9C4OGjJyhcuChCBQuOXbv3+JNaazWVgBIIjARUAAmMd12vWQkoASWgBAIcAVpAbALILhVAAtwN1gtSAgGIgAogAehm6qUoASWgBJRA4CVAC4i4YKkFJPA+BHrlSsCfEFABxJ/cKK2mElACSkAJKIGvEXjw0LCAhMDOXToH5GusdJ8SUAK/loAKIL+Wv55dCSgBJaAElMBPIXD/4SMUKlQYoYKFwC51wfopTLUQJaAEHENABRDHcNVSlYASUAJKQAn8pwQeP32GEsVLIWSw4Nixc9d/em49mRJQAkrgewioAPI9tDSvElACSkAJKAE/SsD1wUMUyF/QYgHRKFh+9C5ptZSAEiABFUD0OVACS
kAJKAElEAAIXLpyFWlSp0XYkKGwb9/+AHBFeglKQAkEVAIqgATUO6vXpQSUgBJQAoGKwMVLV5AoQUKECx0GR48dD1TXrherBJSA/yKgAoj/ul9aWyWgBJSAElACvhK4fdcVSRIlQdiQoXHq9Flf8+hGJaAElIBfIKACiF+4C1oHJaAElIASUAI/QGDTlq0YNnwUNmzaguEjxyBZ4qSIGvE39OjZB5u2bsfQ4SOxavWaHyhZD1ECSkAJOI6ACiCOY6slKwEloASUgBJwKIHVa9cjapRoiOAUDn/EiYvkiZMgacJEiBcrDmJEi46gQYJi1OixDq2DFq4ElIAS+F4CKoB8LzHNrwSUgBJQAkrADxGoWqUaggQJgji/xxDhI3mSpEgQN55sy/NnXnh6vfNDtdWqKAEloAQ0CpY+A0pACSgBJaAE/DWBQ0eOIX7cP8T1KkmChEiWOAnixIiJ36NGw8JFS/31tWnllYASCJgE1AISMO+rXpUSUAJKQAkEIgIVK1ZB2BAhkTh+AqRIkhROIUOhVKmygYiAXqoSUAL+iYAKIP7pbmldlYASUAJKQAn4QmCfy0H8HjU6YkSJiphRoyJ61GjYuGmrLzl1kxJQAkrg1xNQAeTX3wOtgRJQAkpACSiBf02gTp16iBjWCRHChEWZMuX/dXlagBJQAkrAUQRUAHEUWS1XCSgBJaAElMB/SGDv/gMIGSw4ggUJgs1bt/+HZ9ZTKQEloAS+j0CgEUDu3XPFkqXLv49OIM693+UANm1W8/2vegT4ReOjx078qtN/8byr1qzDxUuXv7jfvGPc+ElwOXDQvMnX5Q8fPsj3Cw4fOerrfkdu5HO+ddsOR57Cz5Xt9e4dFixcjJs3bzmkbh8+fMSIkWOwZ+8+h5SvhX6ZgIenJ3LnzoPcf+YJkJGvXA4ehuv9+18G4MA9bKf4TRWXA4cceJZ/VzTb0A0bNn1WyNu3b/Hx48fPtps3nDx1GqvXrDNv+q7lg4cOC5/37z9813EPHz7C0GEj8OTJk3887tDho19tVyZOmmK7hus3bko79+nTJ1/LnTV7ri2vrxm+YaOHhyfmzV+IBw8efpbb9f4DGXP+E/fPDgxEG/61AHL3nitat22PRg0bo1WrNmjRsjWaNGmOBg0biwmYHfyvTnwh/syVB1kyZf3hO30qlAAAIABJREFUqrBhdx40FAUKFMb27Tv/sZwZM+egdu3/oVbN2mjZqi1ev3nzj8f4lQxXr11H2DBO6NTpbz9RpXfv3oODJkclNtq169RDixYt0aJFK7Ro2QZly5THtOkzbackkxo166BChcro9HcXsE5GevHyFXr26otixUqgY6fOcHd/a+yS3+vXb2DM2PE+tn1tZfeevWjcrBUuX7n2xWwTJ09D9eo10b5dBzRv3gp16zaQ5+2CSTiYOWsOSpQoDYboXLZshY+y9u5zQe06dVG8eElfBfPFi5dh3boNPo7hyq7de1Gzdj0cOXrss332Gyjwnz59xn7zZ+tsoOcvWIRz589/tu97NrDB53v6rencuQsIHSosenTv+dkhrHvJkmXQqFET1KtbH1OmTpM8Hz99woiRo1GkcBG0btMe933peD4rzI9taN68JSJHioJz5y86pGbG/Txx4qRDyv8Zhbq/fYtLl6/+jKK+uQwKfA0bNsa+fft9HDN5ynRkz54THf/qhHfvvdsVH5m+Y2Xq9JnyPn3HIbasFy5cRKnSZbFu/efvvi3TPyywrStatIS0pQ8ePrLlnj59FkqXKoPOf3dG1SrVMXv2PNu+nj37IF/+gtLHvv/g+0B59Jjx6NNvIF6+fCnHUSCYOHEyxo2baCvHvPB3564oU7YCTpw8bd78w8t8rufOW4DTZ/zmF+Zv37mLCOEionmzFj6uke3i+YuXwcF5lSrVUPd/9dC+Qydwuzndun0HzVu0xthxE8ybv3n5zNlzmDtvIXhfvic9e/ZMuL548eKrh+3bfwDJkqdGp7+7fjHf8uUrceDgIbx+44YM6TMhc8bMX8zL/o19rZE4xvje949j3XBO4XHnzl2jGPn9+PET8ucrgIzpM/rYris+CfxrAeT16zfYum07MmXMLIPtbdt2YN3a9di0aQsGDxmGvftdfJ7xF6yx4ejduy8qVKj0Q2e/f/8h8ucvjIYNm2DBgkW48Q2aQw5u+ADmy5sfW7ZuxzsHDqB/6KK+ctCz589RulRZ9OzZ+yu5/rtdy1c69iu+9x88wPIVqxA/Xnw0btQEO3bswtix45EyZWp07dYTFGBfvHiJ1WvWCpPQoZ0w1tTpeXp5Ydv2HSLEzJ07H9QAm5Oz82BEjBgJl6/884CH5+nZux+O/YP149SZs2jRvCVix4oDNrrr129Enf/VQ7r0GeV95Pk5+F+yZBkK5C+IhAkT456rd4dDxYHzoCGoW68BOOgwJwpQKVOkRtmyvvuQ79i5C7Vq1/2sAzOX8V8v37pzFxcuXfmu03p6eqFokWLo12/AZ8ddvHgJ8+YtQLzYlm8pcIKvkYYMHY4okaNh0eIleOvhYWz2N79Ll/KZKISz5/6dwOdvLtiXih4/eRrrN2zxZY/jNp27cBGZMmXF/PkLbSeh5rd+g8aYMGEismXLgQsXL9n2/ejC27c//kxSCx4lSjTMmjXnu0/v/tYDTZu1ROs27bB8+QrRLufPX0gGvCyM7VHqVGnk2yTFihbHeVO7w/FCo8bNsG79RvimsD5y9Di69eiNR4+8BZpFi5ciWNDgaNS46Wd17d6jN9q37wg+6737DRTFyWeZAtiGly9foWKFSujY0afikIqGw0eP49y586hYsbJERtu8ZRvc3Nw/I8C+v3qNOlhqp7D6LOMv2HD79h00a94K/QcM+sezv/XwRNeu3VGufMV/zMsMT58+w8rV32/9Yf+aI8efYN3MiVaX/v0HokSJUviSBcacP7Au/2sBxADXrv1fmD17rrHq4/fO3XvYvXsPDh8+isePn+Dho8fYuXM39u13gWEce/z4MebMmfuZduHVa4vl4OjRY1i7dr2t3F279mCvnSbJ3cOi/bxx8yYWL/YZ+/zIkaOoVKma7XguUGKfOWs2Hj3+sunv2vXrqFCpiq8aIWq0tm3fiS81+IMHD8NA58G2c7JhfW81g27YsNFHA2zLZFrgQz1r1mycO3/BtBWgxmDxkqVYsWIVOPg1Jzd3S+dDM+yRI95aag5SqeEwJw9Py7F0laDgaE5DhwxDmzbtzJuwectWLFvm043NKOPp06dSpydPnsox5LZ8xUofx3OF93/GjJnSGJp3Gh/K2rp1G44d93Y9Yhm5cubG+PETcN7KgQ3tihUrsXHjZjx99sxWzPMXL8VS4u7ujp07d2H79h04fuIkLl22DExdXe9j4aLFuHXrtu0Y8wIF1KWm6xsxYqR0lqfPnLNlu3z1OhYsXoaChYpg5crVtu1c2Lp9F9gJ2yfnwcOQKFESdOva3X7XZ+sbNm3F4qU+rRXMtHnzFixYsNCH5YWa1MJFivkoI16ceMiRPYePbS4Hj6BHzz4oVKSYj+Pv3X+A3Xt9amN54Jmz
51GxcjWkT58Jx4/7rsVmB9WydVsf57FfefL0GR49emzb/NH6sm/ctPmzZ/rlqzfyXBuZKRwsXLgILr5YUGmxmjtvvu295XtcsGBhdO/RC0ePHTeKwNWr1zBjxiyw/bFPq1atxv37DzB/wcKvWvp4DTly5EL9eg2kiJu3bqFZ89afWaeo8eLzYO92RE0c0/UbN7B+w0a8t9MOso4LFy728a4yv5fVwkaBlPtfvnot5Vy8dEkETKPNunLlKnbu2gN2uEzUPtLadfuOz3dddlr/sb2qUrWGaAqN7fdc72P+/AU4bnr3jH18JylsGYl5qDnke/rs2XPZvGfPXqxd590+U/P9xs1y7czAe8lOnulLQtvr16+lTTO383KA9R/bvJWrfL5zxn4K4PwzEuu4Z+9+2zW6ubtj3z4X0JK5YeMmeV44UOOzYyQeM3fuPLB9MSdpO61tBl0uvpYeP3kiboeGssHLy0ue9Xv374vrIjXR1LTyVdi+YxeSJUmGEsVL4v7DR7a2g+0+33Wzq+PLV6/k2b5509J2sS3ctXuPrSoHDx7y8U5dunRZ8l+8fAVubm6iQKGikO0hFXJMb9zcsWDhIhw6fETWqWihlvn02fM2JQqZHLNasu7du/fZ822rgFhH90h7aXZpnDlzFipWqGwbhD159gJOYcKKosd8LJfv3f/cjYXb+bxMmjId5naY2/n+9uzVB7369IfRtnA7XYcrValhs4ZS0UIvBN8SGRiJbQJdtZnYR/CZte8rnj57brPAMB/vCxUV9hbh127u8PT0lPbLcHXku8IB6+rVa0SpZZyXv0a/wT7NXNb/2TsL8KqurA23ndp0WopLcXd3d49hQQLBCZpAEizBPSQkBAju7u5aCsVarLSUFodSqE+nM9OZ2vqfd927T869uaHQ0inwZz1Pcu89ss85+2xZ61vfWnvTpi1y5cpV+6H6HRCItnr3bqJBxg48QrBQjEC9Yv77zsnAWLZileA9v5/QD9u0bS9nz55L9rDt23doX3U/4NZtR/2x/e69ey50MNPu3M/h983bn2h98d3UBX1nw8ZN8rlTp2Af7cAYCmvWrNV2bS+P8fofzrESfSUsfJB6NVavXiMfX3ZlFKBbMr7SF0eMGCXVq9eUdes3uBgT1D/zlbugjzKOoHN269HLY12dOHFSOnfupqceO3ZcDBuI/r9n7z6rvwJYAmBet+klRu9lLNi0ORGA3bdvv+w/kJSBgz69atVqax4w94t+Rr3T5uwGv9n/Z38+MgOkc+eu6h0wD3Txw48U+ec3aC3IT/p0GXXQu3zlirz88t8kMLCTHk4lB/XsK3Vq15Py5SvKps1bdTsVXaZMOXUJzp23UAoXLiYjR43RxhoZOUKKFSspNBC9xrnzUrFCJRk0aIgsWb5K97Vs0UqgxyDbt21X+oz+0MH/oAyJGCa1ateV1m0CtLOYffZPqGTjJkySkydPydSp8YKSawTjo3/oQGFQ8iSDBw+VsDAHGoGnyM+vufj5NpODh49oBy9btnyyFI7bn3wqYeGD1S1dvHgpAZlCGMRQaFv5t5EGDRtLi5atBQ4lMnbcBClftrysWrNeXayUz2B85doNdUXXrdtAoAMh8+YtkEqVKsus2fP0+bJnyyETJiQiC0OGREhoaLgey+C1YtUaGRAartb+6DHjdTt1AYUHr8G+g29K9eq1pGVLf6U2rN+4Wd9BROQIy0Dj2ngU6jdoJOXKVdDBGaMsOiZWWrZsLXPmLVD6U4UKleX9iw6jiw6cNlVqqVWrjqxbv1EHjEFDIsS/dTspWbK09OjeU+8FJalokeJy6PARgdJQsGBhpd3lyZ1X752DqMNq1WopyqYn2f4x+UKhYsAz0syvuWTPmkPwkBjhue7e+1wYgNOkTmcNPLhuFyxamoRXv2Xrdrl+87bMnDlLChYoZCnNpjz75+1PPpHefUN0YrVvX7Nug7QLCJSq1WpIaNhAMUYeA079eo2sQ1E6cMHT7owwCWzd5lDM8Gg0btTU7BJQRdqKXW7f/kTWrN0gX379d/Wc9HRz55tjP/roY6Wk2Q1As49PlMj8+QsKhj8DYGv/NuLt5S0HDh1W7wnv3yjRDL60caO8U/aUuGnSuk07qVq1hg70pmwUKFCwMmXLi49PM2GCReHOlCGT5M2dV72uHItiHhwSqv2nRo1aYqcD8U68vJvJ+IlR2l5jYqaY4j1+Xr16XQoWLCITJ0XJpKgY2esW3Es/OHT4qHTtFqTX27nLgazv3rNHSpcuK9MTZsrKNesld668EhMTa13jzt17MmhwpLB+Q758BWXJUgclBe9Ujeo1ldbK+6lRs7a+C7jveFOLFCkuY8c7+ireuvTpM1oGwoX3L0qv3n1l5KixMn5ClNaDdUHnFxQib9/mqoyyCWS0cRNvqVu3vlSpWkM5zeacLVu3SZeuPXQcpi3gHVu8dLmkSZNO6tVrKNeckybGHx449sdNnSZePs0EowaDa8OmzdKseSuJjBwuS5aucFEozHUwuFAu8KwVKVJMaYVm33ff/VNGjR4rDRs2ER/f5hIyIMxSFAFgdu89ID2CekuVKtV1LOG8qKgY+ctfXpDQ0IFaDO/Iv3WAxMXP0DmkZIlSOj5AqUGoR8YqaEA8r1EADhx8U1q3bieMOVCb3KkWerLtH0pEzZp15NVXXpWTp96VE6fekXTpMsqSZSv13rK9kVXgyqP0DBwcIfnz5JOC+QvKhEmTtRQUFeYMby8fKZCvoIDyIyhWvr7NJH3adDJlSpwq3RkzZpKQ4BDhHgHXsmR+Q3bt3qfHjxo9To2B0iXL6JiJQViseGnp2duhnH79978rKOHn10LSp80gAwcOVuOVd5w5SxZBGUfatm0vzz7znPToHiRTp8+UbFmzq6HvCdWFGZAzey6pWL6ibNy0xfLmAC7Zj2/SuKn87a+vCPEcCOPD6TPnkqUpoxTybjwJ/PshEcNdvCZQDM175xwUyA6BnZPM0yhkFStUlr59+8mefQd1PmW+h566cPEyqVatpiLY/3WuIg8YVqJ4KTnnpJUynzGGMA5VqFBF8CogAA1Q6mjrrMPC+6SM2KnTVddo1NhLxzYzb48YMVoqV6oizC1Qo0qXLieMITdufaLzO+OXuSblUx9Q3KCIOoAERz2yb/TosdLPBgxdvXZDttliQuKmTpdxE6L0Pu/3r2+//rJkyXKPh6xctUZatmojxYqWUGPnRydtsF279tKli0PhZtvO3XulT9/+MmzEKFW47W3AFIzyHRExTOsLg4OxqUzpctoe9x86rHXRPiDQHC7TZsyS8IGDZO68BVKnbgMdLwzQMWfufKlbr5G84wRRAER8ffxk6rQZ4uvbXAoVKirHTziM7XPnzkuB/AVlw/qN2naaNvWWbJmzqJ509KgjbGDvvoMS2LGLVKhYWSjbCJ727j166d+wYSOlRfNWSQxVjsXgaNCgoWzYuFnHY1gIH1y8qAAEnpnaderLV19/o+2fea5VqzZ6CcCcEiVKybTpMwT2R6GChdVA2r5jl3oYy5StoJQ1x/38ot4qX78WUrlyVdW
jvnZS2XAK1K/XQLZs2yGBHTsL+qAd1DDP82d+PjIDhHgB6EYTJ01WuhOdY/acREsbSoN/67Y6KH32xZeSMGuOYGkjXeFZd+qq35nIatasrd9nJMzSQTQ4ZID+BtHOmDGzrHCu7Epnq1athu7j5Tz7zLMafIcFzaCXLm0GSUiYpfvxOOB+NDJm3ET1xPC7R4+eqkibfeYTykrpUmW0M2AAFSlSVKpUrmoheea45D5R4okXQGhoULJe/MvzgkKJgDJQrruggHXs3E0IuEKGDR+pkyffJ0VN1snYnIOCgwGA4HKkDuicSHhYuOTNW0AuOWMJMBAiI4fpvpCQUK3b8PDB+hsr+7VXU1kNdOiQCOUks/PixUsSFR2nx/GvfLmKMnHCJPn07meSJVNm7chmwi5SuKgEBfXRTn3+/AXJmSOPFQcQExsv6zc4JjZoLBiMSEjIAL0XYogQFGhvL1/9DorAhG4Q4PkLFkuXbg6j48zZc1Krdj11JeMRee3V13SwY5DBa4QyUK9+I7l+zRFsi3sZ7xtotSfBOISuBBoyZ+4CmTQ5Rp/dfizKOQoEgoco1auvyz0nLWD+oiUuKAbHzJg5x0J2UDJnz55jL87l+5mzZzVmw67Ug5J2697LQviYnCZPjtHzQEAxavqHhMr48RMlflqCzF+wyKVMPFPznNtQ5DKkyyCdnX0NuseyFatdjocvvWiJQxEGvUTZ9RS/hIeC94lR6Emov0wZs8iFCxcUTa1Xt7688NzzsnylA0kn5maRkwO+atUayZghkxx10jVXr1knIHUI75h6w1jBiOjarafwHhG/5q002JnvTHwG+QOF7tOvvwIf7OvePUjq1K6r52zevFXaB3bW75SZ9Y1sEjslsW3rDg//UJaeeeYZ9Ui6754zb6EO8mw/f+F9yZsnv7z77mmdGDgHZeG/P/6kKBQTNwKPvVOX7oqC8xuDjfEK6t216zclb648kitHLrnsBAyKFSshAwaE6bnULQaGkS6du4oxohJmzZOffv5FgR4mODOZmmP5BNVFkTdcdu6vk9PDg9fP29vR9wjiDLfxrfPlKyBxcVO1KMZ5Yo+MmP7/z3/9W4KDB6jihTFC32jR0vHMeAV8m7WUy5eTIrrEUE2McrRr2n/uXLmt2By81CgQRqA8BDiVkiVLl0vk8FG6i3GzVp0Gwn0jTMoGdeQ3njCAIGTUmAmycZMD6Pr+39/LqLEThHtHiK0KCe6v3urWbdvLDz/9rEAWRpTdO6sHe/gXHz9N2wpGwM1bn+h4jOcXxZT2gPfOCKCU6Y9sK168tDWely1VRrJkekNMTERsbJye365tgNz74isBYKM86u7ajVuSPk1apRRSDgpvjqzZJV/uvOZSioCi9KLw165dV1q18td9jJ1w2ZGjR49qmYMGOuYFlE2uUapEKVWI/Vu2ktdfTSXfOj17VuHOLydPnZZsb2TTcwAFVntI/HLwzSO638xbnIrRnVzsQFRUtPTs6bg/9+vFTImTIUMdc5rZR7l2bzPjZO06DVy8oxyL56dwwcKS+vU0qiSyDSMPIxwvGJ7COnXqyzGnoYQ+AuUVoAcZPCTCYjgsWbJMAVbGFLwQzz3znCqJd+7clQ8//EiDrO1sCBRMYg4RQNMX/vKCLHaOu6GhYTpvn3dSJOmfjGEI3oyq1WpabZyYw9x58ltgzqhRYzQWVw9WveNrAQw2whyB7vNrsm37Lp3zMQrssmPnbqnfoLFuwjNB29i61UFdKligsLRt01b3ASpPT5it3+fPXyiTJ3sGeQD+AK/LlS2v7ZJ5O3PGTAr8MfYxVtVv0MRiMcTFT5fUr6e1vAIo1Vu2OPrx6DHjFIQ44wRridXNnCmL5TXt0zdYAGExjo6yZk7GzLJ4sYNqiEeibdsA61FXrl6nyj8b8JoWLFhU0GGJAewQ2EVu3XbEfGCw58qZW/U762TnF+bXTBkzK2jIpiFDI634FcoM7NTNAoEAUKtUqabePtPnAITQY0nkQqwiFEUEnbZJEy/9/u2330r+/IUs4JO5B9YMgkfnpedfFDLjIS1a+gsU8cdJHpkBQgBd44ZNhE4KQsPDJsx0NEDzwCDQOXLkthQPs50XC6p79vx7Ur5cBYEfijChFCpYRHAvI6BD2bLltDLrRMdMUaNHd4oIfNMFCxMVMFzsBsHlBbdo0UoPxRgYOHioTrojRo6W6tWqS6lSZeUrm3eDA0E0Kleupt4CfoPSZcmcVS1Tc837fWIQGA8Ix0GlYDAx0rN3XzUuzG/zCW0ENMEILmgmUpQ+0FIoTEagJVSvUVsVfuqwQYNG8rVTQRs7dpwiNuZYzg0KcjRi6HDlylV0UQa8vHyVksLxBOZiQCFHj53QIGc8VmFhA+WNzG9I1cpVdZ+/fxsJCUmkasFjBulEoDdQr5cvX5YffvxJvTYg4SHBA6RJk6aSLWsORUPfPnZMcubMI8TaIDMTZikKgpmA0lmjRm3L6GOiwm0OtQWDkk72xZcOhRQvQfy06VqG+YeXK94ZVPfBxUvJZjBh8uPemHhR9NKmzSCff5FI7zLlobQcfDMxcA2vAgMFsnX7Lhc3L8jkVhstZOjQCMGrkpwcO35cUbgvvnTQ2DgOlBTj2ggeBRBrZNfuPUoliY6eIsWLlVA0yBxnPqG9gOYZVATUD0MTr8F3//y3rLZ5QJhsMBAMZQ1DvkL5SkqtM+XZPxNmzRUC3RGC8olzMmgY2wI7drWQVGgsBMQbgRIxeEgiJa1T564CSICgYBM4z4TRoX2gZEifUc6ee08mT452ideAUmgoV4EdOkrPXo4JHcST+mDS7t8/VEEJklAcPX5SatWso5QJcx8E7w8f5hldNcfw+ePPvyi6RHahn9yyyYwaOVqqVqkmGPOUx6SMxwupVrWG5aWFNoqngfHn1KlTSi381gnCcCxeQAwzZNy4Ccph1x9QD2bNUcWY32RvGTNugtmlY5zxrCxfuVqVl107HW3GE+roMECaWXSrs2fPK5Xz+s1bqggYuhnXYUzAEwqQki9vAeU1c2H4/BiDRuCZ441CCO4OGTBQKZ8oGCNGjlGEEzABMeOT/hBRAAGjwigSbJ8+fYZUrVpd44zweNnBrK3bduh9Hj12XNq16yBznQkCOI84CwMIQBM0wBaUNJPUAa/J0MiRlvHM/eItA/0GjUYZwrOLzJu/SMei7dsd9DLTj3RnMv+mxU/XNgBYYRcMj1defsUlIQTIqx3hxXOHoQRo8Oorf5MK5SrIf39wBPXyjmlbBmlv2tRLnn3mGbniNFJZhNCrSaKHk4xGqV5LJXPmzFUjyngcOZ5yBvR3AHv2e9y1e7fug7+ObNi0RX+bOKkmjZpI9qzZkngT7GWgeBH/mTljZj3XPagZA7lg/kJqqKDE4p3CQEtOuHZQkGcDBG/boCGRFpWbMjBARtjiF9988y2pU6+hnPZAKUJhRyk1wvvo1csRxE1WJi9vX6u+OQYl9eRJRzvGWw0NGmH+AEhBITx3/oJUrFRVLtjofV7efpayy/FQ3wDHEDzsjRo1lW+cwfXjx41Xvcck1NA5Nt
hBIYNK261HYr9jjoIqa2iLgLIGyKOfbdi0VX7+JTEmEW/AgxggKOjDR45NQnHCyzt85Gi9b/4BkF1zGvy0GaO3fP/991K2bAVp2sRLvvo6kTUC/cj8mVhJDCSYHAZYYszu46SRMRc28fKz6pn+SdyiEebfWbMceibtu19IuJxwejkWLVyieqg5Fn2kePGSSodlG33e6Au7du12Aag7duwsdevU1XGvQ4eO8vKLLwvg57RpCeq9MmXi6UOPcqfqsf+tt46oQQToicTGxUtbJ3CCARLUq59Fw4JqDGuDRAIIZZo4HDypObLnUlCLfTBxiNFFmG/pb+imvFvmytmz5+o+9DfonUYA6MY5Pedm25/9+cgMEF4Y/HojoHga4/HLL6o4s/3q9ZsC1adY0eKWQsR2EGkmqc1bt0tkxDCdzNmO2xE39IkTDgOEOIXcufMpT5X9KF5ly5RXxI/fuKDm2Lwuw4ePsCZtDBDjAYHGETl8tHI8obFs27ZdFUd3bjLWfuduQYomUT6CK+tBs0MpjSnMQQHg3OnTE3SgcZTk8IAY1NZs43PhgoXSrVvSwDqUbpAce2wFwXt16zZUFzfoAVx4w8sGyS9XlgnMkUEKd3vHwI56KdABGjkKqRGsatN4cYuiMCN4aRh44ZyCGOzevVfgIn//3x8UWQ5zUrWoP/jyJngdNL1kidJy8+ZNnVSrVa8lc+fOl2XLlmuQ4rFjJ+Sbb77VdlOkaHGB6oJgwGK4IBideEo+uZPILcWwBckBuUZB+tzJLWegdo9loU4aNmyqiswXX3xlGTlauO0faC2IqaFYwB3Fte4u6zZscjFAMJJAhECkiQHBQDKCgg2FEOSib3B/bX8o/3Z3ujmWT2h+BK3ZPSC9+gTL3v2u8TnmnIULF6v3jt94iMqUqaBGrtnPJ96AufMXu8RXwI2FzrVi9VoLgefYTz75VI09v2YtlNIS0j9MA/OZRNyF58a7QwwJ3HIMaygyQyMcaCQGCeUYowIl0W6ABAZ2tjwWlA11j7gdhIkdQxduNXFhxHygSLMtZkoihcl+T/Rtg+IuXLBIPUNr167XNgv/Ff7v7n0HpESJ0tZAzvkBAR0FxPp+gidy85btioqWL19JSCpgF5Sj3r16CzzjlStWyttvH1f++aWPLyvV0MQKYRiBrt77/HM58tYRNU7szjgFRZwUhiFDhroYIEuXLtXJnOuC+k+OSfTaAACAMhohyxCZWVDE4Mm7C33X28dP69XsmxwdK1AO+of0txD4iGEjlDKGYUDyDXjnn33+uRpgUKtA+EHrEGJ9DIJNFhqdXG1jCxSel196RSdxc03zCYUVr+qRI4lpe+ln/m0C5P2Ll9QoN1QpzoFGwLgyb8ESpaFs2OCq6JtyUWhq1qqrXk9ok4Z+gScQ2o5Jyw5COGLUOIFTDkgED//jj69YtKGRI0bJi89ZVeS0AAAgAElEQVS/pN42TwHS5nrmMyY6Rg0D2q1dMNZf+esrluGDUct7AAE2wtgNIg5VrHzZclKyeEkrne7k6BhV6GnXiJeXtzz/zLM6nlBW+rTppVVLB8hmyiuYr6B6Lwi6v+3k50PTw3AZNixp9jfeNcYJiiyyavU6XVNkoDOw2cfLR7K/kc0jdZg2YGKeOPfjy1elTeu28trfXlMQy9wTnwkJs/U60BoB/oilSE4A0owHwP0YFEi8dIxHRvBMD7DNu8R54UHjvbsLwKk9UJn5hLaKfHrnUx2zoD0iZE7jfZnYCDypeLXchdiA0mXKWbTPL7/+RulC6BlGUHgbNHQYi9CB6tVrYCn7zJ8kEzF1iVeGbIeIv39bpXqacvisWaO2BaqMHjVGKZhsx6DBw00sipHYuGkyyYM3Aq8hz2cEjy9eQuMxNNsD2gcK7AVPAlCBsm4ELwjsBTy75tmVStmoidSt18CKGQZcxSNrspt169Zd6d6UA42zcpXqsmy5gyo5bvwkl+dnTpnlBLoZ47v16G0xFJg/YDUYQVGvWLGKEAKAQMuD5oTgxcEbaQQgN3RAmI4J6IgYk9AYvb2bqeFijmNc5Bk9xYZBpQLANrG3PDsGLcIY1i8kzMrEd+zYMWFuwePx7bf/UP2HeCGEzJN58uTXOGp+x8XFSzO/xIRKhCrgBQSYKFumnIxzggf0A+iORmAaRXjI+Gj2/xmfj8wAwWr1lDlj6fJVcuOmI3CO4DasVFAHsmUYgb8LlxLBfQQahmA5wgc2wcfK2ytQ2LIaUejJ5oKAtJF2zc5/J7YDagqCcmcCgkDIOnburqi87hSRO5/eU4+L+c0nSh0NdqcTTWRbw0ZNXYwc+/Hu36Ep2C1OFO/mTi8Mx/bq008mRkW7nyYHDhxUr4YdFTJ80YAAUsEmNj7Sg/o1c1DL4O8z0JrOQCYnuKhG4PMa42nbtp2SPVtOOXjoTd2N8YBrF+UJwe09YbxD0Vq/cZPGZphy+ITLD4IOAsEEbQTXftRkxzNBuQABBokhMNyvub8GW5ljr1y9Ljdu3tagKqgH8JyR+QsWSo3qtfQ7wV54VYyQJhcqGfL5Z59LjRp1zC4dUMz9WxtFpGfPPtLUy1d2OTm69n3273hQMFiMQL9gwLMLyop7+k5c7JkyZbXaF8d/fOWaHD56TDNMMcDyRxA8yHa4k5ZnL5fveDaaNPW2YjzYNm3GTJe+wjaTPpF20qVLd6sYvAHVq9cW1hCxy849SRckw+BPlSqNGv3m2EOH31bvB3XO/UI3IUganrFxa5tjUTaGjRitnol79+4phx4Dgj5qpFPnbpb3cs3adeq9M/tIYRsVneiWx2NKoD1CmsWFbgktPvv8S4mPny5tbJ5BjmVSQECsiVFCMEJLlynvYgzSl/FckCUGQ8YIRiaUFoO+me3m8+MrV2X23AUWwsU7gvZ3xBYcj+eFbEZG8OS9efiIfPfdv7T9Y6AhrB/QyIlckXQDr99yGwWOvmsUery3KB5GSClqkE3isSZMTBw3SK2JIsUzHj7iyDoIBad6tRpKiTVlmE/6IpOvQeYYF0c5KV0EP3d3Iqx4U9wDVlEIuA6yeMlS9brd+/wLK+CX7Vev35DIEWM0+BOjxASHk7IYOp1B9sz98F4aNmjsgmziRYVegsLfunVbi0LHORhgvDfirqCnhoY53jv7GPvswcrQUaHpnjlzTikeHAPY0Dc4VBOI8Pvd02cFQ98un332heAxNQGfeF2JU5o69f7GKmXMmzdflWv3oFcM0VdfeU1jNsy1oLuRmhfBw125UlX1uFy6cl3q160vZUslpvFMmDFTy4UugjAPYEgYj2WmjJksgEkPENF0vBgUKD9GvvzqG0mbOq3UqZU4dtLPMfLPnD6j1zDIMPxxzjegUjO/ZpInZ25TlMsn8ZgoUfY1EUCd06ZJp3VsP5isdXhsUv3ttSRji/04vqNw9UjGA0J/gQpqFyhXPn4t9T2zXduIMxbIfhzfoRI3b+GgovEbBR8DEAHFJyOUPVsc6LtB/DnWnqgFVBsdB1CPeCQT0I5xBDhG3KYRvFKG80/SHNK7m0yZzNvUo5F2AR0EwwKZOWu20ooMy
AjghYJuaMGMkWT8RIgZs3tY2bZy1VqZNNkVxPn+Pz9IXHyCyxg4a848jaV096AyztWu7fD4Ux5G0rYdjiDt2Nip1hiFvkKbQqB316hZRz66fEUufXjJkSDmwEErHhWmi/H8cjwUNOhtCPEdjZv6WFnMWFsquL+Disp+4rPIuIZAmxs0dLiVpRQwFaDCSMLMOUplZ/xDANhMsqJVK1dbDBn2MR9NcIuVYSwiSQ06qfHeAALinTBjvLkWn+gFxMmZGGHib8h4h3z99TcK0gD6IRj+0IEBeQhsZ7kHk9RDgfgCRSyPGtfs6izn3Ln3pImXr5ZHOeh7cbEOmizGht0AGzQ4QmY4aXF60cfg3+82QHCfQucoXrSETu4O1HG5uiOxuEDPydCChWaQG7h9L7zwktIsQD9a+7cVr6ZeQsCjv387dd2SUWvMWEcg3aRJUTrBQWliMMTtSsOHdsFvUHK8KI0agsI21utgpYcPilAEkEkQBIHc96DuCGU1a95SXf1YmigH9mwL5t3gZQEVASXF/TcjYY5Oiuynga1cvcZjOjvcZiD3IHvkmybLiX8rf0mdKo3GCRBkTprXokWLe0y5Gjt1mt4fVI45c+ZbigXc8ixZsinvHzfu+EmTNcgOAwIX6PPPvyh79uxTtzaBbS8+/6IO/kz+uXPmkTy58qiCixue+AE6IVx/At7hbKIMgHISzA76xkCCcka8CtQkPEwYLwSLkdElXZp0UhyP1tffaFYy3gcBlAT/Qzng91hn0DreGgbL8eMnyKLFSzQ/O++RCZjjQKpoK639W+vvU++eVmoIeenHjB2vbWDEiJFaZ6ASBMOTmAAUA4QEzjzxJLRJu2BkEQBmuLz2fXzH7YlnhvNJbQslBaE9UH7bNgFKo0GRQtns3z/MCrA0ZWEIMojibQJpweXu7t2ibkc7g0NRsNzXkCAmCvc4yr8REgCgyEPlmz4jQbZu3aHvgwBy0EUCWEGHDe1k5qy5algyGYC4Q0lkogMcMDx9yobfjIG+cNFS7TuLlixVAxT01y7UDYGXvGcCI02edFBAPEaGJmA/h74IbQx3MEg+oEP7gA4aL8MkTdunTFzOBk1s2aq1xSUm0wf0hIkTJynFixWdTV1B3erbL0Td7gQGmnVPYqZMVSrCFudCWgR0g35D2wIJO3PWsR4JMSCv/u01geawZdtO7QPExbznlpKW2JORo0bL66nSqKfVPB+BtvQb2mts3FTlYgOQQCEg2w5UqeiYODXMWEuB43jXZCMhKPylF1+WcU4ONhSnChWrCO8KpSN84BALcRw+bLiUKlVGUfq4qfES3D/cWreDxBIF8hfSZ8MDxSRbolhJDQhfvnKtUlzXrVunNFOyGrkLhsPrqVJLfLwD/YM2QbAi/RNKE/2AhAC490PDBws0VShRxPtADyKGACGDTOWq1TWo21wDJYBEIUWLllC+MRSR7kF9NNnFpo2bpEOHTpZRas7hEyMADyk8aeKkAKVMBjSCfaF/EbPAuBEVHWst0okCRoITAm9R/KHv2Q0QEjtkyvSGC4WL6zG+glzuP3hIKXHMB/QHsu3xnNBCAESIJQIxJiibe9+3d7/2HcZBQ/+zPwcULQwD3juGrkl1Sp0R38h2jEvojSjPL7/wkqRLnVb7Fv0zZ45ckiVjZkmYPV+KFCqsx2PYwz1n/QbOB2m+fOWajuX8xqDDW8/3zBkyucwnN27c0rHI/V6Zlznez8dPDUkCpLknABK24/XkfvCE8xsqB6ATMRP8NsHx9mfnGnh8SZOKJ4m5Cwpccvx/PPJQrk0d2cuyf4exQGpyvLl2YZwFqcbrjiJpPHAcA/BHXBieQRIAGGqL/XzGoqKFiyoVjHZ965M7GnPDvAdNcuPGTfLq31IJKX2JDyLrJv0GABXFHFZF+nQZZMCAUJ3rWDgWoyUmxhGrM27ceEtRhcefNWsOpYSCmtOm9uzbr3WOwfDyS39V5RNltVKFyhoTQt/FuC9csIjky5PPAnJpX6QzTpg5S6JjpipNjueCrgTowNoTjIsE1oOmGyFzXd069TTAGf0KyhhzPwAm9KJfbH6kNm0DXBKymDKYN5jbAEuIRSDAGjAKox7QkEQIoPvQ17r36K19Ej2AGFJjkJmy+MSwR69K9drryqzAMEVHgeqKxwrvE2MwcW4k/cEDlS9vfo2Tw7PAeVBGAagWLFis487IkWO0TbGuVJOmPgpK06/bBXS04t6OnTipfc3H21c9RBfef1/nP8ANvPeAuRimzNc8Z/SUqeqN4Z4BNIgFJgEC4Cb9wZ0iCOWccf/ZZ5+TfQcO6nN6e/tpnBAMEoxS9Eq8mBgvUK2ff+4vEh0dq+31hedf0NhjqFVQt7jGsGEjVO9t376DtjuWenjnndOSL09+mTtnrsaskCSiUcNGypIhtCDVq6k0/T/tP3fO3Br7ZeZM+3v4s77/bgOERoXyCg2HQTykf6gqCGRiIVAQCgBoXJs2AVZAOBxegh4J/gYFx1KHB0sGiCNHjyvi0b17Tw3ewfUL9xUXOihMv77BGltChoipU6crJcqkiIS7OW/eQlVMyWRiDAoaMgNpQEB7W/YAUQvXx9dPEWmDKnt6EdNnzNTMTfBa7cIkTcMxaIR9Hxkw+vbpp/cLuvjumXMSETlMGyrGydnzFzTegMnfUAHs5/OdeBo8DCifJkCS7SifICrkP2fiRhioIoePlA6BHZUvCd+ZRdQGDRos23fuVl4q3iEGZQZeaD50ahRF3oX92eCw4vLmfLKTIEysUAXITISSi0C1wCjhPUI3YVLq0rWbKlhXrl3XiZzfdERDAwO9w0iE101Hx/XOhNqvTz9d+O/uZ59rEHjXbt0tBIBBiAWSmBQQUBjqjWQEZPgAsSVeAJcpSDHXtgtpDFHu3APqzDHUYVCvvlpXGENr1yUGVmPMYSCjGDGxouhwPeOWN2XwSQAykymuUBahpL6IbzICMsb9BfcNVt4+cQ3uAj0i2g3Rw0jD8KEtwDlFCBSn3nm/1I1BQdmHsYanjExJKD0M0EOHDtOkAfbroWDgLUE5xphr0bJVEsQchL5//wHStUtXDcqHv43ET58pi5PJksLgC0WOdgVdCeWfwbNnUC819M+ce0/ji+iPJuMd8VlQpYyAYoIOg87bPVdMdDwL+0D7jIBIEYNhX/CRyaR+/YZ6vL3/YIyhuNCv4em7p/OmTGJWWrTwl65dusmU2ES6E8opwaMkeMCLQJwPQj3yjvx8m+u5bCMhQMfOXXTcI04JBI5FwkiqQWpUhPJw/TOR2SmgeDTIDjdzJqidI3mHnuD8FxU1WZ+BdROYmE3qTp4TxcXHt5nlebCfx3cmNKgSBG2iGIPoEt/FOIW7nwBsO1JGNhWy+ZggWXt5gEqsj2OEoGuQth7de2j5gBe0B9bZgepi9zCac8wnCiVUXuodj4Vd8KaieHXs1NWlrXMMXk/uHxrfLCf/2ZxLAgWCng0KabaD0rdp3c7FG8M4xbtA4cS4QphD4LmDUJt2AtgCIs94544QM7YCtmF447EyFDgMdQzmvv2C1bvMfAb9KDx8
oPQPGaDZ1bgeiCbjLqmVUVQ6duqsqaTpo1Pjp0tY+EA1iI8eOyljxozVfoXxgQEOZZaYR7uHCa8r1FBPsmrNOh3b4PUzvkOro00MHjRE+yfKIMAMBhNjLhmImIt5BoA5dzGedxBbGA68D/u84n48MVH7k7k392MZh6Dl2YVxlvksiHFwcISOY/b90EFpL0YPsO/jO22ddsO8CF0YLy/oO14N2hvjROfOXTTV7527n2m9Mq5iYJv5jHdE3Ag0W5OpknGle48gpaNi1BmBkse8jW5jjGTmO1IJM2+TYpo+0L1bkKD3EG/IfEA8Jv3VtD/KA2hDGccAMML8QgYw5gXqBuPcPqbMmjVXF1WNiIiUfv36q67G2maMPSZFNmWhnwxmvvBA32Q/HlDmQAxIYoSQe/c+13ZCJjyTdYuxiHmZjJUYJZ6ETIB4qAIDO6oRRwYyKHDoehiEzA+hA0L1eSdOihbi7XoG9ZTjJ0/pmMccg0F49vz7Ar2sV89emnrX6HN8tm3XXgEi+xxJkhPmehgVAK4IcWZ4DZhjEEAv3hWGgz21NEYd5xLvx/2tXbshSQwTnhttfx07a9ZNEhpxn4xvJE5AYOUArrLANc/DWI4sW7ZS2wPzG/EzLCDZr1+werau37glAPK0B7NqPRlNYQCwcDLjBvEzAMuEIDAe0M9oeyyoS9vGMH1c5HcbII/Lg3Af7dt3VPrS43RPj+u9EOzIBPH/QUCGtu/aayG3j/MzM7ER+EqWOPc1Ix6X+8YQA81/lEK8iEmp/SjLfVLLwrDHm5Iiv68GAIcOHExcL+P3lZZ49j///b28fexkEgMk8YjH4xvK98FDiUkzHo+7evi7wGAcM26SiyL48KWknPFrNUB8IJ4CQw36teNT9qfUwO+pgafGAAHpwi3GirIp8us1gDuWFL7wL59WAcUE3YWPD3f9SREyqRB3YF8k7XG5d7weQyNHuAQ2/tZ7g+KDYkGGovETJ98XHf+t13gSz4Pm1tyvuXqVn8T7fxzumYDW27dvq+d3/wGHl+pR3RdezeCQMIEC8bgKKCko7QcfXkri/Xlc7/nX7gvvGSmv7bGRv3ZOyv4HrwE8V336DZD33rvw4CelHJlSA7+jBp4aA2TBoiXK6cNV7u5u/x3181SeCp+YhYayZs2u1Jun8iFFNEd71eq1lI//JD6j+4rMj8MzkAPfnnHm99wTgZME28FBJn1qijhqYN+BQ7qoJush2XP4p9TPg9fAjBmzpGKlatbCfA9+5q8fSbpz0hA/zsLieq/89VU5fsI1E9fjfM8Pcm/Qdg198UGOTznmwWsAup5J1fzgZ6UcmVIDv70GnhoDhKwy8BjJ7GJPO/fbq+bpPRPeMsHlxIEQQPa0yp07d5LwxZ/WZ30Sn4vYrIsffmhlQ3kSn+GPuGfiJgiARhkgCUGKPHwNkJSBVYftqUUfvpQn9wwSXsDVR2FPkZQaSKmBlBp4HGvgqTFAHsfKTbmnlBpIqYGUGkipgZQaSKmBlBpIqYGUGkipAdcaSDFAXOsj5VdKDaTUQEoNpNRASg2k1EBKDaTUQEoNpNTAH1gDT40BAq2IdRTMYj5/YJ1p0VAjWDgqRTzXAPnCSUWXIp5rgJTAJE5wT+NpP/p+++zHParvyaUpflTlPy7l/JYxwr7GwOPyHL/1PngWs5bHby0j5bzka8AxF32XbNrv5M/8bXsYZ+3pVn9bKU/vWVCyU6iMyb9fkoGwYOH9xoRffnGs/ZN8KY9mD/fy/0HQjx5GWKPlfu/nYcr6Xx37IPrxU2OAkPGoX0ionDz5vwm6GzJ0uJW3+X/1Qp+k6yxctOSxCXBH4VqwYJGuL/Jn1qFdiT1w6E0JGzhEF1q03xMZkFjAa+DAIb95wPnxx5+E1cxZtNKsZ8DglZxBQ7D73PmLxL5y85kzZ/V8+72xVgAZUkjyQDD65cuX7bs9fmdl9FOnTsn1Gzes/YaXjlLAPbK6eHJCHv0f78NjZ3Vakio8iBw/flK++eYbXReAFZ7tue+TO59VkBcvXaHghv0Y7vn06TO/mjEmuTqnLBQjMiqxGOP169e1XvlNnbDWBIYS6bJZ1JOFsU6fPu2yurv9fjx9p1bIbHPq1Dty69Yt6xBy44+bEOXyTqydHr5wn6xZ8SCrgHs4/f/dJtY/CAsfIgeda8T80RXAGj7LV6z6oy/zxJbPAseDh0Y+suQZv6ciGA+WL19prfT9e8r6PefaxyWyLfYLDk2y0CPj6urVa6R3n2BdAO9hr8d6GcePnxDW4fo1YW4hW6VZMJL5ijmMRV6NcM9ff/21lkeMHHOLWcPDHOPpk/GUskz2NMbFn50G1ZdffilHjhyVW7duezpVt7FYYnLCPTmXxkruEAUHjh8/LleuOFaG50DW7mHB2gcR1i85/NbbYlZwN+eYOZ6FWn+rECP34aVLOpex7gsJNs6dO6fzGoA+f2RKZXFnrsOcl9y8yTyG3mCE80aPnWgtrmu22z+fGgMEy/nSRx/pwnb2B/yjvl++fEVX+3zU5ZOzHeXjSRMW6rGnjSWjxtVrv71jPMrnZyG/QgWLSKMGjR5lsQ9VFiu8f2hLeUygMYtdsWq4XRhY8+TOK40bN7VvfuDvLEgXGjZImjT1FtbWYLGxrVu364JR9knHFEi/GTFyjEyJjVeFFyMJZaZa9ZrSvHkrXejKrKKMMsvqsFUrV5PKlaved5ExBikWB/Pzayn16zdSY5QV3sl8xcKkrHGybsNmXam2QoXKuqqsuSc+mbQGDRoi9eo2ENpScsKii6xWez9hESYWgKxStYYuHEg9bNq8TQdHs3iap/NZyKtTl+5y4MAhy3gDaZ63YJEussriqtWq1dTFqzydzzYMwM5de+jiXu7HsJhl5crVJKB9oDRv3lLq1q0vAe076sKD3Xv0km3bd0lY2GApUbykLpDXuUt3yZ+vgK7u7F6W+2+SSwwIGyjNmreSBg0b67otR98+rot3sWgjK96zkOKDGJHXb9yUVK+llt69+rhfJuW3hxqgfjEc6eP/CyHV+N27dx/5pd4+dkIN30de8B9cIO38xIlT1lVQPO3girXjT/jC2Ef6+6pVqv4JV3dckhmHhYqNkAwEQMksrmi2swp3uXIVpEzpssL3hxFWKW/eopXUrFFb2gd2luO/AgxPiZsuO3bu0TmIMXloxHApV66iLhJ94tQ7uugx12eR5EaNmkrZMuWkbt0GsnPXnmRvC32EOcix4jgLqS5XxX/lqrW66jvKNtfRua6Fvxy3tRkKZZFsFrlkocb7SbduQbrQqqdjWIcobOBgHYPr1W+oCwVyHGnCWbB01Jhx92XtMPYOjRwplz66bM1B6A0jRo7W52KOZ1V6DJrkhHm7Z+9+csWDodK3X4iUKF5KevfpJ02aNJV69RpIj6DeOhex2CHvrWWr1lKhfEWd83inzFkseGiE+ZRFHKnn6tVr6WKwtCUANBZHZC4j3b4neWoMEE8P96Rte/+DD2XajNlP2m3r/bZo2Vr27dv/2N47q9q2a9fhT7k/kKTlK9fK9//94YGur/caEJjEOPm
1k3fs3CV58xaUmTNnK/2NFV1ZzdXL208HQU/nHz7ytsTGJa4ovoEVq32bK9qCsj1r9jypW6+RZdjj/SA97KpVqwVvjSfBoxLYsYsaHSjgUERAw8IHDZWBgyMEIwfk6qOPLotOfh99LOXLV3JZCX37zl068RXIl19u3Ljp6TKKxBQuXEzKlS0vrOruSTC+cuTILZMnx+gq9SgARljZmQUfk5PpCbNlr23VWBZ1q1GjlnTp2kMNC9zoGClNvXx1m6dyTr17Rl566a+6Aq19P4bfqXdP6+rJoFCsFN66TYDcvHlbmIBBx/btPyDUZe5ceSRuarwabQsXLpa0qdMmWfHbXjbZ7TBkoibH6PnU/zd//7sEh4QKnltjiC5avFSGjRhtP9Xjd44fM2acBIf097g/ZePTVwOsQh07dcYT+WCdOneTxYuXPrb3vnbtOmnl3+ZPu7+Vq9fJ3XsPlv1yx45dmib9YenUTZv66Mro//nvf+X06bPSpVtPYaV2T4KSumz5amsXgBjKOethocjGxE5VD42hIK1du16qVq2hYy/efk9y9OgxKVCgsEycGKVzzD//9S/1JLdp21569e6nc9Dbx08KxipUIRT4mrXryQcfJN7jjl17JUf2nNKqRStPl9BtrHCeJXNWaeAB3OR+Azt10ZXZcdKzkn3x4qVcPEJdugYlu7gmc+TosRMEL7yR8++9L/nzF5To6Cm6YjtzEOsRFS1WMllAcMPGzfLMM88I9eYu02fMUk8tRunQIRHS2r+NZs67cfOmjBs/Sb03AJCly5QTFqmkHlmlPnXqdLJvv2NF9RkJs8THt7kAvGBwYTguXLDIuhTlAHB6kkdqgMTGxUtAu/aybv0Gl2sdOXpMaQ8LFi6WHTscy83ToPv0DZaYmFiXY/lBQ+3du5+wGvDde5/pfpBiY0UfOvSmLrIElcAIiN/uPft0wjXbWGwvIKCDxMZONZuSfP7w40/aUXoG9XKx6i68f1FTuPJiQkPDZeHCxAqlkF279+oid6bAjy9flQGh4WotHz+ZiL5AXzhw8E1NiRg3dZrSgDy5sLbv2CW5cuaRqlWqyfQZjsUUz793QU6fOSuf3r2rFibXAlUbOGiIWub2BnXozbcE1AEJCuole/bsNbemnyEhobJ2naMBmvUl3nn3jO7btn2H1ieeArvs23dAOrTvKFOmuLoK2f7x5cvqsh01aqw+c5HCRaVNm3bqKqQM7n3/AUcDxY3H8yEMaH36BKuSZb/W+g0bpUNgJzlz9rx9s8t33sXFSx8LnTkuLl4iI0e47OdHbGy8dOzYWVavXuuyb+OGTdKhQyfdRvsMCQ6RefMXWMfMmj1XRo0eq79v3rwll69cVbdt27YBst3ZZs3BR468LZ06dZU5c+eZTfoJxYc00ChrIO7cL9KnTz/JnjW7BAX1FrwACAMJCKNdaKezZ89VVKd7957W+RxD2wGRSG4Qx1goU6a8JCTMshep3z+89JHETp1mKZ7mAFyucfEzXNDBuXPnS716Dc0h+rln3wHBVW2kVavW6rY2v90/u3XrIe0DAt0363vbuHmbXPzwkrhzfXv16pME2cdr1KxZC6UoJSlMRNvK5Og4NVSWLV2e5JBLH1+WkqXKyP4Dh5LsYwOu7THjJuo7cz+AiSlh1lwXdGrsuPGSIX0m+crNQ4lRlj9fQR2Y3ctZvW6DDB4cITWq19Qx0OxnYrVzZN98653pZdsAACAASURBVIgE9w+ztuEFxUuE271qleqyxPZ8TRs3lQzp0stPyVDPAgICpUuXbuZS1id1vmHTVm3bZuPEqBjZtHmr+ZnsZ2xsnEyKmqz7oQcuXrxEv0NFCw8fLLt2O5BIPFeMl6BuCGONafPzFyySoUMjdTv/+vULEeYEu9B/evbsrWO/ffFQ+sq16zfk3XfPSNduPVzGavv55juIXGCHjjJ/wUKzST/pu3fv3RPGm5UrE5WewUMiPFI0WRGacZ0xFa8GQr8+8vZx/YRi0qVrd7l2PdFIxlBlfvjC1mfOnjuvY+zo0WOTNdwpe+zY8dKjR0+Xd3L12nU5feacXhtvn/t8xnjMeGsEjyFzBPOrHSHmvukLKAlQZPEw3rqdlHrCfF2wQCEpW6asTJzkeOd4WfBeQpVhjKEtoWDhgaRuoLgawftg3v/AgYNd6pljqIPZsx2Gv5mLTp91PB/nBnbsbFFxTJlnzp6TTp26yIgRI80m/UTRpG4BMqKiohXJLVa0mPj4+Mo255zDvLhzlwMhpsts3bZDzz1x4qR07x6UhHqE3tC+Q0edt10u5vYDVBpZtGixBAcPEMYru1AnHdoHCu3eLnv37hPmFWTrtu0KTsycNccan5csXSb9+4dp/2e+/+DiJR2rmNdWrlpjL0qV027dgxRgse+gz5HGe9q06Tr//MMJ0IwfP0kyZcikcwlzMQLKDv3J7o1fsmSZ0i5554BJ9jFv0ZJlev9Hjr5tv6TL93JlK0j7AAfgx5yFom3vz+Zg7jGodz+9B7PNx9tP+675zSe6igGijh07rp54Uk0nJ3imaWfugs4YP22mxryY+dkcM2LUOFm91lV33bBho7T2b2sOSfLJeDph4mSpVLFykvXGuD+AWdqzEUA46tsIXha2eVoOYdOWbWqImWP5rFmjlvj4+Nk36XfeZZrU6axx2BxA35g9d7506thZvUb3MyQZV/oPCDWnyvfOGOfLl68qyEZbQH76WSRjhkwS3C9Yf8dPmyEVKla2zps4KVrQKY1gmEQOHyVHPbSXR2KAMJEuXrpSxoydIKEDwiR37nyywslJ5aFefOEl2bRlu3Tt2kMHqx9/+llGjRkvQ4ZGSuSwEdK5UxdVGpl8Pvr4Y2nUxEst1xbNW6kixwOsXrNO8uTOp9QUJi2UIP/W7fQZv//Pf5TKUbtOfbVy2Xjo8FsyaEikDB40RAoUKGQp9aZS+CRXPO48BoOIiEipXKW6ro1BxTMA58qeU5avWqv3WLRIcaXMcN606Qny/HMviKGmMPiDJGJtDxkSIUWKFBcaLsLk/Owzz8q4CZNkxYrVei8ok+6CS6uZX3MpVbK0LF22Qs5feF8yps8ouMg2bt6qVAo8JIMGR0po2EAZNHCwlCxRWq1OFLpX/vo3qVihkqxYtVZCQgYItJaLFy/pZUByuTesZpCIO3fvSfy06fJ6qtTSuXM3Wbdhk/j5NhdvHz/LgKMBsQ9rt0HDJhI+cLCWdfjwEbWmJ0VFy9r1m6Rixco6+NWuVVvatw9UpRDFL/XraWSMcwBAIX3p+Rf1vletXqcIev8B4VaMw5q169S9h3LCKtu8DyZIuzC4R0SOkKxZsqoCMn/+QnUX4mI1wmTZtl17VSQaNmoiQyOGWUr8ihUrJTDQYYBs3rzVegZz7rDho2Ry9BS5efsTfSaQD9D/sLCBAspuJjDqZVJUjMTETBEfn2aWYkine/H5F6VLl+4yZ+58qVKlmk5ulD9t2gw1QKDZ0FZAXkAshtvQZ85p43xuaE5MtggG7NT4GYroRQyN1HZgp7qZ+9+2faeiQl988YXZ5PLJmgDusmr1Gu
naLchlM5NFwYJFhAnkjFPp4QATv4LBjlGw26lwupwsosYM737WLM+ePBQY/twFt+8uN3f6l19+Ld5efsr1dT8ePmrCzLm6uUvnrlKtWg3rXZtjMQJZYT1h5mxFqPbs3Wd2WZ+8yykeAAqUN96Jke+++5fUqF5LwsMHmk0un4EdAiVdmnQC/98IdAEWFaTuc2bPpf3P7HP/xEDv1qOXcnDt+27dvCUVK1QW+oiRenXqSbY3snk0QFD8Ur32erIIMHVv4oIoj/GK8eLXBACmZImSsnT5KqWclSxZWq5fvyEY67je27TroNQ61hcqkL+glkuZQT16yUsvvKjjPcoTAEvr1m1lYlS0jBo1RipVqiprnMgcnGFfvxZ6LkAWfRhDDfoMShP1/9bR49KuXXupU7d+kvgprocSxbg0bPhoGTx4qOTLW8ACUNat2yAv/OUFfa/Dho/UOYTjY6dOFxTl0WPGqZHQtm07wWgHKGreorUq4e3aBig1gX6AcZEnb36pX7+hTu4AJ1DduFfKa9GyjVSvUVs+dYJnKE+Dhw7TcYl6A+E1aK6pdwxNDMfGjb1kxPARUrZcRWuxw+rVakra1Glk9twFEjMlTgB7UBgRjMdnnnlOGNMQ+sXIUWNlwoRJWr/FipXUmDL2xcVP13EvLHywzilQgZh/3QUaJ3VcokQpmTV7jty89Ym+U/r9nv2H9B2dOHlKoiZP0fiAMWPGSuHCRdVoQKF8I0s2KVyoiNbN8OGjpEzZ8pbByNyGkUU8UafO3eXjK1dlzZp1Ol+0aOGvtEwUbRYoNbx8jEbGqREjRql3FoUbYaz660uvyIAB4bJ77wGd88aNmyC+3r7SzLeZYEjxDvLmyS/du/XQc2hzzMeM02vXb5SmTb1VwcaAQRgj2nfopG1w+MgxCmC6jw8olqDyuXLmltZt2ul8jVII1cRgAgBaUFiHDx+h6DAL//7H6X3dtGmL0izN9Z555lmJHDZcr8+/8ROiFLS4+9kX4tXUR9KmSaceUebiQoWKWnPO4beOqF6BkcG1zH0uW75K2znbli5fqfNky5at5aeff5GVq1ZLvjz5lNqK8QZwSh8M6tnHMoDQtZo1ayUzZ85RHcDXx88ymnmuufMWyvBhI1Tneuuto9Z927+wnet4eXlLXNy0JOOaOXbm7Hk6ppjffGJQ/+2VV6V5sxaWPsf2v3/7rR4G08Lb28/jPMIBx46flLLlKliAgZ5k+8d4bLzAZjPgxoRJ0Unuc8WKFdIiGQ8IBtHGTVu1XsuXreAR9OEY2nWDRo09ejqgKUHNwqiyC4yB4SPHCkC4EYCIXDlyuwB1Zl7+748/qlHQtUtXc7h+AkBgUGOEp06d1qKyuRzk/MGYARDoLu+ePiMNG3kJIADy3x9+kvTpMkgf57Hcw6AhEVKyeCmPACjnAIgBELjLIzFAQJv8miW6qTA+nn/ueUUuDx48pBWzfftObcRw3FH0GLCN5MyRW5F2UNzNW7bJuPETdde59y7oBMBEB+8NLiJIIsKxxYuVkNuffKJoDEhMF6cyReAStAg8BwjxCR06dpXvv3fNPEDwUc2adfQY/hUvWkKmTo3X+/Tx9pU3Mr9hIUvdg3qrIsNxnFe6VBk5depdPbdf8AAdMExBvEQmWgSFNVvWHLLUiWBOmz5DkXBzrP2TyQTOOfLZZ59Jvbr1lIt359N7iq7gSRk5ysF356XXql1XXYi46ho3bCxZ38hmueswGlCSEW/f5gIPHsFaZdA5+vbbauDYkVKMLt4ddI2qVWtaSjcBSKlefV1iomO0IfMOOnbqKqAJxgXarHlLefNNBy/w9p07UrhQUYmOjtFrMqClT5teOnV2dA4m5MJFijtQiF9+Udfnlq3bHfcXP10ypM9o3a9uFNF3AoeTQd+0D5SfLG9k1/cA1QgDEgUIOfzWUXnlldeUv89vnqtt2/amOFmwaIl6FQwKYgY3Plu0aCkVy1e0snhhQBGLQJubnjBLNm7aouXMnTtPDT8m3UNvHtbBYZET0b185ZqkT5/JUp4Z5I85EQRiQWrWqiMTJzrQRQbUqtVqWQPqgP4DhPaHoARFDhtpDYzly5VXZdrct3kgDCWoOgQsP6hgJKBouMvZc++pMYsh4dXUW/7+90TjhQByDKnkDBDQvdy588qJX+H82q9J/dH2CTC0C+0LhYxgQ3f58NLH8qZz8rt27YYqbO/bAhYBOerUrivNm7VU5WfgwEGSIV1GwYC2CygwiL27YKwysRs5c+68vPT8S9oHzDb7Z3hYuI4XPIuRBYuXWaBFVNTkZCcyjmeiCOrVN0mAHyBJhQqVFEAA2U5ImKn93B0JNdcE+MiXr4Awdj6IQH9AYf01mT9vgWTJ/IZ6ZDkWTjGoLYJBHT4oQscNfs+eM1e8vHx0H56TVK+mUtCGDaC7gDd79jiMQTzPA/o7UDeChc149M7pM+oRvnb9uqKDeYmLatREy+Sft08zpR5YG5xfNm7crJOl6R8oacxFxLywejljJEo17QN0GXS6foPGVjEYCNApMNRQsiMiHYohbbGVfztF9hl3oCGCekIxQcqWKS/vf/CBKjbRMbGqMKNw4m1hrjMeAVa2b922Q5K4JgyHqlWrW4pRrZq1Ffyg7B7dg5R2ZzzKkcNHWu/swoX3pXy5irJlyza9DyZ5vKdGJkyYKEWLFlfP2smTpyRf3vyWJ2vd+o2qbJtj7Z94AFs5AT5WHvdv1Vqvc/X6DS3r1DunZUhEovHSqLGXYOCBtHcM7CQZ02eyPI8dO3ZRFJ7yO3buKus3btJL4SHAs0Nii9w5c4u3c8xjZ5XK1WTSxCj1lDdp6uPiYWEO6B8yQBF+QDvGI6iQUE71Gp26yPLlK/Q7xna1qtWtNkY8V45sOSwUGW8VRtoHHzhoLrwrw0DYvGWrgnTGU6AF4gH75Rf16DNf9uvrQIEJ5YMag2cNEKJmrbqC4oagu0BZMaAs7wrjwAigHACWmUuNtwFgF29YgQIF5bbTU4URBkgJkr14yTLLuwJ485dnn9f5H9Q7b558MnaMQ1cAvMuYMYvVRvBwr3IyBJg3GzVuKmFhg/R2zp49LxUrVbPiOaMnx2j9GYWdmIkPP/xIj/X18dW2BUDsSdBXoP40qO/qUbcfi/fQeMPs27kP3nPmzG9ISLAr9VMNEJ9mGhhtP8d8h4EATdUwQsz2+31itKx0Y01wPN4t2r4nIb7ywEGHzsO4iy4JGO4uGN3UA0Y5RqBdABh79w2WAwddvfTovOiwjAtGABnz5Mknu5xjp9nOJ+MZ8TZdbZ5vdKEVqxOBq/6h4RI+cIj9NJfvjBWMx+5y4YOLUqdOfU1IACjAnJklSzbVscyxV65dlxf/8qI+pyeAFLAC4N5dHokB4gjU6W2V/Z8ffpTcufJKXKyDutOwYRMLzeEgkCYq14ivX0uZNy8RbSQTDBMDyA8oGMFCCAhhghNZxS1Uo0ZtOXvOYZW9e/qsBIc4FImdO3YqGm/K5xOr11iLZjsoFJ0HKgaIfprX0+hEz37cd61aJfI04VTbXx7UriNHHMpM85b+MsNJm
+JclH0GINKsIihRxn0I5SzASQXSnbZ/8PFw2ZkUbcOGjRCUX7vAYWfQJvCpYP5C8tYRhxt01MjR4uvbwjoUDua4ceP1N8FVObLnEh8vHwuhYQceI7jnRqBGTZgYpdw+JhQ7mst9eDXx0kMHDRqqx5nzCFDjHa+3Ue+6du0ugwc7GjuDZdmyFS3OIGgviDXvlYGtTt0GsnzFSi2ODCG1a9U1Rbt8Mlg2a9naMgrZ6fDARKuHDFexXUC3Yqc4KH7QLUAYjUAha9zEW0CzKBc07aefHXzSuNip1uTP8VC2GjdqKtt37lE+Z2BgZ+WRtmrZWr0BnI/4+bVQVJjvDDQM9Bs3bNQJm2A+M+izH4rh+IlReh6oerANhSa+AsUZOXPuPVVm/P3bSrfuPVUJw7Njd5dz3MDwQVIgf4GHChacMT1B2tmMMndUdtHiJYq+EQRNFiaE/sJzGo+QbrT9YyDFA+oe0Gc7xOUryO+qNes9Zsr45M4d9Xi+956DzmM/EeQWGhBuXWiKRQoVkWE2FBFvKgF6q22UBe+mPhYtwJQ1NCJSB1Tz23zihUuYkThgoqjgBaWfGTGTMr8xQNKnzWAZgGQW4X2hdJCFivEhXdoMFmhhyjCf9IlevfuqB9Zs4xNlpGaNmlK9Wg0hCB3PFAY9QmwRysa2bdvVe8SEowZgrrzCePggQlnjxjva4f2OnxwV7TIetg/sJHgRkLXrNsqAsMHWeDdh4iSljbHv5ImTGqhvxnDGWRRtIxgckRHDzE/tiyguxGuhlBqjkrE+ykkHMpOzJ2odClqrVv5WeXwpU7qc9HfGrzAuUkdG5s5bIDVsIFT3Hr1lvBMA4xgoZSgQjRo1Ua8m4wTSp19/gXKMAIIREAsdC3nvwgcSHBKm9XH+/HlpaZtH2M9chOJhF9P3QGLnzF0gmTNmltBQh7cNJYh50EhI/1BtW+Y3SurKlY4sWHgK7NQT5p069RrK+fMOg5S+S0YeBGMQb6s79ZZ9tIum3r7WeDZjRkKSuAXQfpQNKIY5c+TShBWcO2/uPKldp55eg38dO3WR/k4jE2prqVJlpUqlKi5xWyjkpj45h/fYL7i/YOhggNiVSfSHypWqaPnTpycIHh27YJDYwQPmUahWRmrXrmuhyJRPPMGF99/X3XjgDHCH8Qpw5EmoMwy0/TbFEQMKoIMMe81a+LtQsgA0wsMc73PrVlcDBCps46Y+alQwzkDPNbrKsuUrLG849wG1qn69Boq8wwjAuOveo6eyBmjbxAMgzZq1lGPO98xv3jtgAFKlSnUXZZB+HDIgTPdBt25loxwBtjK3I9du3NQxCr0Ij0nTJl5CWzSeHT3I+Y8+TEYr6EYELeNp9SQo94AqnuSHn36WKVNi5aUXXpZ2AYFWewHUJubgiy++cjnN9CF0rMyZsrhQXl0OdPtx6dLHyhxxpwVzGH3PrgeaU5nzqQM8QlCTMDxTv542Cd2Q7ZOjY+Xevc9Vh2jcxMtl7iaOcuTocbJztytl/uOPP1Y96KaNrgUwWLBgYWv8x5Sxz0Fly5ZXyp+5R2Jv0FkZ76Ab9u7VV0HK7/+TGAdpjuUTlkyvXol6vNkH2IeXu2HDxqpD0X+gZRuBaYOH94svv1adEyPJbjhxHGySadOTvudHYoD06d1P+f/mhr786hspVrykgBB/8+236jr/4GKiK4njoFDhIgclRxF82+mCun7jljY2Jm6UHJRTgioRjAC8JwhZqJiULjonBDLK9A12IGm7du12QRj0BA//aHBTpkxVZRqkpGkTbxnpVDCIP6HTGiF4CcXPCPSbd95xeBVA46A0GcETUKdOA0XH8E7gqYBegETHTJG2yQRDE6jT0tb5mVjsnGk8ExERw2TYiFE6IKOUmkAgBng7glS/XkMZaePL0iCgcjRo0FhpEzw7isCy5Q7Fn3sj8Gv5ytWyf/8ByZ49p0sHpiF36uxQ8Hv26uMSP4GhgsvcxPdQFm7LEU4FhUm8dOlysnGjw3OAslW+XAXLxQ7fFCocin/deg3URa+V5faP+JCGjb1c3Ih4ejBaVJGokejN4lSQzLg4R/zPqlVrXDwgDFZY5aTDpYMRjG2EwRJ+upEVK1erq57BNH76TJ10yazC+/jhh5+UeoJXCYODdmmEOtmyZauQSpbgM7uxR6DkFKcCExzcXycSc97CRYtV2eT37r37ZUbCHA3Iu3f3nkCBwqAjGMwuKGq455NT/Imlsg9WnDtr1hxp376jVQyTHjxdd+OGicZu3DG5eUo4QDvAWwbiOmTIUKtc+xcGbtBKBPR47PhJyWbwwAvIwOeephfeLBQ1xhCoWygq3l4+2r7NtbgPQA5SFRoZPWqMRcMz20Bz6OvuQryNO40sKKin5MuT3/1Q/U2cBhQVI3goaH8YmvQX4giIAxoy2HO9EL/Wo2efJBnw6LctWvqrkUUmEUMV4TogprShJk28FLjYu/+gENBeqEBhGT16jLkVl0+QfP6MQDN7kEB0lBeSExhpG9DBou/gvRkzbpK2c/ZPjYtXeiLf8ZQRm8Q7Q6AQYYzTdxCADZRN5B//+KcGyhPvQn+CInTsmCNmqlKlKpZiDQrXpIm3FWOmJzv/Qe9h7DMCMojCNXKk4xr0UWII7YICx1iL0spcZOJZUAZ79OilXmM8AvXqN1JqFuf2COojY8dN0GIwzpmUMTQR6FHE80DJunDhgirQuuM+/+ibPDcIM2ML8StGGUChqlO7npVuGo97DxtSibG8adNmLR06mH2eYn6E4vGJs89BGzvs9FST7AJFjjUg3IUxr4mXrwX+MS/aPSv//Oe/BE8PiQ1u3b6j9GoTpzR5crTOB4xVCMpjnz59rUvg1QUVh6ePAY00bNTYmtv5TTuB5sSYj2fXxCuyj+sCLiDEhEBZsgvtdLYtuQTv0HgqMBzQG0ysHOWWLVvBGifQOQAMmTfoz9CdPaX5vnHjlvj4tXBJxc+8jgIPLadc+Uo6Vpv7IrgX2jSyffuOJEotNMAdu/YInvODb75lTtPyiCMxgmefbHlQBGfPXahZ7TDuiesBCAS85Bnr1Kknb72V6O1FlzGGFR5V4+WhXIyIgYMc4xLvzhgc7NuNLuWkIJ08dVomx8SpPsZYRLIMUtQSLG4X5o8GDZoIHjfk6tXrmgiEOdhdAFLd47SI9/rqq0SaLl59wJ8pTgOK/ouninnQLvR1gKfPv/hCadP2OAT7cdy7MfCIvVi6YrV6Ku3HmO9Lly4T6GvucvTYCc3uSNuCesf4XqhAIWnZIhH8oM8BrhughHgnPAlvv51ItwJQJs0xAd52gW5F37THaGHw472zeznMOZ/e/UxeeeVVq12zPXrKVCFTI3MlxlK3rt01oyLJRzwJiVqIV3UXjPR+IWHOOehuEi8PYx1xMEboB8Ti2oWx0lPa4UdigDB5Zs+R23IjwdmkA6L00CDg7oJaGUEhZdIiJoHYAlARhIbr49vCUsxAgvPnL2ROE/82ARp8zIabt25pQI7xLDBBhA1yWNl37tyRho2augTCgAj++9+J/GzK2L17r5QoUcYqv2iRYoLLESGW
hWAoI+3aB8rY8Y4Jh20gNka5mTRpsmTPltNCP0HMSHGJMDgwedy86Qg8wg3VIdAVqTfXYFAgUPrW7U90TQCQGxQkI7jXM2fOan4qRxfXIRIaGqZKstnZoH4jiXUG+GOwGGSENGvcL4KbFuXXCA0e1ycDCmlrcY8ioN61atezJmYUKnvQIe+N+lixcpWFWBK8bpQgBgRQL+h1CO2lVKkymmOaiffLr/+uaWJRBKFQJCcE4xGfACqBwMWuW7+RDoQEiMJ9N+5cELOmXn7WWhZM0ASO2wWEK03qtLJx02aXYGOQANz2hs/bunU7Ge5Ukvr0DVHqlikHlJ4AVKhbpKpbvMTRuWlbFStVFdoiA2iBgkWs4EeQi6BefVSxoRySCeTOk9+i6zAJvJE5q2ZBop2Txcoe0Mo7p424CzEwoA9QR+yyYNFSK/2sfTsGEYaQXaDCsc1QS9hHrMrU+EQDm8nMXYkjNa1BgKE54fEiK5dd8I4MiRhuBZguWbbSharFMxkaBedB98KodkdTyCj2Tw8LOTGZkfnJSK9efTXFrfk9IGyQ0hbMbz4nx8S6TMZmH5MyCpBdMIReT5UmCZqHslS7dn2LPsWAvX5D0nZM1i9iAwyKbi/75Kl3ndlZXOkMTJCg1PfLRW8vh++AEoy/7utQ0CYJBjS59jkWgMM+BriXZX6j0MG7NtKhQ2dLYcToDezUzeovXTp3kVR/e03eOnpM3fTVqtaw2mt8/HTt+yaDGgaIGY9QckHLEeIQ0qVNrzGB/EZJHDfWQSlhTAX0OWkzLs19wVPOlTuf9Z4BDcjKYjwp0PLediaC4BzaFoofcXnEDBnaDPvwOA11UrBAgrNnz6W0H/b17NVXxk+YpJelb0LRNIYyZQwIH6zeVehLeEDIXmYEpdoeh8P29957X4oWLaHZ4viNx6uvk94zffoM/W3O7z8gTJVG8xsvEl4wBOAufbqMlqG0d99+8bPRfUjv/bYzGHT1mrVKKzPl2D+Jj/Dxa67sAAwxFGuMaSMk+Xg9dVpLya5evaaF/qK84+E2goGEhxYBDQYdR3j3BLAjjLctmju8vvwG8GD8YAyg/RhKH/vwUkChRfCEGkNQNwBUBnTQuDvTz4YOjbDqkrEXsMAkEMFbhYcM1gXyzd//oe0Bb8X9MmmRRUrbpNMIhWJXq3Z91XkwBrJnzSHE7CFQoFq2amspmYA33KNdaDPEeixbtkINV7OPdgMtkEQXCGCLoSmSNAU2ghG8pTwPQvuZ4PTkkUWpdJny8p4zMQRgIOCTEQxWDF/k2PET6sE2SvOyZcslTao0iroDegG8QpkzcurUacGDYBcMtmJFS8hI55zJvs5deljtw37s/IVLNAbLDo7BVMAItAvnQzlDWNcD2uR/fkhE8gFeZs+ZL2SlQgBWsmbNkQQoW75itWzf4UhIAF3q3TOu9F7KseuJa9escwHXKZt0tsbY1os5/5GJET0J/QYBOK9Rq64G85vjvLybCQlHjPzwoyMGxMydZjvMgIhhoyzjxWyPjZuqNCcT88V2AASAFWKXDMULCvShNxOvY84nHAFPiaf084Qf2IFXcw5gUNjAoR49XRyDcUMGRyPET9LP7UJfN5R8+/ZHYoBQ4IKFSzTPvn/rNtKlWw8rQBs+Hvy3vn1DLKuT7BT9QweqMQGPGJTBNHjSYsKtxMojwLhWrbqKXJPTnuBGBo+7n32u2YIol8nrwsUPNagL2tcIJ5+ZiYjAGQY/0CImeGP1mgqg49AZQetZbAxDiTgTOPcok0T606EJJiWQLVvW7HL02HE1np5//gU1FphIUdZAXRjIsCCnxicIFiyI8IDQgUKQGS8X5BHFmyC9ufMXWhO2uR8UA+guZKzasXufYBBhCJiGBN+3StXq4u/fRpVCkGi8IGQ8IUA0V648GrT03vsf6L1zPko7BhFuPhQ0gr+vXbum9AAQZFLScc/cPwosGasQUOqq1WqoRwILf4NToUKJIUAMFAlXNJ2xpwAAIABJREFUsZHu3YIc1JuT72jO6mxvZJfSpcvI+QsfaKYUbQP9QuS7f30vQ4cO0zZh1m/AIqfhz5m3QMaMHaf8ZjuX3lyDgCyeFyWWOoL2sN+ZCo5jUOSZfIlHiRg2QgjaRxgIoFC8+sqrVvwG23Edo9xDO7PLjISZOtCSphX366jR46wgPFyZIOu+vs0kpH+Y7Nq9T9sVfHIf32baZkFDQK1NlismUdoyqezozPBGc+bMLUWLlLAC00aOGiOFChfVjguqnzN7DsszAELEszJpjR0/US5fvaYZUuz3bL5DvyhTtoKi7kzuvPOFi5clWcSI44nHiomNtzKP/fLzL0ofxK2MQUFWMzx/8JMRkh1AK8qbO58+Pwob1DFiewoXLu5i+JDWEBofAce8W1Ae6oCFjBASUhD7RTwP75KYIiYQkxkKpZb1MQjspv1BlcMgpG9C8TIGoRamwaP71cilnZHwgT4J+EGsAnnMoW+S4YR3YQTaBVmw8Ja4C8hsVHRcEpSNJAKk4aUeeC6M085dummGJsqg35MRizgme/9g34ZNWzQGATSPgGIjGIIoWaz/Qp0Sa4QostwtSBMv1G/QyAVtNecm90k8BZM4cU8oskwKJOVg8rbkF1EP1JtOxJVc8CYTnHUM3uYrV3VsS5s6nWZ6AczJl6+gKlmkCgYNJztLpUqV1YMKcEEMw+Ejx/S9PvfcXzQfPJ5xPFq8IxIDgPbmyZVHxznoHfRl6gD6Ju8FLynBvYz9xOMR/4SHDiOGxCYg6/QFdyHLUZUqNaSVf2s1pk3OeqhNXJt3ZmhHIMosCAq6ylwEILbBGaNATEOhQkWkbbsATYAB8gy9h/GWGDfonwRoQ++kXLzTZOnDq5gtWw6LGoQR5O3TXMsh+Bmvpt3A5/7pi3hM8QwwlxJ8DK2QRC1wu4nHIrHKlWs3pEyZcspxx5NMPyGZSJPGXlYCka5dukuxYiUkqGdviYqJU48tSiHj+1+ee16zPtI3iKlJlya9jgF4a+zC/dAm8ajsO3BYqlSuqujr5q2O7DZ4oUDKeT+sSUBCGDwGYeGDVPnNmCGzxmhdv3lbChUsLJkzZlE6FbFeGMG0RwAClFnmZShK1A3n+/o1V288HlCEd4XByRoDjAXELSFQnYlLKlGitByyxXZFRg7T1KjbduzWIHTmdJKKkBKbd8y7ol8ADBETyW8yryEAMOgLxLUyL6CcE/fiLjdu3XZQywaEKwVN6bY28Aya0BtZsgqUG+gpUGkRlFTa0EsvvuyyeCRjG2MldDa7YCRSz4zTKHmh4YMs7yExR3gn8ICiBBKbCLMBwLBlS39NYENyFbx6MESMwLQAyCMm8cIHH2oCASh0UCkRnj1P3gI6xsJ8IOEFge3Ips1bVMnmXhmLmZMJpHYXEH36jpe3rwweEqnXcT+G39ARYX0Y451txN6wuCYUs3btAlSxJrkO8sGHl8TXp7lkzpBZoELTXrgPUrGzNpVdABVKliwrJC0gLpf2NWPmHNVxoMdDV2SMjIwcrjpQu4AOamiZvnDi1Luq50CHnDDJAWyhuOO
5o83AnrALmfGKFCmm4KFh6rzzzmltuySY4f3B1LDrocTT0B/wCroL79MdIOQYZS506KRzKIo9Rg2JLdA3kSVLV2jqd5IIGYPMlM18+9eXX9G09yYpC14YdBxiqumrUKWMpx2KJewU5jR0AcYyd8EDjM6CXkpSBrKTfvpp4tpdzOk8IyAQ4x7lEIeHPDIDhMJAOElVaYKR2QYaSgYXJkQTZAUlhUA0KnLpsuWKWk+Jm2ZZ0gQoYi0x8aEIszI07mJSy5K2jsGTFRcpFxSb4+Dpr1u7Tlg/wAiVB5oNkmH4gWaf+YQnFx8/TV28t2/f0TJB97kHKEUoFFevX1cqAYFtKBUExZJNgmeyrysAdYbJ0fDlQS1xA65Zt06RBegn69at1/UOCCi0W/3mfmgUIHQ0SAYfOjzcPyPcDylo6eBw7lC6CfoELQJZAY3nDxoBHiSzGCAoFh4UOMYIVCEG8+Mn31E0iDLcBYNpwsRJLmmVoRmQUYr1IoznivNw9Zs4DoJl8WRQPxcvfaRZLXgO8mBTJyD0vC8T3Ie1TnYSzp8zZ56m5iRY071+OG9IxEh568gx5fC6B25xH2T2IBOKSRnHNgwQ7gcPjTvS8PU33yoFy/7sGEYkNSC2gAnKXVjwcnJ0jLqOrzvTb4K2oXhwj7QDk6DAnAuqQiIC3L/UPV4X0iKbTGocx/OTEYg6M3El5vyNGzepC50JnAnmfoJCCcUAQ8q8k+SOx0jC/W9XzDkWBZysGMajwzZScc6bt0Db2cZNm2TBgoWyYOEipSqRgthdiK3C8OYccoWblWjxmEFNW7NmrUC/I/5r7rz5VtukHIwdst2B7DIQM2jTHmhTtFUQIDs1gvdOnTI+oECaYECUDLKQeRoDSE+I0ZOckO1l6zZH2nD7MaBkZGFjEKW/g3gawYtDVh9ogfb87Twzyjr0C9ohAYVGuHfKw0sHZ9ik82Scg4sPpZR+4e7ZMucn9wlgwVhB/c+cNTsJvQtlDJTNCOMdbdQ9XSbACO8K7joegytXrun4Aj0BTwWCBzJqcrTg/Qat/bczhSPjFOM0fQmEFiODd0dmFoK5qQ/24+lD6NPwvmk7eBfpS/yR5IE2c+fOXeXBM36QTYg0uZ4ELxRBlXYaBugufY6xwEyw0ISYM1g7x8xFMVOmWmMunHvmEGIoUMppq9wP1DLKpq6YD3gGPE5QzWij6zds0Oc0hgZef6hUtF13+qS5f54ZGhgp1e/e/UwVsfnzFzhifHbvUa/G7U8+1bGfmA/GYsYdVqvmmQytjfJog2SghKqDMPcy3rIGBeMDbQtFjzYHqGSULnMvfFJfe/buVe8u74wYP3s/ZyxjnKHNfvn1N0qZhrZIHySTEdf+9NN72u6ZL41HgkB1+s2Zs444JRZlQ0k8cOiwzi20Wff7gb5Cu0APMAKAQLn0+3O2OZJ2RewE4wWA2ubNW7R+UIBoRwQMUweMDbTLDZs2aV1QLgHjgKacz7ueN2+hZqQEALELwF7kiNF6z7Qb1htyF3QFFD47XRW0mjGc+6av2OUf3/3LAs3Mdgx1YoHw0OBpdW87JNphLqJ/M2YieNegwdD3oD0ZRdOUySfjE22SOZ65HL0EY9wI75uxGQWZNWHsQrl46YlptHus7cfwHa/O+AkTrQxt7vvNb5IhrFi5xsXzwD7ugTlog5NeyDZAC+iBjKXoIfRFxnbmD0PHN+XyiYFO/BLt0s7aoA2SGIk4QtoU4y9jgKHKcy7thfGCcQeQCO8Z4BD3yzunf9iFsYJj6WOM26RRR9CTAMo5xy7opIGdump/t28333mPsXHTPNKq0THpJ/Qj6skuZCADtOTd273d3A99c8fOnTqfGDAKEJ17Y8xgTGNcMHHIjPcwThgnqGd3vcRcl8yxtEHGOPc2CrhKBj6EZzb1Q3jCIzVAzM3c75MbII+0+8qaKNlmsLzf+Sn7Hk0NQA1o1aqty6T1aEp+uFJAs6KiEmkz5mxWanU3QKBVQbNzn5zMOQ/7CQoCGukuKMsjR3rm0Lsfa36TNq9du0AXZdTse5w/6Y9z5i1Ur517YOzjfN+/594wtkD7Jk6KSWJ42cuF6hMxbKR6udzbov24J+07ExGGbtfuQRbHmDoBOAAxc6cHPWnP9zD3u3zFKmutKXMe6zuQ6CNF/nc1AGX5gpM+9L+7quuVMLBB3d2FNNCMk3bBeBk0ZHiStmM/5mG+0//27E0M7DXnrl23waKPmW0P8tmufUcBgHhShPTei5Ys90gNelKe4WHuk7klfOBQNSrtHhH3MlDsx46PUhaMext0P/Zx+/2f7/8jMxJmCymoYYggALGANoRnYIz/zw0QJr/Q8MGa5WTJkqVqVbFSowlMfdwq8Wm9HxCe1KnTKMJvKB9/xrOCXhUsWFRRRviWrE2BUeBujGJVE+hEMDfIrd3z9LD3TZwLC+4pwu7kg5oycDE2btxEg95A/B9EQDnhp8Lhha/9pAlue+rceO6etPt/2PslWxTPaxZBu9/5oLys5QEn92kR4komx0xVb5x5JiYIKIXGo2G2P+2fI0ePlWYtWinKt3TJMlm7doNcvZaUcvO018Of+Xwg9BkyZFQqjKeA+P/VvZFRr3iJ0kLsJR4/qHVk+zFxFeY+AMCIMSCuEW87aPpvFYwuvFzEtLDwnF3wUuFVz54th0vchf0Y9+94haG/pkuXQWPVfs1b7n7+n/Wb+Rx6q6fYxj/rnv7I6xK/SrjBgwiL0gKI/J529iDXedTH4E2Mio51yfiHp7xvvwFWVtj/uQFiHhKXFHx+OPZTpyYGuJr9KZ9/bA2QDpMYEBCfe7a1C/7Yq3ouHY42wW1wx+Fp2l3B5gxcf8RWsOAbAXMgUL9V1q/fKI0be2u6S/cyyEBCykbiH0ywpPsx7r+JIYA737p1G00xDd3mSZQnDWH5rXX8W57zFzcE9Lde+3E4zxPihocHQ/T/owBowJknNgTu+ZPaf5/Ud4fHmfon/olYoD9TiB0loQpxbSQtIWjXXYhbILYiMLCjZgGz01zcj/2131AxiUNksVl3OX32vJBhlHnankHR/Tj7b4wjsg0xf7G2x8Mkr7CX82d9/y1j8591r7/nuiYJx8OU8aTVjaf5hGcwoRg8+59mgDxMxaccm1IDKTWQUgMpNZBSAyk1kFIDKTWQUgMpNfB01ECKAfJ0vMeUp0ipgZQaSKmBlBpIqYGUGkipgZQaSKmBJ6IGHpkBQlD57+HlPxG19YhukuwTZmGoR1RkssWQIpPsGP9fAoyTrYj77CBLBQG4j5N870zF+mffEyn0yMBisgZ5uh+T0tTTvj9iG/dyv+wrf8Q1/4wycWHfc6YifdDrk/HtaQmY//mnn+W77347v/5B6yy54/6PvbOAr+pK/ni7u3WKtrS0UNw1EAKE4BZDAgESHIIEgoZAQnAN7l4guLs7xa24SylQilS38t9tu92d/+c7753LfS8vlLa0ZbeZzwdu3r3n2rnnzBn5zQy80xNR8MxeK8BTm5R9jh744ssvNUPa79Ef8CqK3pFVK4U89wBZr562OAsgbU8LkUHtUb
GQ8AR79sWn5bn/W5/jVysg4LnIZEQef5Pi9b+1M36v5ybF65xkqlE+6WcgtWOnLtHysS0v85O+x3/b9RDs7Jm0+g8g7eLDivB/5PuQQq9xk+Yuedv/yOe5d/++dO4aYxVysz/LzVsfab0C0gE+LpFcgJTCBBzu2rVbU6mSwtReAO5R16Kg5rQZs4S00hD1CEihaE9nzP5Tp05rGu29e/dpukWyjjyKSEk4b/4CTS1J0B9EljRDpFckZSMZcTwRqWdJkf0oQnB9lJGGYnczZsyUTz75RC+D0WDo8FGaivtR1zXHNm7cIouXrnDJ2EM6ZVJukrLSFOMz7T1twbN7UmDoH9I0km70zJmzmk6X9Kvr163X9L6khCUVON+V9LikbKVgV3L05ZdfCrFYnHfo0BFN80gqUFJZGiIzEKlZ97qlKzXHf6vt5ctXtD4A6XTdacuWbVqT5+emRHa/zp/lNwWCySRnirr+lu8Nr2K+kNY9hR72gH0+L1y81KUG0cNWv/9fpHitVrWG1gT6/e/uuCOGPnsdE2pgUKvDnVhvunWL0ZojvzQWg9g7aiGl0MMe+NUKCDnrCR72K1fxDw8ge/haT/dfCBbff//97/KQMB+sdb900iT3kOTjtgtoybV7Gvdfvf6BS7pciguZfP1/9PNOmDBZChYqqsUu/4hnuf7BB1bRKe7PuCFLmqfA5QOHjsirr6S2Ku4+zvNSEKtmrTqSM0dODfbs1LmbFvR6I+ObMnPmu4+8BHV9Itq0VyGXhtR6oOAjxdAI4pw4aYoWEuMYedgp+lm0iJdWrfZUsI52pAsg/ztVr0NDGwrFmyZNniadu0ZroTPmz/yFizVrH8GpVarVSPJtNmzaLNmz55TBQxKSfX5qIFSvEZQkd7w5gUQMzZq3FCpK8xwfOuv1YNQJC28qFMV7FC1bvkpie/V1GdcUVaOAW/0GYVrUkVSn1DVIjvr07qt96X78+Psnlb83b95K2rSJ1GKpFFOL6thFC8WuXL1WlRwK5HkVKy7tO3SU+g3CtRAZSosnwgpLkS2KzlKdvGt0jHB975KlJDo6xqoZQ0rg4SPHytp1SesseLruk9iHkvXCcy9qvQj365EhiQK19EkK/XQPwDdMTZ6fbv1rW/xH+bin4Ndfc2WUzRUr1/yaS/wh58K7WOvsAjZ9Y4q9/iEPZbspa8ErL6WSyZOn2vb+vn9SwNLUT+HOyGWeZDPqkpUu4yesV7+U8JxUrxHosYr6L73mf/t5v1oBoQOohtmxc7RLYTpPHWPXxD0dT9n339EDWCZ7xMbL3XuOSrX/HU/teEqKQGEFelqJaqZx8f1kpbPy/O/9nFTBpYjV4xIVyQcMGPi4zbUdQmUpnzKyZ89e6zyKjaZLky7ZYntY06mmikUfwqPXuGlLLYhnLkIlb9KqGlq0ZJmeY3572rZr10GyZcvpkgac4mTP/e0FmTdvnnoDuI4hCl8VLFhEbnz40NsxJ3GeVp6eOi1pMU9zHtnVqJ47dOgws8vaHjl2XGr4B1tVZqOiOmmVXtPgw5s3pW1klBZfNfvs2337D0r/gUMEuIsh6nwU8/J28SyhwKVLm0FiY3uZZi7bpk1bCIqgvYAiDSiquXipoxIxv2vUCNBiqfxNBjhTKBVPlr9/oHVNMvFQEf7ju/etfe5/NKjfUIYPH2HtRthL82paGTp0uLUPQwf8Zp1bmlKrwW/wB0qUJ4UYgxsV2CkemUJ/jh6g2nZ09x7/dS97+/YdrTXxND84RRZJZfxHEN7sPv0HaeHUx7n/2nUbtHDrLzVWnjl3Qd58I5Ma4B7nfn+GNk9EASF3fK/e/bTACPCiNm0j5UdnKlIWkg4dOqqlkRogMT3ipGPHLlqEBEF26bKVWtGbWiDVqwfIpStXtd/nzl8oQUG1XLRTctV37RYj9erWly5dulk4cAYGBXugqI6dk11gzQdlca5apbq0bdfeqvqLcrRz1x7Zd+CQCjndontotVjOGTV6rJT1q6B1I/htYBTf/fAvaRfZQa2GptIjxw8ePKzPA+QiuntPaR/ZwUrtiEUZIQ+4haFLl69K48bNJCys8SPxmes3bpbadepJk6bNhVz+EJjFpctXCtXlgcKQ1vie85jjnXbL4iXLXapnk/awcuWq0iu+j3kEjREZP2GSClbAZKpWrSG797xnHTd/YDHu3ae/ZEibTipVrKyWZo7Nm79QqK8RE9NTq3Oyj+rn3CcmJla++/4HvQTWFyozkyMad2edOnVdqthu3LRZC9dcuXLNgoKQGhFrLuOsZs060tutWNTJU2cUtlQnpJ7MTpxrHlW/H+5UYjx69+4rzZq1kBLFvaVokaI6DvmO1BdhrBGXA1EhF2gIAi+CBufZGQ73qlU7RNq3j5IfnBXJ7RYmc3Oqd1Pplm8bFFxLxowZZw7pFiEPC3DlytVkx87d1jGqLQN5OnjosFYdbdWytUREtLFgKOSmDwisaRXzWbt+o1CsaMiQBAkPbyzfu6VS7dt3gN6Das8QFslVa9YJ8Clyi1er7q8VjDkG3KVw4aJSMH8BGTBoqI6pb779VosJff7559Yzkia5QVgjTQ2ZMHyk1nAxB6nijsWdFJYPkknvTG2NihWryIkTjkrI5twc2XPqXDO/7Vuqlg8YOMTaxTfNm7eAi5Wasbl6zXpr3CxYuET69Bso7lWMzUVQgHLmzJOk5gzH9x84JHv37jdNXbYtWrbRSt72nRMnT5URo8bYd1l/M5+mTn9XhiUMl7ffyiwU1LRT69ZtdSyYfVjkaofUd4GVUbcmsn2UaeKy7d4jzqUS8/Hj70vaNOlkwYJFLu34MWnSFFVC3J8Bb9LGLdvVYwRvtZPdcMRYDwmpJ/sPPORfpu2CBQslpE5d81PwkOXJnU+rCVs73f4ID2ssM6a7Km7+/kGaftTedP6CRRIX39e+K9m/qVAcEBAsjRo3dakyTNFBKppTrZy1ZcF818rE1HuoVbuubNqyTeJ795E5cx7yEnMz4gvCGzXRderoseO6ZgE7g3bu3iO1a9eVnTsdqVtZC6njYGB7ixYt0TWGdS8svIlMmTpNz9uwaYvOQ1LT22na9Jmq7HXuEm2tC1Q7nzU7UT7/8kvlvbyHub/9XPM30BHg0VWrVndJKXv4yFGtHs38HjBgsMIhOQdeS/8PHfZQATTXIhUs6wLp0E3cFwaTd2clypWrV2XCpClSr14DXQfMOe+9t1eYG3aK69VHyleoJEuS8Y7Rduu2HdIwrJHUrFlbFi50jGPg3lQGp1YT60ONGoFaEdtcm3HPWm2PD3tv7371XgYF1bSMF7QH/smaQuxqu8go6d49xlzGZbtw0VLJnTuvFPcqLjPena3eA7yV8HUqUA8Y5OBJIAKaNW8l1avXkL37HHyDtWXJ0hVy5+492bZth46V/QcPW9fH0NGoUVPhGR2Q0v8IMVzbd+6WL778SgYPGaZ94J5ynnUbIwB1sQxRI2LBoiXy0Z27ei+8qKH1GkjuHLmc/eRYY8aNm2jBHOEzy1eu1lS9cXG9JTIySr765ltzSd126RotlSpX1cr27
PDkHcCbiafz9NlzOtbato0Ue1wHz9Qqoq3KbXwfOyELAjsFosc6Vzeknqx2VvamJkxo/TBhHkCLFi9RuYH5Wb58JQEOaSf6GhnSDm+Hh+PRvPfgU+WxnTp11lOAy8KbsmfNLm3bddCK8BzAmATc1hByADIHY3zRkuUydNgIC779j+++kx49ewlyx+Wr18wpyW4XL10prGMlvX1SPKjOXnoiCgjwAixfKAYoEsHBtSTeuVhMnz5TXk2V2gpEAwJQonhJ4ZwtW3fIW5kyi3eJkrJ0+QqBYZcq5asC8+kzZ6VuSKgye1xXCIHto7qoQM8A4Rq7d+8RLFI5c+SWdKnTypBhI7SQXdmy5RSO4WkkLF22XIKDa0vinLkCw+d+CMNguH1K+UqDBo1kyrSZ0jOut9bHYMGuU6eevPvuLKGkfIXylQSm9MP3P4h/jUBVBiha5ONTWiioBJUp5avWztFjJyiTQug1ebxhelhCwTtDO3bsVPgHEIuxY8cpg795M2khLPqxfPnKOjnaR0ZJn779NZ8yTAhhA2bMtZs2ba7CH9dGSCtQsIhCXIwQAQOi4Ni8ufMltH5Dhc/RdvnKVfpcvmX8ZN2GTTJgwCBVuugbd+I9C+QrqEI4VlUmrVp3h42QseMmaHXP2bMT9V0QSgL8A1UJ5ToTJk7Wtq3btJMjR44pI+3UqaveggJxKFngwMMaNVU8PgdqVA+QvzzzF2W4S5YsVQhIXK/eeg649m7de+p3p7Blrlx5LYtqbFy8PPPMs0KV1YSEkVpRtldcvJ6/avUaVcqiY3rqOTAkijaVLl1WUr2cSlasXitLlizTwodGEUMo7di5m8YCjBw1RgIDa6ryieBup0tXrsnrGV6X19O/JivXrJc5cxIlfdoMlqv5wsXL0qffIBXMBg8eqhb4zZu36iU++eRT6RrdU2uhIBzUq1dfxzeLLsQiy/VY1Khb8Nxfn9MYn23bduqCg9UZYr5s2bZTvQxLlyyT0Abh8uHN27Lnvb3y4vMvSgkvb1mzbqMWTixS2EvnEgtJhw5RUr5cBV1wWPBZFLCKf+UstkUfDBw8TBYvXiJTps6QLFmyWpZi3mvt+k0qgLFIto/q7LHY3507d8WvbHmNuWCMgrlHUeRdDfRIX8L5H5DF4SPHWEYGc4y5ybgIaxiu/WH2m23i3AVqGEkuoL9nzzhpFN7YNH+s7aHDR3Uhci9M2K//IBk9ZnySa/z47x81nmf/gYPCgpU1a3ZhTtgJAwVwMUPfff8vKVzYSxdmsw+LOzzpgTM+xOxncWcsnT591uySkSNGySsvvWLFpDD3zfw/evyE5M9fKAkMYNSYCXL/wadaGJZn+dwZB2Nd1PkHQZhA3vZ4ME5Mmz5DlWBzDjyP4pwIbMkRsKu42F4C9O/KlasqeMJL4al2AibZf+BQj0YRe7vNm7dI9uy5dFyPHDlaqlbzVwgK68nfnv2r5M9bQHbt2Sf9+vaX8uUqWoYcvPjdonsKPGTM2PGSPVsO2brVVcDhPijwwYHBCpXbvHWHdOvWXQKDaul1mD+lSpWxeN22bdu1WN2uPe+p0pkh3WuS5e0sur5ghGGdbNO2vWzesl2mT5+hbekDqH//gVKoUFGZmzhPeTqCIMSaBa9t0CBMYx34Fnj+PBEVh5mD8+Yt0P4oVbqsbNy4SZsy/yhwt2vPfvW2wRdINoHRBd49fuJkNdJ17OgQ2Fj/MBosXbJUlQKMdxA1vHieyMiOwtwICAhSIyDH8I5lz5ZT+Yk2FpG+/QbK6NFjlWew9k6YMNEcsrZ73tsnNWvXU8Vj8KDBUqFCZeXNGLS4V5kyZWX12g26RpXxLWcFDVMYsLiXt5gaCxhK+g0YIsuXr5TobjGSL29+nRPw+SKFisiLzz0vEyZP04KDPj6lZKtz/bYeRESVO/hL7Vp1FJK5bMUqKV68pKD0U2Rt8tQZqkAwBpYvXyGjR42R8MbN5KuvvlZlM/PbWdQ7SowhvJ4+RIklkcaqNevl6NFjWosLqCRKXf8BgyXNq2nUkMg6i3GiYVhjy4DYMzZexxdKGYYevheEUTVHjtxqOBo3YbLEx/eR8eMnSLWq1WTKlKnKVydNnqr9By+CItq0k1dfTa0eb5Qp5JkePeP0GMZi4JYYPVhvUBQqVqgsa9a6xkX9oGtEW70ucgWyWfnyFSXMyVc///wL9V4Sh7Z40RJdX1u0aKX34D+uP2GCow5czx6xkipVGsGjC3362WcyaNAQIf6M4op8e4T3A4eOypAhQyUkXonrAAAgAElEQVRN6nQa60fb9Ru3qEFt08ZNem/qq0BlSpeVZ595VsaMmyirV6+V3DlzS8eoTlpHbPSYceLt7aPjkbV2246deo9RI0fruTdv3pJWEe1UVty4aYsqYjE9YvUYisnGzdvVw4wCReX5RxXTJi6JtREq7lVC2rRuq3//2f97IgoI1oCixYrrwkeHrlm9Vhc5BvHfv/pKKlWupoOIY0w8KiECpUDbr1athtSrG2p9h3cyvyMjRozS3wg73iVLC4LpP/7xTxnlXODxInDeuPETdUEYNWqMCnzbt+/U83r16iMhIQ+vaS7Os2A9hsEa8vOrIK1aReh1GJAwCLt1EEVngzMYkUWxcKEi+jxffPGF2CdSKZ9Syiy4bt++/STzW5ktLXfg4KES09MBe+BdsEbBrCCsHCg2hijOaGAmZh/bufPmW5bqxLnzpH7DRvL1198KQaNFChezLDiHDx/RSYfAgSVo0OBhFgyFYE+v4j6WRY4F4pWXU6ni89lnn0ulipWU2RuMaAnvUiok2p/D/I3ljYBpCMXnrbcyq7Jljg8ePEQQBqCI1m2krK+f/k3bNKnT6kLPjg0bN6qVg7+vXb/u8ODcu6/BWqYS+ciRY6REiZLy2WeOQGIUnlw5c6vVguJVhmlyjdGjx0jRosV1Ebp46ZKUK1dRCEQ2xILnHxBsfqo1sXKV6lr9mD4bN3a8FCvqJXjboMj2HQWPEdSv3wC1nOoPERVu+vUf6AJ94RhjvHVEG6lYoZKOW/aBr0eR+r9//EOaNGkhFII01KF9B7U8s3CiOHTpFiN4UCCsaM1aRFhZ0/AwYLWDsNC+lelty9INtAmL/oULF1XoA/N/+fJlTQ5Bu5YtWgltqlapqgYDg5XmWbE0QoxLhEJD4HN9fcvpc7OvYXgTwZILoZRky5ZDpk+brr/xBg4fMVKfFQGLBWPr1u16zP4f51WsUFEND3j9AhHoylcSu3XQ3v7s2XPaB3asrjnOfd7O9LZkfO0NiWzXXj2t5thPKSDEMSD8Py5h/U8YMVoFEfdzUEDGeAjGRxCZnbjAaj55ynSNj7DH1Fy/fkPxxZ27dJN9+/bLxImTVfi0B2RrscvBCSrsWBcTETwDCJlYog0heGPdM0qrXQG5dfuOziUUckOHjhyTrdsfFlyD506e4mq1Nm1RQBgrnhSQpUuXS9UqePR2yZYtWyVn9pxSsWJlQUhJjlAC8+TOox7gWjXrSJWqNSQ5KBtCL33zKJo1e44MdMLw8CK+kyWbeqvg/b6lfdUYYjyW8FCE
XYi4JOPx+OyLL1RoXLUqKe4fb5pvmbJSp3aInof30at4Sas/MLDY+REeODz08OJOHTtLtqzZ1eDFyU2aNJNyfhX0OvxXG+vvase879y5q3XN+PjeFn/AAIEXbdCgoXoegk2duvXl44/vWdcxfwAjw/JsCMX0tfSvC++3adNmKVrUyxL2aMNzI2hCeFLhGfBTqGvXaF3X+ZtCrX5ly+l+rpM+bXpJTJynv1HasFrj5eEfnlliRCGESAR3Q3jsjaHS7GPLGoFnFyKxAYY3xi3KNzy1QvmKYowKKFV4haFJEyfr+sHfjPka/kGy2AafRIkoUriotu3du4+OT8NT4uL7qPCvB93+69u3vzWmKIjr5VVCwho2sgqqMYZmznQELuMh53scPHREUR5Yx4MCa1pXROk6e/asxtXRFwYJgYAL/0fp4LvMmOGIh8PA+s472TWxA7BT+CTB9hDzO2PGTKpUkjijTkio4H2g3yHaY2Q0hILpHxAkVOGGiMGAT0x1rm/w0hrV/fUY5xYsUFiQCyCMBMxtPPR2op+Z94UKFbHkFsZklsxZ1cjMe+CZNwRfyJ4jl2UcAR2DDGSoa9fullIK9PL2Rx/robt37+mYi7Txa4yu/fr1V089iVuQN+7dfyD1QurJyy++ol4VjD28xzVn8hDGVv78BfWaGJ2Da4ZYmQZPnTmr69m8uY6xPHHyNFXozLMxX1njoOUrVkrr1u2EQPoz586rIRDjb3I0d/5iwcMKbd6yVSpVqmoZ9ZI758+w/4koICdPnhb/AFycDisc2i446S+/+lotW6VL+1q4Yj5Yq9btrN/8bVxsaJxopCYjEVlWyles4gLD4uNR6TNHjpxWJes1a9aIt3cp+f4Hx0LXpUu0NG6cFMcO1MWntJ+cv3DJ+rZor2bSrd+wSXrF97OO8QfeEsMMEhPnJlFs3tu7T4NUWRQ6OwOUaIeSYQgYFhPEULt27WWhEx4xZtykZAUv095sYfZYgrGyhdQNtayatUNCLeUEnHS1av5WoNmKlastBWTqlKkuwjfXZVK1jmittwCiRPVUiMmJJ4nsOe707bff6gTiO0P0Z/ESPhYu3LQHikZWHj9fP4VrsR84Sr68BeSME8u/cNFiCQqubcGcWjRvJa8Q2GyDADAOcMcbwtJerWp1WbNug3pZsIQbwppevkJlS/giGJasPYZQDh2T3+FqRkgJCKxlMc8VK1dJeHgT01xatmqt3hN2rFmzVipUrGIdC60fnmxw4qhRoyXCtvijGBYuVFTGjJ9see/MhbDUFSrsZcVQ4WWBURpCAUORhJhjKPzQ6dNnpGTJ0lZ2I5QXICRYClGWsRZ16tJNLZy4trFAQcDHgFEYmj0nUQOk+Y0FuH5oA3NILc5VqlQXLNAIwbzzkSNH9Tiwv2rVA2T6dIcCglAWHFxTA/WaNWsprVpGqFJpXcz5x0d37kiFCpVUYUWpJOvT9h07BT5BILk74SmrF9pQgDkYwriBJdPQoAGDNDgYuIqhxLnz1brIs3sivDvEWzwuHT32vuDx80RU0B49NqkHhPbtO3SSqVNn6FzCw4RX4IMbrl7Ow0ePafB57z79pGSJklr92H4fFGL1ADgFZnNsypRpCiEBfmYIJTrVK6/KB85Adgw2CAoQi2ChgkVkvs0LA8aduY8gB5SCQHLGiydi7gNJMF5BextgJiW9S0mTpi1UoWjapJnlScGyP278BBk/fqIK+kYJgE8PHjREg+exeJINDQETGBhWSTsB7cWa+1OE92bbjl3SIaqzZHk7swU9atgwXI0InE86UpQdlAzGEl6/D51xPShzvCMeUHfCgAD/MEYJPPllyvjJPiekFgtpfG8HVAzPHYH6WKehsWPGuRgwUIAjIx2eBAQ9PKogCAwhHG/fuUf7g7nxz+9/UGMHHgAD84FPlKtQ2VpPzblsq9cIcJlPJDRgXb565apCd1EUMHAYYgzDLw3hvbAnEdiydZtas/PmzS/BQQ6h+sKly3rN06dP62lLli5Tfm4yyWERb9HKsb4gaKIwPw4hcPJu9FE5v/IKLeK8pqxRMQ/XKAxM8+c7FPxDBw8pWoF2jFPWyZMnH8I8589fKG9mfFMNOPA5DB+G2rTrkCzEsWu37tKpk0Mxo32DsMYKozbnsj167JiMGTtBvy8ePIxAEPx35CgH7AihHsitMYgBN8ZI06RJUxWcaY9HuHadUIXC6QVE9JwB/QcIEMJ6oWEuRhZgViiWUN/+gxXyZc47ceKk1PAPFHsGQNYzI19hxQ8MrmM9K8o9qBMIQ0O5cpUU9sxvPH3G+m+ub7bnz1+Q4KDa8g+nEeSrr79VTxjZUfleMbb4GcYb3iIQCBAKSIItBozsefAP5g7Jc/DiGUJWmTDe4S1h36SJk9RzPO3dRAmpW095TsdOXaV5sxYS5YT9HzxwUBVRwx/v3run8C3G1+Ur16Ry1RoCFA7iuTFIMzagkHoNFD2hP9R7N0CA5kPMi9KlfKVjpy7qgWxQv4HMdCqNpr3Znr9wQcfl0GEJsnjJUoVjv/D8Sy5z3bT9s22fiAKCZs1ANkwYeAaeg2++/T/VwoG1GA30wSefSZduPcTAjJo2b6WeDDqewYZAtchp0YABVasWoIsT+eBjY+OVoQMNIRtJgnPiISRjrUdohlBAcJu6E9aJN998W4Udc4wAKPChEHEkg4a4Yl/B1yLUsVBgrTVWMSbH4MEPvRdYtxj4EAIx7koz6KNjYiXC5nJr0TJC01TSFlwzMSF2gmEYgYH9LNZYKFkwEKaxYAQE1dJTyDASXCtECK6GNm3aojhIg3tftHiZDBjosJbNnj1H0qd/zQWfiasUdy2EpcIIpriDS3j7CJAnd8LCj+VunfOeZ86elyJFvOSgLTCTZ4RRM9HxiOEKhVhUc+fOp65nfjMhiWtBmDXEO+TOlVctDPQhwjQwOEMIpCzCYES7d+/hYnHEpV2xUlX5+uuvVakBk332nAOjzflxcb3UlWpEV+IAagTUFNIVQ6RitVtssHiMGOFwyXIcCAGQMix3eAqAXXii+Ph4ad68pXUIxurnV14QYn19/SwrDw0YdzUCgjWdKb+79+gl22wWaYQchBOsgnZIHFZoLy9vS3nCcwUO+fSZc2qxG+W0XloP4fwjLCzcsnSyi4UisoNDEEfAsyvvpCJFmWY8MtZ4jkOHHDhmvhn4YKxBEPhbvrc72ccyxz759FP1tB0/fsKlaWjdUHknc1aXffwAlgUsDQuVISA1ifNchRngeCwgKEoQ1k9Pmal27d4reH+w1BOAfsNNGTD3QCg0z86cZJwnR4OHJsikKQ5FzN6G2A+sdswrAlmxZvvXCJBeTiHV3pa/8fQSYwJu2U4IzAOHJFiLpTm2Zdt25SF2yBSB3CggkyYnVebwEGTOktVa2PGKArVA4EfQQrjr32+AQoyIC3EnPDfEgGBccCe8Z1gFPRExB8SuIZAQeIqyDBH7hhHITmvWrlXBzK6EczyqUxeNH7O3df/76tVravXHUIV3Aj5hoFR1atcVA91kDALPwiqv2WmqB1jKJZZoYkgQEN0
JZQ7L6wgnTOPqtQ/EW40vDtgIMWPGqg/vwvNtYryAjXCuoUaNmkgrp3COsoUFmzEJ8VzMZepFEVNSsmQpIaySNRII0GYn/h2oCR5ce1IEc/1SPqUlopXD+8A+1j/vkmWEGCy8ihhaWEsNkcSA2BS8L8TmwXOJNWT9GThoiM4thDXmVSWnIYY5lzdPfk2nzHWAOyG4GaV/7tz51pgAAuS+vjK/GPN2AnLDmgBf5H1Z9wy8hZgJIwTC/xEClzjlBWICgCtBPHORIsXUOm+uzf2BtaFwwufsCRMiO3QUrO+eCCGZdQaCbzQMb6xxg6YtBjjibFDgQSRUrRZgGU1Rguk7CKWMGBrez3ifUZDgByjuwJV4z8Cg2hoDaK4PaoMED0AaeX6zdnEcY+OkSQ6vYN9+g2T9hodjlhglYkXs/UuCkdVrHEouxgjWHdpB8AsD4eU3XniNx4rqpDCtw4cdhidtbPsPuY81wmTFpHaRj08ZNTbOmOma1ASeiuKP4Q8CWgjyxE4YbIjDPHLsfWtN5DhjEvieoR49eioUEagxSAVPtGjhYk2b7QxJlosXL2kCFCC0GDtQQEyMFt4dnnvpUofhgTE4LOFhggxgxX37OxKuEPs4dpwrfNAgE9yfg7W8caMmAlqDVL4o4uXK+rnIGe7n/Fl+PxEFhMC+smUraG54Oo58+QSU3n/wiS7gWI0NPIqJlDp1Wg3uJUi7Vas20q//AO1vsjbkzpnHcucuWbpcfEqWVpciisDzz71kCWDgdwcNdkxsFjDSYH7yqcM9iGUzKCDI4zfEwg404O9//0qPE/A73xnkNnvOXMVZ2y2rWF2xpPaO7+Ni9WOhA2v88ccOFyHWZgYZNHLkKIVqGSwqQU6NmziwmhzHFWwCL6dPmyF58xSwXJtAZBCGjeBDeyArvn4VLKgNaSoLFiys98Jah8V/gfMdiCUpXaqM0LcQ2PoePeNVGcKFDf7QuAoJhCYtHO8C4VXge0DcM3euPDJndtK0nSwu4HKZhDdu3tagzTy582o8i54soovVNCc0p2XLCMmTK48eIsgxXdr0sm+/I1APfClMGSIQHCgLhMu9SfMIDSA+sP+guooNNA0GZaACGzZukldTpREEHAgrkWFGLJ5YK2H4hoYMTZASJXysBAPgtOk/vCoQWOcqlaua5qp0DnBCOvbu3St4I8ZPmKixLp9+5lB4rca2PxIShitMDKsuBD52wkSHQIirO1fufEJqPwjrkrkHQgCxTnjj7MQCmDdfQcv7wTFqXaROlUamTZuhTREmQ+o6vBcsbKGhYXLOabFHUNq7zyEkNazfUK3cRkklHsdcY+q06aqAvLfvoC7+QAQQsL76yjFfiPMCBgHdvvOxQh+9ihbXgHnGWk8n1JDjO3bt0Qx52tj2H4ssMLet2x7CswhghGcEOxVrW3P9k+9mD4xFEcubt6BLznZw7CzIWKmhWXPmCjA0DCGGgHUOGDTM8hAilGZ8/U0h05Qh+NaKVWuthQmBjXoGZoEhoQJKnhGyOA/jyHCnUGquA/8iyNOdgNHBr3gWO2H9xAI92wlnsR9DgWXcG3iFOYYw1bptpGzY4KqwEJ+TOXNWjfkxbRHK33ori6YdZh/wMDyqx2zzw7TF4gvMhhTMdsIyyvzYvTtpggqgoQQh/xwKqVNPBg16mLkMHlW5chWFHNmhsAgJg4cOFwPLTO4effr0VYGD4yi4QPOw3EN4VkwQKoVHiR9AYIaIx0DAga598KFkyphJ29sVO46hOPmULGXN18uXr0qunHms9Q34FwIrhLGN1MRcm6QsCcMSdC7pQe4ZVFPIAgYB6YWnGgWEINsEpxeYd8qVI5cmMrn90UeSI3suC+6KQcyruLdcdPIvc222xAkAjTWxiQjCJHGBUMqI1wDiA/373//RNQihD9w9sOZ/fudIHELsGJb62bMcawEe1Jw5cul5rMsZM2S0rPrEm8DPv/7GMedmzZptwaJQov0Da1oBzfQtY95AfvWCItKsaQvl0fw+ePCQ5MtX0PJo+1f3t4x5GBry5ysgc5yQarwtKI7MCSguLl5K+pSxYETqzXYG+sfGxkmZ0r7mlopQQBH3RGSNi4rqKCdPndV1AkWahC2GmrdsbaVnBQb0+mtvWB4p+I/JoAVfQZHEiMg4GjZ8tLVOM+8xtGBExUuFpxAC3oRCwnpO7RwgfB2cySiQE5o2j7CMu3jO7QHYrJd4r+2F9WrXDrEgayBXSC1LshSIbHQGDYKi/f6JUzJ12gwBgoYRLDnCGMR3AD2g1xkxUkLq1pd//+ffKsukTp3OQlKwRmGYxMMBNW/RKkkWLBINlClTTuUK4ymlLV5ZgyxBPsEbb5Qn+o8MhYZmJ85XAwTGsvTpXtOYSo6Rrr2DE2aI9445h+cWJZJsfaAUTFwS75Mvf0FLPgqtV1/Spk2n0DUUqFp16ioEi+vCnxLnucb2sZ95Nnq0q4LFftYOjId45f7M9KsVEJhKeOOmkumNTGqZePDpZyrgvvLKq5rliM6FqVInBM8ERcRgfAj7YH3JA4+34ObtO0KmlxdfeEk6de4qH318T70dGdK/Jh06dNLBBN6e9J1k0sK1SV7+iNbtpHp1fyF2hFz49+5/onnlgTkY17f9AyN0sSCQQSeiTaRmauE4mSjQfvPlK6BZf8w5LMS169SV6TNmqWsdC8KFS5e0PgLWO4LepkydqVYe8KUdOnZWrOSbGTPJpi1b5fqNm1LWt5y88UYmzfpx4OARDerFFYvGjaKB8FK0WAmFGSE0eaq0iYCIQoDHok/fAQIjwT1N7YB8efNpxpdPPvtcsbqpUqWWOYnzNVgLCzXvNGGSA7rAxMUtW9KntNY8OOu06hJXQWwGWavufHxPrR+kRUWAB1fpTgTBg0fdumO3pgDlO/FMXB9iIYZZ8p1w7YObhnETOMdYAVP88b37KnASLzM0YYRmTYKJYHlr0DDcEtCw9oGlHTl6nAQEBuv72xctYn4KFCik1i8SCNCvFBrsHhMradOkl1at2lqC2/37n6ilnsXm62//T4DsAJ8jkBOhGzcvChLCLMFwb77xliYquHj5qqxfv0HHCEGqLNDEAJHVw64smn4iWJSgaizjKHkoH1hkDeGuJ7MaMAms9AiDXKffgEGqbOFNQCg3RPIE+sROPCOB6CgvLLwEMjKODQEPrFuvoVSoWFkVIDKmQfQ96WeBBoLRx6vD4gahuOTIkUuhFggIcb3i5eWXUwnpdiFgBMBBgDegLAIFJPMRWVgg3pfnwFMyY+ZsyyqmBxX/uk2Ca9bRIFCyiZGBBpw6lj2yCxFU7YnwlLHAGuLZ5hJcO3matG4TqVBJFj2svNCCRYs1qBdFh2xIBPsyXwsX8XKBt2GlxsNVt14DPQ7kkgxgwNxYhMkWhgeU2DAy7BGjhZX46LETVnYteBm4YjKIkUgDwoL94ouvSM3g2nLOCcfQA9xv5GjlV/CgnTt3y3mSeAQE6zhxt/qbc1D2jSXV7DPb+N79ZKpbJimOYRHGiodgjWLQunWkJmvgGIYLPC2+ZctJ9x6xloWZYyjz8L
W/PvsX7RPGJoQSSz+SgILvj0XZEMqXV7ESgiGCLHkmS5I57r5FuQBqQUwd8XyMAww11AEhQBrh3U5YPo03C4GndZv2HoUi5gSWb+bFxMnTFfpJsTPSGBfIX0DHO15LMqRlfSebehNQZvHQE6TMvBuaMFIDcrt1jXZRXnmehOGjNHCXLEMPPv1cZsycJfBbBCC8Q8Ba6HMgoyTzwMswdux4ef/kaY3Vw8BGzM3Z8xelcMHCqlicv3BZDQDp0mVQa//nX/5dExWQfalXfF+FX7K2MI7599abrLXdFeKMQsFax5g0vNfeb6yBBQsVkarVamj/OaAt/9SYlwwZXpchCSOsGB2CyOvWq6/ogoGDBqtBDWEQL8mwoQnaP0BOSHXNmoY3gvckBgsPGokxiOMhuJ2g3xOnzioPZG0gcQuEAMpYL1e+omaoQ6F1J83OV81fkx3E9eqraxyxDIy9/Hnz6xp1+85dWbVmrWRIl0E9ciT+wPBI2mfWErOGxsb11nsFBtVUXsG98KoQU/h6howaA0QCAmBTmd7KbGW6tD8ThTWzZc2hAiYeTRIZMP7JmgnpfCnurcJxvwGDVf7B88CYRlnF440hE+jTK6leVWjguQuXdBwwnvDEkcEKQqGFn4FaAMJbpWp1lyxnKC+MC+DRBKSbTIP0d968+TRe7/CRY3ot1kiegTpC8DlqI6VNm15hxfsPHtFspcT5oGAhOwC9JU33CmfsE5BFguAZA8hkGPaMJ0pv4PwPoyLJEHgXvFW0M/GaNCGOEBmE9RAPmIGfbt+xS9PSItPxPIZQijESkqDHTm3btFVvLbFKjCGC+A0xhwn+B/GAvIYBD/6NYSYoMFjRMngfSedtj2NB2QWdQZ0o5spbb76lvM/Ez8HLUAiRExh/zD9ThBQDE6EH8C88XXaDCc/FOPPyKqnrJ/E2dsLAVSB/QVXkiSu0eyLt7f7X//7VCghuULRQNGTw/QT7Acnit4Fd0Yngxnfu2uXiDgSvj8CDJR73IxYAMNNYubBuoFXi4rW7/kiTeuDAQbVGgivGuoUlGOs5VZFZLLkv2rvJKOLpIxLchoBmCOEM+M7775+0FjZg0yy66zdsFNIJkrqPyr0XL19WGBPMc/v2HQIGEusobm3c5Wzpizt37qhF49KlKwqdwNOANYIJyz+zsPMMKGm8Cxah5IiFF4s/Fl3ORbsmaPXqtWuK49R3v35daGfuhcscLCjPaIhvhkUVz5UhMKu0pc9J+8dzApfCTWms36YtW9y6HGdBAybA96ZvjPWZNlgf6DOCUFFiYCiMA7DI3A+XNM8KRtJeyXrXrj3aHyZQd+7cBZoikAxRMBS7h8o8EzEW9I2B32ApxgJEnATvZVzetAcHynvBoBmruGVpy3Px3fg2xCnwj785ZtIeMz63b9uh3jCwvDAtO1zKPA/BtQQDE9cElMMTg4GRURUceCGEAoJFkTnBWLRnPMJahdBkJyx+7aM6KZ597dp1VkYQexveD6/jlSsObwvHWODWrt+g9+E8k1banEd/mWrczD/6kO9kCBwv8TAXLlwQUlG7E/EcGzZsEvdMUbSjjxESYfAE1JLJjjECrO6naM7cBer2Zlz8598PgQg7d+3WcWG8PVyHb8r4gy8wR7gP4w9e5akqM9+Y+USclfEIch08kggYXAsesG37dsvaa56Xe+Fl49rH339fd/PdOY/xjpHGTnxfvgswplu3bmnCA6Bu9j62twcyMWz4qCTCsGkDfAehGT7lTrzLps2bdW6YMUwbPBvMOTxcZMUx8AmOwUcZA2SGw9JnvCDMVwLjWezpC/rMEO9PW8bLsePHLYiVOe6+BSLJ96AfOIcxwHcEEmk8TeYc7jNl2rsWbyQVKAs68WWeCB6GZ5P5Ay/j2+3es0d5G2sOXsZbt29r9jWe2fQL34k5RZzi/yUTNwRvZ46SMAOlm3WD56e/DL9i/QJGynVNWmw8HIwHng1LOLyMZ2HtIzEJ34nrsv4YoRwewFoHrwXawxjGIwC/Z42jHdeDlzLOzHdy7xPmL3wG2DBEO56X8cY98Rjw7Cj/eNAJFOZ7sEaMHT/JWrPpK74RvJV3sPNz5gBxSgjI9AfPRGD8+fMX5f0TJ/RdzXPxzlybNskRfcJcJDsaAinjlGeCPzvWqL9baxQ8+d69B3pfxgrvZBQQro/MgPEIgRTC2AKvOHfuguAJA46HN4H5wHu4E2sH3jT6j+ci3Twp4ZnjhhDM4aXMHeA+Ki/s3qPXpQ+AcDJW6HP+GSGY+Q/c0owdvBHUgQIehZfR0/MQhE5cpJ1PMS7hK9TssfMbDDWMO4i+ZHxdunxZ+S9yE2s44xDZgT5gvWN8QXgF+AbEe8HTeW4MM/Y0x7SDJ5BuGp7O2LHXI9ILEXt29rzOLbuQDmyQPuc57F4azqG4KePETkCDyRbJe9K/7sSc4nkxvJj+BIWBckw/MLdNv5tz+W0UCtAs9Ad9aI+bYf3gfvbUwuZ8nn3VqtVy+sxDXmiOkTKbecZYMWn+zTHGH+MW2Yx1Aw/kn5F+tQLyvx+HB3MAACAASURBVNxpWEbtbj3eFcZGhgv3AMn/5X74o9+NnPB4Mf5IQunDa2EWcfMsQHFYEN2JbFAGJ+5+7Jf8Pn7ilKUYm/MR9LAi/1wCyrPRmabw5577R7aHSUd3j1Vrv4kh+COf57e+N95aFFw8RAg+jyIUu4jWkZr9zJNH7lHnPq3HEBhJujBsxGgLesuzEmdEYUTS16bQk+kBhE9iUNwJAxwxfyn0+/QAHrSY2N5JjEK/z90f3gWUCd4qOxG/gRJiRx9w/OrV6z+reK39mp7+RoHEC+ZOQLk9Zd9zb2f/jfGGrG0p9HT2QIoC8ojvQqYDv3KVFEsK1IYAw67RPVxSFz7i9JRDT6AHcGMSGAcMyFNQ6BO4xWNdgoBrMtqwSBOLQEpeoHD79rvi+LkYXizcq8BLsCL/GgJ6B1QKbLadwJDCkKmxA7zBbvGzt3P/G0sWbnjgch86IVnubZ7235u3brdieJ72Z/01z8c3pgCWSbv5U9cCjrFx81bL+vdT7Z/24+C/jxx1DUTlmYnHISvc/4qi9TR8Bzx9wHuJGRg6ZJjCOvG4YgU31uSn4Tn/l58BKz3JKkilDvTOjpD4vd+bBCbERpLsZ8TIUUJ2vvg+/V0MATyTPnO37vLCCy8lSabzc58ZSBYwfZQfd88PiRxefukVadggPFlPsPv9vvzqK4Xbs0buP5h0nXZvn/L79++BFAXkJ/ocNyDZYQjEIpsN8JEU+v16AGEeKBPZq+wpKn+/J3C9ExZpMlkQGwQ21hPhFUFBGTBgoJVZx1O7x9lHrAlVag18wJwD3pvq6qQVpgq9Cbw0x5Pbgo2lL/HOEAydQik9kNIDKT1geoBieTHdewqJTlasSJpAwbRL2T75HgCmTIFbii+SGcodLvTk7/joKwJzBL6E4ZX1jmr37sQzU0iQ7H5kPPs1RC0U0rcDW3In1i3uQSyYyXbq3sb9NzBB6m1xHoHmKfT09UCKAvL0fZOUJ0rpgZQeSOmBlB5I6YGUHkjpg
ZQeSOmB/9keSFFA/mc/bcqLpfRASg+k9EBKD6T0QEoPpPRASg+k9MDT1wNPXAG5e+++lZLz6Xvd3+aJqBPwuETGIXsmiMc972lvR7CoO0zo93hmYh9MyjzuR2YVsp78VkT6UrJj/ZmI7DieMlr9nD4guNI9ePHnnJ/S9n+3B0j7/WvH1/9u7/z0mxG/QeFXslD9GoKPcq0UerI9QIrbx4UNJXdnEnAQv5dcHRzixTwVgU3uer/1flJd80y/hghEf1Tx119z7af5XDJ5khH2z0BPVAEh40DCiDFWGjaKe/WMjZMxY8dZaRlJ4Ql+nQqZpAelQOGIESM19dlPdfj06TNk6NAErST52eeOooNmy7mkUJs8dZpmavB0rZOnz2qaR0/H2Pef//xbjhw9/khBiZiQd2fN1nbmOgQIT546w2O6WtOGLYOqZ2xvca8wTJo7cvxTAIo0iI+iDz74UPOte2pDmjnyt4PJpCrxvHnzZcSIUUJxOdIN7t13QJ+ddrShVoc9laana7rvo489pTcmWLZX737uzX/T32Bky/r6KU7V3Ig4kSHDHlYvNfs9bUlH/HOUJt6dApBTpkxLcjlSKJP5inSPU6dOVwwvsSuXnBXW3U+gDTUjjNCAcE7tEvYvWLBQFi1aonEa06fPdKkf4n6dX/qbAF4K6v1UIC8FFcnTTrDhLyEyOXXq2Flz4X/prCfxS67j6ZwhQ4YJudjtRJHHmJiewjyxEwIAcTn21JnmOHFGJ20pZc3+5LZr167XuTNx0hSNw5k4cbKm3zTtSdkZ37uPUJTNThQ/ZT5S7NITUW+AVJKPSyh1AwYMEmrOmKKn5twPbtzQ6s0zPdxrxcrVWnPJtDVbgr6pBUG2m9+DyCwX16uP1Ampn2z9F56DNMHEJgwaNNTKBMe8XbN2vQwbNlzGjZ8go0aN1hSdJt01qb0JnLUbJ8w7IWST/vOnxr5p/zRuSfFpeBdjmvoGvybhxchRo7UWz2VnUdqn8Z3/G59p4cIlUq5CZdnpoXDnz3kfYvyo92NiUEkMYIqSsoZTS2T4iNE/55K/qC3ZPx+VgVAL6XboJPUbNtIU2L/oJs6TKOyXXKptT9c188HTsf+WfaSmp7DppEmOosV/9HOj+Jpx9ls8yxNTQMjP3T6qs9bA4EGpC0H1Z3I3U0Bw9Zr1WqCKHPIoIRRPS5Mqtbz0/AtatIzMJskRQh0F8igGRkVoKke/O3uuFj6kGBlETY/Ur6bRiq2vZ3jNpSAbVa4pSJQ6VWotDJbcfciFX6JEKVnuQQlgsZr57hytckpVWIrs7bEVfaMg2qQpFG1KWheB+1Evg0q+7gLBsOEjpUu3GC0O17lzVy1ON85ZBdXTcyJclSr1sIKrafPP777TokPNmrXQgokUWqLSLgUDqQBLBottO3aJd4mSkuXtLDJg0BBp0ripvPH6m1qMyVznUVtydVNQcZaH6uic13/AYF30H3WNJ3kMoTJP7nzStcvDAnWPe32Eb0/pcx91Pl6ejBnfEBRhd0L5pkorFYLJZIKw26Jlay10NtnGTMjBT+E/Cn5RqZfq4cwTLEaORaavZH47i1SvVkP69OmvhQ8pQHjjw4c5593v/Ut+8y1HjBr7k6cy3yiYRj2FX0LUbKG4I8XlvviF13C/LznXKVpH1h4UDkMTJk7WLGUtWrbS9MSmzsD9+/elW/ceQvHPiDbt5Natj8wpup07f7Hs2OkoKuZywMMP8sMTKEohx/j4vjJkSILyJuqeQIbXtGvXXpq3iJCZsxItQZEF1VEoq55WibdfnuKWvNPjCv/UWqB9x46dtRgciRFMql7em4JZTZs212Jxdn5CcdTAoFpaAXje/EX2R5CLl67I5KkzrboYLgd/gx8IMrwDmeUePPCseDFPe8b11lSaFDg7/v5JfRKUCNJiR0V1kv79B0pYw3AtBokSj7GlQVgjLSrYvkNnl3o6nDzj3UQtYvbfqoB8+49/yJWrSVN//5pPFBvXS6tAo9Sm0JPrAQpVUnB2zxP2yh84cMgqmIzRoIxvuSea9j25HoC/2auTu7cjEx8FXxuGNfZYN8O9/ZP6feL0Gasw7JO65i+9Dsldbt/5+BedTp0dCiPPmp34i85/0idRq2ryFM/Jdp7EvZ6YAkIFaiaCoaCgWtKhQ0fzUwvRtG7bQYuymJ1lfEpL6lde1WrhZp/7dvGSZapUNGgQJv/8/gfrMNboBg0baZpKmDHVJBPnztPMHc//5W9StEgxawKQI3/48FHy8gsvanVX6yJufyTOna/3Ir2pO31487ZMmDRFi85s3rxNXkv/mgoe9nYdO0eLqUJq38/fFEacNNVVcEUwy54tpxbkM+1ZALLnyCVjx00wu6wtFv+oTl0lQ/rXXTwwNAD6RtYHQzE9462sUVjXqUoNTZo0WYUP0y46urs8/9wLSa5njtu3V6/fkKxZs2sKVwQDTxTTo5dW/fV0zOyjiBxpbQ3B0JLTsk1hQdPWbA2cZ/HipdK2bXuz2+MWxfJfPz4sXIcVB2vS9p27LeHQnOip2BDHTHEwPHBY3j3RhUuXhGqrdiKNYZbMWa1UqmTOahXxMC85wmLD8KYuCnPT5hGWYE1V5HeyZJX69RvaL6t/o9Ta6VGWKfP8tEdJnjZ9ho5f9tstR/aCdLT99LPPhCrExnOAkoSya78Xrnb797Q/E3+TUpFFiSKV7pZ6Y7E2hZh4FgwUyY0HXPLFi5cUvA52OnT4iFSoWMWaSygJoaFhWuBp2dJlUrNmHW1OP+L5M7Rh0xaPhSTNcfctWV/ciYrPeNMo3linbn3rW9InVarU0EKdFAELqdtAT6VInZ9fBWtM7d1/QFM6uxf4cr+P/TfKLcW/DNEnmzY5lKDI9lFa3Zhj9CvWUQqXQVTtpaAdxfZKepeyMu1QQPbd2fO0IKy55uNuk5sznG8fJ/brme9LnYnuPeK0grj9OH9TgI+FmKrRdmKM0N/uNGv2XN01LGG4VnnmR1hYYxlvGytUU587b6H7qb/oN4UI3cuHMb7dC3vaL04/e6LkFHz6z15gEC8aXqN9+w96zHxn5wnGGMbzMG/NPKcQp/s3o3I9VawpimYv9PovZ6FRzmX+mmt6egf7Psa+nedyDIWPPjPk3ndmP1v35zPHON9e7NbsN1v3+lyPmlOML09FSeFvj/qG5l5suUZyUCPznFT/JouTnegL+3elf/lGdmKOcH13Yn/XrtESHt7Y+lZUaJ89Z5429XSOuYb7e5l11Bw3W/1Wtvov8OnBg4dpxkX3lO/u6YKRgaJj4iyDCF5wvoP93qw77gVHzb1/amv61bSjkCLGHhAm9pTRPHNy882ca7aKhvAwGPgmyc1NzqWf7Gsac4eq6Xjm3ecKbe1ETTHQQO4U0yNWU/2777f/pjbPP7/73r7L+pu+dV/HzUFkLftabb6/feyZscgxsuFhyLK/I9fy1K+/pF7QE1FANm7eIqPHTnDRjFEYXk31qhw9/rB+AR124KCjuA2fomxpX8n4WkbZf8BzjmY+TqEChSVd6jTyo+u30/4Ej79sxSqNqbC/PF6A
XDlyJfm4xYuVkJA6dc23cNnCuJevXCOvvPSyZM+aXW7edsX52ycZqVhzZMuhFUDtF9n93l4ZN2FKEqYBTGzAoKFaRdS0v3DpsryRMZMsdYOQcBwoxjPPPKtFxUx7trvf2ye79x6QiFatkwikWP4MMVgiO3SSOYkOhsR+MxkGDRosZcuWl6++dhSXQnHBozN8+KNhS7Rbu36TLFu+St584y2tJG7uZ9/iiWrZqo19l/7N9+nXb6AsWbpMdu56T7xLlhYs1lDivAVSooSP0H+GeN6x4yZKSZ8ymprPPmnwfNSsFSLDEkaoMEnVcYiKtniUgGYYmjR5qtSqXVfKlCkrVEWFuO8rL70iRQoXsZ6BidkzLl6KeZWQuF69XQQLCiNVrVpDvRUwB+Brnmjzlq3i7++qgGzesk3eeSe7JWBx7wL5C2klZXOND27ctKqxwlzDwpsKFnFDzZo2l7y585qf1havIv22Zu063UeqxMDAYIWdwPB5d6BhKL8I58BYoPdPnNbzcmbLIW0jo+TuPYdQ3blLNylbtpw0atzMStF7+dJlCQqupYoI5/boESely5TVschvFru4+L7i61tOVtv6XW/k/I9qzuGNmiiet1//QerV+fjuXT3aPSZWfEqWknHOPiWeqkKFKjLZA8yNBbtJ05YKweRvo7xwIQRLavQYIgd8pcrVtQItVZdDQx3Cf7MmzaRHTE9ttnLVGunRs5c55Rdt5y9YKDOcXpjDhw5LuXIVxK40d4jqJDt27tJKypWqVNd7bNiwUbK+k90aY1R4t8/fx3kQBHO/chUFAR5au36jwGf4B6QJ2JKhkaPGSvPmrfRnlSrV5PLly1pQs3ChogoBZK517tpdNm3eak55rC38d+iwEVo7olatOpanAUjpqNHjBB4XHd1DatepJzdvP/Q64akJqVtfBem58xdKbFwfrfTtftOvv/5WgmvW0TENbMwQC7n7Yr5p8zbrGiya3BfC+9vfqXC+t++AVvY213mc7dZt26VXr96K4e/cuZssWODwGqEA+AcES+06da0K7Vxv4qSp0qlLtFacRsBgMZ87d57CNletWquW6m7RPbRaNu0RFPCMValaQ71WVP02hMEpok2k5C9Q2PK6klb05RdfluJe3vLurDkqGAAJNAYm5iBzODAgyIpVa9KkhXpjGWMoMPBIn9K+ikww90IhrVOnniqgQAhz5cwtDeo3lGvXHR6RtevWi4+Pr45jc05y24WLlkjp0mUlKLi28LehVavX6HfftHmLegrdv6FpN3HyVClVpqzUq1ffRRFZscpxfg3/IBk9dpxpLnv37pNe8X2Uz8EDgoNrqxeM8VmrVohERXVWuCknUDGcdYj4AhAJIAXsc4U5AC+jHsr0Ge/qPRCchw4drhXLgfjixYQoXt2te08p61deSFlrCAUhNjZeeR4QIhRs5oSdUM6GDhsppUqV0crtnFMvtIGULlVGK6DTlr5j7QJaRzVtjKwY/jZs3KLfp2CBgurF5lpAoBcsWirUy6Dvp05zNXaae3fpGq3zyQi+1LKqWqW6GiIQ3uG78DRkh6JFiyvElHN5v1dfflVy58rtArVGGS5Txk8iItpYCifpdDGUQqyrlatUV9SGMc6yntYLbSjFi3trn5pns2+BebdtG2mlsWfMTJk6Q3lKs+YtdY0y8jype9O+mlpKl/FTWD/XYV61bhMp3j6lFW1jv7b73xjO4UfwmvdPODystAGVEBISKpUqVZWBg4ZacxZ45+ChCVqpHe8yiBQqyEMLFi2RtK+mkXx58ipvZJ0idfDqNevUCF63bn1td/X6BxLaIFz8/YOka7cYF6W6bbv2Qt255Ah5mTHOfan7ZKfde/Yq/4c3xcf3s8Y9bVDq8SLDT8aPdxi4+d65cuVWpIy5Tu8+/bXdwsXLdG0uUqiIDBqSYBkPGNOgkeJ79zWnyCeffqbfZFjCSA2BuP8gqaHOamz744koIAiAMGk7YZH821/+psJt9ao1tHiU/TiTxrdUGcmY4XU54FZx07RbunyF/PUvf5Eqlatag9scM1u74mH2hdQNVdiH+c32iy+/kjy58khoPccAsB/jbxanO3fva85rBHIjrLm3A2bF8Qzp0svHbsHnPAsTC43cTgxOmLG9inbv3v0kf76CQoAaxEJl6PZHH+s9Bg4cZHYpLGKyk6msWLFSXsuQMdmAQRS9qI5dZf78pJa+Pn36CgzcUGLiXL3Xli3bzC6PWwb9tBkO7DpCdocOUR7bHdh/QAVWu8JAw0NHjsmrr7yqBfqAE61ft0Hy5smnAgEeKmINYHgfOV2XsxPnC8LJp59+6oCSNG6q92Ogt20XpXm9jxw9Ju9kzqp50zm4aMkyyZw5my7M/FYGXidUTp8+I3PmJCojQcDnHpUrVdExQlDfP//5Txk+aqwqfECTWHyMlRyGhJB+8NBhQQjK+PobsmjhYn0W9/9wi1eqWFkWLVosW7ZsFWJtOkR1lO7RMZawjKWhefOW2ufdukbLg08/c7kMFj4skSwChkoULyEVK1Y2P60tcKDq1f2lW7do3bd//wG1rBOnBFwsbep0kvmtzLJk2UrZt2+/Ci0TJ05Sa9SUqdOkZs3a2jcorAjoWMdPnTolTZs0lw4dOuk1L168qAsjP4hPGDd+kio8WEC4z6Ily7Uw1PnzFyS4ZogGSloP6Pzj6tVrqrAjBF+8eFnHDkow/U6fZcmSTRY6+5T3p+Ck+xziUjNnzpK3Mr0ts+bMkyZNm0u1av4qcHNs2vR3pUCBQvLNN/+nd8VegSBx+PBhuXXrloSGNlSFjLF75MgR9eKMGjtBwNIzL1evXuPRqu58hWQ3BP+uW79RjwPpy5kjt+za/RDOBcNHOEKIQYAYP36ioMROnzFL5zyeY5P3nlz1V65cS/Ze7gcQkOBFwL0MEVPEwkTdIkNjx45XryW/8eBhPUUQYaGDEC5YIKG9e/fLkSPH9O+f+g+lhYX75s2byv8RbLF2UjgTIblOSD05ceKkFsw01YgZa/CmxYuXCGMmPLyJLqaerIy0ZSEjHqpAgcIKMXG3fppnZI4i2EAIKwgOU6ZMlXr1Gqj3DmEFAQ168MknAg/9qeQheI8wWAHdxfiC8L9y5WpVpHfsek+Yfwh64Y2byffff6+K/87dexXiVze0oRw8eEgQggsVLCLP/+05WbFyjQqSxYoVt6CqGEtY6L/44ktJGD7S8mrdvHlbeqqn44A+a/HiPrJ//0G5/dFHUta3rPbpnTsfq9JVrFgJ9fTwbngsixUtJk0aN9F35T/GqDFSTJg0VQ4eOqJeueYtWilUlDZ8QwRrFBHmYK2atRUajTcAQkhjPfkqGQ+ONhKROYlzJePrb8quXbvUuMY8Z34Ri7Ny9TpVgDp26iLVqvubU6wt33bchMkydfpMOXL0qAQEBEv7KAcf4vljevZSfrV3337Jnj2nxj5xcps2kToPEH6uXrsm9ZWX+cvJ0+fk8OEj4l3CxxKkMdIwZ5o0baFGIXgz/IdvfezYccmQIaNs3LhJ3ntvr/iUKiN3793Tds8+86zCyfcfPKw
GDGIxGBMIuidOnJLgWiFW5XAUfuKPUHYQVIsUKeaCDjEvfP36B6pIzHQqOrNnz5G33soi27Y7alYgAGO8uXv/gXqhfcuW11g8YJrA0PGAcA3GNvFoeMqnTZ8pO3fukiJFvfRccy+z3bVrt+TLW0BjoNh39tx5/X3mzFkB5ZHq5VSSOVNmlcnwlGbPnktWrVqj4xOYY6VKVeTcufPqealSuZrynhs3bkiF8pUso9nadRsEwxKEok7NqR07dso333yja0afvgPVO4JhJl++gro2mecz2x49YuX5517U8YjHBUgX8mRE63aq3AcH15L4+D7afPbsRCnuVULX3Hv37mkfNWsRIaBnkENZI1GQ4CfuRLHNMeMm6rfielWq+itfxiiD8oeh5+TJU1KrdohgVIeQTShuWKRwMZ1LxJ9VqlxV+4QEQ0ULF5PIdu312Yn9+eszf5HwsMbqGR83boLsO3BQQwJ4Nvhjtao1pEaNAOvR4JX0mSfCIBLeqKkQK4LhoUgRLwsST5ICFD+UVf5hiGhQ3/HMyDUYCoECrl27Top5eauySpw249MOZcbAvmHjZkXUUAOmcaMmcu3adV3nl61cI9c/+FC/X4eozhrHCs9fs36jGENq3nwFH7to9BNRQLp07ioMGHfiZYMCg+XZZ57RSQ8221h4fvjxRyntU1oVEAapJxo0cLCe17Bh+GPj+xCKqlUPTFKxEwUkd87cySogf3e6wv7+1TeS6uWXpVKFSvLVN0ndY8BDEhPn6XOV8PJ2UYywwDOYzpw54/I6CFlounbm3bRJM8maJZsuUDSGARol5M7de/J6hozSv/8A6zpYbOYtfGhNwrJjPAhWI+cfuIQ7dY6WBR4EZbTxrO9k03cgIDbTm5ktxuF+HfObBQkrLUGj0M5de5RxsN+dEDARTNyDP5n8bdu2k1Y2eBsCY69eDiZy6dIVKVigsAoLuGaBmPD+LAYNG4RJhnQZ5IMPb0n79lHqFjT3DWvYSIidgVAAifWBqUBYgy5ddhRPQsGpUKGyfPKpQ+GrW5eK5g6vz/oNG1VYnTVrjjKxUj6lxcurhFptsVhQjdxQhQqVZPJkzwFiZ89dkJLePhq/AbwuJKSeWgUYk+4U36u3ZM+WQ5776/Mu1n6Ux6bNWkj9+g3UctqqVWspXLiYxji5X4PfJC8wwiZCN32PAIw1q3VEGxXSjfcLQbR0aUf8EMpbM6dVnOugcJuAeJih+U64tfGqEB+A95JxaGjDxk1aaXbNmrWqeLyW4TW1KJrjZsvixqJnt2JTZddYR/kO450eECrfcy9P1Lt3H8maJascPnpcwBojvAYEBKkXBgEQgRclCiGIhA65c+dVRs214DG8u1ncN2zaKgZO1bVrdylUqJjExfdRxcTTvT3tow+nO6GNHNeFsmG4KlvAQQk2L1HcWwVojgMb9Q8IkrZtIvVy8xYsktFjHJ4uGH758pWlevVAy+rn6Z72fSz0/v4Bki5tessyz3EWmDx58suA/gNVCEQpbtHC4QHhOAIgcUUQVnbjuSIwu3z5Smqx3Llztx5/1H/cH08WHg2KtaLQYNUm/g/PHEI+RDIMBFpozdoNEt394VpB0oWAoFr6PbVBMv+hoGBYQgl1p1u3P1I4JbzZELGBpUr7qlEBRXfY8FHWmkA8XKHCRbXatCehxFyD7eEjR9Vjt3Xrdt3NHGN+xnTvobwpLraXrgWMhZ2796hhA4v1Dz/+RxVsTiLesV7dUOuyKKHwGBRWvJRDBg8VvGK9esWr5/vWR3d0Po6wBRVjWQW6ByF8jRw5Rv9mnuOVRDBjDYEoTDrKOa5+/PFf1hoD/8a6iRCEAF2rZi1JlzaD/PCvH+XLL79UT5XdAhwYVFOOHz+u17x9+6PHik/Cw20SqUydMk2KFfVShYaYHXj64sUO4w2Cr1nv9AYisnr1WrVCm98INMCeeavWrdsJ/MsQf2OF5Z0xVjC33z9xQg+jeGbLmsPyRBKHGhHRWo9t3rxV8uXNL/bx7VvGT2GJ8CnWdmjr1m2S6c23ZOmyFcpLg4NravypHsRrnzhPyperoN8Nz/dLL7wkYQ3CBMgsCjaJLaBTp06LX7kKsiuZIHRiNFHYIXh/2jTpZZRT+MTYZggDYbv2neSmM36Ntb+H0/NPm5iYWBchFkVh/rykhQGxyDdoEK6Cubk28NTNmzert4I1uKdNlmvZsrXUqR2iTfEc422AiEPqHtNTFbBLly4rcsCsw3j9Bw0dLnv2HpCNm10Nm8R8MX7xzONNeunFV4QECO6EwMxz7dt3QA9RJb1wkWKyb58jw+Xo0WN0jeYgCgJGQ0PEHKM8GMKgAqIEI5M7YUDBQw4xRuHneAqN98m0J9AfKDUKA553vDoUOIQwaObInkuVY36j8DDHoPsPPlUlwY4wGTxkmIDSMXTg4CE1sMA3IQxKnhQQ+BieDbt3BON/5rff0bnUt98ATcBirrtp02blw8jcWhjZBj1GyUBBg4hlbhcZZcHX7PBHPDwGYQIki7FDch0U2YYNw5T3fXz3nuD1IuQC2RN+bAzJ5lmS2z4RBQSrQmysAwbDje58/LFLFhoshLhzsTyQKQjC3l/ap5S88QgI1qLFS/UcH28fC66Q3IuY/YMGJ3jMOAEsAgUE64g77dt/QC0MQHr48BnTv6b3Bd6VHAHDeT3D63Lj5sPgYBQQhDUsH3bC+owCYsfl4crHWmogFLQ3giJ4e/rKDsMh6BUrLoMX5psndx5119rvY/7GktShYxePCgjVswlEj42LV88C2cgMXbl2Xd3yBLYSMwPsCsKCjVV18OAhaqXDLcCKmwAAIABJREFUgps+3Wse4WPg/Qmqd1dAuI4GjPZzWGZhrsW9SqpFmGO4DrEMwgSYiAgPHdpHCRCWjh07qTC+fed7miHCYN3NNbEGGerbf5AG/prfCBDzFixWaxHxGZ85szkFBgYp1Ip2EydMkpzZc2qFc56xe3R3XURR+GDIH374MKOSKiRTk2bB4joIKYGBNeXrbx3wNvZh1Xo709sy17kY2D12WEc7deqsViYWbggLPgsSHgms1Hx3E3irDdz+ow1BuBAW2dZt21sxCNHdSALRzDqDhbJi5WrqlsWyBjPBemGIhWPTlm3iW8ZXiA2C8GR5FfXSOCEjqJr2eCpQVhA+o7v3kNiesbJ8mWtWKtqyQCCQ7tn7MD1y4ybNVODnOAwrLLyJWq4Q4pN7X8YCVmhDWIGyZs1pvS8CFkHzxH/4eJdSCIcxeJhz2BKHY5RUYmHwpjBnsHAlznXNXGU/z/1vICXAaewEZh9jTK/43hogXta3nMCg3QnFbtFixzdH6apVq64ybbJa1UkGJmqugZA/J3G+CtDsA8aJwIWCYwjrmFHWUXQ9CUBkyoqOiVVPAIIsiyqePpR+AtU9eSXM9c2WhRZ4QtmyfhLRuq3uRgAPDK6tizU7EEQxIkF4ykm6YAgjSYOwxpawaPZ72i5cvFQFQvdjMT3iLMXS/Ri/x46fZHlF8ZSRLQhhD8/FDKf12dN57Dt3/ryENgizUpwC1UXJZT1j7rWP7C
ADBwxUgZP4g9C6ocq7+zn5HNcYNWqMREZ2sG6B8IsQtm3nboXp4GXRud62vUyYMFHHYqfOXWTJEofHxjoRSMu//qUCDkKWIbLpATcxsRsISCSEwfMLGWH45KmzFo9DeGWuGC85ln6gcvasiPAVBF3WJbzU9vgSc29PW+AlK1evVX6PooWSC+EVRhAMCqqpFlX3cxcuWiytnWPIfoz4lKrVAlzWG4Q2IJbMtwcPPtHYO+PRIjOal5e3ZewMC2uk3guuCS/yKVnG5T1BRZBJDsIosXjJcsXgv5P5HVm+3KFEE0tkV4CAb+JNZS0k6QhC+6GDB1WZr1uvgSIuuB5rWe2QUMs7ojex/UdMG/2O5R5q0TJC2ju9z7t271UoGfuPHDsu7aO6WNn2yAgHPzQEFAo+bAi+TDZFd8JrhmcJb48hjDg7dzggZHiLRjplNI5PnjJV6jiF+aiojoLhzhCKKx5VjH7eJbwtr97JU2f0e7zw/IuW0sw56lWoVUdatmil8B0MIcRIXvaQKfK7H75X7yVWfYgkRUHBdeTmLYe8NTxhhJQsWVqPAW8rV66iGJh8o/DG0rFjFz3GfyiwGFYm2WLBzEHQBubdzT62rI0GRmb2A8Ne41Ri6jdoZHntmTP58xdUzxNtSUZjjIKsn0DDMLQYAqJuRwzx3DUCggWDDgSvGDUmKQSLeQTczQ6VhV+XL19R+We7yA4y2nberdu3VWG5cfO2Pg8oEE+E17RRk+bCPMMoYo/lhreYviQTJnGHwFp7xvbSdxgzeqwVeuFbpqykS5NeiMt9XHoiCgiZmbDeGDJ4Rffc7ljem9s0v3Jl/VSIP3PO1UKMsgBuHbwjUBk8KBedlmxzD7ZcH7ehofUbt8iadQ44hDlujrEFex/uXAjt+1m4+/TuqzEFTDjS7L74t+ekaxcHtIW2ZLPYstXhGuX3jZs3paR3aZe83F9//bUQfA+0wE4ISgGBwfKtTTBFWCKYHK+AO7G45M2b3xJcyB4Eg+RfQsIITfE6btx4VUBIG+yJunWPFQQkd2JRsSs29uPgfYcOG67WPYLgGZAQAh1uRiAC3J80tH379NMkAPbz+fvIkaNSr35DdbW7H0O4xyoLoU2T3hXXMQQO19u7lFoNEc6CaobIZ5+7pn5lPIB9ZEE31KJFhLqjze8hw0ZqXBC/Ef7BVuMyRjkEImSspExaA7PD+o57250U7uBXXtY5GQPHUTAmT57q3lR/852DnZZee4Nc2XOq5Z59wMrA69uJyYywCLGg9urd3/Lc2Nt5+rt7954WXIxAunbtO8pep0eR/gaKYghBtUmzlmqp552aNnVYYWDQKN5GMKTfDKwH6FDdkFAV0on9sGdzmfHuLOlrE4S4D9hi8Mx2Onr0mOTMmceK9eLbEzSIQmSIRAIoWwcPHU1iGTVt6Cd7jA0LV8fO3VQ4Mm3YwihJA4mL25004LvfQIUDcKxLl27SpYvDg8ZYIonC4xAel337Dz3yOyHsEh/C+9oJpW/OvAWW0E2fkgkKwkoHD3kUwTsQ8BlLhhBAgV+4E8+AQcCdEAYGDh4mGzdu1kMYjcD4sxh+8MEHUtLHV867BX/brwFkCMGeuCGUFzwOxPlAQPVq1amnMBp+I+QbDwzWf7t1ksQfBO5/7jbX7fcyfwNXs8d3mf3EirEQe6IzZ87Jug2Od+Q40BujsMMD2ngQeO3XgZdzjkniwGJdxrd8klTqnMP8+/vX3+o39CtbTrHd7EdwxeNtiLUEIRBFof/AIUmuxffF8uxuGcZjAISTeDZ4siF4Np5PO3WL7i6tWrdVgZrvA527cFHvZ28HTwWG+MUXXyg/B9JjCMU5IqKtbN+5R7baYhzMcU/bPXvek/oNw4UsbRjcWPcwKtF/8BkEYPoCXm+38HMtxj4WXjvxzihdwMN693mIOact14awtqLU3LvvMJiBbyd20ATWAjUz/BXYD7AVeLUh4GBAroBqktgGxAKEULvMaVBBIJ1i4/vjJ06SoQmucZM4oFD0WUvw6ENAKqtU85f39u3X357+I35q8uRpyrc4TpISDJQYTwxsG6E+ukcvK/gXA6LdA0I8C8HPhmrWqqMp4c1vsyVDXP0G4VZMIvtRqHfscMg2hQoVkcE2SzmxVCZ+rkXzloIyB9FXJLeIcMZ7Dhk63PLknD1/UfCe9ezRU2N88XRAGE4c8WmuMb/Ie2Zd1oYiClnFKEVSBOjosfcluJYjHobf8BESeUCMuTKly+rf/NeyZSuFd5sdGFJLlvL1KA/hNTc8kPbwZsYN/evj41Bw2I8sxJg9dMjx7EDCUJgh5kzRol6WwcWnZGlLocXQUa58JQFdYYgYU7uR6e7du1KxUlVrzACR9RwD+Z1mXwU2aAgjfXX/IF1zkcNRDgxhPMEzhBJEOQw8sXayJ2iAD6PU8u1On3loQCezKc8LYYSwJ9Ax18KAZtLHo+RgWB87NmkSJdPevn0iCggBS7i1DZGNKSCwpi7mWBQcNSj2S/Uager6hRGBb8z6dha1FjFAYR648rFQEAS7bLkj6BDmUrhQEcmbJ79aUHH5c73z5y9q7nOsOzAphGK8BkEBgeoKxfJosNm4Hcmkkuqll6Wcr59aXA1TRiiDgdvpH9/9IMWKeTmfbY0uFATxcn2sUgxogl7nzHV1cQK1Idgcd7adGGBglI0lyhxbsmSZ4k/JkEERId4NLdi3bAUBzgPhDcHq7gkTj9UZCxMDA4EIwiXNOcSiDB4y1AVSwsLWvHkLadK4mTVg9KRk/kP4XLJshWbocW8CBAa8KG5Ze+YR4hCMMGU/BwYTFBgkjcMb626spOnSprOYKAJx+vQZZP8Bh8sVdyLu0Rsf3NDvbbTy+fMXqJv65MmTuoChdb/04ktaxwHvAtYFrOSMCRg7CiwCIEJ13rwF5Ny5C7oQIizhHWB83nvwiQbpIkxhYQEHDcOHcD8SkMk+4FxZ3soiObLnlA9vPVR8aQccbcHCRVK4cFG1rjGWWcBRzF9/LaNluR01aqwULlLc2ea+WrQQfE1tEcY1iyDC5ONYHIFgcT4K1plz5zW4mfECDRk8RLJnzSHnL17S++BmNbA9mG6Af6BmqOL9fX39ZOyYsQrXqF07RIKDgjXwVt3D+QrIrdt3NE4g1Sup1WVNDAhCBtY7IBdYId9//6RMnzknSVYYFuNMb74tCcNH6DxftXqddO3Ww3L58qwsNNmy5Xpkyj/idcqVq6SQOPgF7mFT8wXFDSsi1uWyfhWTzE3uwZibv3CJS/YV4Cp4FlGcgfhgceNaYF4JoEyO6Jd2kQ+z/NEOnoIVllgELEowbuPVNNfheKfO3RRja/Z9/vkXyvNIwdiuXaQKfRzjHT3FPLBIksWNoEn6BGEAXLCBlQClYx8KG2MBodlOCDVbtu2QfbbkH8wdBBMWNBTsxk2aaz/cuXPHY6wEfQmOeNWq1SokBQYECzwXPvbe3v1aU8Jk3ho4YJAGM6KcMk7wplKviEUrLq6X/OXZv2owL/1uJwTYW7du6z88pDPenWN5Iky7VWvWJZvFDOGPdO12zzMwwEGDh
tcepUHKKjo9GoYWPUqlkLc+aGolfvvqhSpRpCFyz0xMGhSWvW/oB1636wY4MGDbH8jgkZ6wnj3tgWuQP7Dxz0HFr/UwRq1aprloWDh4/Y8ZGjx6BUqdLo+X4vzA9bhDq166Jz51dTKUPRMbF48aV2qFixChYu+toTn/cGh7A1btwMtWs/grXfJ+dx9dp1qFSxCpo2aYrwRUvw8svt0bp1G5w8FWeXT546A7feXAB3Fi2Kho0aY8OvGzF16nQ0adoMofMXWJhBg4ORO2dOlCtTFv36D8TY8R+jZvWalldampKSLqBhg0fxzDMtEashWt63RfsiIAIiIAIiIAIiIAKZTCBZAdmzD5UrVsF9pe/FgrCF6NWrN7JnyYaOHTsj7nSyheCZp1ugYIGC6Nt/ICZNmYY333wbO3fuxsSJE1GyRCmsX/8jgoIGolSp+6yhTwvIq6+8hiwBAZg560scOHDQlIs2bf6Dtm1fwt69+1CzZm0MGDDQEHTs0NEUiB8jImx/0KDBtt+gQSNEbPgVBW68CXVq1fXgOh4djS6vvoFz55KHiDVt0gzZrsuCTSmWAk/ANDZmzpqNEiVK4aeInzFp0hTUq9fQykm/iruLFbd0Q8PCMWjgINtem6Lk7N6zD2XKVMCwYSNs2FflKtXx7dLlF6Xw84ZfkDswL+bMnQeuyn7//WWxadNm0CJStkw5i7NvvyB8MXmqbffu3dfioNXj/lL3okjBQghbuAgJCecxKSVMi2eetTDTps9A1uuy2PAy5nf3nr24t9S9Fs++FAUrLGwhJn76uVlDLsqcDoiACIiACIiACIiACIhAJhIwBWRH1E5Uq1od1apUxRtvdcVTTz2DkLHjU2Wrf/+BCMyVGxMmfOw5TstJ1oAseLL5k3aMw6hyZs9p1gweGDd2vDWMf/tts53/5ttltt/2hbYIC1+MvIF5UL5sBTv3wegPzO8kYsMvtr9gfpiF/eSTT23/3ntKo0qlKriQMhSLw6OGjxiNhIQEO9agfkPkzpELv21KTosXbfxtEwYOGoK5c0Px1VdzMG7cBMSejMMbr7+JbNdlRej8MFOaAgICMG3aDEunTq3auL3IbbY9d85cy8OixUtsf/bsObbfp3cfcGgar6tbp56dc/8LCRmPG/LfgCFDhhovhmvz3PMWhIoc87lv/wHs3XfA4ujUsZPn8mqVq+L2orfDscqEL16CfLnzokP7jhZm2bIVyJktR6r7s2rNWuTLkw9jU+4ZLUoSERABERABERABERABEfBFAqaAbN8RhdKl7kX37sn+FGlltPN/u6BcuQo4cPCw5/T2HX8ge9Zs6NAhuXG8ZWukDbmq+ODDFsZRQL77bpXtL1wYblaKti+8iKHBwzFgQBAWpzTuB/QbYMoLrQeUSZOnWON86rTptn9PiZKmgJxNOG/7n30+yePcTqWkdavkIWNbft9m5/lvwy+/4s23u6JwwcLInTMXevfpj5jYU3jrjbdw8/U3oHuPnhgaPAzDR4zE1q3J11V86GGULlnK4vh04qeWh9lz5tn+9OkzkTN7DrRr1x6DBwdj8KAhCEvxkfEkCti5G/LmR4tnWthwrqCgQaBfysm40za0LG+u3Dh27Lj5dVA5eeuttz2XUwEpdGshkCXlq7mhyJMrN8ifMn9+mFmnhg0fafvOv3//6ynkCcyDyB1R2Ltvv3NYvyIgAiIgAiIgAiIgAiLgUwRMATkVdxrlylZA9/fev2TmXn/9DTz0UEWccDmH79m3H1kCrkPrVq3tOvo6sEH9zjvdbH9Y8DDb/3Xjb7YfHr7I9idPmXpROkEDBiIwRyAid+ywc/PmhVrYWV/Otn0qIHVr/zkEq3/QELNwOBFxtq1sWbOBlhBvKV+mLPLnzec53PLZlsiRNbtn371Ro1oN3HN3STs0Y/oMy8OKFAXqyy+/sv2ly1NPUeztHz923AQLx2FUbjl89Dgef7w5ArPnNAdyWkHIixYVR+jbUeyOYtj+x04bQrXk22XImysP3nwjWUmhInPTjTchJGScc4n9zp033+Jq/uS/Ux3XjgiIgAiIgAiIgAiIgAj4EoGA06fPIDx8MfLlyYvmjzcHrRp08HbLuYQEPPHEkzZkauWqNZ7ZsNjwHjI0GGXKlMfq1WvQ7uV2oHP6sZQhQB988KE1ikePHoNDh48iJjYWLZ9tbT31HGLFtH759TdraDs+IMwL4x3QP8iu7dunH/YfPIT8ufOgaKHCiD5xAht+2QgOTaKfiVs45S4b9LQGbN68BX/8EWW+F1WrVLMZvpwZqD6Z+JmFa/H0s2b5mDptBlavWYvDR46g4C0FkCNbduzasxfBQ4Mt3MhRyWuJbN223awpJe4ugXXr1mHNuu8x/qOJF/lacG2VPHnyWn5Xrlxl5eRMVjt370Hpe0pbnNwPX/S1bTd/7AmbtYplqV61hlmRevUZYMO0QhckD0Vr1vQxJCYlmc/HrQUKglMl79v/p1M9r61fvxG6v/ueG4m2RUAEREAEREAEREAERMCnCAT8sP4nPPZYc7CBy9mT2nXodNHsSVG79qBFi1aoVLEy3uvRC7SYuCVk3ARUqlTFZsHa4nICP3X6NDp26mzH6e9AiT+XYDM/0Rm7c5fXbTG96BMxaN/hv3j44YqgQ/aevfvQsmVr1KtbD61btUHI+I/xwn9exIPlHzTfi48/+QwLw1NbF5z8RG7fgfr1GuLJfz2FNs+3tbJNmz7L40zvhBsxYhQqlH/QGu1Dg0eAU+KO+XCc+cI0bdwUffoPxOAhw0xxadnqOXMg57WrVq9F9eo1wCl/23fsjF1eU/s68X+95Fub2rhu3fro0bOPMY3YsAE1qtdC8yeeRMi4j9Cly2toVL8hatSoheXLv7NLqZhUqVwNPXv2Bqc+pvJTrnwFtGrVxpNWv779UblyNfz8y0YnOfsdMWoM/ojameqYdkRABERABERABERABETAlwgEcHXwEydiLE8J58/j6LFjNu2uO5N09OZUvBRbkyONVc25ON65FP8M97Xc9lZYeIyO0pxul0JLBtfWoHCdC+aJvxQuyOesIZJ0AbZuB2e+4poXl5MjR46aRYO/lxKWiLNzORYf97ofMTExnjJzTQ0ycAt9ONxrlbjPube5JocTL1eDZxkoZ86csVnBuM01VTj9sCPuIV3OQpCcXpfrozjC406+nWNr1q3HkaOXLq8TTr8iIAIiIAIiIAIiIAIikFkEzAcksxJXuv+cQETEz5gx80tzaOewsaSkyytm/zxFxSACIiACIiACIiACIiAC/zsBKSD/OzufuPKLLyabH8mQoSMu8kXxiQwqEyIgAiIgAiIgAiIgAiLgIiAFxAXjWtyMPXkS80IXIEq+H9fi7VOeRUAEREAEREAEROD/HYH/A/TSTRP4kbr6AAAAAElFTkSuQmCC) ChemProt RE works well with `ner_chemprot_clinical` find relationships between the following entities`CHEMICAL`: Chemical entity 
mention type; `GENE-Y`: gene/protein mention type that can be normalized or associated with a biological database identifier; `GENE-N`: gene/protein mention type that cannot be normalized to a database identifier. ###Code ner_tagger = MedicalNerModel()\ .pretrained("ner_chemprot_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_converter = NerConverterInternal() \ .setInputCols(["sentences", "tokens", "ner_tags"]) \ .setOutputCol("ner_chunk") chemprot_re_model = RelationExtractionModel()\ .pretrained("re_chemprot_clinical", "en", 'clinical/models')\ .setInputCols(["embeddings", "pos_tags", "ner_chunk", "dependencies"])\ .setOutputCol("relations")\ .setMaxSyntacticDistance(4) chemprot_pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ner_tagger, ner_converter, dependency_parser, chemprot_re_model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") chemprot_model = chemprot_pipeline.fit(empty_data) text=''' In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels (Kir6.2/SUR1, Kir6.2/SUR2A, and Kir6.2/SUR2B) reconstituted in COS-1 cells, and compared them to another meglitinide-related compound, nateglinide. Patch-clamp analysis using inside-out recording configuration showed that mitiglinide inhibits the Kir6.2/SUR1 channel currents in a dose-dependent manner (IC50 value, 100 nM) but does not significantly inhibit either Kir6.2/SUR2A or Kir6.2/SUR2B channel currents even at high doses (more than 10 microM). Nateglinide inhibits Kir6.2/SUR1 and Kir6.2/SUR2B channels at 100 nM, and inhibits Kir6.2/SUR2A channels at high concentrations (1 microM). Binding experiments on mitiglinide, nateglinide, and repaglinide to SUR1 expressed in COS-1 cells revealed that they inhibit the binding of [3H]glibenclamide to SUR1 (IC50 values: mitiglinide, 280 nM; nateglinide, 8 microM; repaglinide, 1.6 microM), suggesting that they all share a glibenclamide binding site. The insulin responses to glucose, mitiglinide, tolbutamide, and glibenclamide in MIN6 cells after chronic mitiglinide, nateglinide, or repaglinide treatment were comparable to those after chronic tolbutamide and glibenclamide treatment. These results indicate that, similar to the sulfonylureas, mitiglinide is highly specific to the Kir6.2/SUR1 complex, i.e., the pancreatic beta-cell K(ATP) channel, and suggest that mitiglinide may be a clinically useful anti-diabetic drug. ''' lmodel = LightPipeline(chemprot_model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.entity1!=rel_df.entity2] ###Output _____no_output_____ ###Markdown Train a Relation Extraction Model ###Code !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/i2b2_clinical_rel_dataset.csv # if you need to customize the DL architecture (more layers, more features etc.)
from sparknlp_jsl.training import tf_graph %tensorflow_version 1.x tf_graph.build("relation_extraction", build_params={"input_dim": 6000, "output_dim": 3, 'batch_norm':1, "hidden_layers": [300, 200], "hidden_act": "relu", 'hidden_act_l2':1}, model_location=".", model_filename="re_with_BN") tf_graph.print_model_params("relation_extraction") data = spark.read.option("header","true").format("csv").load("i2b2_clinical_rel_dataset.csv") data = data.select( 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel','dataset') data.show(10) # you only need these columns: 'sentence','firstCharEnt1','firstCharEnt2','lastCharEnt1','lastCharEnt2', "chunk1", "chunk2", "label1", "label2",'rel' # (the 'dataset' column is optional) data.groupby('dataset').count().show() #Annotation structure annotationType = T.StructType([ T.StructField('annotatorType', T.StringType(), False), T.StructField('begin', T.IntegerType(), False), T.StructField('end', T.IntegerType(), False), T.StructField('result', T.StringType(), False), T.StructField('metadata', T.MapType(T.StringType(), T.StringType()), False), T.StructField('embeddings', T.ArrayType(T.FloatType()), False) ]) #UDF function to convert train data to named entities @F.udf(T.ArrayType(annotationType)) def createTrainAnnotations(begin1, end1, begin2, end2, chunk1, chunk2, label1, label2): entity1 = sparknlp.annotation.Annotation("chunk", begin1, end1, chunk1, {'entity': label1.upper(), 'sentence': '0'}, []) entity2 = sparknlp.annotation.Annotation("chunk", begin2, end2, chunk2, {'entity': label2.upper(), 'sentence': '0'}, []) entity1.annotatorType = "chunk" entity2.annotatorType = "chunk" return [entity1, entity2] #list of valid relations rels = ["TrIP", "TrAP", "TeCP", "TrNAP", "TrCP", "PIP", "TrWP", "TeRP"] #a query to select the list of valid relations valid_rel_query = "(" + " OR ".join(["rel = '{}'".format(rel) for rel in rels]) + ")" data = data\ .withColumn("begin1i", F.expr("cast(firstCharEnt1 AS Int)"))\ .withColumn("end1i", F.expr("cast(lastCharEnt1 AS Int)"))\ .withColumn("begin2i", F.expr("cast(firstCharEnt2 AS Int)"))\ .withColumn("end2i", F.expr("cast(lastCharEnt2 AS Int)"))\ .where("begin1i IS NOT NULL")\ .where("end1i IS NOT NULL")\ .where("begin2i IS NOT NULL")\ .where("end2i IS NOT NULL")\ .where(valid_rel_query)\ .withColumn( "train_ner_chunks", createTrainAnnotations( "begin1i", "end1i", "begin2i", "end2i", "chunk1", "chunk2", "label1", "label2" ).alias("train_ner_chunks", metadata={'annotatorType': "chunk"})) train_data = data.where("dataset='train'") test_data = data.where("dataset='test'") !wget https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/generic_classifier_graph/RE_in1200D_out20.pb documenter = DocumentAssembler()\ .setInputCol("sentence")\ .setOutputCol("sentences") tokenizer = Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens") words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") pos_tagger = PerceptronModel()\ .pretrained("pos_clinical", "en", "clinical/models") \ .setInputCols(["sentences", "tokens"])\ .setOutputCol("pos_tags") dependency_parser = DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["sentences", "pos_tags", "tokens"])\ .setOutputCol("dependencies") # set training params and upload the model graph (see
../Healthcare/8.Generic_Classifier.ipynb) reApproach = RelationExtractionApproach()\ .setInputCols(["embeddings", "pos_tags", "train_ner_chunks", "dependencies"])\ .setOutputCol("relations")\ .setLabelColumn("rel")\ .setEpochsNumber(70)\ .setBatchSize(200)\ .setDropout(0.5)\ .setLearningRate(0.001)\ .setModelFile("/content/RE_in1200D_out20.pb")\ .setFixImbalance(True)\ .setFromEntity("begin1i", "end1i", "label1")\ .setToEntity("begin2i", "end2i", "label2")\ .setOutputLogsPath('/content') finisher = Finisher()\ .setInputCols(["relations"])\ .setOutputCols(["relations_out"])\ .setCleanAnnotations(False)\ .setValueSplitSymbol(",")\ .setAnnotationSplitSymbol(",")\ .setOutputAsArray(False) train_pipeline = Pipeline(stages=[ documenter, tokenizer, words_embedder, pos_tagger, dependency_parser, reApproach, finisher ]) %time rel_model = train_pipeline.fit(train_data) rel_model.stages rel_model.stages[-2] rel_model.stages[-2].write().overwrite().save('custom_RE_model') result = rel_model.transform(test_data) recall = result\ .groupBy("rel")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("recall"))\ .select( F.col("rel").alias("relation"), F.format_number("recall", 2).alias("recall"))\ .show() performance = result\ .where("relations_out <> ''")\ .groupBy("relations_out")\ .agg(F.avg(F.expr("IF(rel = relations_out, 1, 0)")).alias("precision"))\ .select( F.col("relations_out").alias("relation"), F.format_number("precision", 2).alias("precision"))\ .show() result_df = result.select(F.explode(F.arrays_zip('relations.result', 'relations.metadata')).alias("cols")) \ .select(F.expr("cols['0']").alias("relation"), F.expr("cols['1']['entity1']").alias("entity1"), F.expr("cols['1']['entity1_begin']").alias("entity1_begin"), F.expr("cols['1']['entity1_end']").alias("entity1_end"), F.expr("cols['1']['chunk1']").alias("chunk1"), F.expr("cols['1']['entity2']").alias("entity2"), F.expr("cols['1']['entity2_begin']").alias("entity2_begin"), F.expr("cols['1']['entity2_end']").alias("entity2_end"), F.expr("cols['1']['chunk2']").alias("chunk2"), F.expr("cols['1']['confidence']").alias("confidence") ) result_df.show(50, truncate=100) ###Output +--------+---------+-------------+-----------+----------------------------------------------------------+-------+-------------+-----------+-----------------------------------------------------------+----------+ |relation| entity1|entity1_begin|entity1_end| chunk1|entity2|entity2_begin|entity2_end| chunk2|confidence| +--------+---------+-------------+-----------+----------------------------------------------------------+-------+-------------+-----------+-----------------------------------------------------------+----------+ | TrAP|TREATMENT| 3| 9| lotions|PROBLEM| 34| 42| incisions| 0.902003| | PIP| PROBLEM| 196| 239| an inferior and right ventricular infarction|PROBLEM| 145| 176| 1-mm st depressions in i and avl| 0.9858937| | TeRP| TEST| 1| 20| abdominal ultrasound|PROBLEM| 54| 71| gallbladder sludge| 0.7711739| | TrAP|TREATMENT| 99| 133| ir placement of a drainage catheter|PROBLEM| 139| 173| his abdominopelvic fluid collection| 0.9975405| | PIP| TEST| 12| 23| previous w/u|PROBLEM| 35| 47| elev ferritin| 0.6097321| | TeRP| TEST| 1| 10| urinalysis|PROBLEM| 72| 95| positive red blood cells| 0.7246275| | TrAP|TREATMENT| 79| 87| lidocaine|PROBLEM| 5| 11| burning|0.52968794| | TeRP| TEST| 1| 13| a colonoscopy|PROBLEM| 44| 51| bleeding|0.74094236| | TrAP|TREATMENT| 56| 77| packed red blood cells|PROBLEM| 146| 168| an estimated blood loss|0.58224684| | TeRP| TEST| 1| 8| a ct 
abd|PROBLEM| 16| 36| 8mm obstructing stone| 0.9803084| | TrAP|TREATMENT| 198| 209| levofloxacin|PROBLEM| 111| 122| her symptoms|0.55786383| | TeRP| TEST| 1| 9| pathology|PROBLEM| 19| 77|poorly differentiated squamous cell carcinoma of the cervix|0.97840136| | TrAP|TREATMENT| 96| 99| cpap|PROBLEM| 57| 76| abdominal distention|0.97730106| | PIP| PROBLEM| 21| 26| sepsis|PROBLEM| 46| 51| fevers| 0.7468813| | TrNAP|TREATMENT| 119| 126| diuresis|PROBLEM| 1| 20| creatinine elevation| 0.8847239| | TrAP|TREATMENT| 95| 100| ativan|PROBLEM| 26| 34| nauseated|0.78614753| | PIP| PROBLEM| 283| 285| ars|PROBLEM| 294| 312| medication toxicity| 0.6639314| | PIP| PROBLEM| 70| 89| right hydronephrosis|PROBLEM| 3| 52| an 8 x 7 x 7 mm stone in the proximal right ureter| 0.6111607| | TrAP|TREATMENT| 63| 75| the procedure|PROBLEM| 17| 36| any untoward affects| 0.8464462| | PIP| PROBLEM| 50| 73| polyendocrine deficiency|PROBLEM| 112| 113| dm| 0.8639717| | TrAP|TREATMENT| 10| 15| repair|PROBLEM| 20| 32| neck fracture| 0.9890058| | TrAP|TREATMENT| 35| 52| prolonged pressure|PROBLEM| 80| 91| the bleeding| 0.5180201| | TrCP|TREATMENT| 36| 85| percutaneous endoscopic gastrostomy tube placement|PROBLEM| 3| 22| failed swallow study| 0.6812541| | PIP|TREATMENT| 26| 37| the tubefeed|PROBLEM| 14| 18| 30 lb|0.62799007| | TrAP|TREATMENT| 40| 85| postradical cystoprostatectomy with ileal loop|PROBLEM| 91| 122| locally invasive prostate cancer| 0.9172636| | TrAP|TREATMENT| 90| 99| management|PROBLEM| 104| 129| her pulmonary hypertension|0.93248284| | PIP| PROBLEM| 54| 81| paroxysmal nocturnal dyspnea|PROBLEM| 14| 48| a new two to three-pillow orthopnea| 0.938311| | TeRP| TEST| 38| 58| previous examinations|PROBLEM| 1| 15| this ecchymosis| 0.7367641| | TrAP|TREATMENT| 117| 132| the beta blocker|PROBLEM| 34| 52| a persistent wheeze| 0.5510451| | TeRP| TEST| 1| 14| the ultrasound|PROBLEM| 22| 35| biliary sludge| 0.9784904| | TrAP|TREATMENT| 117| 137| intravenous diltiazem|PROBLEM| 40| 58| atrial fibrillation| 0.9556251| | PIP| PROBLEM| 17| 29| this headache|PROBLEM| 34| 51| the worst headache|0.95420444| | TrAP|TREATMENT| 98| 100| ddi|PROBLEM| 69| 80| pancytopenia| 0.9504795| | TrAP|TREATMENT| 14| 20| removal|PROBLEM| 25| 33| cbd stone|0.97088236| | PIP| PROBLEM| 166| 223|posterior cerebral artery distribution cerebral infarction|PROBLEM| 230| 240| mass effect|0.99167824| | TrAP| PROBLEM| 69| 77| allergies|PROBLEM| 104| 108| hives|0.65732735| | TrNAP| TEST| 10| 18| the study|PROBLEM| 112| 132| a cecal cystic lesion| 0.5509148| | PIP| TEST| 8| 32| a cardiac catheterization|PROBLEM| 49| 64| 3-vessel disease| 0.6397913| | PIP| PROBLEM| 35| 40| a boil|PROBLEM| 47| 49| pus|0.94496614| | PIP| TEST| 79| 101| frozen section analysis|PROBLEM| 70| 74| tumor| 0.7076123| | TrAP| PROBLEM| 341| 362| acanthamoeba sinusitis|PROBLEM| 309| 336| any bacterial superinfection| 0.5147058| | TrAP|TREATMENT| 240| 251| levofloxacin|PROBLEM| 341| 362| acanthamoeba sinusitis| 0.9273041| | TrAP|TREATMENT| 45| 54| vancomycin|PROBLEM| 149| 190| enteric and non-enteric gram negative rods| 0.9747977| | TrNAP| TEST| 17| 31| mediastinoscopy|PROBLEM| 185| 195| anthracosis|0.95775473| | PIP| PROBLEM| 55| 96| an additional dysfunction on the left side|PROBLEM| 109| 140| wide-spread cortical dysfunction| 0.9929785| | TeRP| TEST| 1| 3| mri|PROBLEM| 44| 79| sz effects in the left temporal lobe| 0.7495965| | PIP| PROBLEM| 46| 49| pain|PROBLEM| 64| 74| her problem| 0.7741457| | TrAP|TREATMENT| 38| 45| morphine|PROBLEM| 51| 54| pain|0.87787044| | PIP| PROBLEM| 138| 171| 
anteroseptal myocardial infarction|PROBLEM| 93| 112| q waves in leads iii| 0.9737403| | PIP| PROBLEM| 62| 78| myasthenia gravis|PROBLEM| 106| 112| thymoma| 0.9233437| +--------+---------+-------------+-----------+----------------------------------------------------------+-------+-------------+-----------+-----------------------------------------------------------+----------+ only showing top 50 rows ###Markdown Load trained model from disk ###Code import pandas as pd def get_relations_df (results, col='relations'): rel_pairs=[] for rel in results[0][col]: rel_pairs.append(( rel.result, rel.metadata['entity1'], rel.metadata['entity1_begin'], rel.metadata['entity1_end'], rel.metadata['chunk1'], rel.metadata['entity2'], rel.metadata['entity2_begin'], rel.metadata['entity2_end'], rel.metadata['chunk2'], rel.metadata['confidence'] )) rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence']) return rel_df documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") clinical_ner_tagger = MedicalNerModel()\ .pretrained("ner_clinical",'en','clinical/models')\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") loaded_re_Model = RelationExtractionModel()\ .load("custom_RE_model")\ .setInputCols(["embeddings", "pos_tags", "ner_chunks", "dependencies"]) \ .setOutputCol("relations")\ .setRelationPairs(["problem-test", "problem-treatment"])\ .setPredictionThreshold(0.9)\ .setMaxSyntacticDistance(4) pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, loaded_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2, presented with a one-week history of polyuria, polydipsia, poor appetite, and vomiting. Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection. She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . 
However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ re_model_light = LightPipeline(model) annotations = re_model_light.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown End-to-end trained Models using BioBERT The latest addition to Spark NLP for Healthcare (requires Spark NLP 2.7.3+ and Spark NLP JSL 2.7.3+). These models are trained as end-to-end BERT models using BioBERT and ported into the Spark NLP ecosystem. They offer SOTA performance on most benchmark tasks and outperform our existing Relation Extraction Models. 1. Clinical ReDL ###Code documenter = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") sentencer = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentences") tokenizer = Tokenizer()\ .setInputCols(["sentences"])\ .setOutputCol("tokens") words_embedder = WordEmbeddingsModel()\ .pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentences", "tokens"])\ .setOutputCol("embeddings") clinical_ner_tagger = MedicalNerModel()\ .pretrained("ner_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ner_converter = NerConverterInternal() \ .setInputCols(["sentences", "tokens", "ner_tags"]) \ .setOutputCol("ner_chunks") dependency_parser = DependencyParserModel()\ .pretrained("dependency_conllu", "en")\ .setInputCols(["sentences", "ner_tags", "tokens"])\ .setOutputCol("dependencies") clinical_re_ner_chunk_filter = RENerChunksFilter() \ .setInputCols(["ner_chunks", "dependencies"])\ .setOutputCol("re_ner_chunks")\ .setMaxSyntacticDistance(4)\ .setRelationPairs(["problem-test", "problem-treatment"]) # we can set the possible relation pairs (if not set, all the relations will be calculated) clinical_re_Model = RelationExtractionDLModel() \ .pretrained('redl_clinical_biobert', "en", "clinical/models")\ .setPredictionThreshold(0.9)\ .setInputCols(["re_ner_chunks", "sentences"]) \ .setOutputCol("relations") pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, clinical_ner_tagger, ner_chunker, dependency_parser, clinical_re_ner_chunk_filter, clinical_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) text ="""A 28-year-old female with a history of gestational diabetes mellitus diagnosed eight years prior to presentation and subsequent type two diabetes mellitus ( T2DM ), one prior episode of HTG-induced pancreatitis
three years prior to presentation, associated with an acute hepatitis , and obesity with a body mass index ( BMI ) of 33.5 kg/m2 , presented with a one-week history of polyuria , polydipsia , poor appetite , and vomiting . Two weeks prior to presentation , she was treated with a five-day course of amoxicillin for a respiratory tract infection . She was on metformin , glipizide , and dapagliflozin for T2DM and atorvastatin and gemfibrozil for HTG . She had been on dapagliflozin for six months at the time of presentation. Physical examination on presentation was significant for dry oral mucosa ; significantly , her abdominal examination was benign with no tenderness , guarding , or rigidity . Pertinent laboratory findings on admission were : serum glucose 111 mg/dl , bicarbonate 18 mmol/l , anion gap 20 , creatinine 0.4 mg/dL , triglycerides 508 mg/dL , total cholesterol 122 mg/dL , glycated hemoglobin ( HbA1c ) 10% , and venous pH 7.27 . Serum lipase was normal at 43 U/L . Serum acetone levels could not be assessed as blood samples kept hemolyzing due to significant lipemia . The patient was initially admitted for starvation ketosis , as she reported poor oral intake for three days prior to admission . However , serum chemistry obtained six hours after presentation revealed her glucose was 186 mg/dL , the anion gap was still elevated at 21 , serum bicarbonate was 16 mmol/L , triglyceride level peaked at 2050 mg/dL , and lipase was 52 U/L . The β-hydroxybutyrate level was obtained and found to be elevated at 5.29 mmol/L - the original sample was centrifuged and the chylomicron layer removed prior to analysis due to interference from turbidity caused by lipemia again . The patient was treated with an insulin drip for euDKA and HTG with a reduction in the anion gap to 13 and triglycerides to 1400 mg/dL , within 24 hours . Her euDKA was thought to be precipitated by her respiratory tract infection in the setting of SGLT2 inhibitor use . The patient was seen by the endocrinology service and she was discharged on 40 units of insulin glargine at night , 12 units of insulin lispro with meals , and metformin 1000 mg two times a day . It was determined that all SGLT2 inhibitors should be discontinued indefinitely . She had close follow-up with endocrinology post discharge . """ lmodel = LightPipeline(model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!="O")] rel_df ###Output _____no_output_____ ###Markdown 2. Clinical Temporal Events ReDL ###Code events_ner_tagger = MedicalNerModel()\ .pretrained("ner_events_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") events_re_ner_chunk_filter = RENerChunksFilter() \ .setInputCols(["ner_chunks", "dependencies"])\ .setOutputCol("re_ner_chunks") events_re_Model = RelationExtractionDLModel() \ .pretrained('redl_temporal_events_biobert', "en", "clinical/models")\ .setPredictionThreshold(0.5)\ .setInputCols(["re_ner_chunks", "sentences"]) \ .setOutputCol("relations") pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, events_ner_tagger, ner_chunker, dependency_parser, events_re_ner_chunk_filter, events_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) text ="She is diagnosed as cancer in 1991. 
Then she was admitted to Mayo Clinic in May 2000 and discharged in October 2001" lmodel = LightPipeline(model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df = rel_df[(rel_df.relation!="O")] rel_df[(rel_df.relation!="O")&(rel_df.entity1!=rel_df.entity2)] text ="""She is admitted to The John Hopkins Hospital 2 days ago with a history of gestational diabetes mellitus diagnosed. She was seen by the endocrinology service and she was discharged on 03/02/2018 on 40 units of insulin glargine, 12 units of insulin lispro, and metformin 1000 mg two times a day. She had close follow-up with endocrinology post discharge. """ annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df.confidence = rel_df.confidence.astype(float) rel_df[(rel_df.relation!="O")] ###Output _____no_output_____ ###Markdown 3. Human Phenotype - Gene ReDL ###Code pgr_ner_tagger = MedicalNerModel()\ .pretrained("ner_human_phenotype_gene_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") pgr_re_ner_chunk_filter = RENerChunksFilter() \ .setInputCols(["ner_chunks", "dependencies"])\ .setOutputCol("re_ner_chunks")\ .setMaxSyntacticDistance(4) pgr_re_Model = RelationExtractionDLModel() \ .pretrained('redl_human_phenotype_gene_biobert', "en", "clinical/models")\ .setPredictionThreshold(0.5)\ .setInputCols(["re_ner_chunks", "sentences"]) \ .setOutputCol("relations") pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, pgr_ner_tagger, ner_chunker, dependency_parser, pgr_re_ner_chunk_filter, pgr_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) text = "She has a retinal degeneration, hearing loss and renal failure, short stature, \ Mutations in the SH3PXD2B gene coding for the Tks4 protein are responsible for the autosomal recessive." lmodel = LightPipeline(model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df = rel_df[(rel_df.relation!="0")] rel_df ###Output _____no_output_____ ###Markdown 4. Drug-Drug Interaction ReDL ###Code ddi_ner_tagger = MedicalNerModel()\ .pretrained("ner_posology", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") ddi_re_ner_chunk_filter = RENerChunksFilter() \ .setInputCols(["ner_chunks", "dependencies"])\ .setOutputCol("re_ner_chunks")\ .setMaxSyntacticDistance(4) ddi_re_Model = RelationExtractionDLModel() \ .pretrained('redl_drug_drug_interaction_biobert', "en", "clinical/models")\ .setPredictionThreshold(0.9)\ .setInputCols(["re_ner_chunks", "sentences"]) \ .setOutputCol("relations") pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, ddi_ner_tagger, ner_chunker, dependency_parser, ddi_re_ner_chunk_filter, ddi_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) text='When carbamazepine is withdrawn from the combination therapy, aripiprazole dose should then be reduced. \ If additional adrenergic drugs are to be administered by any route, \ they should be used with caution because the pharmacologically predictable sympathetic effects of Metformin may be potentiated' lmodel = LightPipeline(model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df ###Output _____no_output_____ ###Markdown 5.
Chemical–Protein Interactions (ChemProt) ReDL ###Code chemprot_ner_tagger = MedicalNerModel()\ .pretrained("ner_chemprot_clinical", "en", "clinical/models")\ .setInputCols("sentences", "tokens", "embeddings")\ .setOutputCol("ner_tags") chemprot_re_ner_chunk_filter = RENerChunksFilter() \ .setInputCols(["ner_chunks", "dependencies"])\ .setOutputCol("re_ner_chunks")\ .setMaxSyntacticDistance(4) chemprot_re_Model = RelationExtractionDLModel() \ .pretrained('redl_chemprot_biobert', "en", "clinical/models")\ .setPredictionThreshold(0.9)\ .setInputCols(["re_ner_chunks", "sentences"]) \ .setOutputCol("relations") pipeline = Pipeline(stages=[ documenter, sentencer, tokenizer, words_embedder, pos_tagger, chemprot_ner_tagger, ner_chunker, dependency_parser, chemprot_re_ner_chunk_filter, chemprot_re_Model ]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = pipeline.fit(empty_data) text=''' In this study, we examined the effects of mitiglinide on various cloned K(ATP) channels (Kir6.2/SUR1, Kir6.2/SUR2A, and Kir6.2/SUR2B) reconstituted in COS-1 cells, and compared them to another meglitinide-related compound, nateglinide. Patch-clamp analysis using inside-out recording configuration showed that mitiglinide inhibits the Kir6.2/SUR1 channel currents in a dose-dependent manner (IC50 value, 100 nM) but does not significantly inhibit either Kir6.2/SUR2A or Kir6.2/SUR2B channel currents even at high doses (more than 10 microM). Nateglinide inhibits Kir6.2/SUR1 and Kir6.2/SUR2B channels at 100 nM, and inhibits Kir6.2/SUR2A channels at high concentrations (1 microM). Binding experiments on mitiglinide, nateglinide, and repaglinide to SUR1 expressed in COS-1 cells revealed that they inhibit the binding of [3H]glibenclamide to SUR1 (IC50 values: mitiglinide, 280 nM; nateglinide, 8 microM; repaglinide, 1.6 microM), suggesting that they all share a glibenclamide binding site. The insulin responses to glucose, mitiglinide, tolbutamide, and glibenclamide in MIN6 cells after chronic mitiglinide, nateglinide, or repaglinide treatment were comparable to those after chronic tolbutamide and glibenclamide treatment. These results indicate that, similar to the sulfonylureas, mitiglinide is highly specific to the Kir6.2/SUR1 complex, i.e., the pancreatic beta-cell K(ATP) channel, and suggest that mitiglinide may be a clinically useful anti-diabetic drug. ''' lmodel = LightPipeline(model) annotations = lmodel.fullAnnotate(text) rel_df = get_relations_df (annotations) rel_df[rel_df.entity1!=rel_df.entity2] ###Output _____no_output_____
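###Markdown As a small post-processing sketch (an addition for illustration, not part of the original tutorial): since `get_relations_df` returns a plain pandas DataFrame, the extracted ChemProt relations can be ranked by model confidence. The `confidence` values arrive from the annotation metadata as strings, so they are cast to float first, mirroring the cast already used in the temporal-events section above. ###Code
# Illustrative pandas post-processing: rank cross-entity relations by confidence.
rel_df = rel_df.copy()
rel_df["confidence"] = rel_df["confidence"].astype(float)  # metadata values are strings
top_rels = rel_df[rel_df.entity1 != rel_df.entity2].sort_values("confidence", ascending=False)
top_rels.head(10)
###Output _____no_output_____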
session_02/Assignment_2_NoSQL_(2022).ipynb
###Markdown Install Dependencies Reference: * [Google Cloud Firestore Docs](https://cloud.google.com/firestore/docs/how-to) * [Google Cloud Firestore PyPI](https://pypi.org/project/google-cloud-firestore/) ###Code !pip install google-cloud-firestore ###Output _____no_output_____ ###Markdown Import Python Libraries ###Code from google.cloud import firestore import json import os, re ###Output _____no_output_____ ###Markdown Set Environment Variable You can download the file called **iaa_firestore_sa.json** from Moodle. If you cannot find it for any reason, shoot me an email at [email protected] and I'll provide it. ###Code # Set the environment variable for Google Cloud Firestore. # You can download the file called iaa_firestore_sa.json from Moodle. # If you cannot find it for any reason, shoot me an email # at [email protected] and I'll provide it. # This is a service account that allows read/write access to Google Firestore. os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="iaa_firestore_sa.json" # Connect to Google Cloud Firestore and create the db object. # If this errors out, then it is most likely an authentication issue. # Make sure you have set the environment variable with the provided service account file, iaa_firestore_sa.json (as shown above). db = firestore.Client() # Specify Firestore Params collection_id = 'iaa_2022' ###Output _____no_output_____ ###Markdown Read from Google Cloud Firestore Use the framework below, along with code that you add, to read a record from Google Cloud Firestore. **Instructions:** 1. Read a record/document from Google Cloud Firestore. The record/document is located in a Collection called "iaa_2022" and the record/document name is "dan_zaratsian". 2. Copy that record/document into a variable called "test_record". (Or you can name the variable something else if you prefer.) ###Code # Code to read a record/document from Firestore doc_id = 'dan_zaratsian' fs_obj = db.collection(collection_id).document(doc_id) ''' Add code here to read the record/document from the 'iaa_2022' collection with document id 'dan_zaratsian' Place the results in a variable called "test_record" ''' # Print the test_record that was read from Google Firestore print(test_record) ###Output _____no_output_____ ###Markdown Write to Google Cloud Firestore Use the framework below, along with your own code, to write a record to Google Cloud Firestore. **Instructions:** 1. The "test_record" variable that you created in the previous step should be a JSON payload. I want you to add a new key-value field to the test_record. You can name the key whatever you want and the value(s) can also be whatever you choose. 2. Write the modified "test_record" JSON payload back to the Firestore database. Use the collection ID "iaa_2022" and the document id should be your name. You can use the code framework, shown below, as a guide. ###Code # Add a new key-value field to the test_record. test_record['my_new_key'] = 'my_test_value' # Code to write a record/document to Firestore your_doc_id = 'Your Name Here' fs_obj = db.collection(collection_id).document(your_doc_id) ''' Add code here to write the updated test_record into the Firestore database. ''' # NOTE: You should also be able to read the record that you sent to Firestore (just to confirm it worked) ###Output _____no_output_____
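###Markdown For reference, below is a minimal sketch of one possible solution to the read and write exercises above. It assumes the standard `google-cloud-firestore` client API (`DocumentReference.get()`, `DocumentSnapshot.to_dict()`, and `DocumentReference.set()`); the document id used for the write is a hypothetical placeholder, so substitute your own name. ###Code
# Minimal solution sketch (standard google-cloud-firestore client API assumed).
# Read: fetch the document snapshot and convert it to a Python dict.
snapshot = db.collection(collection_id).document('dan_zaratsian').get()
test_record = snapshot.to_dict()

# Modify: add an arbitrary new key-value field.
test_record['my_new_key'] = 'my_test_value'

# Write: store the modified record under your own document id ('your_name' is a placeholder).
db.collection(collection_id).document('your_name').set(test_record)

# Confirm the write by reading the record back.
print(db.collection(collection_id).document('your_name').get().to_dict())
###Output _____no_output_____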
notebooks/getting_started.ipynb
###Markdown Getting Started First, generate some test data. ###Code %load_ext autoreload %autoreload 2 import pandas as pd from annotation_driven_dataframe_calcs.annotation_driven_dataframe_calcs import expand_for_timesteps from annotation_driven_dataframe_calcs.column_names import TIMESTEP_NO, ACCOUNT_NO, PARAM_C, PARAM_B, PARAM_A fake_data = pd.DataFrame({ ACCOUNT_NO: [1, 2, 3, 4], PARAM_A: [201908, 201907, 201906, 201905], PARAM_B: [10, 15, 20, 25], PARAM_C: [5, 6, 7, 8] }) result = expand_for_timesteps(fake_data, 1, 3) result from annotation_driven_dataframe_calcs.first_stage_calcs import generate_series_a_prime new_series = generate_series_a_prime(result) new_series ###Output 2020-12-14 19:42:52.729 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 1 time_step 1 2020-12-14 19:42:52.730 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 1_1 in the cache 2020-12-14 19:42:52.733 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 1 time_step 2 2020-12-14 19:42:52.734 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 1_1 in the cache 2020-12-14 19:42:52.736 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 1_2 in the cache 2020-12-14 19:42:52.738 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 1 time_step 3 2020-12-14 19:42:52.740 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 1_2 in the cache 2020-12-14 19:42:52.742 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 1_3 in the cache 2020-12-14 19:42:52.744 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 2 time_step 1 2020-12-14 19:42:52.746 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 2_1 in the cache 2020-12-14 19:42:52.750 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 2 time_step 2 2020-12-14 19:42:52.751 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 2_1 in the cache 2020-12-14 19:42:52.753 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 2_2 in the cache 2020-12-14 19:42:52.755 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 2 time_step 3 2020-12-14 19:42:52.756 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 2_2 in the cache 2020-12-14 19:42:52.759 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 2_3 in the cache 2020-12-14 19:42:52.759 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 3 time_step 1 2020-12-14 19:42:52.760 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 3_1 in the cache 2020-12-14 19:42:52.762 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 3 time_step 2
2020-12-14 19:42:52.763 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 3_1 in the cache 2020-12-14 19:42:52.765 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 3_2 in the cache 2020-12-14 19:42:52.767 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 3 time_step 3 2020-12-14 19:42:52.769 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 3_2 in the cache 2020-12-14 19:42:52.771 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 3_3 in the cache 2020-12-14 19:42:52.772 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 4 time_step 1 2020-12-14 19:42:52.773 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 4_1 in the cache 2020-12-14 19:42:52.774 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 4 time_step 2 2020-12-14 19:42:52.775 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 4_1 in the cache 2020-12-14 19:42:52.776 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 4_2 in the cache 2020-12-14 19:42:52.777 | DEBUG | annotation_driven_dataframe_calcs.first_stage_calcs:closed_generate_series_a_prime_mapper:48 - rolling window is currently at 4 time_step 3 2020-12-14 19:42:52.777 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 4_2 in the cache 2020-12-14 19:42:52.778 | DEBUG | annotation_driven_dataframe_calcs.caching_tools:__getitem__:12 - searching for key 4_3 in the cache ###Markdown JupyterDash The `jupyter-dash` package makes it easy to develop Plotly Dash apps from the Jupyter Notebook and JupyterLab. Just replace the standard `dash.Dash` class with the `jupyter_dash.JupyterDash` subclass. ###Code from jupyter_dash import JupyterDash import dash import dash_core_components as dcc import dash_html_components as html import pandas as pd ###Output _____no_output_____ ###Markdown When running in JupyterHub or Binder, call the `infer_jupyter_proxy_config` function to detect the proxy configuration.
###Code JupyterDash.infer_jupyter_proxy_config() ###Output _____no_output_____ ###Markdown Load and preprocess data ###Code df = pd.read_csv('https://plotly.github.io/datasets/country_indicators.csv') available_indicators = df['Indicator Name'].unique() ###Output _____no_output_____ ###Markdown Construct the app and callbacks ###Code external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = JupyterDash(__name__, external_stylesheets=external_stylesheets) # Create server variable with Flask server object for use with gunicorn server = app.server app.layout = html.Div([ html.Div([ html.Div([ dcc.Dropdown( id='crossfilter-xaxis-column', options=[{'label': i, 'value': i} for i in available_indicators], value='Fertility rate, total (births per woman)' ), dcc.RadioItems( id='crossfilter-xaxis-type', options=[{'label': i, 'value': i} for i in ['Linear', 'Log']], value='Linear', labelStyle={'display': 'inline-block'} ) ], style={'width': '49%', 'display': 'inline-block'}), html.Div([ dcc.Dropdown( id='crossfilter-yaxis-column', options=[{'label': i, 'value': i} for i in available_indicators], value='Life expectancy at birth, total (years)' ), dcc.RadioItems( id='crossfilter-yaxis-type', options=[{'label': i, 'value': i} for i in ['Linear', 'Log']], value='Linear', labelStyle={'display': 'inline-block'} ) ], style={'width': '49%', 'float': 'right', 'display': 'inline-block'}) ], style={ 'borderBottom': 'thin lightgrey solid', 'backgroundColor': 'rgb(250, 250, 250)', 'padding': '10px 5px' }), html.Div([ dcc.Graph( id='crossfilter-indicator-scatter', hoverData={'points': [{'customdata': 'Japan'}]} ) ], style={'width': '49%', 'display': 'inline-block', 'padding': '0 20'}), html.Div([ dcc.Graph(id='x-time-series'), dcc.Graph(id='y-time-series'), ], style={'display': 'inline-block', 'width': '49%'}), html.Div(dcc.Slider( id='crossfilter-year--slider', min=df['Year'].min(), max=df['Year'].max(), value=df['Year'].max(), marks={str(year): str(year) for year in df['Year'].unique()}, step=None ), style={'width': '49%', 'padding': '0px 20px 20px 20px'}) ]) @app.callback( dash.dependencies.Output('crossfilter-indicator-scatter', 'figure'), [dash.dependencies.Input('crossfilter-xaxis-column', 'value'), dash.dependencies.Input('crossfilter-yaxis-column', 'value'), dash.dependencies.Input('crossfilter-xaxis-type', 'value'), dash.dependencies.Input('crossfilter-yaxis-type', 'value'), dash.dependencies.Input('crossfilter-year--slider', 'value')]) def update_graph(xaxis_column_name, yaxis_column_name, xaxis_type, yaxis_type, year_value): dff = df[df['Year'] == year_value] return { 'data': [dict( x=dff[dff['Indicator Name'] == xaxis_column_name]['Value'], y=dff[dff['Indicator Name'] == yaxis_column_name]['Value'], text=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'], customdata=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'], mode='markers', marker={ 'size': 25, 'opacity': 0.7, 'color': 'orange', 'line': {'width': 2, 'color': 'purple'} } )], 'layout': dict( xaxis={ 'title': xaxis_column_name, 'type': 'linear' if xaxis_type == 'Linear' else 'log' }, yaxis={ 'title': yaxis_column_name, 'type': 'linear' if yaxis_type == 'Linear' else 'log' }, margin={'l': 40, 'b': 30, 't': 10, 'r': 0}, height=450, hovermode='closest' ) } def create_time_series(dff, axis_type, title): return { 'data': [dict( x=dff['Year'], y=dff['Value'], mode='lines+markers' )], 'layout': { 'height': 225, 'margin': {'l': 20, 'b': 30, 'r': 10, 't': 10}, 'annotations': [{ 'x': 0, 'y': 0.85, 'xanchor': 
'left', 'yanchor': 'bottom', 'xref': 'paper', 'yref': 'paper', 'showarrow': False, 'align': 'left', 'bgcolor': 'rgba(255, 255, 255, 0.5)', 'text': title }], 'yaxis': {'type': 'linear' if axis_type == 'Linear' else 'log'}, 'xaxis': {'showgrid': False} } } @app.callback( dash.dependencies.Output('x-time-series', 'figure'), [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'), dash.dependencies.Input('crossfilter-xaxis-column', 'value'), dash.dependencies.Input('crossfilter-xaxis-type', 'value')]) def update_y_timeseries(hoverData, xaxis_column_name, axis_type): country_name = hoverData['points'][0]['customdata'] dff = df[df['Country Name'] == country_name] dff = dff[dff['Indicator Name'] == xaxis_column_name] title = '<b>{}</b><br>{}'.format(country_name, xaxis_column_name) return create_time_series(dff, axis_type, title) @app.callback( dash.dependencies.Output('y-time-series', 'figure'), [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'), dash.dependencies.Input('crossfilter-yaxis-column', 'value'), dash.dependencies.Input('crossfilter-yaxis-type', 'value')]) def update_x_timeseries(hoverData, yaxis_column_name, axis_type): dff = df[df['Country Name'] == hoverData['points'][0]['customdata']] dff = dff[dff['Indicator Name'] == yaxis_column_name] return create_time_series(dff, axis_type, yaxis_column_name) ###Output _____no_output_____ ###Markdown Serve the app using `run_server`. Unlike the standard `Dash.run_server` method, the `JupyterDash.run_server` method doesn't block execution of the notebook. It serves the app in a background thread, making it possible to run other notebook calculations while the app is running. This makes it possible to iteratively update the app without rerunning the potentially expensive data processing steps. ###Code app.run_server() ###Output _____no_output_____ ###Markdown By default, `run_server` displays a URL that you can click on to open the app in a browser tab. The `mode` argument to `run_server` can be used to change this behavior. Setting `mode="inline"` will display the app directly in the notebook output cell.
###Code app.run_server(mode="inline") ###Output _____no_output_____ ###Markdown Getting Started with SYMPAIS [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ethanluoyc/sympais/blob/master/notebooks/getting_started.ipynb) Setup ###Code try: import google.colab IN_COLAB = True except: IN_COLAB = False ###Output _____no_output_____ ###Markdown Install SYMPAIS ###Code # (TODO(yl): Simplify when we make this public) GIT_TOKEN = "" if IN_COLAB: !pip install -U pip setuptools wheel if GIT_TOKEN: !pip install git+https://{GIT_TOKEN}@github.com/ethanluoyc/sympais.git#egg=sympais else: !pip install git+https://github.com/ethanluoyc/sympais.git#egg=sympais ###Output _____no_output_____ ###Markdown Download and install pre-built RealPaver v0.4 ###Code if IN_COLAB: !curl -L "https://drive.google.com/uc?export=download&id=1_Im0Ot5TjkzaWfid657AV_gyMpnPuVRa" -o realpaver !chmod u+x realpaver !cp realpaver /usr/local/bin import jax import jax.numpy as jnp from sympais import tasks from sympais import methods from sympais.methods import run_sympais, run_dmc import seaborn as sns import matplotlib.pyplot as plt import matplotlib import numpy as onp import math %load_ext autoreload %autoreload 2 %matplotlib inline ###Output _____no_output_____ ###Markdown Load a task ###Code task = tasks.Sphere(nd=3) task.profile task.constraints task.domains ###Output _____no_output_____ ###Markdown Run DMC baseline ###Code dmc_output = run_dmc(task, seed=0, num_samples=int(1e8)) print(dmc_output) ###Output _____no_output_____ ###Markdown Run SYMPAIS ###Code sympais_output = run_sympais( task, key=jax.random.PRNGKey(0), num_samples=int(1e6), num_proposals=100, tune=False, init='realpaver', num_warmup_steps=500, window_size=100 ) print(sympais_output) ###Output _____no_output_____ ###Markdown Create your own problem In this section, we will show how to implement a new probabilistic analysis task similar to the sphere task above. A probabilistic analysis `Task` consists of an input `Profile` $p(\mathbf{x})$ and a list of constraints `cs`. A user creates a new `Task` either by calling the superclass constructor or by subclassing the base class. Consider a two-dimensional problem where we would like to know the probability that the inputs $x \in [-10, 10]$ and $y \in [-10, 10]$ are jointly in the interior of a two-dimensional _cube_. The set of constraints is $$\begin{align} x + y &\leq 1.0, \\ x + y &\geq -1.0, \\ y - x &\geq -1.0, \\ y - x &\leq 1.0. \end{align}$$ First, let's import the related modules used for defining tasks. ###Code import sympy from sympais import tasks from sympais import profiles from sympais import distributions as dist ###Output _____no_output_____ ###Markdown Independent profile We will first show how to define a task when the input variables are _independent_. We use `Profile` for defining the input distribution and SymPy expressions for defining the constraints. The `Profile` uses the following interface. To create a customized profile, the user needs to implement the `profile.log_prob` and `profile.sample` functions. Note that unlike numpyro distributions, the samples are represented as a dictionary from variable names to their values. This is so that it is easier to integrate with a symbolic execution engine.
###Code help(profiles.Profile) ###Output _____no_output_____ ###Markdown When the input random variables are independent, we provide a convenience `IndependentProfile` class which allows you to specify the per-component distribution. `IndependentProfile` implements `sample` and `log_prob` by dispatching to the individual components and then aggregating the results. We are now ready to define a task for the `cube` problem. The code is shown below. ###Code class IndependentCubeTask(tasks.Task): def __init__(self): profile = profiles.IndependentProfile({ "x": dist.Normal(loc=-2, scale=1), "y": dist.Normal(loc=-2, scale=1) }) domains = {"x": (-10., 10.), "y": (-10., 10.)} b = 1.0 x = sympy.Symbol("x") y = sympy.Symbol("y") c1 = x + y <= b # type: sympy.Expr c2 = x + y >= -b # type: sympy.Expr c3 = y - x >= -b # type: sympy.Expr c4 = y - x <= b # type: sympy.Expr super().__init__(profile, [c1, c2, c3, c4], domains) ###Output _____no_output_____ ###Markdown Let us create some helper functions for visualizing the profile and the constraints. ###Code b = 1. def f1(x): return b - x def f2(x): return -b - x def f3(x): return -b + x def f4(x): return b + x x = sympy.Symbol('x') x1, = sympy.solve(f1(x)-f3(x)) x2, = sympy.solve(f1(x)-f4(x)) x3, = sympy.solve(f2(x)-f3(x)) x4, = sympy.solve(f2(x)-f4(x)) y1 = f1(x1) y2 = f1(x2) y3 = f2(x3) y4 = f2(x4) N = 200 X, Y = jnp.meshgrid(jnp.linspace(-4,4,N), jnp.linspace(-4, 4, N)) xr = jnp.linspace(-3, 3, 100) def plot_constraints(ax): ax.plot(x1, y1, 'k', markersize=5) ax.plot(x2, y2, 'k', markersize=5) ax.plot(x3, y3, 'k', markersize=5) ax.plot(x4, y4, 'k', markersize=5) ax.fill([x1,x2,x4,x3],[y1,y2,y4,y3],'gray', alpha=0.5); y1r = f1(xr) y2r = f2(xr) y3r = f3(xr) y4r = f4(xr) ax.plot(xr, y1r, 'w--') ax.plot(xr, y2r, 'w--') ax.plot(xr, y3r, 'w--') ax.plot(xr, y4r, 'w--') cube_task = IndependentCubeTask() logp = cube_task.profile.log_prob( {'x': X.reshape(-1), "y": Y.reshape(-1)}).reshape((N, N)) fig, ax = plt.subplots(1, 1, figsize=(3,3)) ax.contourf(X, Y, logp, levels=20, cmap='Blues_r') plot_constraints(ax) ax.set(xlim=(-3,2), ylim=(-3,2), xlabel='$x$', ylabel='$y$'); ###Output _____no_output_____ ###Markdown Correlated profile In the general case, the inputs may be correlated. In this case, the user needs to provide a custom implementation of `Profile`. We will show how to do this for the case where $x$ and $y$ are jointly Gaussian. ###Code from numpyro import distributions as numpyro_dist class CorrelatedProfile(profiles.Profile): def __init__(self): self._dist = numpyro_dist.MultivariateNormal( loc=jnp.array([-2, -2]), covariance_matrix=jnp.array([[1.0, 0.8], [0.8, 1.5]]) ) def sample(self, rng, sample_shape=()): samples = self._dist.sample(rng, sample_shape=sample_shape) # We need the [..., ] indexing to maintain batch dimensions. return {'x': samples[..., 0], 'y': samples[..., 1]} def log_prob(self, samples): samples = jnp.stack([samples['x'], samples['y']], -1) return self._dist.log_prob(samples) class CorrelatedCubeTask(tasks.Task): def __init__(self): b = 1.0 x = sympy.Symbol("x") y = sympy.Symbol("y") c1 = x + y <= b # type: sympy.Expr c2 = x + y >= -b # type: sympy.Expr c3 = y - x >= -b # type: sympy.Expr c4 = y - x <= b # type: sympy.Expr profile = CorrelatedProfile() domains = {"x": (-10., 10.), "y": (-10., 10.)} super().__init__(profile, [c1, c2, c3, c4], domains) ###Output _____no_output_____ ###Markdown All of the benchmarks are defined similarly to the examples shown above.
If you are interested, check out the source code in src/sympais/tasks for more examples.
###Code
correlated_cube_task = CorrelatedCubeTask()
logp = correlated_cube_task.profile.log_prob(
    {'x': X.reshape(-1), "y": Y.reshape(-1)}).reshape((N, N))
fig, ax = plt.subplots(1, 1, figsize=(3,3))
ax.contourf(X, Y, logp, levels=20, cmap='Blues_r')
plot_constraints(ax)
ax.set(xlim=(-3,2), ylim=(-3,2), xlabel='$x$', ylabel='$y$');
###Output
_____no_output_____
###Markdown Run samplers
Now that we have our new task definitions, let's run DMC and SYMPAIS on these tasks.
###Code
dmc_output = run_dmc(correlated_cube_task, seed=0, num_samples=int(1e8), batch_size=int(1e6))
print(dmc_output)

sympais_output = run_sympais(
    correlated_cube_task,
    key=jax.random.PRNGKey(0),
    num_samples=int(1e6),
    num_proposals=100,
    tune=False,
    init='realpaver',
    num_warmup_steps=500,
    window_size=100
)
print(sympais_output)
###Output
_____no_output_____
###Markdown Noisy Labeling of Clinical Notes
This notebook allows you to assign "noisy" labels to clinical notes using heuristics known as labelling functions (LFs).
Because this is a largely exploratory process, it may be useful to run the following cell, which allows you to modify the `NoisyLabeler` code without restarting the kernel.
###Code
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown Load the Data
First, you must load some text to label. You will want to have some source of "gold" labels to determine the accuracy of your labelling functions. Your labels should be `1`, indicating the presence of a disease, or `0`, indicating its absence. The following code assumes your data is in a [JSON Lines](https://jsonlines.org/) format, with the fields `"text"` and `"label"`, but you can load the data any way you like.
###Code
gold_data_filepath = "../data/MIMIC-III-HEART-DISEASE/valid.jsonl"

import json
from pathlib import Path
import numpy as np

valid = [json.loads(line) for line in Path(gold_data_filepath).read_text().strip().split("\n")]
texts = [example["text"] for example in valid]
labels = np.asarray([example["label"] for example in valid])
###Output
_____no_output_____
###Markdown (Noisy) Label the Data
First, initialize the labeler.
> Note, this can take a few minutes as it loads the language model and resources into memory.
###Code
from deep_patient_cohorts import NoisyLabeler

labeler = NoisyLabeler()
###Output
_____no_output_____
###Markdown Although optional, it makes sense to preprocess the text with spaCy only once. We can do this easily like so:
> Note, this will take a few minutes per 1000 documents
###Code
processed_texts = labeler.preprocess(texts)
###Output
_____no_output_____
###Markdown Finally, we can label the data and check the accuracy of each labelling function
###Code
noisy_labels = labeler(texts)
labeler.accuracy(noisy_labels=noisy_labels, gold_labels=labels)
###Output
_____no_output_____
###Markdown Adding New LFs
You may need to continually modify your LFs until they reach acceptable accuracy. The following example demonstrates how to add a new LF to the existing `labeler`, and evaluate its accuracy.
###Code
from typing import List
from deep_patient_cohorts import POSITIVE, NEGATIVE, ABSTAIN

def heart_disease(self, texts: List[str]) -> List[int]:
    return [POSITIVE if "heart disease" in text.text.lower() else ABSTAIN for text in texts]

labeler.add(heart_disease)
noisy_labels = labeler(processed_texts)
labeler.accuracy(noisy_labels=noisy_labels, gold_labels=labels)
###Output
_____no_output_____
###Markdown Of course, you can also modify the `NoisyLabeler` code directly.
Training a Label Model
Using [FlyingSquid](https://github.com/HazyResearch/flyingsquid), we can train a probabilistic model to combine our LFs (assuming we have at least 3!).
###Code
from flyingsquid.label_model import LabelModel

m = noisy_labels.shape[1]
label_model = LabelModel(m)
label_model.fit(noisy_labels)
preds = label_model.predict(noisy_labels).reshape(labels.shape)
accuracy = np.sum(preds == labels) / labels.shape[0]
print(f"Label model accuracy: {int(100 * accuracy)}%")
###Output
_____no_output_____
###Markdown Removing LFs
If it turns out our new LF performs poorly, we can remove it and try again:
###Code
del labeler.lfs[-1]
###Output
_____no_output_____
###Markdown Getting started with OGGM: a real case study, step by step
The OGGM workflow is best explained with an example. In the following, we will show how to apply the standard [OGGM workflow](http://docs.oggm.org/en/stable/introduction.html) to a list of glaciers. This example is meant to guide you through a first-time setup step-by-step. If you prefer not to install OGGM on your computer, you can always run this notebook in [OGGM-Edu](https://edu.oggm.org) instead!
Set-up
Input data folders
**If you are using your own computer**: before you start, make sure that you have set up the [input data configuration file](https://docs.oggm.org/en/stable/input-data.html) as you wish.
In the course of this tutorial, we will need to download data needed for each glacier (a couple of MB at most, depending on the chosen glaciers), so make sure you have an internet connection.
cfg.initialize() and cfg.PARAMS
An OGGM simulation script will always start with the following commands:
###Code
from oggm import cfg, utils
cfg.initialize(logging_level='WARNING')
###Output
_____no_output_____
###Markdown A call to [cfg.initialize()](https://docs.oggm.org/en/stable/generated/oggm.cfg.initialize.html) will read the default parameter file (or any user-provided file) and make its parameters available to all other OGGM tools via the `cfg.PARAMS` dictionary. Here are some examples of these parameters:
###Code
cfg.PARAMS['prcp_scaling_factor'], cfg.PARAMS['ice_density'], cfg.PARAMS['continue_on_error']
###Output
_____no_output_____
###Markdown See [here](https://github.com/OGGM/oggm/blob/master/oggm/params.cfg) for the default parameter file and a description of each parameter's role and default value.
###Code
# You can try with or without multiprocessing: with two glaciers, OGGM could run on two processors
cfg.PARAMS['use_multiprocessing'] = True
###Output
_____no_output_____
###Markdown Workflow
In this section, we will explain the fundamental concepts of the OGGM workflow:
- Working directories
- Glacier directories
- Tasks
###Code
from oggm import workflow
###Output
_____no_output_____
###Markdown Working directory
Each OGGM run needs a **single folder** in which to store the results of the computations for all glaciers. This is called a "working directory" and needs to be specified before each run.
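In your own scripts you would typically point this to a persistent folder you own; a minimal sketch (the path below is purely illustrative):
###Code
# Illustrative only: any folder you own works as a working directory
cfg.PATHS['working_dir'] = '/home/john/OGGM_output'
###Output
_____no_output_____
###Markdown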
Here we create a temporary folder for you:
###Code
cfg.PATHS['working_dir'] = utils.gettempdir(dirname='OGGM-GettingStarted', reset=True)
cfg.PATHS['working_dir']
###Output
_____no_output_____
###Markdown We use a temporary directory for this example, but in practice you will set this working directory yourself (for example: `/home/john/OGGM_output`). The size of this directory will depend on how many glaciers you'll simulate!
**This working directory is meant to be persistent**, i.e. you can stop your processing workflow after any task, and restart from an existing working directory at a later stage.
Define the glaciers for the run
###Code
rgi_ids = ['RGI60-11.01328', 'RGI60-11.00897']
###Output
_____no_output_____
###Markdown You can provide any number of glacier identifiers to OGGM. In this case, we chose:
- `RGI60-11.01328`: [Unteraar Glacier](https://en.wikipedia.org/wiki/Unteraargletscher) in the Swiss Alps
- `RGI60-11.00897`: [Hintereisferner](http://acinn.uibk.ac.at/research/ice-and-climate/projects/hintereisferner) in the Austrian Alps.
Here is a list of other glaciers you might want to try out:
- `RGI60-18.02342`: Tasman Glacier in New Zealand
- `RGI60-11.00787`: [Kesselwandferner](https://de.wikipedia.org/wiki/Kesselwandferner) in the Austrian Alps
- ... or any other glacier identifier! You can find other glacier identifiers by exploring the [GLIMS viewer](https://www.glims.org/maps/glims).
For an operational run on an RGI region, you might want to download the [Randolph Glacier Inventory](https://www.glims.org/RGI/) dataset instead, and start a run from it. This case is covered in the [working with the RGI](working_with_rgi.ipynb) tutorial.
Glacier directories
The OGGM workflow is organized as a list of **tasks** that have to be applied to a list of glaciers. The vast majority of tasks are called **entity tasks**: they are standalone operations to be realized on one single glacier entity. These tasks are executed sequentially (one after another): they often need input generated by the previous task(s); for example, the climate calibration needs the glacier flowlines, which can only be computed after the topography data has been processed, and so on.
To handle this situation, OGGM uses a workflow based on data persistence on disk: instead of passing data as python variables from one task to another, each task will read the data from disk and then write the computation results back to the disk, making these new data available for the next task in the queue.
These glacier-specific data are located in [glacier directories](https://docs.oggm.org/en/stable/glacierdir.html#glacier-directories). In the model, these directories are initialized with the following command (this can take a little while on the first call, as OGGM needs to download some data):
###Code
# Where to fetch the pre-processed directories
gdirs = workflow.init_glacier_directories(rgi_ids, from_prepro_level=3, prepro_border=80)
###Output
_____no_output_____
###Markdown - the keyword `from_prepro_level` indicates that we will start from [pre-processed directories](https://docs.oggm.org/en/stable/input-data.html#pre-processed-directories), i.e. data that are already pre-processed for the model users. In many cases you will want to start from level 3, 4 or 5.
Here we start from level 3 and rerun some of the processing in order to demonstrate the OGGM workflow.
- the `prepro_border` keyword indicates the number of DEM grid points which we'd like to add to each side of the glacier for the local map: the larger the glacier will grow, the larger the border parameter should be. The available pre-processed border values are: **10, 80, 160** (depending on the model set-ups there might be more or fewer options). These are the fixed map sizes we prepared for you - any other map size will require a full processing (see the [alternative DEM example](dem_sources.ipynb) for a tutorial).
The [init_glacier_directories](https://docs.oggm.org/en/stable/generated/oggm.workflow.init_glacier_directories.html#oggm.workflow.init_glacier_directories) task will always be the very first task to call for all your OGGM experiments. Let's see what it gives us back:
###Code
type(gdirs), type(gdirs[0])
###Output
_____no_output_____
###Markdown `gdirs` is a list of [GlacierDirectory](https://docs.oggm.org/en/stable/generated/oggm.GlacierDirectory.html#oggm.GlacierDirectory) objects (one for each glacier). **Glacier directories are used by OGGM as "file and attribute manager"** for single glaciers. For example, the model now knows where to find the topography data file for this glacier:
###Code
gdir = gdirs[0]  # take Unteraar glacier
print('Path to the DEM:', gdir.get_filepath('dem'))
###Output
_____no_output_____
###Markdown And we can also access some attributes of this glacier:
###Code
gdir
gdir.rgi_date  # date at which the outlines are valid
###Output
_____no_output_____
###Markdown The advantage of this Glacier Directory data model is that it greatly simplifies the data transfer between tasks. **The single mandatory argument of all entity tasks will always be a glacier directory**. With the glacier directory, each task will find the input it needs: for example, both the glacier's topography and outlines are needed for the next plotting function, and both are available via the `gdir` argument:
###Code
from oggm import graphics
graphics.plot_domain(gdir, figsize=(8, 7))
###Output
_____no_output_____
###Markdown Another advantage of glacier directories is their persistence on disk: once created, **they can be recovered from the same location** by using `init_glacier_directories` again, but without keyword arguments:
###Code
# Fetch the LOCAL pre-processed directories - note that no arguments are used!
gdirs = workflow.init_glacier_directories(rgi_ids)
###Output
_____no_output_____
###Markdown See the [store_and_compress_glacierdirs](store_and_compress_glacierdirs.ipynb) tutorial for more information on glacier directories.
Tasks
There are two different types of "[tasks](http://docs.oggm.org/en/stable/api.html#entity-tasks)":
**Entity Tasks**: Standalone operations to be realized on one single glacier entity, independently from the others. The majority of OGGM tasks are entity tasks. They are parallelisable: the same task can run on several glaciers in parallel.
**Global Tasks**: Tasks that need to work on several glacier entities at the same time. Model parameter calibration or the compilation of several glaciers' output are examples of global tasks.
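Because the single mandatory argument of an entity task is a glacier directory, an entity task can also be applied to one glacier at a time by calling it directly; a minimal sketch (assuming the directories above are initialized):
###Code
from oggm import tasks
# Apply an entity task to a single glacier directory
tasks.glacier_masks(gdirs[0])
###Output
_____no_output_____
###Markdown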
OGGM implements a simple mechanism to run a specific task on a list of `GlacierDirectory` objects:
###Code
from oggm import tasks

# run the glacier_masks task on all gdirs
workflow.execute_entity_task(tasks.glacier_masks, gdirs);
###Output
_____no_output_____
###Markdown The task we just applied to our list of glaciers is [glacier_masks](http://docs.oggm.org/en/stable/generated/oggm.tasks.glacier_masks.html#oggm.tasks.glacier_masks). It wrote a new file in our glacier directory, providing raster masks of the glacier (among other things):
###Code
print('Path to the masks:', gdir.get_filepath('gridded_data'))
###Output
_____no_output_____
###Markdown It is also possible to apply several tasks sequentially (i.e. one after another) on our glacier list:
###Code
list_tasks = [
    tasks.compute_centerlines,
    tasks.initialize_flowlines,
    tasks.compute_downstream_line,
]
for task in list_tasks:  # The order matters!
    workflow.execute_entity_task(task, gdirs)
###Output
_____no_output_____
###Markdown The function `execute_entity_task` can run a task on different glaciers at the same time, if the `use_multiprocessing` option is set to `True` in the configuration file.
Among other things, we computed the glacier flowlines and the glacier's downstream line. We can now plot them:
###Code
graphics.plot_centerlines(gdir, figsize=(8, 7), use_flowlines=True, add_downstream=True)
###Output
_____no_output_____
###Markdown As a result, the glacier directories now store many more files. If you are interested, you can have a look:
###Code
import os
print(os.listdir(gdir.dir))
###Output
_____no_output_____
###Markdown For a short explanation of what these files are, see the [glacier directory documentation](https://docs.oggm.org/en/stable/api.html#cfg-basenames). In practice, however, you will only rarely need to access these files yourself.
Other preprocessing tasks
Let's continue with the other preprocessing tasks:
###Code
list_tasks = [
    tasks.catchment_area,
    tasks.catchment_width_geom,
    tasks.catchment_width_correction,
    tasks.compute_downstream_bedshape
]
for task in list_tasks:  # The order matters!
    workflow.execute_entity_task(task, gdirs)
###Output
_____no_output_____
###Markdown We just computed the catchment areas of each flowline (the colors are arbitrary):
###Code
graphics.plot_catchment_areas(gdir, figsize=(8, 7))
###Output
_____no_output_____
###Markdown Each flowline now knows what area will contribute to its surface mass-balance and ice flow. Accordingly, it is possible to compute each glacier cross-section's width, and correct it so that the total glacier area and elevation distribution is conserved:
###Code
graphics.plot_catchment_width(gdir, corrected=True, figsize=(8, 7))
###Output
_____no_output_____
###Markdown Climate tasks
The glacier directories we downloaded already contain the climate timeseries for each glacier (`from_prepro_level=3`). Let's have a look at them:
###Code
import xarray as xr
fpath = gdir.get_filepath('climate_historical')
ds = xr.open_dataset(fpath)
# Data is in hydrological years
# -> let's just ignore the first and last calendar years
ds.temp.resample(time='AS').mean()[1:-1].plot();
###Output
_____no_output_____
###Markdown This climate data is called the "baseline climate" for this glacier. It will be used for the mass-balance model calibration, and at the end of this tutorial also to generate the random climate to drive a simulation.
When running OGGM with GCM data, the GCM timeseries will be computed as anomalies to this baseline timeseries, hence the name.
Here we are using CRU, but OGGM-Shop also allows you to use ERA5 and CERA as baseline.
Now, let's calibrate the mass-balance model for this glacier. The calibration procedure of OGGM is ... original, but is also quite powerful. Read the [doc page](https://docs.oggm.org/en/stable/mass-balance.html) or the [GMD paper](https://www.geosci-model-dev-discuss.net/gmd-2018-9/) for more details, and you can also follow the [mass-balance calibration tutorial](massbalance_calibration.ipynb) explaining some of the model internals.
The default calibration process is automated (see also [local_t_star](https://docs.oggm.org/en/stable/generated/oggm.tasks.local_t_star.html#oggm.tasks.local_t_star)):
###Code
# Fetch the reference t* list and associated model parameters
params_url = 'https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params/oggm_v1.4/RGIV62/CRU/centerlines/qc3/pcp2.5'
workflow.download_ref_tstars(base_url=params_url)

# Now calibrate
workflow.execute_entity_task(tasks.local_t_star, gdirs);
workflow.execute_entity_task(tasks.mu_star_calibration, gdirs);
###Output
_____no_output_____
###Markdown **¡Important!** The calibration of the mass-balance model is automated only for certain parameter combinations of the model - any change in the mass-balance model settings (e.g. the melt threshold, the precipitation correction factor, etc.) will require a re-calibration of the model (see the [mass-balance calibration tutorial](massbalance_calibration.ipynb) for an introduction to this topic).
From there, OGGM can now compute the mass-balance for these glaciers. For example:
###Code
from oggm.core.massbalance import MultipleFlowlineMassBalance
gdir_hef = gdirs[1]
mbmod = MultipleFlowlineMassBalance(gdir_hef, use_inversion_flowlines=True)

import numpy as np
import matplotlib.pyplot as plt
years = np.arange(1902, 2017)
mb_ts = mbmod.get_specific_mb(year=years)
plt.plot(years, mb_ts);
plt.ylabel('SMB (mm yr$^{-1}$)');
###Output
_____no_output_____
###Markdown For the Hintereisferner (not for Unteraar where no observational data is available), we can also compare our computed mass-balance to the measured one:
###Code
mbdf = gdir_hef.get_ref_mb_data()
mbdf['OGGM'] = mbmod.get_specific_mb(year=mbdf.index)
mbdf[['ANNUAL_BALANCE', 'OGGM']].plot();
plt.ylabel('SMB (mm yr$^{-1}$)');
###Output
_____no_output_____
###Markdown This graphic is interesting because it shows an effect often observed when comparing the computed mass balance to the observed one: since (in this case) the OGGM geometry is fixed with time, the modelled specific mass-balance series are likely to have a stronger trend than the observed ones.
To assess the results of the OGGM mass-balance model for all WGMS glaciers worldwide, visit the [score summary](https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params/oggm_v1.4/RGIV62/CRU/centerlines/qc3/pcp2.5/_scores/) for this particular model setup.
Computing the ice thickness ("inversion")
With the computed mass-balance and the flowlines, OGGM can now compute the ice thickness, based on the principles of [mass conservation and ice dynamics](http://docs.oggm.org/en/stable/inversion.html).
###Code
list_tasks = [
    tasks.prepare_for_inversion,  # This is a preprocessing task
    tasks.mass_conservation_inversion,  # This does the actual job
    tasks.filter_inversion_output  # This smoothes the thicknesses at the tongue a little
]
for task in list_tasks:
    workflow.execute_entity_task(task, gdirs)
###Output
_____no_output_____
###Markdown The ice thickness is computed for all sections along the flowline, and can be displayed with the help of OGGM's graphics module:
###Code
graphics.plot_inversion(gdir, figsize=(8, 7))
###Output
_____no_output_____
###Markdown The inversion is realized with the default parameter settings: it must be noted that the model is sensitive to the choice of some of them, most notably the creep parameter A:
###Code
cfg.PARAMS['inversion_glen_a']

a_factor = np.linspace(0.1, 10., 100)
volume = []
for f in a_factor:
    # Recompute the volume without overwriting the previous computations
    v = tasks.mass_conservation_inversion(gdir, glen_a=f * cfg.PARAMS['inversion_glen_a'], write=False)
    volume.append(v * 1e-9)
plt.plot(a_factor, volume);
plt.title('Unteraar total volume');
plt.ylabel('Volume (km$^3$)');
plt.xlabel('Glen A factor (1 = default)');
###Output
_____no_output_____
###Markdown There is no simple way to find the best A for each individual glacier. It can easily vary by a factor of 10 (or more) from one glacier to another. At the global scale, the "best" A is close to the default value (possibly between 1 and 1.5 times larger). The default parameter is a good choice as a first step, but be aware that reconstructions based on this default parameter might be very uncertain! See our [ice thickness inversion tutorial](inversion.ipynb) for a more in-depth discussion.
Simulations
For most applications, this is where the fun starts! With climate data and an estimate of the ice thickness, we can now start transient simulations. For this tutorial, we will show how to realize idealized experiments based on the baseline climate only, but it is also possible to drive OGGM with real GCM data.
###Code
# Convert the flowlines to a "glacier" for the ice dynamics module
workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs);
###Output
_____no_output_____
###Markdown Let's start a run driven by the climate of the last 31 years, shuffled randomly for 200 years. This can be seen as a "commitment" simulation, i.e. how much glaciers will change even without further climate change:
###Code
workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200,
                             y0=2000, output_filesuffix='_2000');
###Output
_____no_output_____
###Markdown The output of this simulation is stored in two separate files: a diagnostic file (which contains time series variables such as length, volume, ELA, etc.) and a full model output file, which is larger but makes it possible to reproduce the full glacier geometry changes during the run.
In practice, the diagnostic files are often compiled for the entire list of glaciers:
###Code
ds2000 = utils.compile_run_output(gdirs, filesuffix='_2000')
###Output
_____no_output_____
###Markdown This dataset is also stored on disk (in the working directory) as a NetCDF file for later use. Here we can access it directly:
###Code
ds2000
###Output
_____no_output_____
###Markdown We opened the file with [xarray](http://xarray.pydata.org), a very useful data analysis library based on [pandas](http://pandas.pydata.org/).
For example, we can plot the volume and length evolution of both glaciers with time:
###Code
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 4))
ds2000.volume.plot.line(ax=ax1, hue='rgi_id');
ds2000.length.plot.line(ax=ax2, hue='rgi_id');
###Output
_____no_output_____
###Markdown The full model output files can be used for plots:
###Code
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 6))
graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=0, ax=ax1, vmax=350)
graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=50, ax=ax2, vmax=350)
graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=150, ax=ax3, vmax=350)
plt.tight_layout();
###Output
_____no_output_____
###Markdown Sensitivity to temperature
Now repeat our simulations with a +0.5°C and -0.5°C temperature bias, which for a glacier is quite a lot!
###Code
workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200,
                             temperature_bias=0.5,
                             y0=2000, output_filesuffix='_p05');
workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200,
                             temperature_bias=-0.5,
                             y0=2000, output_filesuffix='_m05');

dsp = utils.compile_run_output(gdirs, filesuffix='_p05')
dsm = utils.compile_run_output(gdirs, filesuffix='_m05')

f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 4))
rgi_id = 'RGI60-11.01328'
ds2000.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='Commitment');
ds2000.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id');
ds2000.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id');
dsp.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='$+$ 0.5°C');
dsp.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id');
dsp.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id');
dsm.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='$-$ 0.5°C');
dsm.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id');
dsm.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id');
ax1.legend();
###Output
_____no_output_____
###Markdown py12box model usage
This notebook shows how to set up and run the AGAGE 12-box model.
Model schematic
The model uses advection and diffusion parameters to mix gases between boxes. Box indices start at the northern-most box and are as shown in the following schematic:
[schematic figure not included in this text export]
Model inputs
We will be using some synthetic inputs for CFC-11. Input files are in:
```data/example/CFC-11```
The location of this folder will depend on where you've installed py12box and your system. Perhaps the easiest place to view the contents is [in the repository](https://github.com/mrghg/py12box/tree/develop/py12box/data/example/CFC-11).
In this folder, you will see two files:
```CFC-11_emissions.csv```
```CFC-11_initial_conditions.csv```
As the names suggest, these contain the emissions and initial conditions.
Emissions
The emissions file has five columns: ```year, box_1, box_2, box_3, box_4```. The number of rows in this file determines the length of the box model simulation.
The ```year``` column should contain a decimal date (e.g. 2000.5 for ~June 2000), and can be monthly or annual resolution.
The other columns specify the emissions in Gg/yr in each surface box.
Initial conditions
The initial conditions file can be used to specify the mole fraction in pmol/mol (~ppt) in each of the 12 boxes.
How to run
Firstly, import the ```Model``` class. This class contains all the input variables (emissions, initial conditions, etc.) and the run functions.
We are also importing the get_data helper function, only needed for this tutorial, to point to input data files.
###Code # Import from this package from py12box.model import Model from py12box import get_data # Import matplotlib for some plots import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown The ```Model``` class takes two arguments, ```species``` and ```project_directory```. The latter is the location of the input files, here just redirecting to the "examples" folder.The initialisation step may take a few seconds, mainly to compile the model. ###Code # Initialise the model mod = Model("CFC-11", get_data("example/CFC-11")) ###Output Compiling model and tuning lifetime... ... completed in 3 iterations ... stratospheric lifetime: 52.1 ... OH lifetime: 1e12 ... ocean lifetime: 1e12 ... non-OH tropospheric lifetime: 1e12 ... overall lifetime: 52.0 ... done in 5.5602850914001465 s ###Markdown Assuming this has compiled correctly, you can now check the model inputs by accessing elements of the model class. E.g. to see the emissions: ###Code mod.emissions ###Output _____no_output_____ ###Markdown In this case, the emissions should be a 4 x 12*n_years numpy array. If annual emissions were specified in the inputs, the annual mean emissions are repeated each month. We can now run the model using: ###Code # Run model mod.run() ###Output ... done in 0.024969100952148438 s ###Markdown The primary outputs that you'll be interested in are ```mf``` for the mole fraction (pmol/mol) in each of the 12 boxes at each timestep.Let's plot this up: ###Code plt.plot(mod.time, mod.mf[:, 0]) plt.plot(mod.time, mod.mf[:, 3]) plt.ylabel("%s (pmol mol$^{-1}$)" % mod.species) plt.xlabel("Year") plt.show() ###Output _____no_output_____ ###Markdown We can also view other outputs such as the burden and loss. Losses are contained in a dictionary, with keys:- ```OH``` (tropospheric OH losses)- ```Cl``` (losses via tropospheric chlorine)- ```other``` (all other first order losses)For CFC-11, the losses are primarily in the stratosphere, so are contained in ```other```: ###Code plt.plot(mod.emissions.sum(axis = 1).cumsum()) plt.plot(mod.burden.sum(axis = 1)) plt.plot(mod.losses["other"].sum(axis = 1).cumsum()) ###Output _____no_output_____ ###Markdown Another useful output is the lifetime. This is broken down in a variety of ways. Here we'll plot the global lifetime: ###Code plt.plot(mod.instantaneous_lifetimes["global_total"]) plt.ylabel("Global instantaneous lifetime (years)") ###Output _____no_output_____ ###Markdown Setting up your own model runTo create your own project, create a project folder (can be anywhere on your filesystem). The folder must contain two files:```_emissions.csv``````_initial_conditions.csv```To point to the new project, py12box will expect a pathlib.Path object, so make sure you import this first: ###Code from pathlib import Path new_model = Model("<SPECIES>", Path("path/to/project/folder")) ###Output _____no_output_____ ###Markdown Once set up, you can run the model using: ###Code new_model.run() ###Output _____no_output_____ ###Markdown Note that you can modify any of the model inputs in memory by modifying the model class. E.g. to see what happens when you double the emissions: ###Code new_model.emissions *= 2. 
new_model.run()
###Output
_____no_output_____
###Markdown Changing lifetimes
If no user-defined lifetimes are passed to the model, it will use the values in ```data/inputs/species_info.csv```. However, you can start the model up with non-standard lifetimes using the following arguments to the ```Model``` class (all in years):
```lifetime_strat```: stratospheric lifetime
```lifetime_ocean```: lifetime with respect to ocean uptake
```lifetime_trop```: non-OH losses in the troposphere
e.g.:
###Code
new_model = Model("<SPECIES>", Path("path/to/project/folder"),
                  lifetime_strat=100.)
###Output
_____no_output_____
###Markdown To change the tropospheric OH lifetime, you need to modify the ```oh_a``` or ```oh_er``` attributes of the ```Model``` class. To re-tune the lifetime of the model in-memory, you can use the ```tune_lifetime``` method of the ```Model``` class:
###Code
new_model.tune_lifetime(lifetime_strat=50.,
                        lifetime_ocean=1e12,
                        lifetime_trop=1e12)
###Output
_____no_output_____
###Markdown Your first Edward program
Probabilistic modeling in Edward uses a simple language of random variables. Here we will show a Bayesian neural network. It is a neural network with a prior distribution on its weights.
A webpage version is available at http://edwardlib.org/getting-started.
###Code
%matplotlib inline
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

from edward.models import Normal

plt.style.use('ggplot')

def build_toy_dataset(N=50, noise_std=0.1):
    x = np.linspace(-3, 3, num=N)
    y = np.cos(x) + np.random.normal(0, noise_std, size=N)
    x = x.astype(np.float32).reshape((N, 1))
    y = y.astype(np.float32)
    return x, y

def neural_network(x, W_0, W_1, b_0, b_1):
    h = tf.tanh(tf.matmul(x, W_0) + b_0)
    h = tf.matmul(h, W_1) + b_1
    return tf.reshape(h, [-1])
###Output
_____no_output_____
###Markdown First, simulate a toy dataset of 50 observations with a cosine relationship.
###Code
ed.set_seed(42)

N = 50  # number of data points
D = 1   # number of features

x_train, y_train = build_toy_dataset(N)
###Output
_____no_output_____
###Markdown Next, define a two-layer Bayesian neural network. Here, we define the neural network manually with `tanh` nonlinearities.
###Code
W_0 = Normal(loc=tf.zeros([D, 2]), scale=tf.ones([D, 2]))
W_1 = Normal(loc=tf.zeros([2, 1]), scale=tf.ones([2, 1]))
b_0 = Normal(loc=tf.zeros(2), scale=tf.ones(2))
b_1 = Normal(loc=tf.zeros(1), scale=tf.ones(1))

x = x_train
y = Normal(loc=neural_network(x, W_0, W_1, b_0, b_1),
           scale=0.1 * tf.ones(N))
###Output
_____no_output_____
###Markdown Next, make inferences about the model from data. We will use variational inference. Specify a normal approximation over the weights and biases.
###Code
qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 2])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([D, 2]))))
qW_1 = Normal(loc=tf.Variable(tf.random_normal([2, 1])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([2, 1]))))
qb_0 = Normal(loc=tf.Variable(tf.random_normal([2])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([2]))))
qb_1 = Normal(loc=tf.Variable(tf.random_normal([1])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))
###Output
_____no_output_____
###Markdown Defining `tf.Variable` allows the variational factors’ parameters to vary. They are initialized randomly.
The standard deviation parameters are constrained to be greater than zero according to a [softplus](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)) transformation.
###Code
# Sample functions from variational model to visualize fits.
rs = np.random.RandomState(0)
inputs = np.linspace(-5, 5, num=400, dtype=np.float32)
x = tf.expand_dims(inputs, 1)
mus = tf.stack(
    [neural_network(x, qW_0.sample(), qW_1.sample(),
                    qb_0.sample(), qb_1.sample())
     for _ in range(10)])

# FIRST VISUALIZATION (prior)

sess = ed.get_session()
tf.global_variables_initializer().run()
outputs = mus.eval()

fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.set_title("Iteration: 0")
ax.plot(x_train, y_train, 'ks', alpha=0.5, label='(x, y)')
ax.plot(inputs, outputs[0].T, 'r', lw=2, alpha=0.5, label='prior draws')
ax.plot(inputs, outputs[1:].T, 'r', lw=2, alpha=0.5)
ax.set_xlim([-5, 5])
ax.set_ylim([-2, 2])
ax.legend()
plt.show()
###Output
_____no_output_____
###Markdown Now, run variational inference with the [Kullback-Leibler](https://en.wikipedia.org/wiki/Kullback–Leibler_divergence) divergence in order to infer the model’s latent variables with the given data. We specify `1000` iterations.
###Code
inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
                     W_1: qW_1, b_1: qb_1}, data={y: y_train})
inference.run(n_iter=1000, n_samples=5)
###Output
1000/1000 [100%] ██████████████████████████████ Elapsed: 12s | Loss: -5.755
###Markdown Finally, criticize the model fit. Bayesian neural networks define a distribution over neural networks, so we can perform a graphical check. Draw neural networks from the inferred model and visualize how well it fits the data.
###Code
# SECOND VISUALIZATION (posterior)

outputs = mus.eval()

fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)
ax.set_title("Iteration: 1000")
ax.plot(x_train, y_train, 'ks', alpha=0.5, label='(x, y)')
ax.plot(inputs, outputs[0].T, 'r', lw=2, alpha=0.5, label='posterior draws')
ax.plot(inputs, outputs[1:].T, 'r', lw=2, alpha=0.5)
ax.set_xlim([-5, 5])
ax.set_ylim([-2, 2])
ax.legend()
plt.show()
###Output
_____no_output_____
###Markdown JupyterDash
The `jupyter-dash` package makes it easy to develop Plotly Dash apps from the Jupyter Notebook and JupyterLab.
Just replace the standard `dash.Dash` class with the `jupyter_dash.JupyterDash` subclass.
###Code
from jupyter_dash import JupyterDash
import dash
from dash import dcc
from dash import html
import pandas as pd
###Output
_____no_output_____
###Markdown When running in JupyterHub or Binder, call the `infer_jupyter_proxy_config` function to detect the proxy configuration.
###Code JupyterDash.infer_jupyter_proxy_config() ###Output _____no_output_____ ###Markdown Load and preprocess data ###Code df = pd.read_csv('https://plotly.github.io/datasets/country_indicators.csv') available_indicators = df['Indicator Name'].unique() ###Output _____no_output_____ ###Markdown Construct the app and callbacks ###Code external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = JupyterDash(__name__, external_stylesheets=external_stylesheets) # Create server variable with Flask server object for use with gunicorn server = app.server app.layout = html.Div([ html.Div([ html.Div([ dcc.Dropdown( id='crossfilter-xaxis-column', options=[{'label': i, 'value': i} for i in available_indicators], value='Fertility rate, total (births per woman)' ), dcc.RadioItems( id='crossfilter-xaxis-type', options=[{'label': i, 'value': i} for i in ['Linear', 'Log']], value='Linear', labelStyle={'display': 'inline-block'} ) ], style={'width': '49%', 'display': 'inline-block'}), html.Div([ dcc.Dropdown( id='crossfilter-yaxis-column', options=[{'label': i, 'value': i} for i in available_indicators], value='Life expectancy at birth, total (years)' ), dcc.RadioItems( id='crossfilter-yaxis-type', options=[{'label': i, 'value': i} for i in ['Linear', 'Log']], value='Linear', labelStyle={'display': 'inline-block'} ) ], style={'width': '49%', 'float': 'right', 'display': 'inline-block'}) ], style={ 'borderBottom': 'thin lightgrey solid', 'backgroundColor': 'rgb(250, 250, 250)', 'padding': '10px 5px' }), html.Div([ dcc.Graph( id='crossfilter-indicator-scatter', hoverData={'points': [{'customdata': 'Japan'}]} ) ], style={'width': '49%', 'display': 'inline-block', 'padding': '0 20'}), html.Div([ dcc.Graph(id='x-time-series'), dcc.Graph(id='y-time-series'), ], style={'display': 'inline-block', 'width': '49%'}), html.Div(dcc.Slider( id='crossfilter-year--slider', min=df['Year'].min(), max=df['Year'].max(), value=df['Year'].max(), marks={str(year): str(year) for year in df['Year'].unique()}, step=None ), style={'width': '49%', 'padding': '0px 20px 20px 20px'}) ]) @app.callback( dash.dependencies.Output('crossfilter-indicator-scatter', 'figure'), [dash.dependencies.Input('crossfilter-xaxis-column', 'value'), dash.dependencies.Input('crossfilter-yaxis-column', 'value'), dash.dependencies.Input('crossfilter-xaxis-type', 'value'), dash.dependencies.Input('crossfilter-yaxis-type', 'value'), dash.dependencies.Input('crossfilter-year--slider', 'value')]) def update_graph(xaxis_column_name, yaxis_column_name, xaxis_type, yaxis_type, year_value): dff = df[df['Year'] == year_value] return { 'data': [dict( x=dff[dff['Indicator Name'] == xaxis_column_name]['Value'], y=dff[dff['Indicator Name'] == yaxis_column_name]['Value'], text=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'], customdata=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'], mode='markers', marker={ 'size': 25, 'opacity': 0.7, 'color': 'orange', 'line': {'width': 2, 'color': 'purple'} } )], 'layout': dict( xaxis={ 'title': xaxis_column_name, 'type': 'linear' if xaxis_type == 'Linear' else 'log' }, yaxis={ 'title': yaxis_column_name, 'type': 'linear' if yaxis_type == 'Linear' else 'log' }, margin={'l': 40, 'b': 30, 't': 10, 'r': 0}, height=450, hovermode='closest' ) } def create_time_series(dff, axis_type, title): return { 'data': [dict( x=dff['Year'], y=dff['Value'], mode='lines+markers' )], 'layout': { 'height': 225, 'margin': {'l': 20, 'b': 30, 'r': 10, 't': 10}, 'annotations': [{ 'x': 0, 'y': 0.85, 'xanchor': 
'left', 'yanchor': 'bottom', 'xref': 'paper', 'yref': 'paper', 'showarrow': False, 'align': 'left', 'bgcolor': 'rgba(255, 255, 255, 0.5)', 'text': title }], 'yaxis': {'type': 'linear' if axis_type == 'Linear' else 'log'}, 'xaxis': {'showgrid': False} } } @app.callback( dash.dependencies.Output('x-time-series', 'figure'), [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'), dash.dependencies.Input('crossfilter-xaxis-column', 'value'), dash.dependencies.Input('crossfilter-xaxis-type', 'value')]) def update_y_timeseries(hoverData, xaxis_column_name, axis_type): country_name = hoverData['points'][0]['customdata'] dff = df[df['Country Name'] == country_name] dff = dff[dff['Indicator Name'] == xaxis_column_name] title = '<b>{}</b><br>{}'.format(country_name, xaxis_column_name) return create_time_series(dff, axis_type, title) @app.callback( dash.dependencies.Output('y-time-series', 'figure'), [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'), dash.dependencies.Input('crossfilter-yaxis-column', 'value'), dash.dependencies.Input('crossfilter-yaxis-type', 'value')]) def update_x_timeseries(hoverData, yaxis_column_name, axis_type): dff = df[df['Country Name'] == hoverData['points'][0]['customdata']] dff = dff[dff['Indicator Name'] == yaxis_column_name] return create_time_series(dff, axis_type, yaxis_column_name) ###Output _____no_output_____ ###Markdown Serve the app using `run_server`. Unlike the standard `Dash.run_server` method, the `JupyterDash.run_server` method doesn't block execution of the notebook. It serves the app in a background thread, making it possible to run other notebook calculations while the app is running.This makes it possible to iteratively update the app without rerunning the potentially expensive data processing steps. ###Code app.run_server() ###Output _____no_output_____ ###Markdown By default, `run_server` displays a URL that you can click on to open the app in a browser tab. The `mode` argument to `run_server` can be used to change this behavior. Setting `mode="inline"` will display the app directly in the notebook output cell. ###Code app.run_server(mode="inline") ###Output _____no_output_____ ###Markdown Getting StartedRead in the travel diary, filter to "complete" days, and do some basic summaries Setup ###Code import sys, os import numpy as np import pandas as pd ###Output _____no_output_____ ###Markdown Define and add the path of `rmove_utils` ###Code PYTHONLIB = r'<root directory for rmove_utils>' RMOVE_UTIL_DIR = os.path.join(PYTHONLIB,r'rmove_utils') sys.path.insert(0, RMOVE_UTIL_DIR) from rmove_utils.survey import Survey ###Output _____no_output_____ ###Markdown Create a `config` object from the codebook ###Code import rmove_utils.config as ruc ruc.Config.from_excel(path=r'<Path to the codebook>', config_file=r'<Codebook filename>') ###Output _____no_output_____ ###Markdown Define the location of the survey, and the location to write any outputs ###Code INDIR = r'<Path to rmove data>' OUTDIR = r'<Output path>' ###Output _____no_output_____ ###Markdown Import the surveyImport the survey. This will check each field in the codebook against fields in the data set. It will also check the coded values against the valid codes identified in the codebook. Currently, values are only validated for categorical variables, not continuous variables. 
###Code
survey = Survey(root=INDIR,
                household_file=r'household.tsv',
                person_file=r'person.tsv',
                trip_file=r'trip.tsv',
                day_file=r'day.tsv',
                vehicle_file=r'vehicle.tsv',
                location_file=r'location.tsv',
                )
###Output
found unexpected column wkdy_hh_weight_sp_owners in <class 'rmove_utils.households.Households'>.
did not find expected column wkdy_hh_weight_sp_owners in <class 'rmove_utils.households.Households'>.
found unexpected column wkdy_person_weight_sp_owners in <class 'rmove_utils.persons.Persons'>.
did not find expected column person_exp_weight in <class 'rmove_utils.persons.Persons'>.
did not find expected column person_weight_day in <class 'rmove_utils.persons.Persons'>.
did not find expected column wkdy_person_weight_sp_owners in <class 'rmove_utils.persons.Persons'>.
found unexpected column wkdy_trip_weight_sp_owners in <class 'rmove_utils.trips.Trips'>.
did not find expected column wkdy_trip_weight_sp_owners in <class 'rmove_utils.trips.Trips'>.
found 49 unexpected value(s) A;B for column trip_quality_flag in <class 'rmove_utils.trips.Trips'>.
found 17 unexpected value(s) A;C for column trip_quality_flag in <class 'rmove_utils.trips.Trips'>.
found 17 unexpected value(s) A;D for column trip_quality_flag in <class 'rmove_utils.trips.Trips'>.
found 1 unexpected value(s) C;D for column trip_quality_flag in <class 'rmove_utils.trips.Trips'>.
error converting values in columns trip_quality_flag of <class 'rmove_utils.trips.Trips'> to human readable form.
nan
SUMMARY ERRORY: error converting values in columns trip_quality_flag of <class 'rmove_utils.trips.Trips'> to human readable form.
'A;B'
found unexpected column wkdy_day_weight_sp_owners in <class 'rmove_utils.days.Days'>.
did not find expected column wkdy_day_weight_sp_owners in <class 'rmove_utils.days.Days'>.
error converting values in columns year of <class 'rmove_utils.vehicles.Vehicles'> to human readable form.
nan
###Markdown The survey contains an object for each file type. For example, `survey.households`. The imported data is stored in a Pandas DataFrame, `survey.households.data`. It also contains a human readable version of each table, for example `survey.households.human_readable`.
Do some basic data maintenance
Filter to just days that are complete
###Code
survey.filter_complete_days()
###Output
_____no_output_____
###Markdown Create some basic attribute summaries
###Code
survey.summarize(household_weights='wkdy_hh_weight_all_adults',
                 person_weights='wkdy_person_weight_all_adults',
                 day_weights='wkdy_day_weight_all_adults',
                 trip_weights='wkdy_trip_weight_all_adults')
###Output
_____no_output_____
###Markdown Explore the data a bit
Check out the data as imported
Note: these are not shown because of potential PII. Uncomment to explore your data internally.
###Code
#survey.households.data.head()
#survey.persons.data.head()
#survey.days.data.head()
#survey.trips.data.head()
#survey.vehicles.data.head()
###Output
_____no_output_____
###Markdown Now check out the human readable versions
###Code
#survey.households.human_readable.head()
#survey.persons.human_readable.head()
#survey.days.human_readable.head()
#survey.trips.human_readable.head()
#survey.vehicles.human_readable.head()
#survey.locations.human_readable.head()
###Output
_____no_output_____
###Markdown Explore the `data_dictionary` and `value_lookup`
What is `mode_type`, and what are its valid values?
###Code
print(survey.data_dictionary['mode_type'])
###Output
field name: mode_type
description: Mode category
1: Walk
2: Bike
3: Car
4: Taxi
5: Transit
6: Schoolbus
7: Other
8: Shuttle/vanpool
9: TNC
10: Carshare
11: Bikeshare
12: Scooter share
13: Long-distance passenger mode
-9998: Missing: Non-response
995: Missing: Skip logic
###Markdown What about just a dictionary of valid values to descriptions?
###Code
survey.trips.value_lookup['mode_type']
###Output
_____no_output_____
###Markdown How many trips by each `mode_type`?
###Code
survey.trips.summary['mode_type']
###Output
_____no_output_____
###Markdown How many people have X daily trips?
###Code
survey.days.summary['num_trips_day']
###Output
_____no_output_____
###Markdown Getting Started with Whisk
Whisk makes it easy to create reproducible, collaborative machine learning projects. It provides the project guide rails so you can focus on the data science. Here's what you need to know to get started.
Virtual Environment
This project comes pre-loaded with a virtual environment named `venv` and an IPython kernel named `bike_classifier_whisk_0_1_21`. This notebook uses the `bike_classifier_whisk_0_1_21` kernel. In the terminal, run `source venv/bin/activate` to activate the venv.
Dependencies are listed in the `requirements.txt` file. Add your dependencies to this file and run `pip install -r requirements.txt` to update your environment.
###Code
%cat ../requirements.txt
###Output
_____no_output_____
###Markdown Loading code from Python files
When your notebook goes beyond exploratory work, it's a good practice to move your functions and classes to Python files. These are easier to maintain than notebook cells.
The cell below ensures that your notebook cells always have a fresh copy of the `src` directory.
###Code
# Load the "autoreload" extension. Prior to executing code, modules are reloaded.
# There's no need to restart jupyter notebook if you modify code in the `src` directory.
# https://ipython.org/ipython-doc/3/config/extensions/autoreload.html
%load_ext autoreload
# OPTIONAL: always reload modules so that as you change code in src, it gets loaded
%autoreload 2

from bike_classifier_whisk_0_1_21.models.model import Model
###Output
_____no_output_____
###Markdown For example, `src/bike_classifier_whisk_0_1_21/data/extract.py` contains a sample function named `extract_example()`. You can call this function from this notebook:
###Code
from bike_classifier_whisk_0_1_21.data.extract import *
extract_example()
###Output
_____no_output_____
###Markdown Accessing the data directory
Training data should be in version control alongside your code to ensure experiments are reproducible. For smaller training sets, it is OK to store in Git. For larger training sets, DVC is pre-installed.
Place your data inside the project's data directory. You can obtain the path to this directory like this:
###Code
import bike_classifier_whisk_0_1_21
bike_classifier_whisk_0_1_21.project.data_dir
###Output
_____no_output_____
###Markdown Saving models to the artifacts directory
After training a model you should save it to disk so you can invoke the model later. The method call for saving a model to disk is dependent on your ML framework (for example, Scikit-learn uses pickle while you just call `save` on a PyTorch model).
Regardless of your ML framework, save your model and required artifacts for pre/post-processing to the artifacts directory. Saving a model looks like this:
###Code
# This example uses pickle to serialize a Python object.
# Use the preferred serialization approach for your ML framework.
import pickle
from whisk.model_stub import ModelStub
# A fake model
from bike_classifier_whisk_0_1_21 import project

model = ModelStub()
file_path = project.artifacts_dir / "model.pkl"
pickle.dump(model, open(file_path,"wb"))
###Output
_____no_output_____
###Markdown Invoking a saved model
This project includes a sample `models.model.Model` class that loads a model from disk and allows you to generate a prediction. Find this class inside the `src/bike_classifier_whisk_0_1_21/models/model.py` file.
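For orientation, a minimal sketch of what such a class might look like, assuming it simply unpickles the `model.pkl` artifact saved above (the file in `src/` is the actual reference):
###Code
# Hypothetical sketch of the Model class, not the project's actual source
import pickle
from bike_classifier_whisk_0_1_21 import project

class Model:
    def __init__(self):
        # Load the serialized model from the artifacts directory
        with open(project.artifacts_dir / "model.pkl", "rb") as f:
            self._model = pickle.load(f)

    def predict(self, data):
        # Delegate prediction to the loaded model
        return self._model.predict(data)
###Output
_____no_output_____
###Markdown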
You can invoke the model like this:
###Code
from bike_classifier_whisk_0_1_21.models.model import Model

model = Model()
model.predict([[1]])
###Output
_____no_output_____
###Markdown Your first Edward program
Probabilistic modeling in Edward uses a simple language of random variables. Here we will show a Bayesian neural network. It is a neural network with a prior distribution on its weights.
A webpage version is available at http://edwardlib.org/getting-started.
###Code
%matplotlib inline
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import edward as ed
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

from edward.models import Normal

plt.style.use('ggplot')

def build_toy_dataset(N=50, noise_std=0.1):
    x = np.linspace(-3, 3, num=N)
    y = np.cos(x) + np.random.normal(0, noise_std, size=N)
    x = x.astype(np.float32).reshape((N, 1))
    y = y.astype(np.float32)
    return x, y

def neural_network(x, W_0, W_1, b_0, b_1):
    h = tf.tanh(tf.matmul(x, W_0) + b_0)
    h = tf.matmul(h, W_1) + b_1
    return tf.reshape(h, [-1])
###Output
_____no_output_____
###Markdown First, simulate a toy dataset of 50 observations with a cosine relationship.
###Code
ed.set_seed(42)

N = 50  # number of data points
D = 1   # number of features

x_train, y_train = build_toy_dataset(N)
###Output
_____no_output_____
###Markdown Next, define a two-layer Bayesian neural network. Here, we define the neural network manually with `tanh` nonlinearities.
###Code
W_0 = Normal(loc=tf.zeros([D, 2]), scale=tf.ones([D, 2]))
W_1 = Normal(loc=tf.zeros([2, 1]), scale=tf.ones([2, 1]))
b_0 = Normal(loc=tf.zeros(2), scale=tf.ones(2))
b_1 = Normal(loc=tf.zeros(1), scale=tf.ones(1))

x = x_train
y = Normal(loc=neural_network(x, W_0, W_1, b_0, b_1),
           scale=0.1 * tf.ones(N))
###Output
_____no_output_____
###Markdown Next, make inferences about the model from data. We will use variational inference. Specify a normal approximation over the weights and biases.
###Code
qW_0 = Normal(loc=tf.get_variable("qW_0/loc", [D, 2]),
              scale=tf.nn.softplus(tf.get_variable("qW_0/scale", [D, 2])))
qW_1 = Normal(loc=tf.get_variable("qW_1/loc", [2, 1]),
              scale=tf.nn.softplus(tf.get_variable("qW_1/scale", [2, 1])))
qb_0 = Normal(loc=tf.get_variable("qb_0/loc", [2]),
              scale=tf.nn.softplus(tf.get_variable("qb_0/scale", [2])))
qb_1 = Normal(loc=tf.get_variable("qb_1/loc", [1]),
              scale=tf.nn.softplus(tf.get_variable("qb_1/scale", [1])))
###Output
_____no_output_____
###Markdown Defining `tf.get_variable` allows the variational factors’ parameters to vary. They are initialized randomly. The standard deviation parameters are constrained to be greater than zero according to a [softplus](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)) transformation.
###Code
# Sample functions from variational model to visualize fits.
rs = np.random.RandomState(0) inputs = np.linspace(-5, 5, num=400, dtype=np.float32) x = tf.expand_dims(inputs, 1) mus = tf.stack( [neural_network(x, qW_0.sample(), qW_1.sample(), qb_0.sample(), qb_1.sample()) for _ in range(10)]) # FIRST VISUALIZATION (prior) sess = ed.get_session() tf.global_variables_initializer().run() outputs = mus.eval() fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(111) ax.set_title("Iteration: 0") ax.plot(x_train, y_train, 'ks', alpha=0.5, label='(x, y)') ax.plot(inputs, outputs[0].T, 'r', lw=2, alpha=0.5, label='prior draws') ax.plot(inputs, outputs[1:].T, 'r', lw=2, alpha=0.5) ax.set_xlim([-5, 5]) ax.set_ylim([-2, 2]) ax.legend() plt.show() ###Output _____no_output_____ ###Markdown Now, run variational inference with the [Kullback-Leibler](https://en.wikipedia.org/wiki/Kullback–Leibler_divergence) divergence in order to infer the model’s latent variables with the given data. We specify `1000` iterations. ###Code inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1}, data={y: y_train}) inference.run(n_iter=1000, n_samples=5) ###Output 1000/1000 [100%] ██████████████████████████████ Elapsed: 12s | Loss: -5.755 ###Markdown Finally, criticize the model fit. Bayesian neural networks define a distribution over neural networks, so we can perform a graphical check. Draw neural networks from the inferred model and visualize how well it fits the data. ###Code # SECOND VISUALIZATION (posterior) outputs = mus.eval() fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(111) ax.set_title("Iteration: 1000") ax.plot(x_train, y_train, 'ks', alpha=0.5, label='(x, y)') ax.plot(inputs, outputs[0].T, 'r', lw=2, alpha=0.5, label='posterior draws') ax.plot(inputs, outputs[1:].T, 'r', lw=2, alpha=0.5) ax.set_xlim([-5, 5]) ax.set_ylim([-2, 2]) ax.legend() plt.show() ###Output _____no_output_____ ###Markdown [View in Colaboratory](https://colab.research.google.com/github/LisbonKaggleMeetup/BikeSharingDemand/blob/master/notebooks/getting_started.ipynb) Bike sharing demand prediction challenge Import libraries ###Code import pandas as pd import numpy as np ###Output _____no_output_____ ###Markdown Load data Training dataset ###Code # Load the training dataset train = pd.read_csv("data/train.csv") # Convert the time stamp column to datetime format train.datetime=pd.to_datetime(train.datetime) # Use the time stamp as index (this will let you filter by date) train = train.set_index("datetime") # Sort the dataset just in case (to avoid unexpected bugs) train = train.sort_index() # Rename the count column which conflicts with the method train = train.rename(columns={'count':'total'}) train.head() train.describe() ###Output _____no_output_____ ###Markdown Test ###Code # Repeat for the test set test = pd.read_csv("data/test.csv") test.datetime = pd.to_datetime(test.datetime) test = test.set_index("datetime") test = test.sort_index() test.head() test.tail() ###Output _____no_output_____ ###Markdown Data Preparation Missing values ###Code train.isna().sum() ###Output _____no_output_____ ###Markdown At first sight it seems there are no missing values. However if we count the number of records available in the first 20 days of January we should get 20*24=480. Instead we have: ###Code train[:'2011-01-20'].count() ###Output _____no_output_____ ###Markdown For some reason there are ~50 missing records in the first 20 days alone!!To visualize this we can count the number of records per day with the *resample* method. 
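As a quick illustration of the idea on a toy series (not the bike data), `resample('D')` groups an hourly DatetimeIndex into daily bins that can then be aggregated:

###Code
# Toy example: 48 hourly records = exactly 2 complete days
import pandas as pd
idx = pd.date_range('2011-01-01', periods=48, freq='H')
toy = pd.Series(1, index=idx)
print(toy.resample('D').count())
###Output
2011-01-01    24
2011-01-02    24
Freq: D, dtype: int64
###Markdown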
Note that if no data were missing we'd obtain the value 24 everywhere... ###Code train_copy = train.copy() train_copy['1']=1 train_copy[:'2011-01-20'].resample('D').count()['1'].plot(figsize=(20,5),style='o') ###Output _____no_output_____ ###Markdown Wow, that's lots of days with missing records. According to this, on the 18th of January there's only data for half the day, let's confirm: ###Code train["2011-01-18"] ###Output _____no_output_____ ###Markdown ![oh_snap](https://media.giphy.com/media/ljoB50Kfk3ubm/giphy.gif "snap") First solution that comes to mind: Imputation ###Code # Create an index with a complete list of values index_complete = pd.date_range('2011-01-01 00:00:00', '2012-12-31 23:00:00', freq='H') # Use this as index, leaving NaN where values were missing train = train.reindex(index_complete) train.tail() train.isna().sum() # replace all NaN's with the mean value of the column, aka imputing the mean train_imputed = train.fillna(train.mean()) train_imputed.isna().sum() ###Output _____no_output_____ ###Markdown Feature engineering Add a column with the week day ###Code train_imputed['weekday'] = train_imputed.index.weekday_name ###Output _____no_output_____ ###Markdown Data exploration Let's start by analysing a sample of the data: the total demand during the first 3 weeks of March 2011 ###Code train_imputed["2011-03-01":"2011-03-21"].total.plot(figsize=(20,4)) train_imputed.loc["2011-03-01":"2011-03-21",'weekday'].resample('D').first() ###Output _____no_output_____ ###Markdown Different behavior on weekdays and weekends:* 2 peaks on weekdays: morning and afternoon* 1 peak on weekends in the middle of the day Therefore we have weekly seasonality, i.e. the behavior (roughly) repeats every week Modelling Baseline: Seasonal Naive model The [Naive](https://en.wikipedia.org/wiki/Forecasting#Na%C3%AFve_approach) model is the simplest (non-trivial) model we can think of: it simply predicts the future will be equal to the past. Seasonal Naive looks for the same period of the last season, which in our case means looking whole weeks back; below we shift by 14 days (two full weeks), so that shifted values are available for the whole test window while preserving the weekday alignment. ###Code train_shifted = train_imputed.shift(periods=14, freq='D') prediction = train_shifted.loc[test.index,['total']].reset_index().rename(columns={'total':'count'}) prediction.head() prediction.to_csv('baseline.csv',index=False) ###Output _____no_output_____ ###Markdown Delair.ai SDK Tutorial In this notebook, you will be guided to:- Create a **project**, **mission** and **survey**- Create and upload **datasets** (image, rasters, point cloud, mesh, vector and file)- Create an **annotation** and attach a graph to it ⚠️ Make sure to create a copy of this notebook before modifying it. To do that: `File > Make a Copy...` Requirements - Delair Python SDK : `pip install python-delairstack` You may execute this notebook one cell after the other with `Shift + Enter` For more options, explore `Cell` in the top menu bar, or read a [tutorial](https://www.dataquest.io/blog/jupyter-notebook-tutorial/) Download sample files (images, mesh, raster...) An archive `Banana.zip` containing sample files will be downloaded (if not found in the current directory).
###Code import urllib.request import zipfile try: working_dir except NameError: working_dir = os.getcwd() %cd {working_dir} if not os.path.exists('Banana'): print('"Banana" folder not found') if not os.path.exists('Banana.zip'): print('"Banana.zip" not found') print('Downloading it...', end=' ') url = 'https://delair-transfer.s3-eu-west-1.amazonaws.com/sdks/sample-data/Banana.zip' filename, _ = urllib.request.urlretrieve(url, 'Banana.zip') print('OK') print('Extracting "Banana.zip"...', end=' ') with zipfile.ZipFile(filename, 'r') as zip_ref: zip_ref.extractall('.') print('OK') else: print('"Banana" folder found. No need to download it again.') sample_path = './Banana' %cd {sample_path} !ls . ###Output _____no_output_____ ###Markdown Create the project ###Code my_project = sdk.projects.create( name='SDK Tutorial', geometry={"coordinates": [[ [12.397750168471589,-6.0021893390703696], [12.39799683152841,-6.0021893390703696], [12.39799683152841,-6.001515644167185], [12.397750168471589,-6.001515644167185], [12.397750168471589,-6.0021893390703696]]], "type": "Polygon"}) print('We just created the project {!r} with id {!r}'.format( my_project.name, my_project.id)) ###Output _____no_output_____ ###Markdown List the available project properties ###Code dir(my_project) ###Output _____no_output_____ ###Markdown Create the survey (mission + flight) ###Code my_flight, my_mission = sdk.missions.create_survey( name='My survey', coordinates=[ [12.398218168471589,-6.002041339094632], [12.398506831528413,-6.002041339094632], [12.398506831528413,-6.0014106608166315], [12.398218168471589,-6.0014106608166315], [12.398218168471589,-6.002041339094632]], project=my_project.id, survey_date='2015-01-31T00:00:00.000Z', number_of_images=5) print('We just created the mission {!r} with id {!r}'.format( my_mission.name, my_mission.id)) ###Output _____no_output_____ ###Markdown Create the datasets Image 1 ###Code default_horizontal_srs_wkt = 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY\ ["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],\ UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]' image1_dataset = sdk.datasets.create_image_dataset( name='00001499', project=my_project.id, mission=my_mission.id, flight=my_flight.id, geometry={"type": "Point", "coordinates": [12.397907, -6.001604983333333]}, horizontal_srs_wkt=default_horizontal_srs_wkt, width=2448, height=2048 ) sdk.datasets.upload_file( dataset=image1_dataset.id, component='image', file_path='00001499.jpg') print('Image dataset {!r} uploaded successfully'.format(image1_dataset.name)) ###Output _____no_output_____ ###Markdown List the available dataset properties ###Code dir(image1_dataset) ###Output _____no_output_____ ###Markdown Same for the other images ###Code def create_and_upload_image_dataset(filename, coords): dataset_name = '.'.join(filename.split('.')[:-1]) # Name is filename without extension dataset = sdk.datasets.create_image_dataset( name=dataset_name, project=my_project.id, mission=my_mission.id, flight=my_flight.id, geometry={"type": "Point", "coordinates": coords}, horizontal_srs_wkt=default_horizontal_srs_wkt, width=2448, height=2048 ) sdk.datasets.upload_file( dataset=dataset.id, component='image', file_path=filename) return sdk.datasets.describe(dataset=dataset.id) image2_dataset = create_and_upload_image_dataset(filename='00001500.jpg', coords=[12.397885, -6.001729]) image3_dataset = 
create_and_upload_image_dataset(filename='00001501.jpg', coords=[12.397861, -6.001856]) image4_dataset = create_and_upload_image_dataset(filename='00001502.jpg', coords=[12.397843983333333, -6.001977]) image5_dataset = create_and_upload_image_dataset(filename='00001503.jpg', coords=[12.39784, -6.0021]) ###Output _____no_output_____ ###Markdown Complete the survey upload*Otherwise the upload progression will still be visible on the platform* ###Code sdk.missions.complete_survey_upload(flight=my_flight.id) ###Output _____no_output_____ ###Markdown Create the raster datasets Orthomosaic ###Code ortho_dataset = sdk.datasets.create_raster_dataset( name='Orthomosaic', project=my_project.id, mission=my_mission.id, dataset_format='geotiff', categories=['orthomosaic']) sdk.datasets.upload_file( dataset=ortho_dataset.id, component='raster', file_path='Orthomosaic.tif') ###Output _____no_output_____ ###Markdown DSM (Digital Surface Model) ###Code dsm_dataset = sdk.datasets.create_raster_dataset( name='DSM', project=my_project.id, mission=my_mission.id, dataset_format='geotiff', categories=['dsm']) sdk.datasets.upload_file( dataset=dsm_dataset.id, component='raster', file_path='DSM.tif') ###Output _____no_output_____ ###Markdown Create the point cloud dataset ###Code pcl_dataset = sdk.datasets.create_pcl_dataset( name='PointCloud', project=my_project.id, mission=my_mission.id, dataset_format='las') sdk.datasets.upload_file( dataset=pcl_dataset.id, component='pcl', file_path='Point Cloud.las') ###Output _____no_output_____ ###Markdown Create the mesh dataset ###Code mesh_dataset = sdk.datasets.create_mesh_dataset( name='Mesh', project=my_project.id, mission=my_mission.id, dataset_format='obj', texture_count=1) sdk.datasets.upload_file( dataset=mesh_dataset.id, component='mesh', file_path='Model.obj') sdk.datasets.upload_file( dataset=mesh_dataset.id, component='material', file_path='Model.mtl') sdk.datasets.upload_file( dataset=mesh_dataset.id, component='texture', file_path='Model_0.jpg') ###Output _____no_output_____ ###Markdown Create the vector dataset ###Code vector_dataset = sdk.datasets.create_vector_dataset( name='ContoursLines', project=my_project.id, mission=my_mission.id, dataset_format='geojson') sdk.datasets.upload_file( dataset=vector_dataset.id, component='vector', file_path='Contours.geojson') ###Output _____no_output_____ ###Markdown Create an annotation ###Code my_annotation = sdk.annotations.create( project=my_project.id, mission=my_mission.id, geometry={ "type": "Point", "coordinates": [12.397626065125216, -6.001589143208957] }, name='New annotation', type='2d', icon=sdk.annotations.Icons.CONVEYOR ) ###Output _____no_output_____ ###Markdown The end ###Code from IPython.display import Markdown, display from urllib.parse import urljoin def printmd(string): display(Markdown(string)) printmd('### 💥 You can have a look at your project on the platform') project_url = urljoin(platform_url, 'app/project/{}/view;leftPanel=layers'.format(my_project.id)) print(project_url) ###Output _____no_output_____ ###Markdown Noisy Labeling of Clinical NotesThis notebook allows you to assign "noisy" labels to clinical notes using heuristics known as labelling functions (LFs).Because this is a largely exploratory process, it may be useful to run the following cell, which allows you to modify the `NoisyLabeler` code without restarting the kernel. ###Code %load_ext autoreload %autoreload 2 ###Output _____no_output_____ ###Markdown Load the DataFirst, you must load some text to label. 
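For reference, the data loaded below is stored as JSON Lines: one JSON object per line, with a `"text"` and a `"label"` field. A single (made-up) record looks like `{"text": "Patient presents with chest pain and shortness of breath ...", "label": 1}`.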
You will want to have some source of "gold" labels to determine the accuracy of your labelling functions. Your labels should be `1`, indicating the presence of a disease, or `0`, indicating its absence. The following code assumes your data is in a [JSON Lines](https://jsonlines.org/) format, with the fields `"text"` and `"label"`, but you can load the data any way you like. ###Code gold_data_filepath = "../data/MIMIC-III-HEART-DISEASE/valid.jsonl" import json from pathlib import Path import numpy as np valid = [json.loads(line) for line in Path(gold_data_filepath).read_text().strip().split("\n")] texts = [example["text"] for example in valid] labels = np.asarray([example["label"] for example in valid]) ###Output _____no_output_____ ###Markdown (Noisy) Label the Data First, initialize the labeller. > Note: this can take a few minutes as it loads the language model and resources into memory. ###Code from deep_patient_cohorts import NoisyLabeler labeler = NoisyLabeler() ###Output _____no_output_____ ###Markdown Although optional, it makes sense to preprocess the text with spaCy only once. We can do this easily like so. > Note: this will take a few minutes per 1000 documents ###Code processed_texts = labeler.preprocess(texts) ###Output _____no_output_____ ###Markdown Then, label the data and check the accuracy of each labelling function ###Code noisy_labels = labeler.fit_lfs(processed_texts) _ = labeler.accuracy(noisy_labels=noisy_labels, gold_labels=labels) ###Output 1 LF 0: Accuracy 56%, Abstain rate 45% 2 LF 0: Accuracy 61%, Abstain rate 67% 3 neg 1 LF 0: Accuracy 61%, Abstain rate 0% 2 LF 0: Accuracy 65%, Abstain rate 22% 3 LF 0: Accuracy 66%, Abstain rate 35% ###Markdown Adding New LFs You may need to continually modify your LFs until they reach acceptable accuracy. The following example demonstrates how to add a new LF to the existing `labeler`, and evaluate its accuracy. ###Code from typing import List from deep_patient_cohorts import POSITIVE, NEGATIVE, ABSTAIN def heart_disease(self, texts: List[str]) -> List[int]: return [POSITIVE if "heart disease" in text.text.lower() else ABSTAIN for text in texts] labeler.add(heart_disease) noisy_labels = labeler.fit_lfs(processed_texts) labeler.accuracy(noisy_labels=noisy_labels, gold_labels=labels) ###Output _____no_output_____ ###Markdown Of course, you can also modify the `NoisyLabeler` code directly. Training a Label Model Using [FlyingSquid](https://github.com/HazyResearch/flyingsquid), we can train a probabilistic model to combine our LFs (assuming we have at least 3!) ###Code labeler.fit_lm(noisy_labels=noisy_labels, gold_labels=labels) ###Output _____no_output_____ ###Markdown Alternatively, you can fit both the labelling functions and the label model in one step with ```python labeler.fit(noisy_labels=noisy_labels, gold_labels=labels) ``` Removing LFs If it turns out our new LF performs poorly, we can remove it and try again ###Code del labeler.lfs[-1] ###Output _____no_output_____ ###Markdown Copyright 2019 Google LLC. SPDX-License-Identifier: Apache-2.0 **Notebook Version** - 1.0.0 **FEEDBACK REQUEST** thanks for checking out this notebook demoing our experimental API. After using this, we would greatly appreciate your feedback. You can send us feedback by:- Sending an issue request to the [datacommons issues page](https://github.com/google/datacommons/issues).
When creating an issue please mark the issue using the **api feedback** label! **DISCLAIMER** This notebook uses an experimental version of the Data Commons Python Client API. The semantics and availability of this API may be subject to change without prior notice! Getting Started with the Python Client API This tutorial introduces the Data Commons open knowledge graph and discusses how to programmatically access its data through the Python Client API. We will use the task of plotting employment data provided by the Bureau of Labor Statistics as an example to demonstrate various functionalities supported by the Python Client API. Before proceeding, we will need to install the Python Client API package. ###Code # Install datacommons !pip install --upgrade --quiet git+https://github.com/datacommonsorg/[email protected] ###Output Building wheel for datacommons (setup.py) ... done ###Markdown What is Data Commons? Data Commons is an open knowledge graph of structured data. It contains statements about real world objects such as* [Santa Clara County](https://browser.datacommons.org/kg?dcid=geoId/06085) is contained in the [State of California](https://browser.datacommons.org/kg?dcid=geoId/06)* The latitude of [Berkeley, CA](https://browser.datacommons.org/kg?dcid=geoId/0606000) is 37.8703* The [population of all persons in Maryland](https://browser.datacommons.org/kg?dcid=dc/o/6w1c9qk7hxjch) has a total count of 5,996,080. In the graph, [*entities*](https://en.wikipedia.org/wiki/Entity) like [Santa Clara County](https://browser.datacommons.org/kg?dcid=geoId/06085) are represented by nodes. Every node is uniquely identified by its **`dcid`** (Data Commons Identifier) and has a **`type`** corresponding to what the node represents. For example, [California](https://browser.datacommons.org/kg?dcid=geoId/06) is identified by the dcid `geoId/06` and is of type [State](https://browser.datacommons.org/kg?dcid=State). *Relations* between entities are represented by directed edges between these nodes. For example, the statement "Santa Clara County is contained in the State of California" is represented in the graph as two nodes: "Santa Clara County" and "California" with an edge labeled "[containedInPlace](https://schema.org/containedInPlace)" pointing from Santa Clara to California. This can be visualized by the following diagram. ![A portion of the Data Commons graph](https://storage.googleapis.com/notebook-resources/image-1.png) Here, we call the edge label, "containedInPlace", the *property label* (or *property* for short) associated with the above relation. We may also refer to "California" as the *property value* associated with Santa Clara County along property "containedInPlace". Notice that the direction is important! One can say that "Santa Clara County" is containedInPlace of "California", but "California" is certainly not contained in "Santa Clara County"! In general, how Data Commons models data is similar to the [Schema.org](https://schema.org) Data Model, as Data Commons leverages schema.org to provide a common set of types and properties. For a broader discussion on how data is modeled in Data Commons, one can refer to the documentation on the [Schema.org data model](https://schema.org/docs/datamodel.html). The Data Commons Browser Throughout this tutorial, we will be using the [Data Commons browser](https://browser.datacommons.org). The browser provides a human-readable way of navigating nodes within the knowledge graph.
This is particularly useful for discovering what parameters to pass into the Python Client API in order to correctly query for nodes in the graph. The Python Client API The Python Client API provides functions for users to programmatically access nodes in the Data Commons open knowledge graph. In this tutorial, we will be demonstrating how to use the API to access nodes in the Data Commons graph and store their information in a [Pandas](https://pandas.pydata.org) Data Frame. For a discussion on how to use the API generally, please refer to the [API Documentation](https://datacommons.readthedocs.io/en/dev2/). Let's begin by importing the Python Client and other helpful libraries. ###Code import datacommons as dc import pandas as pd import matplotlib.pyplot as plt from google.colab import drive import json # Pandas display options pd.options.display.max_rows = 10 pd.options.display.max_colwidth = 30 ###Output _____no_output_____ ###Markdown We will also need to provide an API key to the library. Please refer to [Getting Started](https://datacommons.readthedocs.io/en/latest/started.html) for more details about how to get an API key. Once you have a key, you can provide it to the library by calling [**`set_api_key`**](https://datacommons.readthedocs.io/en/latest/_autosummary/datacommons_utils/datacommons.utils.set_api_key.html#datacommons.utils.set_api_key). ###Code # Mount the Drive drive.mount('/content/drive', force_remount=True) # REPLACE THIS with the path to your key if copying this notebook. key_path = '/content/drive/My Drive/DataCommons/secret.json' # Read the key in and provide it to the Data Commons API with open(key_path, 'r') as f: secrets = json.load(f) dc.set_api_key(secrets['dc_api_key']) ###Output Mounted at /content/drive ###Markdown To keep the API key a secret, we store it in a JSON file in Google Drive. The file is pointed to by `key_path` and takes the form ```json { "dc_api_key": "YOUR-API-KEY" } ``` The above cell reads the JSON file, and loads the key into the library. If you want to make a copy of this notebook, be sure to create your own `secret.json` file and provide its path to `key_path`. Using the API To Plot Unemployment Data The [Bureau of Labor Statistics](https://www.bls.gov) provides a monthly count of the number of individuals who are employed at the State, County, and City level. This data is surfaced in the Data Commons; for example, one can find employment statistics associated with Santa Clara County [here](https://browser.datacommons.org/kg?dcid=dc/p/y6xm2mny8mck1&db=). Our task for this tutorial will be to extract employment data associated with counties in California from Data Commons using the Python Client API and view it in a Pandas DataFrame. We will focus on how functions such as- `get_property_values`- `get_places_in`- `get_populations`- `get_observations` operate when using a Pandas DataFrame, as well as how statistical observations are modeled within the Data Commons graph. To begin, we will initialize a Pandas Data Frame with the dcid associated with California: [geoId/06](https://browser.datacommons.org/kg?dcid=geoId/06). ###Code # Initialize the Data Frame data = pd.DataFrame({'state': ['geoId/06']}) # View the frame print(data) ###Output state 0 geoId/06 ###Markdown Accessing Properties of a Node For all properties, one can use **`get_property_values`** to get the associated values. We would like to verify that the dcid in our data frame indeed belongs to California, by getting the name of the node identified by "geoId/06".
`get_property_values` accepts the following parameters.- **`dcids`** - A list or Pandas Series of dcids to get property values for.- **`prop`** - The property to get property values for.- **`out`**`[=True]` - An optional flag that indicates the property is oriented away from the given nodes if true.- **`value_type`**`[=None]` - An optional parameter which filters property values by the given type. - **`limit`**`[=100]` - An optional parameter which limits the total number of property values returned *aggregated over all given nodes*. When the dcids are given as a [Pandas Series](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html), the returned list of property values is a Pandas Series where the i-th entry corresponds to property values associated with the i-th given dcid. Some properties, like [containedInPlace](https://browser.datacommons.org/kg?dcid=containedInPlace), may have many property values. Consequently, the cells of the returned series will always contain a list of property values. Let's take a look: ###Code # Call get_property_values. Because the return value is a Pandas Series, we can # assign it directly to a column in our frame. data['state_name'] = dc.get_property_values(data['state'], 'name') # Display the frame print(data) ###Output state state_name 0 geoId/06 [California] ###Markdown For each list in the returned column, we may need to expand each element of the list into its own row. If one uses Pandas version >= 0.25, then this can easily be achieved by calling [`series.explode`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.explode.html). Otherwise, we can call a convenience function `flatten_frame`. ###Code # Call flatten_frame, but don't store the results. print(dc.flatten_frame(data)) ###Output state state_name 0 geoId/06 California ###Markdown We won't store the results of calling this for now, in order to showcase a feature in the next section! Working with Places in Data Commons We would now like to get all Counties contained in California. This can be achieved by calling `get_property_values`, but because a large fraction of use cases will involve accessing geographical locations, the API also implements a function, **`get_places_in`**, to get places contained within a list of given places. `get_places_in` accepts the following parameters.- **`dcids`** - A list or Pandas Series of dcids to get contained places for.- **`place_type`** - The type of places contained in the given dcids to query for. When dcids is specified as a Pandas Series, the return value is a Series with the same format as that returned by `get_property_values`. Let's call `get_places_in` to get all counties within California. ###Code # Call get_places_in to get all counties in California. Here the type we use is # "County". See https://browser.datacommons.org/kg?dcid=County for examples of # other nodes in the graph with type "County". data['county'] = dc.get_places_in(data['state'], 'County') # Display the frame print(data) ###Output state state_name county 0 geoId/06 [California] [geoId/06001, geoId/06003,... ###Markdown Notice that both the `state_name` and `county` columns are columns whose cells contain lists. `flatten_frame` will automatically flatten all columns that have lists in their cells. Optionally, one can provide the argument `cols` to specify which columns to flatten.
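For instance, one could flatten only a single column (a hypothetical usage: the docs indicate the `cols` argument exists, but the exact signature shown here is an assumption):

###Code
# Hypothetical: flatten only the 'county' column (result not stored)
dc.flatten_frame(data, cols=['county'])
###Output
_____no_output_____
###Markdown Below, we simply flatten every list column at once: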
###Code # Call flatten frame data = dc.flatten_frame(data) # Display the frame print(data) ###Output state state_name county 0 geoId/06 California geoId/06001 1 geoId/06 California geoId/06003 2 geoId/06 California geoId/06005 3 geoId/06 California geoId/06007 4 geoId/06 California geoId/06009 .. ... ... ... 53 geoId/06 California geoId/06107 54 geoId/06 California geoId/06109 55 geoId/06 California geoId/06111 56 geoId/06 California geoId/06113 57 geoId/06 California geoId/06115 [58 rows x 3 columns] ###Markdown Let's now get the name of each county, like above. Notice we need to unroll the `county` column, as the input to `get_property_values` is a Series of dcids and not a Series of lists of dcids. ###Code # Get the names of all counties in California. data['county_name'] = dc.get_property_values(data['county'], 'name') data = dc.flatten_frame(data) # Display the frame data dc.get_places_in(['geoId/06'], 'County') ###Output _____no_output_____ ###Markdown Working with Statistical Observations Finally, we are ready to query for Employment statistics in Data Commons. Before proceeding, we briefly discuss how Data Commons models statistics. Statistical observations can be separated into two types of entities: the *statistical population* that the observation is describing, and the *observation* itself. A statistic such as > The number of employed individuals living in Santa Clara in January 2018 was 1,015,129 can thus be represented by two entities: one capturing the population of *all persons who are employed* and another capturing the observation 1,015,129 made in January 2018. Data Commons represents these two entity types as `StatisticalPopulation` and `Observation`. Let's now focus on each one separately. StatisticalPopulations Consider the [node's browser page](https://browser.datacommons.org/kg?dcid=dc/p/y6xm2mny8mck1&db=) representing the `StatisticalPopulation` of all employed persons living in Santa Clara County. ![The population of employed individuals in Santa Clara County](https://storage.googleapis.com/notebook-resources/image-2.png) At the top of the browser page, we are presented with a few properties of this node to consider:- The **`typeOf`** this node is `StatisticalPopulation` as expected.- The **`populationType`** is `Person` telling us that this is a statistical population describing persons.- The **`location`** of this node is [Santa Clara County](https://browser.datacommons.org/kg?dcid=geoId/06085) telling us that the persons in this statistical population live in Santa Clara County.- The **`dcid`** property tells us the dcid of this node.- The **`localCuratorLevelId`** tells us information about how this node was uploaded to the graph. For the purposes of this tutorial, we can ignore this field. There are two other properties defined: `numConstraints` and `employment`. These two properties help us describe entities contained in this statistical population. Properties used to describe the entities captured by a StatisticalPopulation are called *constraining properties*. In the example above, `employment=BLS_Employed` is a constraining property that tells us the Statistical Population captures employed persons. `numConstraints` denotes how many constraining properties there are, and in the example above, `numConstraints=1` tells us that `employment` is the only constraining property. To query for `StatisticalPopulation`s using the Data Commons Python Client API, we call **`get_populations`**.
The function accepts the following parameters.- **`dcids`** - A list or Pandas Series of dcids denoting the locations of populations to query for.- **`population_type`** - The `populationType` of the `StatisticalPopulation`.- **`constraining_properties`**`[={}]` - An optional map from constraining property to the value that the `StatisticalPopulation` should be constrained by. When a Pandas Series is provided to `dcids`, the return value is a Series with each cell populated by a single dcid and not a list. This is because the combination of `dcids`, `population_type`, and optionally `constraining_properties` always maps to a unique statistical population `dcid` if it exists. Let's call `get_populations` to get the populations of all employed individuals living in counties specified by the `county` column of our DataFrame. ###Code # First we create the constraining_properties map props = {'employment': 'BLS_Employed'} # We now call get_populations. data['employed_pop'] = dc.get_populations( data['county'], 'Person', constraining_properties=props) # Display the DataFrame. Notice that we don't need to flatten the frame. data ###Output _____no_output_____ ###Markdown Observations At the bottom of the page describing the `StatisticalPopulation` of all employed persons living in Santa Clara County is a list of observations made of that population. This is the [browser page](https://browser.datacommons.org/kg?dcid=dc/o/b5ylgwwh1d5s1) for the node representing the observed count of this population for January 2018. ![The number of employed individuals in Santa Clara County](https://storage.googleapis.com/notebook-resources/image-3.png) In this page, there are a few properties to consider:- The **`typeOf`** this node is `Observation` as expected.- The **`measuredProperty`** is `count` telling us that this observation measures the number of persons in this statistical population.- The **`measurementMethod`** is `BLSSeasonallyUnadjusted` to indicate how the observation was adjusted. We can click on that link to see what `BLSSeasonallyUnadjusted` means.- The **`observationDate`** is `2018-01`. This date is formatted by [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) standards.- The **`observedPeriod`** is `P1M` to denote that this observation was carried out over a period of 1 month.- The **`observedNode`**, which tells us which node is being observed by the observation. Here its value is `dc/p/y6xm2mny8mck1`, which is the dcid of the population of employed persons living in Santa Clara. The final property of interest is **`measuredValue`**. This property gives us the raw value recorded by the observation (1,015,129 in this case). The `measuredValue` is also a *statistic type* associated with the observation. For a single observation, there could be many statistics that describe it. One would be the raw value represented by `measuredValue`, while others include `meanValue`, `medianValue`, `marginOfError`, and more. These parameters are useful for deciding what values to provide to the API.
To query for `Observation`s using the Python Client API we call **`get_observations`**, which accepts the following parameters.- **`dcids`** - A list or Pandas Series of dcids of nodes that are observed by observations being queried for.- **`measured_property`** - The `measuredProperty` of the observation.- **`stats_type`** - The statistical type of the observation.- **`observation_date`** - The `observationDate` of the observation.- **`observation_period`**`[=None]` - An optional parameter specifying the `observationPeriod` of the observation.- **`measurement_method`**`[=None]` - An optional parameter specifying the `measurementMethod` of the observation. One thing to note is that not all `Observation`s will have a property value for `observationPeriod` and `measurementMethod`. For example, [the number of housing units with 3 bedrooms in Michigan](https://browser.datacommons.org/kg?dcid=dc/o/6x1emqkzvvjq2&db=) does not have `observationPeriod` while [number of Other Textile Mills](https://browser.datacommons.org/kg?dcid=dc/o/mc6yc1v1004y5&db=) does not have `measurementMethod`. These parameters are thus optional arguments to the `get_observations` API function. When a Pandas Series is provided to `dcids`, the return value is again a Series with each cell populated by the statistic and not a list. The combination of the above parameters always maps to a unique observation if it exists. If the statistic does not exist for the given parameters, then the cell will contain `NaN`. Let's get the `measuredValue` of observations of the column of populations we just queried for. ###Code # Call get_observations. We are passing into the parameters the values that we # saw in the link above. data['employed_count'] = dc.get_observations( data['employed_pop'], 'count', 'measuredValue', '2018-12', observation_period='P1M', measurement_method='BLSSeasonallyUnadjusted') # Display the DataFrame data ###Output _____no_output_____ ###Markdown Wrapping Things Up Now that we have our DataFrame populated, we can use the Pandas library to analyze the data. Let's plot the counts in a bar chart to see what we find. ###Code # Sort by employment count final_data = data.copy() final_data = final_data.sort_values('employed_count', ascending=False) # Plot the bar chart final_data.plot.bar(x='county_name', y='employed_count', rot=90, figsize=(12, 8)) plt.show() ###Output _____no_output_____ ###Markdown MovieLens Fairness Gym Environment ###Code # we start by importing the environment module from pydeeprecsys.movielens_fairness_env import MovieLensFairness import gym # then we can create a gym environment with a particular slate size env = gym.make('MovieLensFairness-v0', slate_size=3) type(env) ###Output _____no_output_____ ###Markdown Now we can understand what states, actions, and rewards are in this environment. Let's start with state ###Code env.observation_space ###Output _____no_output_____ ###Markdown So we have 25 variables, in the (0,1) range:- 4 variables representing scaled user features (sex, age, occupation, zip code)- 19 variables that are one-hot encodings for the category of the latest reviewed movie. Categories include Action, Crime, Thriller, and so on- 1 variable for the user's 5 star rating.- 1 variable for the movie's violence score. Let's see what they look like: ###Code env.reset() ###Output _____no_output_____ ###Markdown As you can see, the first 4 variables are set. Under the hood, the environment sampled a user from the user pool, and prepared it.
The remaining variables are `0` because no movie has been recommended yet. Let's see how recommendations are made: ###Code env.action_space ###Output _____no_output_____ ###Markdown Since we set our `slate_size` to `3`, this means that at each step the agent must recommend `3` movies. Recommendations are made based on the (discrete) movie identifier, which is why the action space is of type `MultiDiscrete`. `3883` is the number of available movies. Let's make a recommendation: ###Code random_slate = env.action_space.sample() random_slate state, reward, done, info = env.step(random_slate) ###Output _____no_output_____ ###Markdown After receiving a recommendation, the user makes a choice, and reviews the movie. Thanks to `Recsim` and `MLFairnessGym` we can:- affect the behavior of users after being exposed to movies, by encoding addiction/boredom dynamics into the user embedding- encode the violence score into the reward, so that recommending too many violent movies brings a negative reward Let's see the new state: ###Code state ###Output _____no_output_____ ###Markdown We can also inspect the reward, which is in range (0,1): ###Code reward ###Output _____no_output_____ ###Markdown And we can check if the episode is done. Currently, episodes are finished when the simulated user has rated `50` movies. ###Code done ###Output _____no_output_____ ###Markdown Training a Random Agent The `Manager` class facilitates a lot of things like training, hyperparameter optimization, and so on. ###Code from pydeeprecsys.rl.manager import MovieLensFairnessManager from pydeeprecsys.rl.agents.agent import RandomAgent from pydeeprecsys.rl.learning_statistics import LearningStatistics manager = MovieLensFairnessManager(slate_size=1) manager.print_overview() agent = RandomAgent(action_space=manager.env.action_space) stats = LearningStatistics() manager.train(agent, stats, max_episodes=200, should_print=True) stats.plot_learning_stats() ###Output _____no_output_____ ###Markdown Install BeautifulSauce ###Code !pip install BeautifulSauce --upgrade from BeautifulSauce import Sauce from BeautifulSauce.featurizer import Featurizer import re ###Output _____no_output_____ ###Markdown Utilities BeautifulSauce comes with a few handy utilities and functionalities that are built on top of the BeautifulSoup object. Reading from file BeautifulSauce has a built-in class method, `from_file(filepath)`, to initialize soup directly from a file. Here, we show how that works after creating and saving an example HTML file.
###Code # First, we'll make a file and save it to the Colab server html_str = """ <!DOCTYPE html> <html> <head> <meta charset="utf-8"></meta> </head> <body> <div style="font-weight: bold;"> <div> bold text here <p>bold text</p> <div style="font-weight: normal;"> normal text <p>normal text</p> </div> </div> </div> <div> <div> normal text </div> <div> <b>bold text</b> </div> </div> </body> </html> """ with open("example.html","w") as w: w.write(html_str) soup = Sauce.from_file('example.html') print(soup) ###Output <!DOCTYPE html> <html> <head> <meta charset="utf-8"/> </head> <body> <div style="font-weight: bold;"> <div> bold text here <p> bold text </p> <div style="font-weight: normal;"> normal text <p> normal text </p> </div> </div> </div> <div> <div> normal text </div> <div> <b> bold text </b> </div> </div> </body> </html> ###Markdown Reading from URLAdditionally, instead of having to write the boilerplate necessary for initializing Soup objects from a hosted URL, you can just call the `from_url(url)` function of the Sauce class. ###Code url = "https://en.wikipedia.org/wiki/Grace_Hopper" soup = Sauce.from_url(url) tag = soup.find(id='firstHeading') print(tag.text) ###Output Grace Hopper ###Markdown Indexing HTML treesBeautifulSauce also adds a unique attribute to each tag: "idx". This attribute denotes the position of the tag within the HTML tree. Take a look at the example below... ###Code soup = Sauce.from_file('example.html') print(soup) for tag in soup.find_all(): print("Tag Name: {:4s} | Tag idx: {}".format(tag.name, tag.idx)) ###Output <!DOCTYPE html> <html> <head> <meta charset="utf-8"/> </head> <body> <div style="font-weight: bold;"> <div> bold text here <p> bold text </p> <div style="font-weight: normal;"> normal text <p> normal text </p> </div> </div> </div> <div> <div> normal text </div> <div> <b> bold text </b> </div> </div> </body> </html> Tag Name: html | Tag idx: [0] Tag Name: head | Tag idx: [0, 0] Tag Name: meta | Tag idx: [0, 0, 0] Tag Name: body | Tag idx: [0, 1] Tag Name: div | Tag idx: [0, 1, 0] Tag Name: div | Tag idx: [0, 1, 0, 0] Tag Name: p | Tag idx: [0, 1, 0, 0, 0] Tag Name: div | Tag idx: [0, 1, 0, 0, 1] Tag Name: p | Tag idx: [0, 1, 0, 0, 1, 0] Tag Name: div | Tag idx: [0, 1, 1] Tag Name: div | Tag idx: [0, 1, 1, 0] Tag Name: div | Tag idx: [0, 1, 1, 1] Tag Name: b | Tag idx: [0, 1, 1, 1, 0] ###Markdown ---**You can get tags by their `.idx` attribute by calling `sauce.get_from_idx(indices)`** ###Code soup.get_from_idx([0,1,0,0,0]).name ###Output _____no_output_____ ###Markdown Featurization of HTML DocumentsThe main functionality of BeautifulSauce is to help you featurize HTML documents. What does this mean? Well, basically, you are able to add attributes to `Tag` elements via the `BeautifulSauce.features.Featurizer`. These attributes can be: - Categorical - Downstream, you can automatically dummy code these in a dataframe (if you want to). - Numerical - Downstream, you can standardize these on scale of 0.0-1.0 (if you want). - TextAny feature can be added via one of the 3 built in decorators inside the Featurizer. - @ftrs.add_categorical_feature - @ftrs.add_numerical_feature - @ftrs.add_text_feature Let's walk through an example... 
###Code # Initialize a Featurizer ftrs = Featurizer() # Add a categorical feature @ftrs.add_categorical_feature("tag_name") def f_tag_name(tag): return tag.name # Add numerical feature @ftrs.add_numerical_feature('char_cnt') def f_char_cnt(tag): if tag.name in ['head', 'meta', 'script']: return 0 texts = list(tag.find_all(text=True, recursive=False)) if len(texts) < 1: return 0 texts = " ".join(texts).strip() texts = re.sub("\n", " ", texts) return len(texts) # Add text feature @ftrs.add_text_feature('text') def f_text(tag): if tag.name in ['head', 'meta', 'script']: return "" texts = list(tag.find_all(text=True, recursive=False)) if len(texts) < 1: return "" texts = " ".join(texts).strip() texts = re.sub("\n", " ", texts) return texts # Read in soup from file soup = Sauce.from_file('example.html') # Apply your featurizer to this soup object ftrs.featurize(soup) # Take a look at what the .features attribute looks like for tag in soup.find_all(): print(tag.features) ###Output {'categorical': {'tag_name': 'html'}, 'numerical': {'char_cnt': 0}, 'text': {'text': ''}} {'categorical': {'tag_name': 'head'}, 'numerical': {'char_cnt': 0}, 'text': {'text': ''}} {'categorical': {'tag_name': 'meta'}, 'numerical': {'char_cnt': 0}, 'text': {'text': ''}} {'categorical': {'tag_name': 'body'}, 'numerical': {'char_cnt': 0}, 'text': {'text': ''}} {'categorical': {'tag_name': 'div'}, 'numerical': {'char_cnt': 0}, 'text': {'text': ''}} {'categorical': {'tag_name': 'div'}, 'numerical': {'char_cnt': 14}, 'text': {'text': 'bold text here'}} {'categorical': {'tag_name': 'p'}, 'numerical': {'char_cnt': 9}, 'text': {'text': 'bold text'}} {'categorical': {'tag_name': 'div'}, 'numerical': {'char_cnt': 11}, 'text': {'text': 'normal text'}} {'categorical': {'tag_name': 'p'}, 'numerical': {'char_cnt': 11}, 'text': {'text': 'normal text'}} {'categorical': {'tag_name': 'div'}, 'numerical': {'char_cnt': 0}, 'text': {'text': ''}} {'categorical': {'tag_name': 'div'}, 'numerical': {'char_cnt': 11}, 'text': {'text': 'normal text'}} {'categorical': {'tag_name': 'div'}, 'numerical': {'char_cnt': 0}, 'text': {'text': ''}} {'categorical': {'tag_name': 'b'}, 'numerical': {'char_cnt': 9}, 'text': {'text': 'bold text'}} ###Markdown --- to_dataframe()Now, it's pretty clear that the `soup.features` attribute, while helpful, would be potentially annoying to interface with when using the soup object itself. The true purpose of this attribute is to aid in outputting these features to a Pandas DataFrame. Let's do that now... ###Code df = ftrs.to_dataframe(soup) df ###Output _____no_output_____ ###Markdown Normalization + Dummy CodingAs mentioned previously, there is a reason why the decorator functions are separated out into categorical, numerical, and text based features. It is so we can both dummy code the categorical features, and standardize the numerical features if we choose to do so. This happens in the `ftrs.to_dataframe(soup)` function. Take a look... ###Code df = ftrs.to_dataframe(soup, normalize=True) df ###Output _____no_output_____ ###Markdown Getting started with OGGM: a real case study, step by step The OGGM workflow is best explained with an example. In the following, we will show how to apply the standard [OGGM workflow](http://docs.oggm.org/en/stable/introduction.html) to a list of glaciers. This example is meant to guide you through a first-time setup step-by-step. If you prefer not to install OGGM on your computer, you can always run this notebook in [OGGM-Edu](https://edu.oggm.org) instead! 
Set-up Input data folders **If you are using your own computer**: before you start, make sure that you have set up the [input data configuration file](https://docs.oggm.org/en/stable/input-data.html) to your liking. In the course of this tutorial, we will need to download data for each glacier (a couple of MB at most, depending on the chosen glaciers), so make sure you have an internet connection. cfg.initialize() and cfg.PARAMS An OGGM simulation script will always start with the following commands: ###Code from oggm import cfg, utils cfg.initialize(logging_level='WARNING') ###Output _____no_output_____ ###Markdown A call to [cfg.initialize()](https://docs.oggm.org/en/latest/generated/oggm.cfg.initialize.html) will read the default parameter file (or any user-provided file) and make the parameters available to all other OGGM tools via the `cfg.PARAMS` dictionary. Here are some examples of these parameters: ###Code cfg.PARAMS['prcp_scaling_factor'], cfg.PARAMS['ice_density'], cfg.PARAMS['continue_on_error'] ###Output _____no_output_____ ###Markdown See [here](https://github.com/OGGM/oggm/blob/master/oggm/params.cfg) for the default parameter file and a description of each parameter's role and default value. ###Code # You can try with or without multiprocessing: with two glaciers, OGGM could run on two processors cfg.PARAMS['use_multiprocessing'] = True ###Output _____no_output_____ ###Markdown Workflow In this section, we will explain the fundamental concepts of the OGGM workflow:- Working directories- Glacier directories- Tasks ###Code from oggm import workflow ###Output _____no_output_____ ###Markdown Working directory Each OGGM run needs a **single folder** in which to store the results of the computations for all glaciers. This is called a "working directory" and needs to be specified before each run. Here we create a temporary folder for you: ###Code cfg.PATHS['working_dir'] = utils.gettempdir(dirname='OGGM-GettingStarted', reset=True) cfg.PATHS['working_dir'] ###Output _____no_output_____ ###Markdown We use a temporary directory for this example, but in practice you will set this working directory yourself (for example: `/home/john/OGGM_output`). The size of this directory will depend on how many glaciers you'll simulate! **This working directory is meant to be persistent**, i.e. you can stop your processing workflow after any task, and restart from an existing working directory at a later stage. Define the glaciers for the run ###Code rgi_ids = ['RGI60-11.01328', 'RGI60-11.00897'] ###Output _____no_output_____ ###Markdown You can provide any number of glacier identifiers to OGGM. In this case, we chose: - `RGI60-11.01328`: [Unteraar Glacier](https://en.wikipedia.org/wiki/Unteraargletscher) in the Swiss Alps- `RGI60-11.00897`: [Hintereisferner](http://acinn.uibk.ac.at/research/ice-and-climate/projects/hintereisferner) in the Austrian Alps. Here is a list of other glaciers you might want to try out:- `RGI60-18.02342`: Tasman Glacier in New Zealand- `RGI60-11.00787`: [Kesselwandferner](https://de.wikipedia.org/wiki/Kesselwandferner) in the Austrian Alps- ... or any other glacier identifier! You can find other glacier identifiers by exploring the [GLIMS viewer](https://www.glims.org/maps/glims). For an operational run on an RGI region, you might want to download the [Randolph Glacier Inventory](https://www.glims.org/RGI/) dataset instead, and start a run from it. This case is covered in the [working with the RGI](working_with_rgi.ipynb) tutorial.
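As a sketch of what that alternative looks like (the helper names follow the OGGM API, but treat the details as an assumption and see the linked tutorial for the real workflow):

###Code
# Sketch: fetch the RGI shapefile for a whole region (Central Europe, '11')
# and build a list of IDs from it instead of writing them by hand.
# Don't run this in this tutorial: a full region contains thousands of glaciers!
import geopandas as gpd
rgi_path = utils.get_rgi_region_file('11', version='62')
rgidf = gpd.read_file(rgi_path)
rgi_ids_region = rgidf['RGIId'].tolist()
###Output
_____no_output_____
###Markdown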
Glacier directories The OGGM workflow is organized as a list of **tasks** that have to be applied to a list of glaciers. The vast majority of tasks are called **entity tasks**: they are standalone operations to be realized on one single glacier entity, independently from the others. These tasks are executed sequentially (one after another): they often need input generated by the previous task(s): for example, the climate calibration needs the glacier flowlines, which can only be computed after the topography data has been processed, and so on. To handle this situation, OGGM uses a workflow based on data persistence on disk: instead of passing data as python variables from one task to another, each task will read the data from disk and then write the computation results back to the disk, making these new data available for the next task in the queue. These glacier-specific data are located in [glacier directories](https://docs.oggm.org/en/latest/glacierdir.html#glacier-directories). In the model, these directories are initialized with the following command (this can take a little while on the first call, as OGGM needs to download some data): ###Code # Where to fetch the pre-processed directories gdirs = workflow.init_glacier_directories(rgi_ids, from_prepro_level=3, prepro_border=80) ###Output _____no_output_____ ###Markdown - the keyword `from_prepro_level` indicates that we will start from [pre-processed directories](https://docs.oggm.org/en/latest/input-data.html#pre-processed-directories), i.e. data that are already pre-processed for the model users. In many cases you will want to start from level 3, 4 or 5. Here we start from level 3 and rerun some of the processing in order to demonstrate the OGGM workflow.- the `prepro_border` keyword indicates the number of DEM grid points which we'd like to add to each side of the glacier for the local map: the larger the glacier will grow, the larger the border parameter should be. The available pre-processed border values are: **10, 80, 160** (depending on the model set-ups there might be more or fewer options). These are the fixed map sizes we prepared for you - any other map size will require a full processing (see the [alternative DEM example](dem_sources.ipynb) for a tutorial). The [init_glacier_directories](https://docs.oggm.org/en/latest/generated/oggm.workflow.init_glacier_directories.html#oggm.workflow.init_glacier_directories) task will always be the very first task to call for all your OGGM experiments. Let's see what it gives us back: ###Code type(gdirs), type(gdirs[0]) ###Output _____no_output_____ ###Markdown `gdirs` is a list of [GlacierDirectory](https://docs.oggm.org/en/latest/generated/oggm.GlacierDirectory.html#oggm.GlacierDirectory) objects (one for each glacier). **Glacier directories are used by OGGM as "file and attribute manager"** for single glaciers. For example, the model now knows where to find the topography data file for this glacier: ###Code gdir = gdirs[0] # take Unteraar glacier print('Path to the DEM:', gdir.get_filepath('dem')) ###Output _____no_output_____ ###Markdown And we can also access some attributes of this glacier: ###Code gdir gdir.rgi_date # date at which the outlines are valid ###Output _____no_output_____ ###Markdown The advantage of this Glacier Directory data model is that it greatly simplifies the data transfer between tasks. **The single mandatory argument of all entity tasks will always be a glacier directory**.
With the glacier directory, each task will find the input it needs: for example, both the glacier's topography and outlines are needed for the next plotting function, and both are available via the `gdir` argument: ###Code from oggm import graphics graphics.plot_domain(gdir, figsize=(8, 7)) ###Output _____no_output_____ ###Markdown Tasks There are two different types of "[tasks](http://docs.oggm.org/en/latest/api.html#entity-tasks)": **Entity Tasks**: Standalone operations to be realized on one single glacier entity, independently from the others. The majority of OGGM tasks are entity tasks. They are parallelisable: the same task can run on several glaciers in parallel. **Global Tasks**: Tasks that need to work on several glacier entities at the same time. Model parameter calibration or the compilation of several glaciers' output are examples of global tasks. OGGM implements a simple mechanism to run a specific task on a list of `GlacierDirectory` objects: ###Code from oggm import tasks # run the glacier_masks task on all gdirs workflow.execute_entity_task(tasks.glacier_masks, gdirs); ###Output _____no_output_____ ###Markdown The task we just applied to our list of glaciers is [glacier_masks](http://docs.oggm.org/en/latest/generated/oggm.tasks.glacier_masks.html#oggm.tasks.glacier_masks). It wrote a new file in our glacier directory, providing raster masks of the glacier (among other things): ###Code print('Path to the masks:', gdir.get_filepath('gridded_data')) ###Output _____no_output_____ ###Markdown It is also possible to apply several tasks sequentially (i.e. one after another) on our glacier list: ###Code list_tasks = [ tasks.compute_centerlines, tasks.initialize_flowlines, tasks.compute_downstream_line, ] for task in list_tasks: # The order matters! workflow.execute_entity_task(task, gdirs) ###Output _____no_output_____ ###Markdown The function `execute_entity_task` can run a task on different glaciers at the same time, if the `use_multiprocessing` option is set to `True` in the configuration file. Among other things, we computed the glacier flowlines and the glacier's downstream line. We can now plot them: ###Code graphics.plot_centerlines(gdir, figsize=(8, 7), use_flowlines=True, add_downstream=True) ###Output _____no_output_____ ###Markdown As a result, the glacier directories now store many more files. If you are interested, you can have a look: ###Code import os print(os.listdir(gdir.dir)) ###Output _____no_output_____ ###Markdown For a short explanation of what these files are, see the [glacier directory documentation](https://docs.oggm.org/en/latest/api.html#cfg-basenames). In practice, however, you will only rarely need to access these files yourself. Other preprocessing tasks Let's continue with the other preprocessing tasks: ###Code list_tasks = [ tasks.catchment_area, tasks.catchment_width_geom, tasks.catchment_width_correction, tasks.compute_downstream_bedshape ] for task in list_tasks: # The order matters! workflow.execute_entity_task(task, gdirs) ###Output _____no_output_____ ###Markdown We just computed the catchment areas of each flowline (the colors are arbitrary): ###Code graphics.plot_catchment_areas(gdir, figsize=(8, 7)) ###Output _____no_output_____ ###Markdown Each flowline now knows what area will contribute to its surface mass-balance and ice flow.
Accordingly, it is possible to compute each glacier cross-section's width, and correct it so that the total glacier area and elevation distribution is conserved: ###Code
graphics.plot_catchment_width(gdir, corrected=True, figsize=(8, 7))
###Output
_____no_output_____
###Markdown
Climate tasks The glacier directories we downloaded already contain the climate timeseries for each glacier (`from_prepro_level=3`). Let's have a look at them: ###Code
import xarray as xr
fpath = gdir.get_filepath('climate_historical')
ds = xr.open_dataset(fpath)
# Data is in hydrological years
# -> let's just ignore the first and last calendar years
ds.temp.resample(time='AS').mean()[1:-1].plot();
###Output
_____no_output_____
###Markdown
This climate data is called the "baseline climate" for this glacier. It will be used for the mass-balance model calibration, and at the end of this tutorial also to generate the random climate to drive a simulation. When running OGGM with GCM data, the GCM timeseries will be computed as anomalies to this baseline timeseries, hence the name. Here we are using CRU, but OGGM-Shop also allows using ERA5 and CERA as baselines. Now, let's calibrate the mass-balance model for this glacier. The calibration procedure of OGGM is ... original, but is also quite powerful. Read the [doc page](https://docs.oggm.org/en/latest/mass-balance.html) or the [GMD paper](https://www.geosci-model-dev-discuss.net/gmd-2018-9/) for more details, and you can also follow the [mass-balance calibration tutorial](massbalance_calibration.ipynb) explaining some of the model internals. The default calibration process is automated: ###Code
# Fetch the reference t* list and associated model parameters
params_url = 'https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params/RGIV62/CRU/centerlines/qc3/pcp2.5'
workflow.download_ref_tstars(base_url=params_url)

# Now calibrate
workflow.execute_entity_task(tasks.local_t_star, gdirs);
workflow.execute_entity_task(tasks.mu_star_calibration, gdirs);
###Output
_____no_output_____
###Markdown
**¡Important!** The calibration of the mass-balance model is automated only for certain parameter combinations of the model - any change in the mass-balance model settings (e.g. the melt threshold, the precipitation correction factor, etc.) will require a re-calibration of the model (see the [mass-balance calibration tutorial](massbalance_calibration.ipynb) for an introduction to this topic). From there, OGGM can now compute the mass-balance for these glaciers. 
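But first, it can be instructive to look at what the calibration just wrote to disk. A minimal, hedged sketch (assuming the standard `local_mustar` diagnostics file produced by `local_t_star` and `mu_star_calibration`): ###Code
# read the calibrated t* and temperature sensitivity (mu*) for each glacier
for gd in gdirs:
    d = gd.read_json('local_mustar')
    print(gd.rgi_id, 't*:', d['t_star'], 'mu*:', d['mu_star_glacierwide'])
###Output
_____no_output_____
###Markdown
With the calibrated parameters in place, computing the mass-balance is straightforward.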
For example: ###Code
from oggm.core.massbalance import MultipleFlowlineMassBalance
gdir_hef = gdirs[1]
mbmod = MultipleFlowlineMassBalance(gdir_hef, use_inversion_flowlines=True)
import numpy as np
import matplotlib.pyplot as plt
years = np.arange(1902, 2017)
mb_ts = mbmod.get_specific_mb(year=years)
plt.plot(years, mb_ts); plt.ylabel('SMB (mm yr$^{-1}$)');
###Output
_____no_output_____
###Markdown
For the Hintereisferner (not for Unteraar, where no observational data are available), we can also compare our computed mass-balance to the measured one: ###Code
mbdf = gdir_hef.get_ref_mb_data()
mbdf['OGGM'] = mbmod.get_specific_mb(year=mbdf.index)
mbdf[['ANNUAL_BALANCE', 'OGGM']].plot(); plt.ylabel('SMB (mm yr$^{-1}$)');
###Output
_____no_output_____
###Markdown
This graphic is interesting because it shows an effect often observed when comparing the computed mass balance to the observed one: since (in this case) the OGGM geometry is fixed with time, the modelled specific mass-balance series are likely to have a stronger trend than the observed ones. To assess the results of the OGGM mass-balance model for all WGMS glaciers worldwide, visit the [score summary](https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params/RGIV62/CRU/centerlines/qc3/pcp2.5/_scores/) for these particular model settings. Computing the ice thickness ("inversion") With the computed mass-balance and the flowlines, OGGM can now compute the ice thickness, based on the principles of [mass conservation and ice dynamics](http://docs.oggm.org/en/latest/inversion.html). ###Code
list_tasks = [
    tasks.prepare_for_inversion,  # This is a preprocessing task
    tasks.mass_conservation_inversion,  # This does the actual job
    tasks.filter_inversion_output  # This smoothes the thicknesses at the tongue a little
]
for task in list_tasks:
    workflow.execute_entity_task(task, gdirs)
###Output
_____no_output_____
###Markdown
The ice thickness is computed for all sections along the flowline, and can be displayed with the help of OGGM's graphics module: ###Code
graphics.plot_inversion(gdir, figsize=(8, 7))
###Output
_____no_output_____
###Markdown
The inversion is realized with the default parameter settings: it must be noted that the model is sensitive to the choice of some of them, most notably the creep parameter A: ###Code
cfg.PARAMS['inversion_glen_a']
a_factor = np.linspace(0.1, 10., 100)
volume = []
for f in a_factor:
    # Recompute the volume without overwriting the previous computations
    v = tasks.mass_conservation_inversion(gdir, glen_a=f * cfg.PARAMS['inversion_glen_a'], write=False)
    volume.append(v * 1e-9)
plt.plot(a_factor, volume); plt.title('Unteraar total volume');
plt.ylabel('Volume (km$^3$)'); plt.xlabel('Glen A factor (1 = default)');
###Output
_____no_output_____
###Markdown
There is no simple way to find the best A for each individual glacier. It can easily vary by a factor of 10 (or more) from one glacier to another. At the global scale, the "best" A is close to the default value (possibly between 1 and 1.5 times larger). The default parameter is a good choice as a first step, but be aware that reconstructions based on this default parameter might be very uncertain! See our [ice thickness inversion tutorial](inversion.ipynb) for a more in-depth discussion. Simulations For most applications, this is where the fun starts! With climate data and an estimate of the ice thickness, we can now start transient simulations. 
For this tutorial, we will show how to realize idealized experiments based on the baseline climate only, but it is also possible to drive OGGM with real GCM data. ###Code
# Convert the flowlines to a "glacier" for the ice dynamics module
workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs);
###Output
_____no_output_____
###Markdown
Let's start a run driven by the climate of the last 31 years, shuffled randomly for 200 years. This can be seen as a "commitment" simulation, i.e. how much glaciers will change even without further climate change: ###Code
workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200,
                             y0=2000, output_filesuffix='_2000');
###Output
_____no_output_____
###Markdown
The output of this simulation is stored in two separate files: a diagnostic file (which contains time series variables such as length, volume, ELA, etc.) and a full model output file, which is larger but allows reproducing the full glacier geometry changes during the run. In practice, the diagnostic files are often compiled for the entire list of glaciers: ###Code
ds2000 = utils.compile_run_output(gdirs, filesuffix='_2000')
###Output
_____no_output_____
###Markdown
This dataset is also stored on disk (in the working directory) as a NetCDF file for later use. Here we can access it directly: ###Code
ds2000
###Output
_____no_output_____
###Markdown
We opened the file with [xarray](http://xarray.pydata.org), a very useful data analysis library based on [pandas](http://pandas.pydata.org/). For example, we can plot the volume and length evolution of both glaciers with time: ###Code
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 4))
ds2000.volume.plot.line(ax=ax1, hue='rgi_id');
ds2000.length.plot.line(ax=ax2, hue='rgi_id');
###Output
_____no_output_____
###Markdown
The full model output files can be used for plots: ###Code
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 6))
graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=0, ax=ax1, vmax=350)
graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=50, ax=ax2, vmax=350)
graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=150, ax=ax3, vmax=350)
plt.tight_layout();
###Output
_____no_output_____
###Markdown
Sensitivity to temperature Now repeat our simulations with a +0.5°C and -0.5°C temperature bias, which for a glacier is quite a lot! 
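The `temperature_bias` keyword simply adds a constant offset to the baseline temperature series before the mass-balance is computed. A hypothetical back-of-the-envelope illustration of the magnitude involved (not an OGGM call, just reusing the climate file opened earlier): ###Code
import xarray as xr
with xr.open_dataset(gdir.get_filepath('climate_historical')) as ds_clim:
    t_mean = float(ds_clim.temp.mean())
# a constant shift of the whole series, as applied by temperature_bias
print('baseline mean T: {:.2f} C -> biased: {:.2f} C / {:.2f} C'.format(t_mean, t_mean + 0.5, t_mean - 0.5))
###Output
_____no_output_____
###Markdown
The two runs below apply exactly such biases: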
###Code
workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200,
                             temperature_bias=0.5,
                             y0=2000, output_filesuffix='_p05');
workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200,
                             temperature_bias=-0.5,
                             y0=2000, output_filesuffix='_m05');
dsp = utils.compile_run_output(gdirs, filesuffix='_p05')
dsm = utils.compile_run_output(gdirs, filesuffix='_m05')
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 4))
rgi_id = 'RGI60-11.01328'
ds2000.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='Commitment');
ds2000.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id');
ds2000.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id');
dsp.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='$+$ 0.5°C');
dsp.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id');
dsp.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id');
dsm.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='$-$ 0.5°C');
dsm.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id');
dsm.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id');
ax1.legend();
###Output
_____no_output_____
###Markdown
JupyterDash The `jupyter-dash` package makes it easy to develop Plotly Dash apps from the Jupyter Notebook and JupyterLab. Just replace the standard `dash.Dash` class with the `jupyter_dash.JupyterDash` subclass. ###Code
from jupyter_dash import JupyterDash
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
###Output
_____no_output_____
###Markdown
When running in JupyterHub or Binder, call the `infer_jupyter_proxy_config` function to detect the proxy configuration. ###Code
JupyterDash.infer_jupyter_proxy_config()
###Output
_____no_output_____
###Markdown
Load and preprocess data ###Code
df = pd.read_csv('https://plotly.github.io/datasets/country_indicators.csv')
available_indicators = df['Indicator Name'].unique()
###Output
_____no_output_____
###Markdown
Construct the app and callbacks ###Code
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = JupyterDash(__name__, external_stylesheets=external_stylesheets)

# Create server variable with Flask server object for use with gunicorn
server = app.server

app.layout = html.Div([
    html.Div([
        html.Div([
            dcc.Dropdown(
                id='crossfilter-xaxis-column',
                options=[{'label': i, 'value': i} for i in available_indicators],
                value='Fertility rate, total (births per woman)'
            ),
            dcc.RadioItems(
                id='crossfilter-xaxis-type',
                options=[{'label': i, 'value': i} for i in ['Linear', 'Log']],
                value='Linear',
                labelStyle={'display': 'inline-block'}
            )
        ], style={'width': '49%', 'display': 'inline-block'}),
        html.Div([
            dcc.Dropdown(
                id='crossfilter-yaxis-column',
                options=[{'label': i, 'value': i} for i in available_indicators],
                value='Life expectancy at birth, total (years)'
            ),
            dcc.RadioItems(
                id='crossfilter-yaxis-type',
                options=[{'label': i, 'value': i} for i in ['Linear', 'Log']],
                value='Linear',
                labelStyle={'display': 'inline-block'}
            )
        ], style={'width': '49%', 'float': 'right', 'display': 'inline-block'})
    ], style={
        'borderBottom': 'thin lightgrey solid',
        'backgroundColor': 'rgb(250, 250, 250)',
        'padding': '10px 5px'
    }),
    html.Div([
        dcc.Graph(
            id='crossfilter-indicator-scatter',
            hoverData={'points': [{'customdata': 'Japan'}]}
        )
    ], style={'width': '49%', 'display': 'inline-block', 'padding': '0 20'}),
    html.Div([
        dcc.Graph(id='x-time-series'),
        dcc.Graph(id='y-time-series'),
    ], style={'display': 'inline-block', 'width': '49%'}),
    html.Div(dcc.Slider(
id='crossfilter-year--slider',
        min=df['Year'].min(),
        max=df['Year'].max(),
        value=df['Year'].max(),
        marks={str(year): str(year) for year in df['Year'].unique()},
        step=None
    ), style={'width': '49%', 'padding': '0px 20px 20px 20px'})
])

@app.callback(
    dash.dependencies.Output('crossfilter-indicator-scatter', 'figure'),
    [dash.dependencies.Input('crossfilter-xaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-yaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-xaxis-type', 'value'),
     dash.dependencies.Input('crossfilter-yaxis-type', 'value'),
     dash.dependencies.Input('crossfilter-year--slider', 'value')])
def update_graph(xaxis_column_name, yaxis_column_name,
                 xaxis_type, yaxis_type,
                 year_value):
    dff = df[df['Year'] == year_value]

    return {
        'data': [dict(
            x=dff[dff['Indicator Name'] == xaxis_column_name]['Value'],
            y=dff[dff['Indicator Name'] == yaxis_column_name]['Value'],
            text=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'],
            customdata=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'],
            mode='markers',
            marker={
                'size': 25,
                'opacity': 0.7,
                'color': 'orange',
                'line': {'width': 2, 'color': 'purple'}
            }
        )],
        'layout': dict(
            xaxis={
                'title': xaxis_column_name,
                'type': 'linear' if xaxis_type == 'Linear' else 'log'
            },
            yaxis={
                'title': yaxis_column_name,
                'type': 'linear' if yaxis_type == 'Linear' else 'log'
            },
            margin={'l': 40, 'b': 30, 't': 10, 'r': 0},
            height=450,
            hovermode='closest'
        )
    }

def create_time_series(dff, axis_type, title):
    return {
        'data': [dict(
            x=dff['Year'],
            y=dff['Value'],
            mode='lines+markers'
        )],
        'layout': {
            'height': 225,
            'margin': {'l': 20, 'b': 30, 'r': 10, 't': 10},
            'annotations': [{
                'x': 0, 'y': 0.85, 'xanchor': 'left', 'yanchor': 'bottom',
                'xref': 'paper', 'yref': 'paper', 'showarrow': False,
                'align': 'left', 'bgcolor': 'rgba(255, 255, 255, 0.5)',
                'text': title
            }],
            'yaxis': {'type': 'linear' if axis_type == 'Linear' else 'log'},
            'xaxis': {'showgrid': False}
        }
    }

@app.callback(
    dash.dependencies.Output('x-time-series', 'figure'),
    [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),
     dash.dependencies.Input('crossfilter-xaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-xaxis-type', 'value')])
def update_y_timeseries(hoverData, xaxis_column_name, axis_type):
    country_name = hoverData['points'][0]['customdata']
    dff = df[df['Country Name'] == country_name]
    dff = dff[dff['Indicator Name'] == xaxis_column_name]
    title = '<b>{}</b><br>{}'.format(country_name, xaxis_column_name)
    return create_time_series(dff, axis_type, title)

@app.callback(
    dash.dependencies.Output('y-time-series', 'figure'),
    [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),
     dash.dependencies.Input('crossfilter-yaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-yaxis-type', 'value')])
def update_x_timeseries(hoverData, yaxis_column_name, axis_type):
    dff = df[df['Country Name'] == hoverData['points'][0]['customdata']]
    dff = dff[dff['Indicator Name'] == yaxis_column_name]
    return create_time_series(dff, axis_type, yaxis_column_name)
###Output
_____no_output_____
###Markdown
Serve the app using `run_server`. Unlike the standard `Dash.run_server` method, the `JupyterDash.run_server` method doesn't block execution of the notebook. It serves the app in a background thread, making it possible to run other notebook calculations while the app is running. This makes it possible to iteratively update the app without rerunning the potentially expensive data processing steps. 
###Code
app.run_server()
###Output
_____no_output_____
###Markdown
By default, `run_server` displays a URL that you can click on to open the app in a browser tab. The `mode` argument to `run_server` can be used to change this behavior. Setting `mode="inline"` will display the app directly in the notebook output cell. ###Code
app.run_server(mode="inline")
###Output
_____no_output_____
###Markdown
JupyterDash The `jupyter-dash` package makes it easy to develop Plotly Dash apps from the Jupyter Notebook and JupyterLab. Just replace the standard `dash.Dash` class with the `jupyter_dash.JupyterDash` subclass. ###Code
!pip install jupyter_dash
from jupyter_dash import JupyterDash
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
###Output
_____no_output_____
###Markdown
When running in JupyterHub or Binder, call the `infer_jupyter_proxy_config` function to detect the proxy configuration. ###Code
JupyterDash.infer_jupyter_proxy_config()
###Output
_____no_output_____
###Markdown
Load and preprocess data ###Code
df = pd.read_csv('https://plotly.github.io/datasets/country_indicators.csv')
available_indicators = df['Indicator Name'].unique()
###Output
_____no_output_____
###Markdown
Construct the app and callbacks ###Code
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = JupyterDash(__name__, external_stylesheets=external_stylesheets)

# Create server variable with Flask server object for use with gunicorn
server = app.server

app.layout = html.Div([
    html.Div([
        html.Div([
            dcc.Dropdown(
                id='crossfilter-xaxis-column',
                options=[{'label': i, 'value': i} for i in available_indicators],
                value='Fertility rate, total (births per woman)'
            ),
            dcc.RadioItems(
                id='crossfilter-xaxis-type',
                options=[{'label': i, 'value': i} for i in ['Linear', 'Log']],
                value='Linear',
                labelStyle={'display': 'inline-block'}
            )
        ], style={'width': '49%', 'display': 'inline-block'}),
        html.Div([
            dcc.Dropdown(
                id='crossfilter-yaxis-column',
                options=[{'label': i, 'value': i} for i in available_indicators],
                value='Life expectancy at birth, total (years)'
            ),
            dcc.RadioItems(
                id='crossfilter-yaxis-type',
                options=[{'label': i, 'value': i} for i in ['Linear', 'Log']],
                value='Linear',
                labelStyle={'display': 'inline-block'}
            )
        ], style={'width': '49%', 'float': 'right', 'display': 'inline-block'})
    ], style={
        'borderBottom': 'thin lightgrey solid',
        'backgroundColor': 'rgb(250, 250, 250)',
        'padding': '10px 5px'
    }),
    html.Div([
        dcc.Graph(
            id='crossfilter-indicator-scatter',
            hoverData={'points': [{'customdata': 'Japan'}]}
        )
    ], style={'width': '49%', 'display': 'inline-block', 'padding': '0 20'}),
    html.Div([
        dcc.Graph(id='x-time-series'),
        dcc.Graph(id='y-time-series'),
    ], style={'display': 'inline-block', 'width': '49%'}),
    html.Div(dcc.Slider(
        id='crossfilter-year--slider',
        min=df['Year'].min(),
        max=df['Year'].max(),
        value=df['Year'].max(),
        marks={str(year): str(year) for year in df['Year'].unique()},
        step=None
    ), style={'width': '49%', 'padding': '0px 20px 20px 20px'})
])

@app.callback(
    dash.dependencies.Output('crossfilter-indicator-scatter', 'figure'),
    [dash.dependencies.Input('crossfilter-xaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-yaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-xaxis-type', 'value'),
     dash.dependencies.Input('crossfilter-yaxis-type', 'value'),
     dash.dependencies.Input('crossfilter-year--slider', 'value')])
def update_graph(xaxis_column_name, yaxis_column_name,
                 xaxis_type, yaxis_type,
                 year_value):
    dff = df[df['Year'] == year_value]
return {
        'data': [dict(
            x=dff[dff['Indicator Name'] == xaxis_column_name]['Value'],
            y=dff[dff['Indicator Name'] == yaxis_column_name]['Value'],
            text=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'],
            customdata=dff[dff['Indicator Name'] == yaxis_column_name]['Country Name'],
            mode='markers',
            marker={
                'size': 25,
                'opacity': 0.7,
                'color': 'orange',
                'line': {'width': 2, 'color': 'purple'}
            }
        )],
        'layout': dict(
            xaxis={
                'title': xaxis_column_name,
                'type': 'linear' if xaxis_type == 'Linear' else 'log'
            },
            yaxis={
                'title': yaxis_column_name,
                'type': 'linear' if yaxis_type == 'Linear' else 'log'
            },
            margin={'l': 40, 'b': 30, 't': 10, 'r': 0},
            height=450,
            hovermode='closest'
        )
    }

def create_time_series(dff, axis_type, title):
    return {
        'data': [dict(
            x=dff['Year'],
            y=dff['Value'],
            mode='lines+markers'
        )],
        'layout': {
            'height': 225,
            'margin': {'l': 20, 'b': 30, 'r': 10, 't': 10},
            'annotations': [{
                'x': 0, 'y': 0.85, 'xanchor': 'left', 'yanchor': 'bottom',
                'xref': 'paper', 'yref': 'paper', 'showarrow': False,
                'align': 'left', 'bgcolor': 'rgba(255, 255, 255, 0.5)',
                'text': title
            }],
            'yaxis': {'type': 'linear' if axis_type == 'Linear' else 'log'},
            'xaxis': {'showgrid': False}
        }
    }

@app.callback(
    dash.dependencies.Output('x-time-series', 'figure'),
    [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),
     dash.dependencies.Input('crossfilter-xaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-xaxis-type', 'value')])
def update_y_timeseries(hoverData, xaxis_column_name, axis_type):
    country_name = hoverData['points'][0]['customdata']
    dff = df[df['Country Name'] == country_name]
    dff = dff[dff['Indicator Name'] == xaxis_column_name]
    title = '<b>{}</b><br>{}'.format(country_name, xaxis_column_name)
    return create_time_series(dff, axis_type, title)

@app.callback(
    dash.dependencies.Output('y-time-series', 'figure'),
    [dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),
     dash.dependencies.Input('crossfilter-yaxis-column', 'value'),
     dash.dependencies.Input('crossfilter-yaxis-type', 'value')])
def update_x_timeseries(hoverData, yaxis_column_name, axis_type):
    dff = df[df['Country Name'] == hoverData['points'][0]['customdata']]
    dff = dff[dff['Indicator Name'] == yaxis_column_name]
    return create_time_series(dff, axis_type, yaxis_column_name)
###Output
_____no_output_____
###Markdown
Serve the app using `run_server`. Unlike the standard `Dash.run_server` method, the `JupyterDash.run_server` method doesn't block execution of the notebook. It serves the app in a background thread, making it possible to run other notebook calculations while the app is running. This makes it possible to iteratively update the app without rerunning the potentially expensive data processing steps. ###Code
app.run_server()
###Output
Dash app running on:
###Markdown
By default, `run_server` displays a URL that you can click on to open the app in a browser tab. The `mode` argument to `run_server` can be used to change this behavior. Setting `mode="inline"` will display the app directly in the notebook output cell. ###Code
app.run_server(mode="inline")
###Output
_____no_output_____
###Markdown
When running in JupyterLab, with the `jupyterlab-dash` extension, setting `mode="jupyterlab"` will open the app in a tab in JupyterLab.
```python
app.run_server(mode="jupyterlab")
``` ###Code
###Output
_____no_output_____
###Markdown
Getting started with OGGM: a real case study, step by step The OGGM workflow is best explained with an example. 
In the following, we will show how to apply the standard [OGGM workflow](http://docs.oggm.org/en/stable/introduction.html) to a list of glaciers. This example is meant to guide you through a first-time setup step-by-step. If you prefer not to install OGGM on your computer, you can always run this notebook in [OGGM-Edu](https://edu.oggm.org) instead! Set-up Input data folders **If you are using your own computer**: before you start, make sure that you have set up the [input data configuration file](https://docs.oggm.org/en/stable/input-data.html) as you wish. In the course of this tutorial, we will need to download data for each glacier (a couple of MB at most, depending on the chosen glaciers), so make sure you have an internet connection. cfg.initialize() and cfg.PARAMS An OGGM simulation script will always start with the following commands: ###Code
from oggm import cfg, utils
cfg.initialize(logging_level='WARNING')
###Output
_____no_output_____
###Markdown
A call to [cfg.initialize()](https://docs.oggm.org/en/stable/generated/oggm.cfg.initialize.html) will read the default parameter file (or any user-provided file) and make its parameters available to all other OGGM tools via the `cfg.PARAMS` dictionary. Here are some examples of these parameters: ###Code
cfg.PARAMS['prcp_scaling_factor'], cfg.PARAMS['ice_density'], cfg.PARAMS['continue_on_error']
###Output
_____no_output_____
###Markdown
See [here](https://github.com/OGGM/oggm/blob/master/oggm/params.cfg) for the default parameter file and a description of each parameter's role and default value. ###Code
# You can try with or without multiprocessing: with two glaciers, OGGM could run on two processors
cfg.PARAMS['use_multiprocessing'] = True
###Output
_____no_output_____
###Markdown
Workflow In this section, we will explain the fundamental concepts of the OGGM workflow:
- Working directories
- Glacier directories
- Tasks ###Code
from oggm import workflow
###Output
_____no_output_____
###Markdown
Working directory Each OGGM run needs a **single folder** in which to store the results of the computations for all glaciers. This is called a "working directory" and needs to be specified before each run. Here we create a temporary folder for you: ###Code
cfg.PATHS['working_dir'] = utils.gettempdir(dirname='OGGM-GettingStarted', reset=True)
cfg.PATHS['working_dir']
###Output
_____no_output_____
###Markdown
We use a temporary directory for this example, but in practice you will set this working directory yourself (for example: `/home/john/OGGM_output`). The size of this directory will depend on how many glaciers you'll simulate! **This working directory is meant to be persistent**, i.e. you can stop your processing workflow after any task, and restart from an existing working directory at a later stage. Define the glaciers for the run ###Code
rgi_ids = ['RGI60-11.01328', 'RGI60-11.00897']
###Output
_____no_output_____
###Markdown
You can provide any number of glacier identifiers to OGGM. In this case, we chose:
- `RGI60-11.01328`: [Unteraar Glacier](https://en.wikipedia.org/wiki/Unteraargletscher) in the Swiss Alps
- `RGI60-11.00897`: [Hintereisferner](http://acinn.uibk.ac.at/research/ice-and-climate/projects/hintereisferner) in the Austrian Alps.
Here is a list of other glaciers you might want to try out:
- `RGI60-18.02342`: Tasman Glacier in New Zealand
- `RGI60-11.00787`: [Kesselwandferner](https://de.wikipedia.org/wiki/Kesselwandferner) in the Austrian Alps
- ... or any other glacier identifier! 
You can find other glacier identifiers by exploring the [GLIMS viewer](https://www.glims.org/maps/glims). For an operational run on an RGI region, you might want to download the [Randolph Glacier Inventory](https://www.glims.org/RGI/) dataset instead, and start a run from it. This case is covered in the [working with the RGI](working_with_rgi.ipynb) tutorial. Glacier directories The OGGM workflow is organized as a list of **tasks** that have to be applied to a list of glaciers. The vast majority of tasks are called **entity tasks**: they are standalone operations to be realized on one single glacier entity. These tasks are executed sequentially (one after another): they often need input generated by the previous task(s): for example, the climate calibration needs the glacier flowlines, which can only be computed after the topography data has been processed, and so on. To handle this situation, OGGM uses a workflow based on data persistence on disk: instead of passing data as Python variables from one task to another, each task will read the data from disk and then write the computation results back to the disk, making these new data available for the next task in the queue. These glacier-specific data are located in [glacier directories](https://docs.oggm.org/en/stable/glacierdir.html#glacier-directories). In the model, these directories are initialized with the following command (this can take a little while on the first call, as OGGM needs to download some data): ###Code
# Where to fetch the pre-processed directories
gdirs = workflow.init_glacier_directories(rgi_ids, from_prepro_level=3, prepro_border=80)
###Output
_____no_output_____
###Markdown
- the keyword `from_prepro_level` indicates that we will start from [pre-processed directories](https://docs.oggm.org/en/stable/input-data.html#pre-processed-directories), i.e. data that are already pre-processed for the model users. In many cases you will want to start from level 3, 4 or 5. Here we start from level 3 and rerun some of the processing in order to demonstrate the OGGM workflow.
- the `prepro_border` keyword indicates the number of DEM grid points which we'd like to add to each side of the glacier for the local map: the larger the glacier will grow, the larger the border parameter should be. The available pre-processed border values are: **10, 80, 160** (depending on the model set-ups there might be more or fewer options). These are the fixed map sizes we prepared for you - any other map size will require a full processing (see the [alternative DEM example](dem_sources.ipynb) for a tutorial). The [init_glacier_directories](https://docs.oggm.org/en/stable/generated/oggm.workflow.init_glacier_directories.html#oggm.workflow.init_glacier_directories) task will always be the very first task to call for all your OGGM experiments. Let's see what it gives us back: ###Code
type(gdirs), type(gdirs[0])
###Output
_____no_output_____
###Markdown
`gdirs` is a list of [GlacierDirectory](https://docs.oggm.org/en/stable/generated/oggm.GlacierDirectory.html#oggm.GlacierDirectory) objects (one for each glacier). **Glacier directories are used by OGGM as "file and attribute manager"** for single glaciers. 
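A couple of those attributes, printed for both glaciers (a hedged sketch using standard `GlacierDirectory` attribute names): ###Code
# identifier and glacier center coordinates, straight from the directory object
for gd in gdirs:
    print(gd.rgi_id, '- lon/lat: {:.2f}, {:.2f}'.format(gd.cenlon, gd.cenlat))
###Output
_____no_output_____
###Markdown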
For example, the model now knows where to find the topography data file for this glacier: ###Code
gdir = gdirs[0]  # take Unteraar glacier
print('Path to the DEM:', gdir.get_filepath('dem'))
###Output
_____no_output_____
###Markdown
And we can also access some attributes of this glacier: ###Code
gdir
gdir.rgi_date  # date at which the outlines are valid
###Output
_____no_output_____
###Markdown
The advantage of this Glacier Directory data model is that it greatly simplifies the data transfer between tasks. **The single mandatory argument of all entity tasks will always be a glacier directory**. With the glacier directory, each task will find the input it needs: for example, both the glacier's topography and outlines are needed for the next plotting function, and both are available via the `gdir` argument: ###Code
from oggm import graphics
graphics.plot_domain(gdir, figsize=(8, 7))
###Output
_____no_output_____
###Markdown
Another advantage of glacier directories is their persistence on disk: once created, **they can be recovered from the same location** by using `init_glacier_directories` again, but without keyword arguments: ###Code
# Fetch the LOCAL pre-processed directories - note that no arguments are used!
gdirs = workflow.init_glacier_directories(rgi_ids)
###Output
_____no_output_____
###Markdown
See the [store_and_compress_glacierdirs](store_and_compress_glacierdirs.ipynb) tutorial for more information on glacier directories. Tasks There are two different types of [tasks](http://docs.oggm.org/en/stable/api.html#entity-tasks): **Entity Tasks**: Standalone operations to be realized on one single glacier entity, independently from the others. The majority of OGGM tasks are entity tasks. They are parallelisable: the same task can run on several glaciers in parallel. **Global Tasks**: Tasks which require working on several glacier entities at the same time. Model parameter calibration or the compilation of several glaciers' output are examples of global tasks. OGGM implements a simple mechanism to run a specific task on a list of `GlacierDirectory` objects: ###Code
from oggm import tasks

# run the glacier_masks task on all gdirs
workflow.execute_entity_task(tasks.glacier_masks, gdirs);
###Output
_____no_output_____
###Markdown
The task we just applied to our list of glaciers is [glacier_masks](http://docs.oggm.org/en/stable/generated/oggm.tasks.glacier_masks.html#oggm.tasks.glacier_masks). It wrote a new file in our glacier directory, providing raster masks of the glacier (among other things): ###Code
print('Path to the masks:', gdir.get_filepath('gridded_data'))
###Output
_____no_output_____
###Markdown
It is also possible to apply several tasks sequentially (i.e. one after another) on our glacier list: ###Code
list_tasks = [
    tasks.compute_centerlines,
    tasks.initialize_flowlines,
    tasks.compute_downstream_line,
]
for task in list_tasks:
    # The order matters!
    workflow.execute_entity_task(task, gdirs)
###Output
_____no_output_____
###Markdown
The function `execute_entity_task` can run a task on different glaciers at the same time, if the `use_multiprocessing` option is set to `True` in the configuration file. Among other things, we computed the glacier flowlines and the glacier's downstream line. We can now plot them: ###Code
graphics.plot_centerlines(gdir, figsize=(8, 7), use_flowlines=True, add_downstream=True)
###Output
_____no_output_____
###Markdown
As a result, the glacier directories now store many more files. 
If you are interested, you can have a look: ###Code
import os
print(os.listdir(gdir.dir))
###Output
_____no_output_____
###Markdown
For a short explanation of what these files are, see the [glacier directory documentation](https://docs.oggm.org/en/stable/api.html#cfg-basenames). In practice, however, you will only rarely need to access these files yourself. Other preprocessing tasks Let's continue with the other preprocessing tasks: ###Code
list_tasks = [
    tasks.catchment_area,
    tasks.catchment_width_geom,
    tasks.catchment_width_correction,
    tasks.compute_downstream_bedshape
]
for task in list_tasks:
    # The order matters!
    workflow.execute_entity_task(task, gdirs)
###Output
_____no_output_____
###Markdown
We just computed the catchment areas of each flowline (the colors are arbitrary): ###Code
graphics.plot_catchment_areas(gdir, figsize=(8, 7))
###Output
_____no_output_____
###Markdown
Each flowline now knows what area will contribute to its surface mass-balance and ice flow. Accordingly, it is possible to compute each glacier cross-section's width, and correct it so that the total glacier area and elevation distribution is conserved: ###Code
graphics.plot_catchment_width(gdir, corrected=True, figsize=(8, 7))
###Output
_____no_output_____
###Markdown
Climate tasks The glacier directories we downloaded already contain the climate timeseries for each glacier (`from_prepro_level=3`). Let's have a look at them: ###Code
import xarray as xr
fpath = gdir.get_filepath('climate_historical')
ds = xr.open_dataset(fpath)
# Data is in hydrological years
# -> let's just ignore the first and last calendar years
ds.temp.resample(time='AS').mean()[1:-1].plot();
###Output
_____no_output_____
###Markdown
This climate data is called the "baseline climate" for this glacier. It will be used for the mass-balance model calibration, and at the end of this tutorial also to generate the random climate to drive a simulation. When running OGGM with GCM data, the GCM timeseries will be computed as anomalies to this baseline timeseries, hence the name. Here we are using CRU, but OGGM-Shop also allows using ERA5 and CERA as baselines. Now, let's calibrate the mass-balance model for this glacier. The calibration procedure of OGGM is ... original, but is also quite powerful. Read the [doc page](https://docs.oggm.org/en/stable/mass-balance.html) or the [GMD paper](https://www.geosci-model-dev-discuss.net/gmd-2018-9/) for more details, and you can also follow the [mass-balance calibration tutorial](massbalance_calibration.ipynb) explaining some of the model internals. The default calibration process is automated (see also [local_t_star](https://docs.oggm.org/en/stable/generated/oggm.tasks.local_t_star.html#oggm.tasks.local_t_star)): ###Code
# Fetch the reference t* list and associated model parameters
params_url = 'https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params/oggm_v1.4/RGIV62/CRU/centerlines/qc3/pcp2.5'
workflow.download_ref_tstars(base_url=params_url)

# Now calibrate
workflow.execute_entity_task(tasks.local_t_star, gdirs);
workflow.execute_entity_task(tasks.mu_star_calibration, gdirs);
###Output
_____no_output_____
###Markdown
**¡Important!** The calibration of the mass-balance model is automated only for certain parameter combinations of the model - any change in the mass-balance model settings (e.g. the melt threshold, the precipitation correction factor, etc.) will require a re-calibration of the model (see the [mass-balance calibration tutorial](massbalance_calibration.ipynb) for an introduction to this topic). 
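If you want to check what came out of the calibration, the resulting parameters are stored in each glacier directory. A minimal, hedged sketch (assuming the standard `local_mustar` file written by the calibration tasks): ###Code
# inspect the calibrated parameters stored on disk
for gd in gdirs:
    d = gd.read_json('local_mustar')
    print(gd.rgi_id, 't*:', d['t_star'], 'mu*:', d['mu_star_glacierwide'])
###Output
_____no_output_____
###Markdown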
From there, OGGM can now compute the mass-balance for these glaciers. For example: ###Code
from oggm.core.massbalance import MultipleFlowlineMassBalance
gdir_hef = gdirs[1]
mbmod = MultipleFlowlineMassBalance(gdir_hef, use_inversion_flowlines=True)
import numpy as np
import matplotlib.pyplot as plt
years = np.arange(1902, 2017)
mb_ts = mbmod.get_specific_mb(year=years)
plt.plot(years, mb_ts); plt.ylabel('SMB (mm yr$^{-1}$)');
###Output
_____no_output_____
###Markdown
For the Hintereisferner (not for Unteraar, where no observational data are available), we can also compare our computed mass-balance to the measured one: ###Code
mbdf = gdir_hef.get_ref_mb_data()
mbdf['OGGM'] = mbmod.get_specific_mb(year=mbdf.index)
mbdf[['ANNUAL_BALANCE', 'OGGM']].plot(); plt.ylabel('SMB (mm yr$^{-1}$)');
###Output
_____no_output_____
###Markdown
This graphic is interesting because it shows an effect often observed when comparing the computed mass balance to the observed one: since (in this case) the OGGM geometry is fixed with time, the modelled specific mass-balance series are likely to have a stronger trend than the observed ones. To assess the results of the OGGM mass-balance model for all WGMS glaciers worldwide, visit the [score summary](https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params/oggm_v1.4/RGIV62/CRU/centerlines/qc3/pcp2.5/_scores/) for these particular model settings. Computing the ice thickness ("inversion") With the computed mass-balance and the flowlines, OGGM can now compute the ice thickness, based on the principles of [mass conservation and ice dynamics](http://docs.oggm.org/en/stable/inversion.html). ###Code
list_tasks = [
    tasks.prepare_for_inversion,  # This is a preprocessing task
    tasks.mass_conservation_inversion,  # This does the actual job
    tasks.filter_inversion_output  # This smoothes the thicknesses at the tongue a little
]
for task in list_tasks:
    workflow.execute_entity_task(task, gdirs)
###Output
_____no_output_____
###Markdown
The ice thickness is computed for all sections along the flowline, and can be displayed with the help of OGGM's graphics module: ###Code
graphics.plot_inversion(gdir, figsize=(8, 7))
###Output
_____no_output_____
###Markdown
The inversion is realized with the default parameter settings: it must be noted that the model is sensitive to the choice of some of them, most notably the creep parameter A: ###Code
cfg.PARAMS['inversion_glen_a']
a_factor = np.linspace(0.1, 10., 100)
volume = []
for f in a_factor:
    # Recompute the volume without overwriting the previous computations
    v = tasks.mass_conservation_inversion(gdir, glen_a=f * cfg.PARAMS['inversion_glen_a'], write=False)
    volume.append(v * 1e-9)
plt.plot(a_factor, volume); plt.title('Unteraar total volume');
plt.ylabel('Volume (km$^3$)'); plt.xlabel('Glen A factor (1 = default)');
###Output
_____no_output_____
###Markdown
There is no simple way to find the best A for each individual glacier. It can easily vary by a factor of 10 (or more) from one glacier to another. At the global scale, the "best" A is close to the default value (possibly between 1 and 1.5 times larger). The default parameter is a good choice as a first step, but be aware that reconstructions based on this default parameter might be very uncertain! See our [ice thickness inversion tutorial](inversion.ipynb) for a more in-depth discussion. Simulations For most applications, this is where the fun starts! With climate data and an estimate of the ice thickness, we can now start transient simulations. 
For this tutorial, we will show how to realize idealized experiments based on the baseline climate only, but it is also possible to drive OGGM with real GCM data. ###Code
# Convert the flowlines to a "glacier" for the ice dynamics module
workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs);
###Output
_____no_output_____
###Markdown
Let's start a run driven by the climate of the last 31 years, shuffled randomly for 200 years. This can be seen as a "commitment" simulation, i.e. how much glaciers will change even without further climate change: ###Code
cfg.PARAMS['store_model_geometry'] = True  # add additional outputs for the maps below
workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200,
                             y0=2000, output_filesuffix='_2000');
###Output
_____no_output_____
###Markdown
The output of this simulation is stored in two separate files: a diagnostic file (which contains time series variables such as length, volume, ELA, etc.) and a full model output file, which is larger but allows reproducing the full glacier geometry changes during the run. In practice, the diagnostic files are often compiled for the entire list of glaciers: ###Code
ds2000 = utils.compile_run_output(gdirs, input_filesuffix='_2000')
###Output
_____no_output_____
###Markdown
This dataset is also stored on disk (in the working directory) as a NetCDF file for later use. Here we can access it directly: ###Code
ds2000
###Output
_____no_output_____
###Markdown
We opened the file with [xarray](http://xarray.pydata.org), a very useful data analysis library based on [pandas](http://pandas.pydata.org/). For example, we can plot the volume and length evolution of both glaciers with time: ###Code
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 4))
ds2000.volume.plot.line(ax=ax1, hue='rgi_id');
ds2000.length.plot.line(ax=ax2, hue='rgi_id');
###Output
_____no_output_____
###Markdown
The full model output files can be used for plots: ###Code
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 6))
graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=0, ax=ax1, vmax=350)
graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=50, ax=ax2, vmax=350)
graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=150, ax=ax3, vmax=350)
plt.tight_layout();
###Output
_____no_output_____
###Markdown
Sensitivity to temperature Now repeat our simulations with a +0.5°C and -0.5°C temperature bias, which for a glacier is quite a lot! 
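As a reminder of what `temperature_bias` does: it adds a constant offset to the baseline temperatures before the mass-balance is computed. A quick, hypothetical check of the magnitude involved (this is not an OGGM call, just a look at the climate file): ###Code
import xarray as xr
with xr.open_dataset(gdir.get_filepath('climate_historical')) as ds_clim:
    t_mean = float(ds_clim.temp.mean())
print('mean T: {:.2f} C; +bias: {:.2f} C; -bias: {:.2f} C'.format(t_mean, t_mean + 0.5, t_mean - 0.5))
###Output
_____no_output_____
###Markdown
The two runs below apply exactly such biases: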
###Code
workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200,
                             temperature_bias=0.5,
                             y0=2000, output_filesuffix='_p05');
workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200,
                             temperature_bias=-0.5,
                             y0=2000, output_filesuffix='_m05');
dsp = utils.compile_run_output(gdirs, input_filesuffix='_p05')
dsm = utils.compile_run_output(gdirs, input_filesuffix='_m05')
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 4))
rgi_id = 'RGI60-11.01328'
ds2000.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='Commitment');
ds2000.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id');
ds2000.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id');
dsp.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='$+$ 0.5°C');
dsp.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id');
dsp.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id');
dsm.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='$-$ 0.5°C');
dsm.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id');
dsm.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id');
ax1.legend();
###Output
_____no_output_____
###Markdown
Getting started with IceVision![](https://airctic.github.io/icevision/images/icevision-logo-slogan.png) Why IceVision?
- IceVision is an Object-Detection Framework that connects to different libraries/frameworks such as fastai, Pytorch Lightning, and Pytorch with more to come.
- Features a Unified Data API with out-of-the-box support for common annotation formats (COCO, VOC, etc.)
- The [IceData repo](https://github.com/airctic/icedata) hosts community maintained parsers and custom datasets
- Provides flexible model implementations with pluggable backbones
- Helps researchers reproduce, replicate, and go beyond published models
- Enables practitioners to get moving with object detection technology quickly Introduction This tutorial walks you through the different steps of training and using a model. The IceVision Framework is an **agnostic framework**. To demonstrate this we will train and use our model with both the [fastai](https://github.com/fastai/fastai) and [pytorch-lightning](https://github.com/PyTorchLightning/pytorch-lightning) libraries. If you are using Google Colab, the GPU runtime should be enabled, but if you experience problems when training your model, you may want to check this: `Runtime` -> `Change runtime type` -> `Hardware accelerator dropdown` -> `GPU` Install icevision and icedata ###Code
!pip install icevision[all]
!pip install icedata
###Output
_____no_output_____
###Markdown
Import the packages ###Code
from icevision.all import *
import icedata
###Output
_____no_output_____
###Markdown
Datasets IceVision provides handy methods to load a dataset, parse annotations, and more. In the example below, we work with the [PETS dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/) to detect cats and dogs in images and identify their breeds. Loading the PETS dataset is one line of code. ###Code
data_dir = icedata.pets.load_data()
data_dir
###Output
_____no_output_____
###Markdown
Parser The `Parser` is one of the most important concepts in IceVision. It allows us to work with **any** annotation format. The basic job of the parser is to convert a custom format to something the library can understand. You might still need to create a custom parser for your own dataset. Fear not! Creating parsers is easy. 
After you've finished this tutorial, check this [custom parser documentation](https://airctic.github.io/icevision/custom_parser/) to learn how. IceVision already provides a `parser` for the Pets Dataset: ###Code
class_map = icedata.pets.class_map()
class_map
parser = icedata.pets.parser(data_dir, class_map)
###Output
_____no_output_____
###Markdown
Split the dataset Next, we define the `train/valid` splits for the data. Let's use random splits for this one. ###Code
data_splitter = RandomSplitter([.8, .2])
###Output
_____no_output_____
###Markdown
Parse the data Next we `parse()` the dataset using the data splitter. This returns 2 lists of records: one for training and another for validation. ###Code
train_records, valid_records = parser.parse(data_splitter)
###Output
_____no_output_____
###Markdown
!!! info "What's a record?" A record is a dictionary that contains all parsed fields defined by the parser used. No matter what format the annotation has, a record has a common structure that can be connected to different DL frameworks (fastai, Pytorch-Lightning, etc.) Visualize the training data We can show one of the records (image + box + label). This helps to understand what is in the dataset and check that the boxes and labels make sense. ###Code
show_record(train_records[1])
###Output
_____no_output_____
###Markdown
We can also display the label instead of its identifier by providing the `class_map`. ###Code
show_record(train_records[1], class_map=class_map)
###Output
_____no_output_____
###Markdown
Of course, we often want to see several images with their corresponding boxes and labels. ###Code
records = train_records[:6]
show_records(records, ncols=3, class_map=class_map)
###Output
_____no_output_____
###Markdown
Transforms Data transformations are an essential part of the training pipeline. There are many transformation libraries available including: [albumentations](https://github.com/albumentations-team/albumentations), [solt](https://github.com/MIPT-Oulu/solt), and [torchvision](https://pytorch.org/docs/stable/torchvision/transforms.html#transforms-on-pil-image). IceVision supports the widely used [albumentations](https://github.com/albumentations-team/albumentations) library out-of-the-box. It is possible to integrate other transform libraries. You just need to inherit and override all abstract methods of the `Transform` class. We plan to add more in future versions in response to community feedback. It is typical to use different transformations for the training and validation datasets. The `valid_tfms` apply to the validation set. These are minimal - just resizing the image and normalising it. The `train_tfms` typically do data augmentations such as zoom, crop, lighting adjustments, horizontal flips, and so on. These help to reduce the required training set size, reduce overfitting, and produce a more robust model. IceVision makes this easy - all of the bounding boxes are adjusted if needed. For example, zooming in will make the bounding boxes larger. Crops will not cut any bounding boxes. The `presize` parameter helps to improve the resulting image quality. See the [Fast AI Book](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb) for more details. The `A.Normalize` function applies a set of default normalizations that have been refined over the years on the Imagenet dataset. 
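For reference, `A.Normalize` defaults to the familiar ImageNet statistics (in albumentations: mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), so each channel is shifted and scaled. A quick arithmetic sketch of what that does to one pixel: ###Code
import numpy as np
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
pixel = np.array([0.5, 0.5, 0.5])  # an RGB pixel already scaled to [0, 1]
print((pixel - mean) / std)  # the values the network actually sees
###Output
_____no_output_____
###Markdown
With that in mind, here are the transform pipelines: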
###Code
presize = 512
size = 384
valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(size), tfms.A.Normalize()])
train_tfms = tfms.A.Adapter([*tfms.A.aug_tfms(size=size, presize=presize), tfms.A.Normalize()])
###Output
_____no_output_____
###Markdown
Dataset The `Dataset` class combines the records and transforms. To create a `Dataset`, we just need to pass the parsed records from the previous step along with the transforms. ###Code
train_ds = Dataset(train_records, train_tfms)
valid_ds = Dataset(valid_records, valid_tfms)
###Output
_____no_output_____
###Markdown
!!! info "What does the `Dataset` class do?" - Prepares the record: For example, in the record we just have a filename that points to the image; it's at this stage that we open the image. - Applies the pipeline of transforms to the record prepared in the previous step !!! info "Lazy transforms" Transforms are applied **lazily**, meaning they are only applied when we grab (get) an item. This means that, if you have augmentation (random) transforms, each time you get the **same** item from the dataset you will get a slightly different version of it. !!! danger "Important" Because we normalized our images with `imagenet_stats`, when displaying transformed images, we need to denormalize them. The `show_sample` function receives an optional argument called `denormalize_fn` that can be passed: in our case, we pass `denormalize_imagenet`. Displaying the same image with different transforms ###Code
samples = [train_ds[3] for _ in range(6)]
show_samples(samples, ncols=3, class_map=class_map, denormalize_fn=denormalize_imagenet)
###Output
_____no_output_____
###Markdown
Model In this tutorial, we are learning to predict bounding boxes and classes, but not performing image segmentation. We will use the `FasterRCNN` model. To create the model, we need to specify how many classes our dataset has. This is the length of the `class_map`. Note that the `class_map` includes a value for `"background"` with index 0, which is added behind the scenes by default. ###Code
model = faster_rcnn.model(num_classes=len(class_map))
###Output
_____no_output_____
###Markdown
DataLoader Each model has its own dataloader (a pytorch `DataLoader`) that can be customized: the dataloaders for the RCNN models have a custom collate function. ###Code
train_dl = faster_rcnn.train_dl(train_ds, batch_size=16, num_workers=4, shuffle=True)
valid_dl = faster_rcnn.valid_dl(valid_ds, batch_size=16, num_workers=4, shuffle=False)
###Output
_____no_output_____
###Markdown
Training IceVision is an agnostic framework, meaning it can be plugged into multiple DL frameworks such as [fastai](https://github.com/fastai/fastai) and [pytorch-lightning](https://github.com/PyTorchLightning/pytorch-lightning). You could also plug it into a new DL framework using your own custom code. Metrics Metrics are essential for tracking the model progress as it's training. Here we are going to be using the well-established `COCOMetric`, which reports on the mean average precision of the predictions. ###Code
metrics = [COCOMetric(metric_type=COCOMetricType.bbox)]
###Output
_____no_output_____
###Markdown
Training with fastai Creating a Learner object Creating a fastai compatible `Learner` using the fastai interface. ###Code
learn = faster_rcnn.fastai.learner(dls=[train_dl, valid_dl], model=model, metrics=metrics)
###Output
_____no_output_____
###Markdown
Training the RCNN model using fastai `fine_tune()` method The fastai `fine_tune` method is useful when you have a pre-trained model, which we are using. 
It does an initial epoch where it freezes everything except its final layers. It then carries on for the indicated number of epochs using a differential learning rate to train the whole model. It adjusts the learning rate both across the layers of the model as well as across the epochs. This can give excellent results with reduced training time. In September 2020, if everything is working, the model might require around 3 minutes per epoch on a free Google Colab server. ###Code
learn.fine_tune(10, lr=1e-4)
###Output
_____no_output_____
###Markdown
Training with Pytorch-Lightning ###Code
# import the pytorch-lightning engine
import pytorch_lightning as pl
###Output
_____no_output_____
###Markdown
Creating a Pytorch-Lightning (PL) model class It inherits from `faster_rcnn.lightning.ModelAdapter` and implements the PL method `configure_optimizers`. ###Code
class LightModel(faster_rcnn.lightning.ModelAdapter):
    def configure_optimizers(self):
        return SGD(self.parameters(), lr=1e-4)
###Output
_____no_output_____
###Markdown
**Note:** If you are familiar with working with lightning, you will note that we've been able to skip some of the boilerplate. This is because the IceVision model adapter takes care of it behind the scenes. For example, it defines `training_step` and `validation_step`. The adapter also supports working with `Metric`s. If you need custom functionality, you can override or re-implement those methods. ###Code
# Creating a PL model object
light_model = LightModel(model, metrics=metrics)
###Output
_____no_output_____
###Markdown
Training the RCNN model using PL `Trainer.fit()` method ###Code
trainer = pl.Trainer(max_epochs=10, gpus=1)
trainer.fit(light_model, train_dl, valid_dl)
###Output
_____no_output_____
###Markdown
Inference Load a model Training the model with `fastai` using `fine_tune` twice led to the following results: * train_loss: 0.06772 * valid_loss: 0.074435 Using our Trained Weights If you don't want to train the model, you can use our trained weights that we made publicly available. You can download them with `torch.hub`: ###Code
weights_url = "https://icevisiondels.s3.us-east-2.amazonaws.com/pets.zip"
state_dict = torch.hub.load_state_dict_from_url(weights_url, map_location=torch.device("cuda"))
###Output
_____no_output_____
###Markdown
!!! info "Note" Typically inference is done on the CPU, in which case you would pass `map_location=torch.device("cpu")` when loading the state dict; here we load the weights onto the GPU. Let's recreate the model and load the downloaded weights: ###Code
model = faster_rcnn.model(num_classes=len(class_map))
model.load_state_dict(state_dict)
###Output
_____no_output_____
###Markdown
The first step for prediction is to have some images; let's grab some random ones from the validation dataset: 11.3- Predict all images at once If you don't have too many images, you can get predictions with a single forward pass. In case your images don't fit in memory simultaneously, you should predict in batches; the next section shows how to do that. For demonstration purposes, let's download a single image from the internet and see how our model performs on it. ###Code
IMAGE_URL = "https://petcaramelo.com/wp-content/uploads/2018/06/beagle-cachorro.jpg"
IMG_PATH = "tmp.jpg"

download_url(IMAGE_URL, IMG_PATH)
img = open_img(IMG_PATH)
show_img(img)
###Output
_____no_output_____
###Markdown
!!! info "Try other images!" Change `IMAGE_URL` to point to another image you found on the internet. 
Just be sure to take one of the breeds from `class_map`, or else the model might get confused. Whenever you have images in memory (numpy arrays) you can use `Dataset.from_images`. We're going to use the same transforms we used on the validation dataset. ###Code
infer_ds = Dataset.from_images([img], valid_tfms)
###Output
_____no_output_____
###Markdown
For any model, the prediction steps are always the same: first call `build_infer_batch` and then `predict`. For `faster_rcnn` we have `detection_threshold`, which specifies how confident the model should be to output a bounding box. ###Code
batch, samples = faster_rcnn.build_infer_batch(infer_ds)
preds = faster_rcnn.predict(model=model, batch=batch)
###Output
_____no_output_____
###Markdown
For displaying the predictions, we first need to grab our image from `samples`. We do this instead of using the original images because transforms may have been applied to the image (in fact, in this case, a resize was used). ###Code
imgs = [sample["img"] for sample in samples]
###Output
_____no_output_____
###Markdown
Now we just need to call `show_preds`, to show the image with its corresponding predictions (boxes + labels). ###Code
show_preds(
    imgs=imgs,
    preds=preds,
    class_map=class_map,
    denormalize_fn=denormalize_imagenet,
    show=True,
)
###Output
_____no_output_____
###Markdown
11.4- Predicting a batch of images Instead of predicting a whole list of images at once, we can process a small batch at a time. This option is more memory efficient: we use an inference dataloader (`infer_dl`). Had we had a test dataset, we would have made our predictions using the batching technique mentioned above. As an illustrative example, we will predict all images belonging to the validation dataset using the following approach: ###Code
infer_dl = faster_rcnn.infer_dl(valid_ds, batch_size=16)
samples, preds = faster_rcnn.predict_dl(model=model, infer_dl=infer_dl)
###Output
_____no_output_____
###Markdown
Same as before, we grab our images from `samples`. ###Code
imgs = [sample["img"] for sample in samples]
###Output
_____no_output_____
###Markdown
Let's show the first 6 predictions: ###Code
show_preds(
    imgs=imgs[:6],
    preds=preds[:6],
    ncols=3,
    class_map=class_map,
    denormalize_fn=denormalize_imagenet,
    show=True,
)
###Output
_____no_output_____
###Markdown
A brief introduction to `simpleQE` simpleQE is not currently tuned for large-scale data analysis, and is meant for research-level, exploratory analysis. Nick Kern, 11/1/2020 ###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats, signal
import copy
import simpleqe as sqe
import palettable as pt  # pip install this
###Output
_____no_output_____
###Markdown
Setup Your Covariance Matrices These will be used in our GPR analysis, but also to generate mock data. 
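In equations, the three kernels defined in the next cell are a squared-exponential (Gaussian) foreground term, a white-noise term, and an exponential EoR term:

$$C_{\rm fg}(\nu_i, \nu_j) = \sigma_{\rm fg}^2\, e^{-(\nu_i - \nu_j)^2 / 2\ell_{\rm fg}^2}, \qquad C_{\rm n} = \sigma_{\rm n}^2\, I, \qquad C_{\rm eor}(\nu_i, \nu_j) = \sigma_{\rm eor}^2\, e^{-|\nu_i - \nu_j| / \ell_{\rm eor}}$$

where the $\sigma^2$ are the `variance` arguments and the $\ell$ are the `length_scale` arguments of the functions below.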
###Code def Cfg(freqs, variance=1e3, length_scale=5e6): """ A Gaussian foreground covariance freqs : array_like, (Nfreqs, 1) frequency array in Hz variance : float Amplitude of diagonal length_scale : float length scale parameter (Hz) """ return variance * np.exp(-0.5 * (freqs - freqs.T)**2 / length_scale**2) def Cn(freqs, variance=1): """ A white noise covariance freqs : array_like, (Nfreqs, 1) frequency array in Hz variance : float Amplitude of diagonal """ return variance * np.eye(len(freqs)) def Ceor(freqs, variance=1e-1, length_scale=1e6): """ An exponential EoR covariance freqs : array_like, (Nfreqs, 1) frequency array in Hz variance : float Amplitude of diagonal length_scale : float length scale parameter (Hz) """ return variance * np.exp(-np.abs(freqs - freqs.T) / length_scale) ###Output _____no_output_____ ###Markdown Generate DataGenerate mock data. This is a simple simulation of independent realizations of a single baseline. ###Code freqs = np.linspace(130e6, 140e6, 50, endpoint=True) # Hz Nfreqs = len(freqs) cosmo = sqe.utils.Cosmology() # simulate fg, eor, noise and stick into container D # note that D.x1 and D.x2 are the two data realizations, which have the same FG and EoR realizations # (you can check by comparing F.x1 and F.x2), but have different noise realizations (therefore no noise bias) Ntimes = 100 D, F, E, N = sqe.utils.gen_data(freqs, Cfg, Ceor, Cn, Ntimes=Ntimes, cosmo=cosmo, ind_noise=True, Omega_Eff=0.1) fig, axes = plt.subplots(1, 4, figsize=(15, 4)) fig.subplots_adjust(wspace=.3) labels = ['full data', 'foregrounds', 'eor', 'noise'] for i, q in enumerate([D, F, E, N]): ax = axes[i] cax = ax.imshow(np.abs(q.x1), aspect='auto') fig.colorbar(cax, ax=ax) ax.set_title(labels[i], fontsize=12) ax.set_xlabel('frequency channel', fontsize=12) if i == 0: ax.set_ylabel('time bin', fontsize=12) fig.suptitle("Visibility components [arbitrary units]", fontsize=14, y=1) ###Output _____no_output_____ ###Markdown __Figure 1__ | Components of the data in the visibility, simulated in arbitrary units with each time bin being uncorrelated from the next. 
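Before applying the estimator, it is worth writing down the quadratic-estimator algebra that the `compute_q`, `compute_H`, and `compute_MWVp` calls below appear to implement (a sketch in standard notation, e.g. Liu & Shaw 2020; we assume simpleQE follows these conventions). With a weighting matrix $\mathbf{R}$ applied to the two data realizations $x_1$ and $x_2$,

$$\hat q_\alpha = \tfrac{1}{2}\, x_1^\dagger \mathbf{R}^\dagger \mathbf{Q}_\alpha \mathbf{R}\, x_2, \qquad H_{\alpha\beta} = \tfrac{1}{2}\,{\rm tr}\!\left[\mathbf{R}^\dagger \mathbf{Q}_\alpha \mathbf{R}\,\mathbf{C}_{,\beta}\right], \qquad \hat p = \mathbf{M}\hat q, \qquad \mathbf{W} = \mathbf{M}\mathbf{H},$$

so that $\langle \hat p \rangle = \mathbf{W}\, p^{\rm true}$; the choice of normalization $\mathbf{M}$ (e.g. `norm='H^-1/2'` or `norm='I'` below) trades off window-function compactness against error correlations.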
Perform GPR-FS ###Code def errorbar(ax, kp, y, yerr=None, W=None, fmt='o', ks=np.s_[:], **kwargs): if W is not None: # get x bins and errorbars from window functions x, xerr_low, xerr_hi = sqe.utils.interp_Wcdf(W, kp) xerr = np.array([[xerr_low], [xerr_hi]]).T else: x = kp xerr = np.zeros((len(kp), 1, 2)) if yerr is None: yerr = np.array([None] * len(y)) # iterate over points and plot p = [] for xp, yp, ye, xe in zip(x[ks], y[ks], yerr[ks], xerr[ks]): p.append(ax.errorbar(xp, yp, ls='', xerr=xe.T, yerr=ye, fmt=fmt, **kwargs)) return p # generate weighting matrices I = np.eye(Nfreqs) t = np.diag(signal.windows.blackmanharris(Nfreqs)) # tapering function gpr_fs = I - F.C @ np.linalg.pinv(D.C) # set R matrices D.set_R(gpr_fs); F.set_R(t); E.set_R(I); N.set_R(I) # compute Q and H D.compute_Q(); F.compute_Q(); E.compute_Q(); N.compute_Q() D.compute_H(); F.compute_H(); E.compute_H(); N.compute_H() # compute un-normalized band power: q D.compute_q(); F.compute_q(); E.compute_q(); N.compute_q() # compute normalization, window func, V, and normalized p (and spherically average it) D.compute_MWVp(norm='H^-1/2', C_data=D.C, C_bias=F.C) # no noise bias (see above) F.compute_MWVp(norm='I') E.compute_MWVp(norm='I') N.compute_MWVp(norm='I') fig, axes = plt.subplots(2, 1, figsize=(14, 10)) fig.subplots_adjust(hspace=0.02) xlim = 0.0, 0.6 ylim = 1e3, 1e13 prop_cycle = pt.matplotlib.Inferno_20.mpl_colors ax = axes[0] ax.tick_params(labelsize=12, direction='in', size=4) ax.tick_params(which='minor', size=2, direction='in') # plot FG averaged over time p1, = ax.plot(F.kp_sph, np.abs(np.mean(F.p_sph, axis=1).real), c='steelblue', lw=1.5, alpha=1) # plot eor averaged over time p2, = ax.plot(E.kp_sph, np.abs(np.mean(E.p_sph, axis=1).real), c='limegreen', lw=1.5, alpha=1) # plot noise averaged over time p3 = ax.axhline(np.mean(np.abs(np.mean(N.p_sph, axis=1).real)), color='k', ls='--', lw=1.5) # plot GPR-FS ax.set_prop_cycle('color', prop_cycle) errorbar(ax, D.kp_sph, np.mean(D.p_sph - D.b_sph, axis=1).real, yerr=np.sqrt(np.diag(D.V_sph)/Ntimes), fmt='o', ms=5, elinewidth=0.75, W=D.W_sph) ax.grid() ax.set_yscale('log') ax.set_xlim(xlim) ax.set_ylim(ylim) ax.set_xticklabels([]) ax.set_ylabel(r'$P(k)$ [arb units]', fontsize=18) ax.legend([p1, p2, p3], ['FG', 'EoR', 'Noise'], fontsize=16, ncol=2) ax = axes[1] ax.set_prop_cycle('color', prop_cycle) ax.tick_params(labelsize=12, direction='in', size=4, top=True) ax.tick_params(which='minor', size=2, direction='in') ax.plot(D.kp_sph, D.W_sph.T, lw=0.75, alpha=1) ax.grid() ax.set_xlim(xlim) ax.set_ylim(-.0, 1.10) ax.set_xlabel(r'$k\ [{\rm Mpc}^{-1}]$', fontsize=14) ax.set_ylabel(r'$W(k)$', fontsize=18) ###Output /Users/nkern/Software/anaconda3/lib/python3.7/site-packages/numpy/core/_asarray.py:85: ComplexWarning: Casting complex values to real discards the imaginary part return array(a, dtype, copy=False, order=order) ###Markdown __Figure 2__ | Example power spectra of FG (blue), EoR (green), noise floor (black) and recovered power spectra after GPR-FS (points). 
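A note on the weighting used above (our reading of the algebra, not simpleQE documentation): the GPR foreground-subtraction matrix $\mathbf{R} = \mathbf{I} - \mathbf{C}_{\rm fg}\,\mathbf{C}_{\rm data}^{-1}$ subtracts the Gaussian-process conditional mean of the foregrounds given the data,

$$\mathbf{R}\,x = x - \mathbf{C}_{\rm fg}\,\mathbf{C}_{\rm data}^{-1}\,x, \qquad \mathbf{C}_{\rm fg}\,\mathbf{C}_{\rm data}^{-1}\,x = \langle x_{\rm fg}\,|\,x \rangle,$$

which is why the recovered band powers track the EoR and noise floor once the bright foreground power is removed. The wideband inpainting below reuses the same conditional-mean idea, except that the model prediction is used to fill flagged channels rather than to subtract a component.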
Let's try wideband inpainting ###Code # triple the bandwidth from before freqs = np.linspace(120e6, 150e6, 150, endpoint=True) # Hz Nfreqs = len(freqs) cosmo = sqe.utils.Cosmology() # generate a wideband dataset, with a narrowband power spectrum window Ntimes = 100 D, F, E, N = sqe.utils.gen_data(freqs, Cfg, Ceor, Cn, Ntimes=Ntimes, cosmo=cosmo, ind_noise=True, Omega_Eff=0.1, pspec_spw=slice(50, 100)) D_flagged = copy.deepcopy(D) # generate flags flags = np.zeros(Nfreqs, np.bool) fchans = [(20, 21), (40, 41), (80, 81), (95, 96), (120, 121)] for fc in fchans: flags[fc[0]:fc[1]] = True fig, ax = plt.subplots(figsize=(10, 4)) ax.axvspan(50, 100, color='k', alpha=0.15) ax.plot(flags) ax.set_xlabel('frequency channel', fontsize=12) ###Output _____no_output_____ ###Markdown __Figure 3__ | Flagging mask we will apply to the data. Shaded region indicates power spectrum spectral window. ###Code # generate weighting matrices I = np.eye(Nfreqs) t = np.eye(Nfreqs) t[50:100, 50:100] = np.diag(signal.windows.blackmanharris(D.spw_Nfreqs)) # tapering function gpr_fs = I - F.C @ np.linalg.pinv(D.C) Wf = I.copy() Wf[~flags, ~flags] = 0.0 # get inpainting matrices across wide bandwidth gpr_dm = (F.C) @ np.linalg.inv(F.C + N.C + E.C + Wf * 1e10) gpr_ip = I - Wf + Wf @ gpr_dm # set R matrices D.set_R(t @ gpr_ip); D_flagged.set_R(t @ (I-Wf)); F.set_R(t); E.set_R(t); N.set_R(t) # compute Q and H D.compute_Q(); D_flagged.compute_Q(); F.compute_Q(); E.compute_Q(); N.compute_Q() D.compute_H(); D_flagged.compute_H(); F.compute_H(); E.compute_H(); N.compute_H() # compute un-normalized band power: q D.compute_q(); D_flagged.compute_q(); F.compute_q(); E.compute_q(); N.compute_q() # compute normalizations D.compute_MWVp(norm='I', C_data=D.C) D_flagged.compute_MWVp(norm='I') F.compute_MWVp(norm='I') E.compute_MWVp(norm='I') N.compute_MWVp(norm='I') fig, axes = plt.subplots(2, 2, figsize=(15, 10)) fig.subplots_adjust(hspace=0.02) xlim = 120, 150 ax = axes[0, 0] ax.tick_params(labelsize=12, direction='in', size=4) ax.tick_params(which='minor', size=2, direction='in') ax.axvspan(50, 100, color='limegreen', alpha=0.15) for fc in fchans: ax.axvspan(freqs[fc[0]-1]/1e6, freqs[fc[1]]/1e6, color='k', alpha=0.2) d = D.x1[1].copy() dip = gpr_dm @ d d[flags] *= np.nan p0, = ax.plot(F.freqs/1e6, np.abs(F.x1[1]), lw=4.5, color='steelblue', alpha=0.9) p1, = ax.plot(F.freqs/1e6, np.abs(dip), lw=1.5, color='gold', ls='--') r = plt.Rectangle((0,0), 0, 0, color='k', alpha=0.2) ax.set_xlim(xlim) ax.set_ylim(0.1, 70) ax.legend([p0, p1, r], ['Foreground data', 'Inpainted model', 'Flagged channels'], fontsize=13) ax.set_ylabel(r'$|V|$ [Jy]', fontsize=16) ax.set_xticklabels([]) ax = axes[1, 0] ax.tick_params(labelsize=12, direction='in', size=4, top=True) ax.tick_params(which='minor', size=2, direction='in') #ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0e')) ax.plot(F.freqs/1e6, np.abs(dip - F.x1[1]), color='k', lw=1) ax.set_xlim(xlim) ax.set_ylim(0.0001, 1) ax.set_xlabel(r'$\nu$ [MHz]', fontsize=16) ax.set_ylabel(r'Residual [Jy]', fontsize=14, labelpad=5) xlim = 0.0, 0.6 ylim = 1e3, 1e13 prop_cycle = pt.matplotlib.Inferno_20.mpl_colors ax = axes[0, 1] ax.tick_params(labelsize=12, direction='in', size=4) ax.tick_params(which='minor', size=2, direction='in') # plot FG averaged over time p1, = ax.plot(F.kp_sph, np.abs(np.mean(F.p_sph, axis=1).real), c='steelblue', lw=1.5, alpha=1) # plot eor averaged over time p2, = ax.plot(E.kp_sph, np.abs(np.mean(E.p_sph, axis=1).real), c='limegreen', lw=1.5, alpha=1) # plot noise averaged 
over time p3 = ax.axhline(np.mean(np.abs(np.mean(N.p_sph, axis=1).real)), color='k', ls='--', lw=1.5) # plot GPR-FS ax.set_prop_cycle('color', prop_cycle) errorbar(ax, D.kp_sph, np.mean(D.p_sph, axis=1).real, yerr=np.sqrt(np.diag(D.V_sph)/Ntimes), fmt='o', ms=5, elinewidth=0.75, W=D.W_sph) errorbar(ax, D.kp_sph, np.mean(D_flagged.p_sph, axis=1).real, c='darkred', yerr=np.sqrt(np.diag(D_flagged.V_sph)/Ntimes), fmt='o', ms=5, elinewidth=0.75, W=D_flagged.W_sph) ax.grid() ax.set_yscale('log') ax.set_xlim(xlim) ax.set_ylim(ylim) ax.set_xticklabels([]) ax.set_ylabel(r'$P(k)$ [arb units]', fontsize=18) #ax.legend([p1, p2, p3, p4], ['FG', 'EoR', 'Noise', 'GPR-FS'], fontsize=16, ncol=2) ax = axes[1, 1] ax.set_prop_cycle('color', prop_cycle) ax.tick_params(labelsize=12, direction='in', size=4, top=True) ax.tick_params(which='minor', size=2, direction='in') ax.plot(D.kp_sph, D.W_sph.T, lw=0.75, alpha=1) ax.grid() ax.set_xlim(xlim) ax.set_ylim(-.0, 1.10) ax.set_xlabel(r'$k\ [{\rm Mpc}^{-1}]$', fontsize=14) ax.set_ylabel(r'$W(k)$', fontsize=18) ###Output _____no_output_____ ###Markdown Getting started with OGGM: a real case study, step by step The OGGM workflow is best explained with an example. In the following, we will show how to apply the standard [OGGM workflow](http://docs.oggm.org/en/stable/introduction.html) to a list of glaciers. This example is meant to guide you through a first-time setup step-by-step. If you prefer not to install OGGM on your computer, you can always run this notebook in [OGGM-Edu](https://edu.oggm.org) instead! Set-up Input data folders **If you are using your own computer**: before you start, make sure that you have set up the [input data configuration file](https://docs.oggm.org/en/stable/input-data.html) to your liking. In the course of this tutorial, we will need to download data for each glacier (a couple of MB at most, depending on the chosen glaciers), so make sure you have an internet connection. cfg.initialize() and cfg.PARAMS An OGGM simulation script will always start with the following commands: ###Code from oggm import cfg, utils cfg.initialize(logging_level='WARNING') ###Output _____no_output_____ ###Markdown A call to [cfg.initialize()](https://docs.oggm.org/en/latest/generated/oggm.cfg.initialize.html) will read the default parameter file (or any user-provided file) and make the parameters available to all other OGGM tools via the `cfg.PARAMS` dictionary. Here are some examples of these parameters: ###Code cfg.PARAMS['prcp_scaling_factor'], cfg.PARAMS['ice_density'], cfg.PARAMS['continue_on_error'] ###Output _____no_output_____ ###Markdown See [here](https://github.com/OGGM/oggm/blob/master/oggm/params.cfg) for the default parameter file and a description of each parameter's role and default value. ###Code # You can try with or without multiprocessing: with two glaciers, OGGM could run on two processors cfg.PARAMS['use_multiprocessing'] = True ###Output _____no_output_____ ###Markdown Workflow In this section, we will explain the fundamental concepts of the OGGM workflow:- Working directories- Glacier directories- Tasks ###Code from oggm import workflow ###Output _____no_output_____ ###Markdown Working directory Each OGGM run needs a **single folder** in which to store the results of the computations for all glaciers. This is called a "working directory" and needs to be specified before each run. 
Here we create a temporary folder for you: ###Code cfg.PATHS['working_dir'] = utils.gettempdir(dirname='OGGM-GettingStarted', reset=True) cfg.PATHS['working_dir'] ###Output _____no_output_____ ###Markdown We use a temporary directory for this example, but in practice you will set this working directory yourself (for example: `/home/john/OGGM_output`. The size of this directory will depend on how many glaciers you'll simulate!**This working directory is meant to be persistent**, i.e. you can stop your processing workflow after any task, and restart from an existing working directory at a later stage. Define the glaciers for the run ###Code rgi_ids = ['RGI60-11.01328', 'RGI60-11.00897'] ###Output _____no_output_____ ###Markdown You can provide any number of glacier identifiers to OGGM. Here is a list of other glaciers you might want to try out:- `RGI60-11.01328`: [Unteraar Glacier](https://en.wikipedia.org/wiki/Unteraargletscher) in the Swiss Alps- `RGI60-11.00897`: [Hintereisferner](http://acinn.uibk.ac.at/research/ice-and-climate/projects/hintereisferner) in the Austrian Alps. You can also try out:- `RGI60-18.02342`: Tasman Glacier in New Zealand- `RGI60-11.00787`: [Kesselwandferner](https://de.wikipedia.org/wiki/Kesselwandferner) in the Austrian Alps- ... or any other glacier identifier! You can find other glacier identifiers by exploring the [GLIMS viewer](https://www.glims.org/maps/glims).For an operational run on an RGI region, you might want to download the [Randolph Glacier Inventory](https://www.glims.org/RGI/) dataset instead, and start a run from it. This case is covered in the [working with the RGI](working_with_rgi.ipynb) tutorial. Glacier directories The OGGM workflow is organized as a list of **tasks** that have to be applied to a list of glaciers. The vast majority of tasks are called **entity tasks**: they are standalone operations to be realized on one single glacier entity. These tasks are executed sequentially (one after another): they often need input generated by the previous task(s): for example, the climate calibration needs the glacier flowlines, which can be only computed after the topography data has been processed, and so on.To handle this situation, OGGM uses a workflow based on data persistence on disk: instead of passing data as python variables from one task to another, each task will read the data from disk and then write the computation results back to the disk, making these new data available for the next task in the queue.These glacier specific data are located in [glacier directories](https://docs.oggm.org/en/latest/glacierdir.htmlglacier-directories). In the model, these directories are initialized with the following command (this can take a little while on the first call, as OGGM needs to download some data): ###Code # Where to fetch the pre-processed directories gdirs = workflow.init_glacier_directories(rgi_ids, from_prepro_level=3, prepro_border=80) ###Output _____no_output_____ ###Markdown - the keyword `from_prepro_level` indicates that we will start from [pre-processed directories](https://docs.oggm.org/en/latest/input-data.htmlpre-processed-directories), i.e. data that are already pre-processed for the model users. In many cases you will want to start from level 3, 4 or 5. 
Here we start from level 3 and rerun some of the processing in order to demonstrate the OGGM workflow.- the `prepro_border` keyword indicates the number of DEM grid points which we'd like to add to each side of the glacier for the local map: the larger the glacier will grow, the larger the border parameter should be. The available pre-processed border values are: **10, 80, 160** (depending on the model set-ups there might be more or fewer options). These are the fixed map sizes we prepared for you - any other map size will require full processing (see the [alternative DEM example](dem_sources.ipynb) for a tutorial). The [init_glacier_directories](https://docs.oggm.org/en/latest/generated/oggm.workflow.init_glacier_directories.htmloggm.workflow.init_glacier_directories) task will always be the very first task to call for all your OGGM experiments. Let's see what it gives us back: ###Code type(gdirs), type(gdirs[0]) ###Output _____no_output_____ ###Markdown `gdirs` is a list of [GlacierDirectory](https://docs.oggm.org/en/latest/generated/oggm.GlacierDirectory.htmloggm.GlacierDirectory) objects (one for each glacier). **Glacier directories are used by OGGM as "file and attribute manager"** for single glaciers. For example, the model now knows where to find the topography data file for this glacier: ###Code gdir = gdirs[0] # take Unteraar print('Path to the DEM:', gdir.get_filepath('dem')) ###Output _____no_output_____ ###Markdown And we can also access some attributes of this glacier: ###Code gdir gdir.rgi_date # date at which the outlines are valid ###Output _____no_output_____ ###Markdown The advantage of this Glacier Directory data model is that it greatly simplifies the data transfer between tasks. **The single mandatory argument of all entity tasks will always be a glacier directory**. With the glacier directory, each task will find the input it needs: for example, both the glacier's topography and outlines are needed for the next plotting function, and both are available via the `gdir` argument: ###Code from oggm import graphics graphics.plot_domain(gdir, figsize=(8, 7)) ###Output _____no_output_____ ###Markdown Tasks There are two different types of "[tasks](http://docs.oggm.org/en/latest/api.htmlentity-tasks)":**Entity Tasks**: Standalone operations to be realized on one single glacier entity, independently from the others. The majority of OGGM tasks are entity tasks. They are parallelisable: the same task can run on several glaciers in parallel.**Global Task**: Tasks which require working on several glacier entities at the same time. Model parameter calibration or the compilation of several glaciers' output are examples of global tasks. OGGM implements a simple mechanism to run a specific task on a list of `GlacierDirectory` objects: ###Code from oggm import tasks # run the glacier_masks task on all gdirs workflow.execute_entity_task(tasks.glacier_masks, gdirs); ###Output _____no_output_____ ###Markdown The task we just applied to our list of glaciers is [glacier_masks](http://docs.oggm.org/en/latest/generated/oggm.tasks.glacier_masks.htmloggm.tasks.glacier_masks). It wrote a new file in our glacier directory, providing raster masks of the glacier (among other things): ###Code print('Path to the masks:', gdir.get_filepath('gridded_data')) ###Output _____no_output_____ ###Markdown It is also possible to apply several tasks sequentially (i.e. 
one after another) on our glacier list: ###Code list_tasks = [ tasks.compute_centerlines, tasks.initialize_flowlines, tasks.compute_downstream_line, ] for task in list_tasks: # The order matters! workflow.execute_entity_task(task, gdirs) ###Output _____no_output_____ ###Markdown The function `execute_entity_task` can run a task on different glaciers at the same time, if the `use_multiprocessing` option is set to `True` in the configuration file. Among other things, we computed the glacier flowlines and the glacier's downstream line. We can now plot them: ###Code graphics.plot_centerlines(gdir, figsize=(8, 7), use_flowlines=True, add_downstream=True) ###Output _____no_output_____ ###Markdown As a result, the glacier directories now store many more files. If you are interested, you can have a look: ###Code import os print(os.listdir(gdir.dir)) ###Output _____no_output_____ ###Markdown For a short explanation of what these files are, see the [glacier directory documentation](https://docs.oggm.org/en/latest/glacierdir.htmlcfg-basenames). In practice, however, you will only rarely need to access these files yourself. Other preprocessing tasks Let's continue with the other preprocessing tasks: ###Code list_tasks = [ tasks.catchment_area, tasks.catchment_width_geom, tasks.catchment_width_correction, tasks.compute_downstream_bedshape ] for task in list_tasks: # The order matters! workflow.execute_entity_task(task, gdirs) ###Output _____no_output_____ ###Markdown We just computed the catchment areas of each flowline (the colors are arbitrary): ###Code graphics.plot_catchment_areas(gdir, figsize=(8, 7)) ###Output _____no_output_____ ###Markdown Each flowline now knows what area will contribute to its surface mass-balance and ice flow. Accordingly, it is possible to compute each glacier cross-section's width, and correct it so that the total glacier area and elevation distribution is conserved: ###Code graphics.plot_catchment_width(gdir, corrected=True, figsize=(8, 7)) ###Output _____no_output_____ ###Markdown Climate tasks The glacier directories we downloaded already contain the climate timeseries for each glacier (`from_prepro_level=3`). Let's have a look at them: ###Code import xarray as xr fpath = gdir.get_filepath('climate_historical') ds = xr.open_dataset(fpath) # Data is in hydrological years # -> let's just ignore the first and last calendar years ds.temp.resample(time='AS').mean()[1:-1].plot(); ###Output _____no_output_____ ###Markdown This climate data is called the "baseline climate" for this glacier. It will be used for the mass-balance model calibration, and at the end of this tutorial also to generate the random climate to drive a simulation. When running OGGM with GCM data, the GCM timeseries will be computed as anomalies to this baseline timeseries, hence the name. Here we are using CRU, but OGGM-Shop also allows you to use ERA5 and CERA as baselines. Now, let's calibrate the mass-balance model for this glacier. The calibration procedure of OGGM is ... original, but is also quite powerful. 
Read the [doc page](https://docs.oggm.org/en/latest/mass-balance.html) or the [GMD paper](https://www.geosci-model-dev-discuss.net/gmd-2018-9/) for more details, and you can also follow the [mass-balance calibration tutorial](massbalance-calibration.ipynb) explaining some of the model internals. The default calibration process is automated: ###Code # Fetch the reference t* list and associated model parameters params_url = 'https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params/RGIV62/CRU/centerlines/qc3/pcp2.5' workflow.download_ref_tstars(base_url=params_url) # Now calibrate workflow.execute_entity_task(tasks.local_t_star, gdirs); workflow.execute_entity_task(tasks.mu_star_calibration, gdirs); ###Output _____no_output_____ ###Markdown **¡Important!** The calibration of the mass-balance model is automated only for certain parameter combinations of the model - any change in the mass-balance model settings (e.g. the melt threshold, the precipitation correction factor, etc.) will require a re-calibration of the model (see the [mass-balance calibration tutorial](massbalance-calibration.ipynb) for an introduction to this topic). From there, OGGM can now compute the mass-balance for these glaciers. For example: ###Code from oggm.core.massbalance import MultipleFlowlineMassBalance gdir_hef = gdirs[1] mbmod = MultipleFlowlineMassBalance(gdir_hef, use_inversion_flowlines=True) import numpy as np import matplotlib.pyplot as plt years = np.arange(1902, 2017) mb_ts = mbmod.get_specific_mb(year=years) plt.plot(years, mb_ts); plt.ylabel('SMB (mm yr$^{-1}$)'); ###Output _____no_output_____ ###Markdown For the Hintereisferner (not for Unteraar where no observational data is available), we can also compare our computed mass-balance to the measured one: ###Code mbdf = gdir_hef.get_ref_mb_data() mbdf['OGGM'] = mbmod.get_specific_mb(year=mbdf.index) mbdf[['ANNUAL_BALANCE', 'OGGM']].plot(); plt.ylabel('SMB (mm yr$^{-1}$)'); ###Output _____no_output_____ ###Markdown This graphic is interesting because it shows an effect often observed when comparing the computed mass balance to the observed one: since (in this case) the OGGM geometry is fixed with time, the modelled specific mass-balance series are likely to have a stronger trend than the observed ones. To assess the results of the OGGM mass-balance model for all WGMS glaciers worldwide, visit the [score summary](https://cluster.klima.uni-bremen.de/~oggm/ref_mb_params/RGIV62/CRU/centerlines/qc3/pcp2.5/_scores/) for these particular model settings. Computing the ice thickness ("inversion") With the computed mass-balance and the flowlines, OGGM can now compute the ice thickness, based on the principles of [mass conservation and ice dynamics](http://docs.oggm.org/en/latest/inversion.html). 
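In a nutshell (a simplified sketch of the shallow-ice scheme; see the linked documentation for the exact implementation, which also handles bed shapes and optional sliding): the calibrated mass-balance provides the integrated ice flux $q$ through each cross-section, and the local thickness $h$ follows from inverting

$$q = \bar u\, h\, w, \qquad \bar u = \frac{2A}{n+2}\, h\, \tau^n, \qquad \tau = \rho\, g\, h\, \alpha,$$

with section width $w$, surface slope $\alpha$, Glen parameters $A$ and $n=3$, ice density $\rho$ and gravitational acceleration $g$. This is also why the creep parameter $A$ matters so much in the sensitivity experiment further down.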
###Code list_tasks = [ tasks.prepare_for_inversion, # This is a preprocessing task tasks.mass_conservation_inversion, # This does the actual job tasks.filter_inversion_output # This smoothes the thicknesses at the tongue a little ] for task in list_tasks: workflow.execute_entity_task(task, gdirs) ###Output _____no_output_____ ###Markdown The ice thickness is computed for all sections along the flowline, and can be displayed with the help of OGGM's graphics module: ###Code graphics.plot_inversion(gdir, figsize=(8, 7)) ###Output _____no_output_____ ###Markdown The inversion is realized with the default parameter settings: it must be noted that the model is sensitive to the choice of some of them, most notably the creep parameter A: ###Code cfg.PARAMS['inversion_glen_a'] a_factor = np.linspace(0.1, 10., 100) volume = [] for f in a_factor: # Recompute the volume without overwriting the previous computations v = tasks.mass_conservation_inversion(gdir, glen_a=f * cfg.PARAMS['inversion_glen_a'], write=False) volume.append(v * 1e-9) plt.plot(a_factor, volume); plt.title('Unteraar total volume'); plt.ylabel('Volume (km$^3$)'); plt.xlabel('Glen A factor (1 = default)'); ###Output _____no_output_____ ###Markdown There is no simple way to find the best A for each individual glacier. It can easily vary by a factor of 10 (or more) from one glacier to another. At the global scale, the "best" A is close to the default value (possibly larger, between 1 and 1.5 times larger). The default parameter is a good choice as a first step, but be aware that reconstructions based on this default parameter might be very uncertain! See our [ice thickness inversion tutorial](inversion.ipynb) for a more in-depth discussion. Simulations For most applications, this is where the fun starts! With climate data and an estimate of the ice thickness, we can now start transient simulations. For this tutorial, we will show how to realize idealized experiments based on the baseline climate only, but it is also possible to drive OGGM with real GCM data. ###Code # Convert the flowlines to a "glacier" for the ice dynamics module workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs); ###Output _____no_output_____ ###Markdown Let's start a run driven by the climate of the last 31 years, shuffled randomly for 200 years. This can be seen as a "commitment" simulation, i.e. how much glaciers will change even without further climate change: ###Code workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200, y0=2000, output_filesuffix='_2000'); ###Output _____no_output_____ ###Markdown The output of this simulation is stored in two separate files: a diagnostic file (which contains time series variables such as length, volume, ELA, etc.) and a full model output file, which is larger but allows one to reproduce the full glacier geometry changes during the run. In practice, the diagnostic files are often compiled for the entire list of glaciers: ###Code ds2000 = utils.compile_run_output(gdirs, filesuffix='_2000') ###Output _____no_output_____ ###Markdown This dataset is also stored on disk (in the working directory) as a NetCDF file for later use. Here we can access it directly: ###Code ds2000 ###Output _____no_output_____ ###Markdown We opened the file with [xarray](http://xarray.pydata.org), a very useful data analysis library based on [pandas](http://pandas.pydata.org/). 
For example, we can plot the volume and length evolution of both glaciers with time: ###Code f, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 4)) ds2000.volume.plot.line(ax=ax1, hue='rgi_id'); ds2000.length.plot.line(ax=ax2, hue='rgi_id'); ###Output _____no_output_____ ###Markdown The full model output files can be used for plots: ###Code f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 6)) graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=0, ax=ax1, vmax=350) graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=50, ax=ax2, vmax=350) graphics.plot_modeloutput_map(gdir, filesuffix='_2000', modelyr=150, ax=ax3, vmax=350) plt.tight_layout(); ###Output _____no_output_____ ###Markdown Sensitivity to temperature Now repeat our simulations with a +0.5°C and -0.5°C temperature bias, which for a glacier is quite a lot! ###Code workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200, temperature_bias=0.5, y0=2000, output_filesuffix='_p05'); workflow.execute_entity_task(tasks.run_random_climate, gdirs, nyears=200, temperature_bias=-0.5, y0=2000, output_filesuffix='_m05'); dsp = utils.compile_run_output(gdirs, filesuffix='_p05') dsm = utils.compile_run_output(gdirs, filesuffix='_m05') f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 4)) rgi_id = 'RGI60-11.01328' ds2000.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='Commitment'); ds2000.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id'); ds2000.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id'); dsp.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='+0.5°C'); dsp.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id'); dsp.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id'); dsm.sel(rgi_id=rgi_id).volume.plot.line(ax=ax1, hue='rgi_id', label='-0.5°C'); dsm.sel(rgi_id=rgi_id).area.plot.line(ax=ax2, hue='rgi_id'); dsm.sel(rgi_id=rgi_id).length.plot.line(ax=ax3, hue='rgi_id'); ax1.legend(); ###Output _____no_output_____ ###Markdown Your first Edward programProbabilistic modeling in Edward uses a simple language of random variables. Here we will show a Bayesian neural network. It is a neural network with a prior distribution on its weights.A webpage version is available at http://edwardlib.org/getting-started. ###Code %matplotlib inline from __future__ import absolute_import from __future__ import division from __future__ import print_function import edward as ed import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from edward.models import Normal plt.style.use('ggplot') def build_toy_dataset(N=50, noise_std=0.1): x = np.linspace(-3, 3, num=N) y = np.cos(x) + np.random.normal(0, noise_std, size=N) x = x.astype(np.float32).reshape((N, 1)) y = y.astype(np.float32) return x, y def neural_network(x, W_0, W_1, b_0, b_1): h = tf.tanh(tf.matmul(x, W_0) + b_0) h = tf.matmul(h, W_1) + b_1 return tf.reshape(h, [-1]) ###Output _____no_output_____ ###Markdown First, simulate a toy dataset of 50 observations with a cosine relationship. ###Code ed.set_seed(42) N = 50 # number of data points D = 1 # number of features x_train, y_train = build_toy_dataset(N) ###Output _____no_output_____ ###Markdown Next, define a two-layer Bayesian neural network. Here, we define the neural network manually with `tanh` nonlinearities. 
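In equations, the generative model matches the code below: standard normal priors on all weights and biases, and a Gaussian likelihood with a fixed noise scale of $0.1$:

$$W_k \sim \mathcal{N}(0, I), \qquad b_k \sim \mathcal{N}(0, I), \qquad y_n \mid \mathbf{x}_n, W, b \;\sim\; \mathcal{N}\!\big(\mathrm{NN}(\mathbf{x}_n; W, b),\; 0.1^2\big).$$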
###Code W_0 = Normal(loc=tf.zeros([D, 2]), scale=tf.ones([D, 2])) W_1 = Normal(loc=tf.zeros([2, 1]), scale=tf.ones([2, 1])) b_0 = Normal(loc=tf.zeros(2), scale=tf.ones(2)) b_1 = Normal(loc=tf.zeros(1), scale=tf.ones(1)) x = x_train y = Normal(loc=neural_network(x, W_0, W_1, b_0, b_1), scale=0.1 * tf.ones(N)) ###Output _____no_output_____ ###Markdown Next, make inferences about the model from data. We will use variational inference. Specify a normal approximation over the weights and biases. ###Code qW_0 = Normal(loc=tf.get_variable("qW_0/loc", [D, 2]), scale=tf.nn.softplus(tf.get_variable("qW_0/scale", [D, 2]))) qW_1 = Normal(loc=tf.get_variable("qW_1/loc", [2, 1]), scale=tf.nn.softplus(tf.get_variable("qW_1/scale", [2, 1]))) qb_0 = Normal(loc=tf.get_variable("qb_0/loc", [2]), scale=tf.nn.softplus(tf.get_variable("qb_0/scale", [2]))) qb_1 = Normal(loc=tf.get_variable("qb_1/loc", [1]), scale=tf.nn.softplus(tf.get_variable("qb_1/scale", [1]))) ###Output _____no_output_____ ###Markdown Defining `tf.get_variable` allows the variational factors’ parameters to vary. They are initialized randomly. The standard deviation parameters are constrained to be greater than zero according to a [softplus](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)) transformation. ###Code # Sample functions from variational model to visualize fits. rs = np.random.RandomState(0) inputs = np.linspace(-5, 5, num=400, dtype=np.float32) x = tf.expand_dims(inputs, 1) mus = tf.stack( [neural_network(x, qW_0.sample(), qW_1.sample(), qb_0.sample(), qb_1.sample()) for _ in range(10)]) # FIRST VISUALIZATION (prior) sess = ed.get_session() tf.global_variables_initializer().run() outputs = mus.eval() fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(111) ax.set_title("Iteration: 0") ax.plot(x_train, y_train, 'ks', alpha=0.5, label='(x, y)') ax.plot(inputs, outputs[0].T, 'r', lw=2, alpha=0.5, label='prior draws') ax.plot(inputs, outputs[1:].T, 'r', lw=2, alpha=0.5) ax.set_xlim([-5, 5]) ax.set_ylim([-2, 2]) ax.legend() plt.show() ###Output _____no_output_____ ###Markdown Now, run variational inference with the [Kullback-Leibler](https://en.wikipedia.org/wiki/Kullback–Leibler_divergence) divergence in order to infer the model’s latent variables with the given data. We specify `1000` iterations. ###Code inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1}, data={y: y_train}) inference.run(n_iter=1000, n_samples=5) ###Output 1000/1000 [100%] ██████████████████████████████ Elapsed: 12s | Loss: -5.755 ###Markdown Finally, criticize the model fit. Bayesian neural networks define a distribution over neural networks, so we can perform a graphical check. Draw neural networks from the inferred model and visualize how well it fits the data. ###Code # SECOND VISUALIZATION (posterior) outputs = mus.eval() fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(111) ax.set_title("Iteration: 1000") ax.plot(x_train, y_train, 'ks', alpha=0.5, label='(x, y)') ax.plot(inputs, outputs[0].T, 'r', lw=2, alpha=0.5, label='posterior draws') ax.plot(inputs, outputs[1:].T, 'r', lw=2, alpha=0.5) ax.set_xlim([-5, 5]) ax.set_ylim([-2, 2]) ax.legend() plt.show() ###Output _____no_output_____
monoalphabetic_cipher.ipynb
###Markdown Ciphertext ###Code ciphertext = '''Ts Satbesyd Hsewtf fat hf ceqcxf jat qswck I acmt ftepsw atcbp ahw wtkjhsk atb lkptb ckx sjatb kcwt Ik ahf txtf fat tyehuftf ckp ubtpswhkcjtf jat qaset so atb ftv Ij qcf ksj jacj at otej ckx twsjhsk cdhk js esmt osb Ibtkt Apetb Aee twsjhskf ckp jacj skt ucbjhylecbex qtbt crasbbtkj js ahf ysep ubtyhft rlj cpwhbcrex rceckytp whkp Ht qcf I jcdt hj jat wsfj utbotyj btcfskhkg ckp srftbmhkg wcyahkt jacj jat qsbep acf fttk rlj cf c esmtb at qslep acmt uecytp ahwfteo hk c oceft usfhjhsk Ht ktmtb fusdt so jat fsojtb ucffhskf fcmt qhja c ghrt ckp c fkttb Tatx qtbt cpwhbcret jahkgf osb jat srftbmtb—tvyteetkj osb pbcqhkg jat mthe obsw wtkf wsjhmtf ckp cyjhskf Blj osb jat jbchktp btcfsktb js cpwhj flya hkjblfhskf hkjs ahf sqk ptehycjt ckp ohktex cpnlfjtp jtwutbcwtkj qcf js hkjbsplyt c phfjbcyjhkg ocyjsb qahya whgaj jabsq c pslrj lusk cee ahf wtkjce btflejf Gbhj hk c ftkfhjhmt hkfjblwtkj sb c ybcyd hk skt so ahf sqk ahgausqtb etkftf qslep ksj rt wsbt phfjlbrhkg jack c fjbskg twsjhsk hk c kcjlbt flya cf ahf Akp xtj jatbt qcf rlj skt qswck js ahw ckp jacj qswck qcf jat ecjt Ibtkt Apetb so plrhslf ckp iltfjhskcret wtwsbx I acp fttk ehjjet so Hsewtf ecjtex Mx wcbbhcgt acp pbhojtp lf cqcx obsw tcya sjatb Mx sqk yswuetjt acuuhktff ckp jat aswtytkjbtp hkjtbtfjf qahya bhft lu cbslkp jat wck qas ohbfj ohkpf ahwfteo wcfjtb so ahf sqk tfjcrehfawtkj qtbt floohyhtkj js crfsbr cee wx cjjtkjhsk qahet Hsewtf qas escjatp tmtbx osbw so fsyhtjx qhja ahf qaset Bsatwhck fsle btwchktp hk slb espghkgf hk Bcdtb Sjbttj rlbhtp cwskg ahf sep rssdf ckp cejtbkcjhkg obsw qttd js qttd rtjqttk ysychkt ckp cwrhjhsk jat pbsqfhktff so jat pblg ckp jat ohtbyt tktbgx so ahf sqk dttk kcjlbt Ht qcf fjhee cf tmtb pttuex cjjbcyjtp rx jat fjlpx so ybhwt ckp syyluhtp ahf hwwtkft ocylejhtf ckp tvjbcsbphkcbx usqtbf so srftbmcjhsk hk oseesqhkg slj jasft yeltf ckp yetcbhkg lu jasft wxfjtbhtf qahya acp rttk crckpsktp cf asutetff rx jat soohyhce usehyt Fbsw jhwt js jhwt I atcbp fswt mcglt cyyslkj so ahf pshkgf: so ahf flwwskf js Optffc hk jat ycft so jat Tbtusoo wlbptb so ahf yetcbhkg lu so jat fhkglecb jbcgtpx so jat Ajdhkfsk rbsjatbf cj Tbhkyswcett ckp ohkceex so jat whffhsk qahya at acp cyyswuehfatp fs ptehycjtex ckp flyytffoleex osb jat bthgkhkg ocwhex so Hseeckp Btxskp jatft fhgkf so ahf cyjhmhjx asqtmtb qahya I wtbtex facbtp qhja cee jat btcptbf so jat pchex ubtff I dktq ehjjet so wx osbwtb obhtkp ckp yswuckhsk''' ###Output _____no_output_____ ###Markdown It appears only lowercase letters are encrypted ###Code import string ciphertext_lower = list(filter(lambda x: x in string.ascii_lowercase, ciphertext)) ###Output _____no_output_____ ###Markdown Calculate the letter frequenciesSort by most frequent. ###Code freq_ciphertext = sorted([(ciphertext_lower.count(c), c) for c in string.ascii_lowercase], reverse = True) ###Output _____no_output_____ ###Markdown English letter frequencies in the same order ###Code freq_english = "etaoinsrhldcumfpgwybvkxjqz" ###Output _____no_output_____ ###Markdown Secret alphabet so far ###Code secret = ''.join([c[1] for c in freq_ciphertext]) print(freq_english) print(secret) ###Output etaoinsrhldcumfpgwybvkxjqz tsjckhfbaepwoyqlxrugmdvniz ###Markdown In correct order ###Code print(string.ascii_lowercase) print(''.join([x[1] for x in sorted(zip(freq_english, secret))])) ###Output abcdefghijklmnopqrstuvwxyz jgwptqxakndeyhclibfsomrvuz ###Markdown Try to substitueWe now try to figure out what the words partly visible should be. 
###Code print(''.join([i if i in string.ascii_uppercase or i == ' ' else freq_english[secret.find(i)] for i in ciphertext])) ###Output Tt Sherltmk Htlces she ns olfogs ahe ftcoi I hove seldtc heord hnc ceianti her pider oig taher ioce Ii hns eges she emlnyses oid yredtcnioaes ahe fhtle tu her sex Ia fos ita ahoa he uela oig ectanti okni at ltve utr Ireie Adler All ectantis oid ahoa tie yoranmplorlg fere owhtrreia at hns mtld yremnse wpa odcnrowlg woloimed cnid He fos I aoke na ahe ctsa yeruema reostinib oid twservnib comhnie ahoa ahe ftrld hos seei wpa os o ltver he ftpld hove ylomed hncselu ni o uolse ytsnanti He iever sytke tu ahe stuaer yossntis sove fnah o bnwe oid o sieer Theg fere odcnrowle ahnibs utr ahe twserverzexmelleia utr drofnib ahe venl urtc ceis ctanves oid omantis Bpa utr ahe aronied reostier at odcna spmh niarpsntis niat hns tfi delnmoae oid unielg odjpsaed aecyeroceia fos at niartdpme o dnsaromanib uomatr fhnmh cnbha ahrtf o dtpwa pyti oll hns ceiaol resplas Grna ni o seisnanve nisarpceia tr o mromk ni tie tu hns tfi hnbhytfer leises ftpld ita we ctre dnsaprwnib ahoi o sartib ectanti ni o ioapre spmh os hns Aid gea ahere fos wpa tie ftcoi at hnc oid ahoa ftcoi fos ahe loae Ireie Adler tu dpwntps oid qpesantiowle cectrgzI hod seei lnaale tu Htlces loaelg Mg corrnobe hod drnuaed ps ofog urtc eomh taher Mg tfi mtcyleae hoyyniess oid ahe htcemeiared niaeresas fhnmh rnse py ortpid ahe coi fht unrsa unids hncselu cosaer tu hns tfi esaowlnshceia fere spuunmneia at owstrw oll cg oaaeianti fhnle Htlces fht ltoahed everg utrc tu stmneag fnah hns fhtle Bthecnoi stpl reconied ni tpr ltdbnibs ni Boker Sareea wprned octib hns tld wttks oid olaerioanib urtc feek at feek weafeei mtmonie oid ocwnanti ahe drtfsniess tu ahe drpb oid ahe unerme eierbg tu hns tfi keei ioapre He fos sanll os ever deeylg oaaromaed wg ahe sapdg tu mrnce oid tmmpyned hns ncceise uomplanes oid exarotrdniorg ytfers tu twservoanti ni utlltfnib tpa ahtse mlpes oid mleornib py ahtse cgsaernes fhnmh hod weei owoidtied os htyeless wg ahe tuunmnol ytlnme Frtc ance at ance I heord stce vobpe ommtpia tu hns dtnibsz tu hns spcctis at Odesso ni ahe mose tu ahe Treytuu cprder tu hns mleornib py tu ahe snibplor arobedg tu ahe Aaknisti wrtahers oa Trnimtcolee oid uniollg tu ahe cnssnti fhnmh he hod ommtcylnshed st delnmoaelg oid spmmessupllg utr ahe renbinib uocnlg tu Htlloid Begtid ahese snbis tu hns omanvnag htfever fhnmh I cerelg shored fnah oll ahe reoders tu ahe donlg yress I kief lnaale tu cg utrcer urneid oid mtcyointi ###Markdown Apply the necessary correctionsLook at the plaintext, then find the corresponding ciphertext character and specify the correct meaning. ###Code correction = list(secret) def swap(cipherchar, meaning): mpos = freq_english.find(meaning) cpos = ''.join(correction).find(cipherchar) tmp = correction[mpos] correction[mpos] = correction[cpos] correction[cpos] = tmp swap('s', 'o') swap('y', 'c') swap('h', 'i') swap('c', 'a') swap('q', 'w') swap('x', 'y') swap('l', 'u') swap('o', 'f') swap('u', 'p') swap('r', 'b') correction = ''.join(correction) ###Output _____no_output_____ ###Markdown Ciphertext and plaintext column wiseFor easy spotting of the characters that are decrypted wrong. 
###Code start = 3 rows = 32 decrypted = [i if i in string.ascii_uppercase or i == ' ' else freq_english[correction.find(i)] for i in ciphertext[start:rows]] list(zip(ciphertext, ''.join(decrypted))) ###Output _____no_output_____ ###Markdown Try decrypting the whole ciphertext ###Code ''.join([c if c in string.ascii_uppercase or c == ' ' else freq_english[correction.find(c)] for c in ciphertext]) ###Output _____no_output_____ ###Markdown Corrected secret in order ###Code print(string.ascii_lowercase) print(''.join([c[1] for c in sorted(zip(freq_english, correction))])) ###Output abcdefghijklmnopqrstuvwxyz cryptogahndewksuibfjlmqvxz
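###Markdown A compact alternative (a footnote, not part of the original analysis): once `correction` is final, the whole decryption can be expressed with Python's built-in `str.translate`, which, like the list comprehensions above, leaves uppercase letters and spaces untouched: ###Code
# Build a cipher -> plain translation table from the corrected secret alphabet
decrypt_table = str.maketrans(correction, freq_english)
# Unmapped characters (uppercase, spaces) pass through unchanged
print(ciphertext.translate(decrypt_table)[:80])
###Output _____no_output_____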
run_boto3-custom.ipynb
###Markdown Helper package for AWS S3 with boto3 Functions covered* S3* * `upload`* * `read_file` ###Code import pandas as pd import numpy as np import os from boto3_custom.simple_storage import upload, read_file ###Output _____no_output_____ ###Markdown Upload directory of files into S3 Setup: create directory containing a file ###Code # create data file inside of directory from sklearn.datasets import load_iris, load_diabetes iris = load_iris() df = pd.DataFrame(iris['data'], columns=list(iris.feature_names)) dir1 = 'new_folder' os.mkdir(dir1) df.to_csv(os.path.join(dir1, 'iris.csv')) ###Output _____no_output_____ ###Markdown Upload folder ###Code upload('new_folder') ###Output _____no_output_____ ###Markdown Setup: create file within subdirectory ###Code diab = load_diabetes() df2 = pd.DataFrame(diab['data'], columns=list(diab.feature_names)) df2.to_csv(os.path.join(dir1, 'diab.csv')) ###Output _____no_output_____ ###Markdown Upload fileRemember that when uploading a file, a value for `sub_dir` (subdirectory) is required. ###Code # os.getcwd() # show current working directory os.chdir('new_folder') # change directory (into folder) # os.getcwd() upload(obj='diab.csv', sub_dir='new_folder') os.chdir('..') # change directory (up one level) # os.getcwd() ###Output _____no_output_____ ###Markdown Read a file from S3 ###Code df_diabetes = read_file(sub_dir='new_folder', file='diab.csv') df_diabetes.head() ###Output _____no_output_____ ###Markdown clean up ###Code # clear local workspace for file in os.listdir(dir1): os.remove(os.path.join(dir1, file)) os.rmdir(dir1) # After function call, folder is shown !aws s3 ls 's3://originchain/new_folder/' --profile dl_user # Clear contents uploaded to AWS S3 !aws s3 rm --recursive 's3://originchain/new_folder' --profile dl_user ###Output _____no_output_____
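###Markdown For reference, the `upload` and `read_file` helpers imported at the top come from a custom `boto3_custom` package whose source is not shown in this notebook. A minimal sketch of what such helpers might look like follows; the bucket name `originchain` and the `dl_user` profile are assumptions taken from the CLI calls above, and the real implementation may well differ: ###Code
import os
import boto3
import pandas as pd

BUCKET = "originchain"  # assumption: taken from the aws CLI calls above
PROFILE = "dl_user"     # assumption: taken from the aws CLI calls above

def upload(obj, sub_dir=None):
    """Upload a directory of files, or a single file into sub_dir."""
    s3 = boto3.Session(profile_name=PROFILE).client("s3")
    if os.path.isdir(obj):
        # Upload each file in the directory under a same-named prefix
        for name in os.listdir(obj):
            s3.upload_file(os.path.join(obj, name), BUCKET, f"{obj}/{name}")
    else:
        # A single file needs an explicit subdirectory (prefix)
        s3.upload_file(obj, BUCKET, f"{sub_dir}/{obj}")

def read_file(sub_dir, file):
    """Read a CSV object from S3 into a pandas DataFrame."""
    s3 = boto3.Session(profile_name=PROFILE).client("s3")
    body = s3.get_object(Bucket=BUCKET, Key=f"{sub_dir}/{file}")["Body"]
    return pd.read_csv(body, index_col=0)
###Output _____no_output_____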
Python/tensorflow/DeepLearningZeroToAll/Lab12-2-char-seq-rnn.ipynb
###Markdown Lab 12 RNN(Recurrent Neural Network) Lab12-2-char-seq-rnn.ipynb ###Code # Lab 12 Character Sequence RNN import tensorflow as tf import numpy as np tf.set_random_seed(777) # reproducibility sample = " if you want you" idx2char = list(set(sample)) # index -> char char2idx = {c: i for i, c in enumerate(idx2char)} # char -> idex # hyper parameters dic_size = len(char2idx) # RNN input size (one hot size) hidden_size = len(char2idx) # RNN output size num_classes = len(char2idx) # final output size (RNN or softmax, etc.) batch_size = 1 # one sample data, one batch sequence_length = len(sample) - 1 # number of lstm rollings (unit #) learning_rate = 0.1 sample_idx = [char2idx[c] for c in sample] # char to index x_data = [sample_idx[:-1]] # X data sample (0 ~ n-1) hello: hell y_data = [sample_idx[1:]] # Y label sample (1 ~ n) hello: ello X = tf.placeholder(tf.int32, [None, sequence_length]) # X data Y = tf.placeholder(tf.int32, [None, sequence_length]) # Y label x_one_hot = tf.one_hot(X, num_classes) # one hot: 1 -> 0 1 0 0 0 0 0 0 0 0 cell = tf.contrib.rnn.BasicLSTMCell( num_units=hidden_size, state_is_tuple=True) initial_state = cell.zero_state(batch_size, tf.float32) outputs, _states = tf.nn.dynamic_rnn( cell, x_one_hot, initial_state=initial_state, dtype=tf.float32) # FC layer X_for_fc = tf.reshape(outputs, [-1, hidden_size]) outputs = tf.contrib.layers.fully_connected(X_for_fc, num_classes, activation_fn=None) # reshape out for sequence_loss outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes]) weights = tf.ones([batch_size, sequence_length]) sequence_loss = tf.contrib.seq2seq.sequence_loss( logits=outputs, targets=Y, weights=weights) loss = tf.reduce_mean(sequence_loss) train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss) prediction = tf.argmax(outputs, axis=2) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(50): l, _ = sess.run([loss, train], feed_dict={X: x_data, Y: y_data}) result = sess.run(prediction, feed_dict={X: x_data}) # print char using dic result_str = [idx2char[c] for c in np.squeeze(result)] print(i, "loss:", l, "Prediction:", ''.join(result_str)) ''' 0 loss: 2.35377 Prediction: uuuuuuuuuuuuuuu 1 loss: 2.21383 Prediction: yy you y you 2 loss: 2.04317 Prediction: yy yoo ou 3 loss: 1.85869 Prediction: yy ou uou 4 loss: 1.65096 Prediction: yy you a you 5 loss: 1.40243 Prediction: yy you yan you 6 loss: 1.12986 Prediction: yy you wann you 7 loss: 0.907699 Prediction: yy you want you 8 loss: 0.687401 Prediction: yf you want you 9 loss: 0.508868 Prediction: yf you want you 10 loss: 0.379423 Prediction: yf you want you 11 loss: 0.282956 Prediction: if you want you 12 loss: 0.208561 Prediction: if you want you ... ''' ###Output _____no_output_____
04_ingest/01_Copy_TSV_To_S3.ipynb
###Markdown Copy TSV Data To S3 We have chosen the [Amazon Customer Reviews Dataset](https://s3.amazonaws.com/amazon-reviews-pds/readme.html) as our main dataset. The dataset is shared in a public Amazon S3 bucket, and is available in two file formats: * Tab separated value (TSV), a text format - `s3://amazon-reviews-pds/tsv/`* Parquet, 
an optimized columnar binary format - `s3://amazon-reviews-pds/parquet/`The Parquet dataset is partitioned (divided into subfolders) by the column `product_category` to further improve query performance. With this, you can use a `WHERE` clause on product_category in your SQL queries to only read data specific to that category.We can use the AWS Command Line Interface (CLI) to list the S3 bucket content using the following CLI commands: ###Code !aws s3 ls s3://amazon-reviews-pds/tsv/ !aws s3 ls s3://amazon-reviews-pds/parquet/ ###Output _____no_output_____ ###Markdown To Simulate an Application Writing Into Our Data Lake, We Copy the Public TSV Dataset to a Private S3 Bucket in our Account Check Pre-Requisites from an earlier notebook ###Code %store -r setup_dependencies_passed try: setup_dependencies_passed except NameError: print("+++++++++++++++++++++++++++++++") print("[ERROR] YOU HAVE TO RUN ALL NOTEBOOKS IN THE SETUP FOLDER FIRST. You are missing Setup Dependencies.") print("+++++++++++++++++++++++++++++++") print(setup_dependencies_passed) %store -r setup_s3_bucket_passed try: setup_s3_bucket_passed except NameError: print("+++++++++++++++++++++++++++++++") print("[ERROR] YOU HAVE TO RUN ALL NOTEBOOKS IN THE SETUP FOLDER FIRST. You are missing Setup S3 Bucket.") print("+++++++++++++++++++++++++++++++") print(setup_s3_bucket_passed) %store -r setup_iam_roles_passed try: setup_iam_roles_passed except NameError: print("+++++++++++++++++++++++++++++++") print("[ERROR] YOU HAVE TO RUN ALL NOTEBOOKS IN THE SETUP FOLDER FIRST. You are missing Setup IAM Roles.") print("+++++++++++++++++++++++++++++++") print(setup_iam_roles_passed) if not setup_dependencies_passed: print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print("[ERROR] YOU HAVE TO RUN ALL NOTEBOOKS IN THE SETUP FOLDER FIRST. You are missing Setup Dependencies.") print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") if not setup_s3_bucket_passed: print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print("[ERROR] YOU HAVE TO RUN ALL NOTEBOOKS IN THE SETUP FOLDER FIRST. You are missing Setup S3 Bucket.") print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") if not setup_iam_roles_passed: print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print("[ERROR] YOU HAVE TO RUN ALL NOTEBOOKS IN THE SETUP FOLDER FIRST. You are missing Setup IAM Roles.") print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") import boto3 import sagemaker import pandas as pd sess = sagemaker.Session() bucket = sess.default_bucket() role = sagemaker.get_execution_role() region = boto3.Session().region_name account_id = boto3.client("sts").get_caller_identity().get("Account") sm = boto3.Session().client(service_name="sagemaker", region_name=region) ###Output _____no_output_____ ###Markdown Set S3 Source Location (Public S3 Bucket) ###Code s3_public_path_tsv = "s3://amazon-reviews-pds/tsv" %store s3_public_path_tsv ###Output _____no_output_____ ###Markdown Set S3 Destination Location (Our Private S3 Bucket) ###Code s3_private_path_tsv = "s3://{}/amazon-reviews-pds/tsv".format(bucket) print(s3_private_path_tsv) %store s3_private_path_tsv ###Output _____no_output_____ ###Markdown Copy Data From the Public S3 Bucket to our Private S3 Bucket in this AccountAs the full dataset is pretty large, let's just copy 3 files into our bucket to speed things up later. 
###Code !aws s3 cp --recursive $s3_public_path_tsv/ $s3_private_path_tsv/ --exclude "*" --include "amazon_reviews_us_Digital_Software_v1_00.tsv.gz" !aws s3 cp --recursive $s3_public_path_tsv/ $s3_private_path_tsv/ --exclude "*" --include "amazon_reviews_us_Digital_Video_Games_v1_00.tsv.gz" !aws s3 cp --recursive $s3_public_path_tsv/ $s3_private_path_tsv/ --exclude "*" --include "amazon_reviews_us_Gift_Card_v1_00.tsv.gz" ###Output _____no_output_____ ###Markdown _Make sure ^^^^ this ^^^^ S3 COPY command above runs successfully. We will need those data files for the rest of this workshop._ List Files in our Private S3 Bucket in this Account ###Code print(s3_private_path_tsv) !aws s3 ls $s3_private_path_tsv/ from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/sagemaker-{}-{}/amazon-reviews-pds/?region={}&tab=overview">S3 Bucket</a></b>'.format( region, account_id, region ) ) ) ###Output _____no_output_____ ###Markdown Store Variables for the Next Notebooks ###Code %store ###Output _____no_output_____ ###Markdown Release Resources ###Code %%html <p><b>Shutting down your kernel for this notebook to release resources.</b></p> <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button> <script> try { els = document.getElementsByClassName("sm-command-button"); els[0].click(); } catch(err) { // NoOp } </script> %%javascript try { Jupyter.notebook.save_checkpoint(); Jupyter.notebook.session.delete(); } catch(err) { // NoOp } ###Output _____no_output_____
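###Markdown As a brief aside on the `%store` magic used above: a downstream notebook can restore these variables with `%store -r`. This is a minimal sketch of that hand-off, assuming the standard IPython storemagic behavior; it is not one of the workshop steps. ###Code
# Sketch: how the next notebook would typically restore the stored variables.
%store -r s3_public_path_tsv
%store -r s3_private_path_tsv

print(s3_public_path_tsv)   # should print the public bucket path stored above
print(s3_private_path_tsv)  # should print this account's private bucket path
###Output _____no_output_____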
examples/Rules.ipynb
###Markdown Import Package ###Code from cumulus_api import CumulusApi ###Output _____no_output_____ ###Markdown Initialize Cumulus API Class ###Code cml = CumulusApi() ###Output _____no_output_____ ###Markdown List Rules ###Code cml.list_rules(limit=1) ###Output _____no_output_____ ###Markdown Get Rule ###Code cml.get_rule(rule_name="private_bucket_one_time_rule") ###Output _____no_output_____ ###Markdown Create Rule ###Code rule_definition={ "name": "testRule", "workflow": "HelloWorldWorkflow", "collection": { "name": "aces1trig", "version": "1" }, "rule": { "type": "onetime" } } cml.create_rule(data=rule_definition) ###Output _____no_output_____ ###Markdown Update/ Replace Rule ###Code new_rule_definition={ "name": "testRule", "workflow": "HelloWorldWorkflow", "collection": { "name": "gpmkmpx2ifld", "version": "1" }, "rule": { "type": "onetime" } } cml.update_rule(data=new_rule_definition) ###Output _____no_output_____ ###Markdown Delete Rule ###Code cml.delete_rule(rule_name="testRule") ###Output _____no_output_____ ###Markdown Run Rule ###Code cml.run_rule(rule_name="private_bucket_one_time_rule") ###Output _____no_output_____ ###Markdown List all rules ###Code cml.list_rules() ###Output _____no_output_____ ###Markdown List a particular rule (by name) ###Code cml.list_rules(name="msutmt_rule") ###Output _____no_output_____ ###Markdown Only list particular fields from the rules ###Code cml.list_rules(fields="provider,rule") ###Output _____no_output_____ ###Markdown Create rule ###Code data = { "name": "test_onetime_run_private_bucket", "workflow": "DiscoverGranules", "provider": "private_bucket", "collection": { "name": "goesrpltmisrep", "version": "1" }, "meta": {}, "rule": { "type": "onetime", "value": "null" }, "state": "ENABLED" } cml.create_rule(data=data) cml.delete_rule(name="test_onetime_run_private_bucket") ###Output _____no_output_____ ###Markdown Update rule ###Code data = { 'workflow': 'DiscoverGranules', 'provider': 'rss_provider', 'name': 'rss_provider', 'rule': {'type': 'onetime'}, 'collection': {'name': 'rssmif16d', 'version': '7'}, 'state': 'ENABLED'} cml.update_rule(name="rss_provider", data=data) ###Output _____no_output_____
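###Markdown Putting the calls above together, here is a minimal end-to-end sketch: create a one-time rule, trigger it, then clean it up. It is untested and reuses only the methods demonstrated in this notebook (note the notebook uses both `rule_name=` and `name=` for deletion, so confirm the keyword against your client version); the rule name below is illustrative and does not exist in any deployment. ###Code
# Sketch: create -> run -> delete, using only methods shown above.
demo_rule = {
    "name": "demoRule",  # illustrative name
    "workflow": "HelloWorldWorkflow",
    "collection": {"name": "aces1trig", "version": "1"},
    "rule": {"type": "onetime"},
}
cml.create_rule(data=demo_rule)        # register the rule
cml.run_rule(rule_name="demoRule")     # trigger the one-time execution
cml.delete_rule(rule_name="demoRule")  # remove it again
###Output _____no_output_____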
Basics of ML and DL/ML/KNN.ipynb
###Markdown Feature Scaling before applying KNN ###Code min_max_scaler_object = preprocessing.MinMaxScaler() X_train = min_max_scaler_object.fit_transform(X_train) X_test = min_max_scaler_object.transform(X_test) ###Output _____no_output_____ ###Markdown Cross-validation to find the value of k that best suits our data ###Code x_axis = [] y_axis = [] for k in range(1, 26, 2): clf = KNeighborsClassifier(n_neighbors = k) score = cross_val_score(clf, X_train, Y_train, cv = KFold(n_splits=5, shuffle=True, random_state=0)) x_axis.append(k) y_axis.append(score.mean()) import matplotlib.pyplot as plt plt.plot(x_axis, y_axis) plt.xlabel("k") plt.ylabel("cross_val_score") plt.title("variation of score on different values of k") plt.show() ###Output _____no_output_____ ###Markdown The graph above shows that k=5 gives the best cross-validation score, so we will choose k=5. Implementing sklearn's built-in KNeighborsClassifier ###Code clf = KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='auto', p=2, metric='minkowski') clf.fit(X_train, Y_train) clf.score(X_test, Y_test) ###Output _____no_output_____ ###Markdown Implementing KNN from scratch ###Code from sklearn.metrics import accuracy_score from collections import Counter def train(x, y): return def predict_one(x_train, y_train, x_test, k): distances = [] for i in range(len(x_train)): distance = ((x_train[i, :] - x_test)**2).sum() distances.append([distance, i]) distances = sorted(distances) targets = [] for i in range(k): index_of_training_data = distances[i][1] targets.append(y_train[index_of_training_data]) return Counter(targets).most_common(1)[0][0] def predict(x_train, y_train, x_test_data, k): predictions = [] for x_test in x_test_data: predictions.append(predict_one(x_train, y_train, x_test, k)) return predictions y_pred = predict(X_train, Y_train, X_test, 5) accuracy_score(Y_test, y_pred) a = [1,0,1,1,1,1,0, 2] Counter(a).most_common(1)[0][0] ###Output _____no_output_____
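###Markdown As a side note, the manual cross-validation loop above can also be expressed with scikit-learn's `GridSearchCV`, which searches over `n_neighbors` and refits the best model in one step. A minimal sketch, assuming the same `X_train`/`Y_train` arrays prepared earlier: ###Code
from sklearn.model_selection import GridSearchCV

# Sketch: grid-search k over the same odd values used in the manual loop above.
param_grid = {'n_neighbors': list(range(1, 26, 2))}
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5)
grid.fit(X_train, Y_train)

print(grid.best_params_)  # expected to agree with the k chosen from the plot
print(grid.best_score_)   # mean cross-validated accuracy of the best k
###Output _____no_output_____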
BaselinesWithEDA.ipynb
###Markdown Reversing the dicts ###Code user_id_mapping_r = {value : key for (key, value) in user_id_mapping.items()} business_id_mapping_r = {value : key for (key, value) in business_id_mapping.items()} city_id_mapping_r = {value : key for (key, value) in city_id_mapping.items()} user_id_mapping['_Y7XYBvOrq6F7PL14sxkKg'] len(train_set) ###Output _____no_output_____ ###Markdown Popularity based recommendation**Recommendation based on popular restaurants in each city**Get all restaurants in city and calculate popularity by number of visits ###Code city_restaurants_map = defaultdict(set) city_popular_map = defaultdict(dict) for d in train_set: # {city : set(visited+non visited)} city_restaurants_map[d[1]].update(train_set[d][0]+train_set[d][1]) for visited in train_set[d][0]: if visited not in city_popular_map[d[1]]: # {city : {business : count}} city_popular_map[d[1]][visited] = 1 else: city_popular_map[d[1]][visited] += 1 ###Output _____no_output_____ ###Markdown Getting most popular in each city ###Code numberOfRecommendations = 1 # {city : most popular business} city_most_popular = defaultdict(int) for city in city_popular_map: mostPopular = [(city_popular_map[city][x], x) for x in city_popular_map[city]] mostPopular.sort(reverse = True) city_most_popular[city] = mostPopular[0][1] ###Output _____no_output_____ ###Markdown On validation and test set, check if popular in visited, calculate accuracy ###Code def getPopularityAccuracy(test): correct = 0 for d in test: recommendation = city_most_popular[d[1]] if recommendation in test[d][1]: correct+=1 return correct/len(test) getPopularityAccuracy(test_set) getPopularityAccuracy(validation_set) ###Output _____no_output_____ ###Markdown Top 5 most popular recommendations ###Code numberOfRecommendations = 5 # {city : most popular business} city_most_popular = defaultdict(list) for city in city_popular_map: mostPopular = [(city_popular_map[city][x], x) for x in city_popular_map[city]] mostPopular.sort(reverse = True) recommendations = [] for _, i in mostPopular: if len(recommendations)>=numberOfRecommendations: break recommendations.append(i) city_most_popular[city] = recommendations def getPopularityRecommendationsAccuracy(test): correct = 0 for d in test: recommendations = city_most_popular[d[1]] for r in recommendations: if r in test[d][1]: correct+=1 break return correct/len(test) getPopularityRecommendationsAccuracy(test_set) getPopularityRecommendationsAccuracy(validation_set) ###Output _____no_output_____ ###Markdown Accuracy by precision@5. : No. 
of the top 5 recommendations that appear in the visited set ###Code def getPopularityAccuracyAt5(test): precisionSum = 0 for d in test: recommendations = city_most_popular[d[1]] precision = 0 for r in recommendations: if r in test[d][1]: precision+=1 precision /= len(recommendations) precisionSum+=precision return precisionSum/len(test) getPopularityAccuracyAt5(test_set) getPopularityAccuracyAt5(validation_set) ###Output _____no_output_____ ###Markdown Rating Based Recommendation**Recommendation based on highly reviewed and rated restaurants in each city**Using the business average rating ###Code def parseData(file): for l in open(file): yield eval(l) null = None # {business_created_id : [review_count , stars]} business_rating_map = defaultdict(list) f = './yelp_dataset/yelp_academic_dataset_business.json' for l in tqdm(parseData(f)): if l!=null and l['business_id'] in business_id_mapping: business_rating_map[business_id_mapping[l['business_id']]] = [l['review_count'],l['stars']] len(business_rating_map) ###Output 160585it [00:19, 8100.91it/s] ###Markdown Top 5 most rated and reviewed ###Code city_rated_map = defaultdict(dict) for city in city_restaurants_map: for business in city_restaurants_map[city]: city_rated_map[city][business] = business_rating_map[business] numberOfRecommendations = 5 # {city : most reviewed rated business} city_most_rated = defaultdict(list) for city in city_rated_map: mostRated = [(city_rated_map[city][x], x) for x in city_rated_map[city]] mostRated.sort(reverse = True) recommendations = [] for _, i in mostRated: if len(recommendations)>=numberOfRecommendations: break recommendations.append(i) city_most_rated[city] = recommendations def getRatingRecoAccuracyAt5(test): precisionSum = 0 for d in test: recommendations = city_most_rated[d[1]] precision = 0 for r in recommendations: if r in test[d][1]: precision+=1 precision /= len(recommendations) precisionSum+=precision return precisionSum/len(test) getRatingRecoAccuracyAt5(test_set) getRatingRecoAccuracyAt5(validation_set) ###Output _____no_output_____ ###Markdown Similarity Based RecommendationRecommendation on similarity between users and businesses ###Code def Jaccard(s1, s2): numerator = len(s1.intersection(s2)) denominator = len(s1.union(s2)) if denominator == 0: return 0 return numerator / denominator ratingDict_u_i = {} #data of user item rating only f = './yelp_dataset/yelp_academic_dataset_review.json' for l in tqdm(parseData(f)): if l!=null: if (l['user_id'],l['business_id']) not in ratingDict_u_i: ratingDict_u_i[(l['user_id'],l['business_id'])] = l['stars'] else: #take best rating if ratingDict_u_i[(l['user_id'],l['business_id'])] < l['stars']: ratingDict_u_i[(l['user_id'],l['business_id'])] = l['stars'] print(len(ratingDict_u_i)) def mostSimilar(u, N, usersPerItem, itemsPerUser): similaritiesUsers = [] items = itemsPerUser[u] candidateUsers = set() for i in items: candidateUsers = candidateUsers.union(usersPerItem[i]) for u2 in candidateUsers: if u2 == u: continue sim = Jaccard(items, itemsPerUser[u2]) similaritiesUsers.append((sim,u2)) #Sort items by name similaritiesUsers.sort(key = lambda x:x[1]) #Sorting by similarity in reverse similaritiesUsers.sort(reverse=True, key = lambda x:x[0]) similarities=[] #getting the most favourite items of similar users for s,u2 in similaritiesUsers: #ratingDict_u_i = highestRatedItems(u2) maxRating = -1 favItem = None for i in itemsPerUser[u2]: if (u2,i) in ratingDict_u_i and maxRating < ratingDict_u_i[(u2,i)]: maxRating = ratingDict_u_i[(u2,i)] favItem = i elif favItem is None: favItem = i if favItem is not
None: similarities.append(favItem) if len(similarities)>N: break return similarities[:N] numberOfRecommendations = 5 city_most_similar = defaultdict(list) businessPerUserPerCity = defaultdict(dict) usersPerBusinessPerCity = defaultdict(dict) for city in tqdm(city_restaurants_map): businessPerUser = defaultdict(set) # Maps users to business they have visited usersPerBusiness = defaultdict(set) # Maps business to the users that have visited for d in train_set: if d[1]==city: for business in train_set[d][0]: businessPerUser[d[0]].add(business) usersPerBusiness[business].add(d[0]) businessPerUserPerCity[city] = businessPerUser usersPerBusinessPerCity[city] = usersPerBusiness def getSimilarityRecoAccuracyAt5(test_set): precisionSum = 0 recommendations = [] ideal_cities = set() for city in city_restaurants_map: for d in test_set: if d[1]==city: recommendations = mostSimilar(d[0],numberOfRecommendations, businessPerUserPerCity[city], usersPerBusinessPerCity[city]) precision = 0 if len(recommendations)==0: recommendations = city_most_popular[d[1]] ideal_cities.add(d[1]) for r in recommendations: if r in test_set[d][1]: precision+=1 precision /= len(recommendations) precisionSum+=precision return precisionSum/len(test_set) getSimilarityRecoAccuracyAt5(test_set) getSimilarityRecoAccuracyAt5(validation_set) ###Output _____no_output_____ ###Markdown City restaurant EDA ###Code city_restaurant_graph = defaultdict(int) for city in city_restaurants_map: city_restaurant_graph[city_id_mapping_r[city]] = len(city_restaurants_map[city]) import matplotlib.pyplot as plt plt.figure(figsize = (10, 7)) plt.title("# business in each city in training set") plt.barh(list(city_restaurant_graph.keys()), city_restaurant_graph.values()) plt.yticks(fontsize=6)#, rotation=45) plt.xlabel('# businesses', fontsize=10) plt.ylabel('City', fontsize=10) plt.show() import pandas as pd city_restaurant_graph_pd = pd.DataFrame.from_dict(city_restaurant_graph.items()) city_restaurant_graph_pd.columns = ['City', '# Businesses'] city_restaurant_graph_pd.head() city_restaurant_graph_pd = city_restaurant_graph_pd.sort_values(ascending=False, by = '# Businesses') city_restaurant_graph_pd[:30].plot(kind='barh', x='City',y='# Businesses', stacked=False, figsize=[10,10]) plt.title('Top 30 cities by businesses listed') plt.show() len(city_restaurants_map[city_id_mapping['Austin']]) business_name_mapping = defaultdict(str) f = './yelp_dataset/yelp_academic_dataset_business.json' for l in tqdm(parseData(f)): if l!=null: if l['city']=='Austin' and l['business_id'] in business_id_mapping: business_name_mapping[business_id_mapping[l['business_id']]] = l['name'] business_name_review_ratings = [] for business in business_name_mapping: if business in business_rating_map: business_name_review_ratings.append((business_name_mapping[business],business_rating_map[business][0],business_rating_map[business][1])) business_name_review_ratings_pd = pd.DataFrame(business_name_review_ratings, columns=['Business Name','No.of Reviews', 'Ratings']) business_name_review_ratings_pd = business_name_review_ratings_pd.sort_values(ascending=False, by = ['No.of Reviews','Ratings']) business_name_review_ratings_pd.head() business_name_review_ratings_pd[:50].plot(kind='barh', x='Business Name',y='No.of Reviews', stacked=False, figsize=[10,10]) plt.title('Top 50 businesses by reviews and ratings - Austin') plt.show() business_name_review_ratings_pd = business_name_review_ratings_pd.sort_values(ascending=False, by = ['No.of Reviews','Ratings']) 
business_name_review_ratings_pd.head() ###Output _____no_output_____ ###Markdown Find number of unique cities explored by user+friends ###Code # friends city exploration user_friends_mapping = defaultdict(set) f = './yelp_dataset/yelp_academic_dataset_user.json' for l in tqdm(parseData(f)): if l!=null and len(l['friends'])>0: for friend in l['friends'].split(','): user_friends_mapping[l['user_id']].add(friend) len(user_friends_mapping) businessPerUser = defaultdict(set) f = './yelp_dataset/yelp_academic_dataset_review.json' for l in tqdm(parseData(f)): if l!=null: businessPerUser[l['user_id']].add(l['business_id']) business_city_map = defaultdict() f = './yelp_dataset/yelp_academic_dataset_business.json' for l in tqdm(parseData(f)): if l!=null: business_city_map[l['business_id']] = l['city'] user_city_span = defaultdict(set) for user in businessPerUser: for business in businessPerUser[user]: user_city_span[user].add(business_city_map[business]) user_friend_city_count = defaultdict(int) sum_city_per_user = 0 for user in user_friends_mapping: unique_cities = set(user_city_span[user]) # copy so we do not mutate the stored set for friend in user_friends_mapping[user]: if friend in user_city_span: unique_cities.update(user_city_span[friend]) # update() mutates in place; union() returned a new set that was discarded user_friend_city_count[user] = len(unique_cities) sum_city_per_user+=len(unique_cities) avg_city_per_user = sum_city_per_user/len(user_friend_city_count) print(avg_city_per_user) ###Output _____no_output_____
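###Markdown The three evaluation functions used above (`getPopularityAccuracyAt5`, `getRatingRecoAccuracyAt5`, `getSimilarityRecoAccuracyAt5`) all compute the same precision@k metric with different recommenders. As a refactoring sketch (an assumption about how one might consolidate them, not code used in the results above; it also simplifies by skipping users with no recommendations), the metric can be factored out once: ###Code
def precision_at_k(test, recommend_fn):
    """Mean precision@k: fraction of recommended items found in the visited set."""
    precision_sum = 0
    for d in test:
        recommendations = recommend_fn(d)  # caller decides how to recommend
        if not recommendations:
            continue
        hits = sum(1 for r in recommendations if r in test[d][1])
        precision_sum += hits / len(recommendations)
    return precision_sum / len(test)

# Example: reproduce the popularity baseline with the shared helper.
precision_at_k(test_set, lambda d: city_most_popular[d[1]])
###Output _____no_output_____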
4. Resampling.ipynb
###Markdown What do the different methods do? method : {‘backfill’, ‘bfill’, ‘pad’, ‘ffill’, None}. `ffill` and `pad` are the same, and so are `backfill` and `bfill`. Forward fill (`ffill`) fills in each missing value from the previous index, whereas back fill (`bfill`) fills it from the next index in the dataset. Might any of these methods have pitfalls from a logical point of view? `asfreq` just provides a quick way to change the frequency of the data: when converting to a lower-frequency time series it simply drops every data point that does not fall on the new grid, which is usually not what we want; this is where `resample` comes in. What's the difference between going to a higher frequency and a lower frequency? Going to a higher frequency creates extra data points that must then be filled, whereas going to a lower frequency drops (or, with `resample`, aggregates) the data points in between. ###Code converted = ts.asfreq('90Min', method = 'bfill') # What's different logically about going to a higher frequency vs a lower frequency? # What do you want to do when switching to a lower frequency that is not logical when switching to a higher frequency? ts.resample('D').sum() ###Output _____no_output_____
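###Markdown To make the difference concrete, here is a small self-contained sketch on a toy series (independent of the `ts` used above): downsampling with `asfreq` keeps only the rows that fall on the new grid, `resample(...).sum()` aggregates everything in between, and upsampling creates new timestamps whose values must be filled. ###Code
import pandas as pd
import numpy as np

toy = pd.Series(np.arange(4), index=pd.date_range('2020-01-01', periods=4, freq='H'))
print(toy.asfreq('2H'))            # downsample: only the rows at 00:00 and 02:00 survive
print(toy.resample('2H').sum())    # downsample: values in each 2-hour bucket are summed instead
print(toy.asfreq('30min').head())  # upsample: new rows appear as NaN until filled
print(toy.asfreq('30min', method='ffill').head())  # forward fill copies the previous value
###Output _____no_output_____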
examples/adaptive_loss_scaling/ssd/SSD Result.ipynb
###Markdown Understanding SSD Result===This notebook looks at how different loss scaling methods may affect the mixed precision training result on SSD models.The SSD model we choose to compare on is `ssd512`, which uses VGG-16 as its backbone and takes in `(512, 512)` input images.It is trained on Pascal VOC 2007 and 2012 training sets and tested on VOC 2007 only.The technical details about how we revised the implementation of that SSD architecture in `chainer` are listed in the `README.md` file. ###Code import os import sys import matplotlib import matplotlib.pyplot as plt import pandas as pd import numpy as np from scipy.interpolate import spline import seaborn as sns matplotlib.rcParams.update({'font.size': 14}) B8_DIRS = { 'ada_loss': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss/', 'ada_loss_freq_10': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss_freq_10/', 'loss_scale_8': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_8', 'loss_scale_128': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_128', 'loss_scale_256': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_256', 'dynamic_interval_10': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_1_dyn.20190829', } B32_DIRS = { 'ada_loss_4GPU': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4.20190909', 'ada_loss (GPU=4,F=10)': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4_F_10.20190922/', 'ada_loss (GPU=4,F=100)': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4_F_100.20190922/', 'ada_loss (GPU=4,F=100) new': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4_F_100.20190924/', 'ada_loss (GPU=4,F=100,AB=8192)': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4_F_100_AB_8192.20190924/', 'loss_scale_128_GPU4': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_128.GPU_16.20190912', 'dynamic_interval_10_GPU4': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/dyn_intv_10.GPU_16.20190912' } def load_train_log(train_dir): """ Load train log from the directory """ return pd.read_json(os.path.join(train_dir, 'log')) def plot_train_log(train_dir, label, ax, sample_freq=100): """ Plot a single train log on a given ax """ log = load_train_log(train_dir) print(train_dir) if 'validation/main/map' in log: print('Best mAP={:.4f}'.format(log['validation/main/map'].max())) x = log['iteration'].values[::sample_freq] y = log['main/loss'].values[::sample_freq] ax.plot(x, y, label='{}'.format(label)) ###Output _____no_output_____ ###Markdown Show training logsThe cells below show the training curves of different loss scaling settings. Note that the training run without any loss scaling (`no_loss_scale`) terminates at the very beginning.
When Batch Size is 8 ###Code fig, ax = plt.subplots(figsize=(5, 4)) for label, train_dir in B8_DIRS.items(): plot_train_log(train_dir, label, ax, sample_freq=100) ax.legend() ax.set_xlabel('Iteration') ax.set_ylabel('main/loss') ax.set_ylim([1, 8]) ax.set_title('Batch size is 8') ###Output /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss/ Best mAP=0.7927 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss_freq_10/ Best mAP=0.7873 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_8 Best mAP=0.7883 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_128 Best mAP=0.7871 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_256 Best mAP=0.7836 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_1_dyn.20190829 Best mAP=0.7504 ###Markdown When Batch Size is 32 ###Code fig, ax = plt.subplots(figsize=(5, 4)) for label, train_dir in B32_DIRS.items(): plot_train_log(train_dir, label, ax, sample_freq=100) ax.legend() ax.set_xlabel('Iteration') ax.set_ylabel('main/loss') ax.set_ylim([1, 8]) ax.set_title('Batch size is 8') ###Output /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4.20190909 Best mAP=0.8030 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4_F_10.20190922/ Best mAP=0.7978 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4_F_100.20190922/ Best mAP=0.8021 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4_F_100.20190924/ Best mAP=0.7997 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_4_F_100_AB_8192.20190924/ Best mAP=0.8031 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_128.GPU_16.20190912 Best mAP=0.8001 /mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/dyn_intv_10.GPU_16.20190912 Best mAP=0.8017 ###Markdown Average ###Code train_dirs = [ '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_128', '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_128.GPU_1.20190908', '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_128.GPU_1.20190912' ] best_map = [] for train_dir in train_dirs: log = pd.read_json(os.path.join(train_dir, 'log')) map_ = log['validation/main/map'].max() best_map.append(map_) print(map_) print(np.mean(best_map)) train_dirs = [ '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_8', ] best_map = [] for train_dir in train_dirs: log = pd.read_json(os.path.join(train_dir, 'log')) best_map.append(log['validation/main/map'].max()) print(np.mean(best_map)) train_dirs = [ '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_1_F_10.20190831/', ] best_map = [] for train_dir in train_dirs: log = pd.read_json(os.path.join(train_dir, 'log')) map_ = log['validation/main/map'].max() best_map.append(map_) print(map_) print(np.mean(best_map)) ###Output 0.792434174016666 0.792434174016666 ###Markdown Final Validation ResultBelow we show the comparison between different settings regarding mAP ###Code import pickle import re import chainer from collections import OrderedDict SNAPSHOT_DIR='/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/float32.20190908/grad_snapshots' re.findall(r'_\d+', 'iter_0_cnt_100_func_Convolution2DGradW_idx_0') def load_snapshots(snapshot_dir): """ Load gradient snapshots """ snapshot_files = os.listdir(snapshot_dir) grad_snaps = OrderedDict() # parse 
their iteration and counter for i, fp in enumerate(snapshot_files): if i % 100 == 0: print('==> Loaded {} number of snapshot files, {:.2f}% ...'.format(i + 1, (i + 1) / len(snapshot_files))) if 'ReLU' not in fp and 'Deconv' not in fp: continue nums = [int(s[1:]) for s in re.findall(r'_\d+', fp)] if len(nums) != 3: continue it, cnt, idx = nums if idx != 0: continue if it not in grad_snaps: grad_snaps[it] = {} grad_snaps[it][cnt] = fp grad_snaps_ = OrderedDict() for it in sorted(grad_snaps.keys()): grad_snaps_[it] = OrderedDict() base = min(grad_snaps[it].keys()) idx = 0 for cnt in sorted(grad_snaps[it].keys()): fp = os.path.join(snapshot_dir, grad_snaps[it][cnt]) data = pickle.load(open(fp, "rb" )) xp = chainer.backend.get_array_module(data) if xp != np: data = xp.asnumpy(data) grad_snaps_[it][idx] = (grad_snaps[it][cnt], data) idx += 1 return grad_snaps_ snaps = load_snapshots(SNAPSHOT_DIR) [(k, v[0], v[1].shape) for k, v in snaps[0].items()] iters = [10000, 50000, 80000, 110000] per_ufs = {} for it in iters: per_ufs[it] = [] for k, v in snaps[it].items(): per_ufs[it].append(np.sum(np.abs(v[1]) < 6e-8) / v[1].size * 100) loss_scales = {} for it in iters: loss_scales[it] = [] for k, v in snaps[it].items(): abs_grad = np.abs(v[1]) abs_grad = abs_grad[abs_grad < 6e-8] abs_grad = abs_grad[abs_grad > 0] if abs_grad.size < 100: loss_scales[it].append(1) else: min_grad = np.sort(abs_grad)[int(abs_grad.size * 0.001)] loss_scales[it].append(6e-8 / min_grad) overflow_loss_scales = {} for it in iters: max_grad = 0 for k, v in snaps[it].items(): max_ = np.abs(v[1]).max() if max_ > max_grad: max_grad = max_ print(max_grad) u_max = 8192 overflow_loss_scales[it] = u_max / max_grad overflow_loss_scales fig, ax = plt.subplots() for it in per_ufs.keys(): ax.plot(np.log2(per_ufs[it]), label='iter={}'.format(it)) ax.set_xlabel('Layer ID') ax.set_ylabel('Percentage of underflow (%)') ax.set_title('Underflow rate among activation gradients accross all layers') ax.legend() plt.tight_layout() fig.savefig('./SSD512_underflow_rate.pdf') fig, ax = plt.subplots() for it in loss_scales.keys(): ax.plot(np.log2(loss_scales[it]), label='iter={}'.format(it)) ax.axhline(np.log2(overflow_loss_scales[it])) ax.set_xlabel('Layer ID') ax.set_ylabel('log2(loss scale)') ax.set_title('Loss scale expected by each layer') ax.legend() plt.tight_layout() fig.savefig('./SSD512_expected_loss_scale.pdf') ###Output _____no_output_____ ###Markdown Loss scale ###Code train_dir = '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss' fp = os.path.join(train_dir, 'loss_scale.csv') df = pd.read_csv(fp, index_col=0) df.loc[df['val'].idxmax()] print(df[(df['iter'] == 105800)].to_string()) ###Output iter label key val 165048 105800 AdaLossConvolution2DFunction unbound 1.000000 165049 105800 AdaLossConvolution2DFunction bound 1.000000 165050 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165051 105800 AdaLossConvolution2DFunction final 1.000000 165052 105800 AdaLossConvolution2DFunction unbound 1.000000 165053 105800 AdaLossConvolution2DFunction bound 1.000000 165054 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165055 105800 AdaLossConvolution2DFunction final 1.000000 165056 105800 AdaLossConvolution2DFunction unbound 1.620799 165057 105800 AdaLossConvolution2DFunction bound 1.620799 165058 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165059 105800 AdaLossConvolution2DFunction final 1.000000 165060 105800 AdaLossConvolution2DFunction unbound 1.000000 165061 105800 AdaLossConvolution2DFunction 
bound 1.000000 165062 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165063 105800 AdaLossConvolution2DFunction final 1.000000 165064 105800 AdaLossConvolution2DFunction unbound 6.413222 165065 105800 AdaLossConvolution2DFunction bound 6.413222 165066 105800 AdaLossConvolution2DFunction power_of_two 4.000000 165067 105800 AdaLossConvolution2DFunction final 4.000000 165068 105800 AdaLossConvolution2DFunction unbound 1.153198 165069 105800 AdaLossConvolution2DFunction bound 1.153198 165070 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165071 105800 AdaLossConvolution2DFunction final 1.000000 165072 105800 AdaLossConvolution2DFunction unbound 1.000000 165073 105800 AdaLossConvolution2DFunction bound 1.000000 165074 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165075 105800 AdaLossConvolution2DFunction final 1.000000 165076 105800 AdaLossConvolution2DFunction unbound 1.000000 165077 105800 AdaLossConvolution2DFunction bound 1.000000 165078 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165079 105800 AdaLossConvolution2DFunction final 1.000000 165080 105800 AdaLossConvolution2DFunction unbound 1.546653 165081 105800 AdaLossConvolution2DFunction bound 1.546653 165082 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165083 105800 AdaLossConvolution2DFunction final 1.000000 165084 105800 AdaLossConvolution2DFunction unbound 1.000000 165085 105800 AdaLossConvolution2DFunction bound 1.000000 165086 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165087 105800 AdaLossConvolution2DFunction final 1.000000 165088 105800 AdaLossConvolution2DFunction unbound 1.000000 165089 105800 AdaLossConvolution2DFunction bound 1.000000 165090 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165091 105800 AdaLossConvolution2DFunction final 1.000000 165092 105800 AdaLossConvolution2DFunction unbound 1.000000 165093 105800 AdaLossConvolution2DFunction bound 1.000000 165094 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165095 105800 AdaLossConvolution2DFunction final 1.000000 165096 105800 AdaLossConvolution2DFunction unbound 1.640965 165097 105800 AdaLossConvolution2DFunction bound 1.640965 165098 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165099 105800 AdaLossConvolution2DFunction final 1.000000 165100 105800 AdaLossConvolution2DFunction unbound 1.000000 165101 105800 AdaLossConvolution2DFunction bound 1.000000 165102 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165103 105800 AdaLossConvolution2DFunction final 1.000000 165104 105800 AdaLossConvolution2DFunction unbound 1.000000 165105 105800 AdaLossConvolution2DFunction bound 1.000000 165106 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165107 105800 AdaLossConvolution2DFunction final 1.000000 165108 105800 AdaLossConvolution2DFunction unbound 2.117933 165109 105800 AdaLossConvolution2DFunction bound 2.117933 165110 105800 AdaLossConvolution2DFunction power_of_two 2.000000 165111 105800 AdaLossConvolution2DFunction final 2.000000 165112 105800 AdaLossConvolution2DFunction unbound 5.117975 165113 105800 AdaLossConvolution2DFunction bound 5.117975 165114 105800 AdaLossConvolution2DFunction power_of_two 4.000000 165115 105800 AdaLossConvolution2DFunction final 4.000000 165116 105800 AdaLossConvolution2DFunction unbound 1.000000 165117 105800 AdaLossConvolution2DFunction bound 1.000000 165118 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165119 105800 AdaLossConvolution2DFunction final 1.000000 165120 105800 AdaLossConvolution2DFunction 
unbound 1.223211 165121 105800 AdaLossConvolution2DFunction bound 1.223211 165122 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165123 105800 AdaLossConvolution2DFunction final 1.000000 165124 105800 AdaLossConvolution2DFunction unbound 10.096206 165125 105800 AdaLossConvolution2DFunction bound 10.096206 165126 105800 AdaLossConvolution2DFunction power_of_two 8.000000 165127 105800 AdaLossConvolution2DFunction final 8.000000 165128 105800 AdaLossConvolution2DFunction unbound 20.491186 165129 105800 AdaLossConvolution2DFunction bound 16.000000 165130 105800 AdaLossConvolution2DFunction power_of_two 16.000000 165131 105800 AdaLossConvolution2DFunction final 16.000000 165132 105800 AdaLossConvolution2DFunction unbound 1.255960 165133 105800 AdaLossConvolution2DFunction bound 1.255960 165134 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165135 105800 AdaLossConvolution2DFunction final 1.000000 165136 105800 AdaLossConvolution2DFunction unbound 1.398603 165137 105800 AdaLossConvolution2DFunction bound 1.398603 165138 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165139 105800 AdaLossConvolution2DFunction final 1.000000 165140 105800 AdaLossConvolution2DFunction unbound 4.635605 165141 105800 AdaLossConvolution2DFunction bound 4.635605 165142 105800 AdaLossConvolution2DFunction power_of_two 4.000000 165143 105800 AdaLossConvolution2DFunction final 4.000000 165144 105800 AdaLossConvolution2DFunction unbound 1.000000 165145 105800 AdaLossConvolution2DFunction bound 1.000000 165146 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165147 105800 AdaLossConvolution2DFunction final 1.000000 165148 105800 AdaLossConvolution2DFunction unbound 1.000000 165149 105800 AdaLossConvolution2DFunction bound 1.000000 165150 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165151 105800 AdaLossConvolution2DFunction final 1.000000 165152 105800 AdaLossConvolution2DFunction unbound 283.708470 165153 105800 AdaLossConvolution2DFunction bound 16.000000 165154 105800 AdaLossConvolution2DFunction power_of_two 16.000000 165155 105800 AdaLossConvolution2DFunction final 16.000000 165156 105800 AdaLossConvolution2DFunction unbound 1.683527 165157 105800 AdaLossConvolution2DFunction bound 1.683527 165158 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165159 105800 AdaLossConvolution2DFunction final 1.000000 165160 105800 AdaLossConvolution2DFunction unbound 2.773432 165161 105800 AdaLossConvolution2DFunction bound 2.773432 165162 105800 AdaLossConvolution2DFunction power_of_two 2.000000 165163 105800 AdaLossConvolution2DFunction final 2.000000 165164 105800 AdaLossConvolution2DFunction unbound 4.669950 165165 105800 AdaLossConvolution2DFunction bound 4.669950 165166 105800 AdaLossConvolution2DFunction power_of_two 4.000000 165167 105800 AdaLossConvolution2DFunction final 4.000000 165168 105800 AdaLossConvolution2DFunction unbound 2.700048 165169 105800 AdaLossConvolution2DFunction bound 2.000000 165170 105800 AdaLossConvolution2DFunction power_of_two 2.000000 165171 105800 AdaLossConvolution2DFunction final 2.000000 165172 105800 AdaLossConvolution2DFunction unbound 1.593755 165173 105800 AdaLossConvolution2DFunction bound 1.000000 165174 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165175 105800 AdaLossConvolution2DFunction final 1.000000 165176 105800 AdaLossConvolution2DFunction unbound 2.850127 165177 105800 AdaLossConvolution2DFunction bound 1.000000 165178 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165179 105800 
AdaLossConvolution2DFunction final 1.000000 165180 105800 AdaLossConvolution2DFunction unbound 4.764731 165181 105800 AdaLossConvolution2DFunction bound 1.000000 165182 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165183 105800 AdaLossConvolution2DFunction final 1.000000 165184 105800 AdaLossConvolution2DFunction unbound 3.609693 165185 105800 AdaLossConvolution2DFunction bound 1.000000 165186 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165187 105800 AdaLossConvolution2DFunction final 1.000000 165188 105800 AdaLossConvolution2DFunction unbound 3.465343 165189 105800 AdaLossConvolution2DFunction bound 1.000000 165190 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165191 105800 AdaLossConvolution2DFunction final 1.000000 165192 105800 AdaLossConvolution2DFunction unbound 2.670266 165193 105800 AdaLossConvolution2DFunction bound 1.000000 165194 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165195 105800 AdaLossConvolution2DFunction final 1.000000 165196 105800 AdaLossConvolution2DFunction unbound 1.690814 165197 105800 AdaLossConvolution2DFunction bound 1.000000 165198 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165199 105800 AdaLossConvolution2DFunction final 1.000000 165200 105800 AdaLossConvolution2DFunction unbound 1.000000 165201 105800 AdaLossConvolution2DFunction bound 1.000000 165202 105800 AdaLossConvolution2DFunction power_of_two 1.000000 165203 105800 AdaLossConvolution2DFunction final 1.000000 ###Markdown Sanity Check ###Code train_dirs = { 'ada_loss': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_1_F_10_AB_2048.20190912', 'loss_scale_128': '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/loss_scale_128.GPU_1.20190912', } dfs = {} for key, val in train_dirs.items(): dfs[key] = pd.read_csv(os.path.join(val, 'sanity_check.csv'), index_col=0) ###Output _____no_output_____ ###Markdown Plot the difference in underflow rate: ###Code for it in [10000, 50000, 80000, 100000]: df1 = dfs['ada_loss'][df['iter'] == it] df2 = dfs['loss_scale_128'][df['iter'] == it] plt.plot(np.arange(len(df_)), (- df2['nuf_ls']) - (- df1['nuf_ls']), label='iter={}'.format(it)) plt.legend() plt.xlabel('Layer ID') plt.ylabel(r'$\Delta$ underflow rate') plt.title('Difference in underflow rate between fixed (128) and adaptive') plt.tight_layout() plt.savefig('SSD512_diff_underflow_rate.pdf') for it in [0, 1000, 10000, 100000]: df_ = df[df['iter'] == it] plt.plot(np.arange(len(df_)), df_['nuf_ls'] - df_['nuf_fp16'], label='iter={}'.format(it)) plt.xlabel('Layer ID') plt.ylabel(r'$\Delta$ underflow rate') plt.title('Reduction in underflow rate by adaptive loss scaling on SSD512') plt.legend() plt.tight_layout() plt.savefig('SSD512_underflow_rate_loss_scaled.pdf') ###Output _____no_output_____ ###Markdown Profiling ###Code train_dir = '/mnt/ccnas2/bdp/rz3515/train/ada_loss/object_detection/ssd512/ada_loss.GPU_1.20190920' train_log = pd.read_json(os.path.join(train_dir, 'log')) profile = pd.read_csv(os.path.join(train_dir, 'profile.csv'), index_col=0) profile full = (train_log['elapsed_time'].max() - train_log['elapsed_time'].min()) / (train_log['iteration'].max() - train_log['iteration'].min()) profile[profile['event_name'] == 'calc_stat']['time_elapsed'] / train_log['iteration'].max() / full * 100 profile[profile['event_name'] == 'total']['time_elapsed'] / train_log['iteration'].max() / full * 100 ###Output _____no_output_____
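###Markdown For reference, the per-layer statistics computed above boil down to two quantities: the underflow rate (fraction of gradient entries below the smallest positive fp16 subnormal, about 6e-8) and the loss scale that would lift most of the small gradients back above that threshold. Here is a self-contained sketch of the same idea on synthetic gradients (toy data, not the SSD snapshots): ###Code
import numpy as np

rng = np.random.RandomState(0)
grad = rng.lognormal(mean=-20, sigma=3, size=100000)  # toy gradient magnitudes

FP16_TINY = 6e-8  # smallest positive fp16 subnormal (approx.), as used above
underflow_rate = np.mean(grad < FP16_TINY) * 100
print('underflow rate: {:.2f}%'.format(underflow_rate))

# Same idea as the notebook: take a low quantile of the small nonzero gradients
# and choose the scale that pushes it up to the fp16 threshold.
small = np.sort(grad[(grad > 0) & (grad < FP16_TINY)])
if small.size:
    expected_scale = FP16_TINY / small[int(small.size * 0.001)]
    print('expected loss scale ~ 2^{:.1f}'.format(np.log2(expected_scale)))
###Output _____no_output_____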
examples/btp01.ipynb
###Markdown Bayes Theorem ProblemsThis notebook presents code and exercises from Think Bayes, second edition.Copyright 2016 Allen B. DowneyMIT License: https://opensource.org/licenses/MIT ###Code from __future__ import print_function, division % matplotlib inline import warnings warnings.filterwarnings('ignore') import numpy as np from thinkbayes2 import Hist, Pmf, Cdf, Suite, Beta import thinkplot ###Output _____no_output_____ ###Markdown The sock problemYuzhong HuangThere are two drawers of socks. The first drawer has 40 white socks and 10 black socks; the second drawer has 20 white socks and 30 black socks. We randomly get 2 socks from a drawer, and it turns out to be a pair(same color) but we don't know the color of these socks. What is the chance that we picked the first drawer. ###Code # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here ###Output _____no_output_____ ###Markdown Chess-playing twinsAllen DowneyTwo identical twins are members of my chess club, but they never show up on the same day; in fact, they strictly alternate the days they show up. I can't tell them apart except that one is a better player than the other: Avery beats me 60% of the time and I beat Blake 70% of the time. If I play one twin on Monday and win, and the other twin on Tuesday and lose, which twin did I play on which day? ###Code # Solution goes here # Solution goes here # Solution goes here ###Output _____no_output_____ ###Markdown 1984by Katerina ZoltanThe place: Airstrip One. The reason: thoughtcrime. The time: ???John's parents were taken by the Thought Police and erased from all records. John is being initiated into the Youth League and must pass a test. He is asked whether his parents are good comrades. It is not clear what John's admission officer knows:1. He may know that John's parents have been erased and that John did not give them away.2. He may know only that John's parents have been erased.3. He may not know that John's parents have been erased.It is a well known fact that children who have parents that are 'good comrades' have twice the chances of passing the test. However, if the admission officer knows that their parents committed thoughtcrime (but not that they have been erased yet), a child that gave his parents away has three times the chances of getting in than a child who did not give them away.And if the admission officer knows the specifics of the arrest, a child that denies that the records are false and their parents existed has a 1/3 chance of getting in, while one who pretends that his parents never existed has a 2/3 chance. Lying to an admission officer that knows the parents have been erased will ensure that the child does not get in. Telling an admission officer that your parents do not exist when he does not know this will give you a 1/3 chance of getting in.There is a 60% chance the admission officer knows nothing, a 25% chance that he knows the parents have been erased, and a 15% chance that the officer knows all of the details. John says that he never had parents and is admitted into the Youth League. What did his admission officer know? ###Code # Solution goes here # Solution goes here ###Output _____no_output_____ ###Markdown Where Am I? - The Robot Localization Problemby Kathryn HiteBayes's Theorem proves to be extremely useful when building mobile robots that need to know where they are within an environment at any given time. 
Because of the error in motion and sensor systems, a robot's knowledge of its location in the world is based on probabilities. Let's look at a simplified example that could feasibly be scaled up to create a working localization model.**Part A:** We have a robot that exists within a very simple environement. The map for this environment is a row of 6 grid cells that are colored either green or red and each labeled $x_1$, $x_2$, etc. In real life, a larger form of this grid environment could make up what is known as an occupancy grid, or a map of the world with places that the robot can go represented as green cells and obstacles as red cells.|G|R|R|G|G|G||-|-|-|-|-|-||$x_1$|$x_2$|$x_3$|$x_4$|$x_5$|$x_6$|The robot has a sensor that can detect color with an 80% chance of being accurate.Given that the robot gets dropped in the environment and senses **red**, what is the probability of it being in each of the six locations? ###Code # Solution goes here # Solution goes here # Solution goes here ###Output _____no_output_____ ###Markdown **Part B:** This becomes an extremely useful tool as we begin to move around the map. Let's try to get a more accurate knowledge of where the robot falls in the world by telling it to move forward one cell.The robot moves forward one cell from its previous position and the sensor reads **green**, again with an 80% accuracy rate. Update the probability of the robot having started in each location. ###Code # Solution goes here # Solution goes here # Solution goes here ###Output _____no_output_____ ###Markdown Red Dice problemsSuppose I have a six-sided die that is red on 2 sides and blue on 4 sides, and another die that's the other way around, red on 4 sides and blue on 2.I choose a die at random and roll it, and I tell you it came up red. What is the probability that I rolled the second die (red on 4 sides)? ###Code # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here ###Output _____no_output_____ ###Markdown Scenario BSuppose I roll the same die again. What is the probability I get red? ###Code # Solution goes here ###Output _____no_output_____ ###Markdown Scenario AInstead of rolling the same die, suppose I choosing a die at random and roll it. What is the probability that I get red? ###Code # Solution goes here ###Output _____no_output_____ ###Markdown Scenario CNow let's run a different experiment. Suppose I choose a die and roll it. If the outcome is red, I report the outcome. Otherwise I choose a die again and roll again, and repeat until I get red.What is the probability that the last die I rolled is the reddish one? ###Code # Solution goes here # Solution goes here ###Output _____no_output_____ ###Markdown Scenario DFinally, suppose I choose a die and roll it over and over until I get red, then report the outcome. What is the probability that the die I rolled is the reddish one? ###Code # Solution goes here # Solution goes here ###Output _____no_output_____ ###Markdown The bus problemAllen DowneyTwo buses routes run past my house, headed for Arlington and Billerica. In theory, the Arlington bus runs every 20 minutes and the Billerica bus every 30 minutes, but by the time they get to me, the time between buses is well-modeled by exponential distributions with means 20 and 30.Part 1: Suppose I see a bus outside my house, but I can't read the destination. 
What is the probability that it is an Arlington bus?Part 2: Suppose I see a bus go by, but I don't see the destination, and 3 minutes later I see another bus. What is the probability that the second bus is going to Arlington? ###Code # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here # Solution goes here ###Output _____no_output_____
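###Markdown As a worked sketch of the Bayesian-update pattern these exercises call for, here is one way to set up the first red-dice question with the `Pmf` class imported at the top of this notebook (the solution cells above are intentionally left blank; treat this as an illustration of the pattern, not the official solution, and confirm the method names against your `thinkbayes2` version): ###Code
# Sketch: posterior over which die was rolled, given that it came up red.
pmf = Pmf()
pmf.Set('2-red die', 1)  # uniform prior over the two dice
pmf.Set('4-red die', 1)
pmf['2-red die'] *= 2/6  # likelihood of red under each hypothesis
pmf['4-red die'] *= 4/6
pmf.Normalize()
pmf.Print()              # the 4-red die should end up with probability 2/3
###Output _____no_output_____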
machine_learning/lesson 1 - linear regression/examples/Multiple_Linear_Regression.ipynb
###Markdown >Note: Always open this notebook in Colab for the best learning experience. Multiple Linear Regression: Can we predict life expectancy? In our previous lesson, we explored the topic of single-variable linear regression and demonstrated how to build a linear model as a single-layer neural network. In single-variable regression analysis, there is only one independent variable (i.e., x variable) and one dependent variable (i.e., y variable). However, what if we want to build a model to predict a label given multiple x variables (i.e., features)? To achieve this, we use multiple linear regression--a method to model the relationship between a dependent variable (y) and multiple independent variables (x). Intuitively, adding more x (in this case x columns) data to our features tends to help the model improve its overall performance.While adding more variables allows us to model more complex "real-world" relationships there are also additional steps we must take to make sure our model is sound and robust.In this lesson, we introduce the multiple linear regression method and demonstrate how to build a multiple linear regression model to predict life expectancy using the same [Life Expectancy(WHO)](https://www.kaggle.com/kumarajarshi/life-expectancy-who) dataset from the previous lesson. Multiple Linear RegressionBefore we discuss multiple linear regression, let's review single-variable linear regression. Recall that single-variable linear regression aims to fit a line to the data using the following formula: $$\hat{y} = wx + b$$where $w$ is known as the *weight*, and $b$ is the *bias* term.When the inputs ($x$) consists of $d$ features, we express the above linear function as: $$\hat{y} = w_1 x_1 + ... + w_d x_d + b.$$In machine learning, we typically work with *high-dimensional* datasets, meaning $d$ is large so there are many features. When $d$ is large, it's not convenient to write the above linear equation, instead we can express it using vector notation as follows:$$\hat{y} = \mathbf{w}^\top \mathbf{x} + b.$$where the vector $\mathbf{x} \in \mathbb{R}^d$ and the vector $\mathbf{w} \in \mathbb{R}^d$ contain the *features* and *weights* respectively.In the above equation, $\mathbf{x}$ corresponds to a single input sample. It is often more convenient to refer to features of our entire dataset of $n$ samples via the *matrix* $\mathbf{X} \in \mathbb{R}^{n \times d}$, where each sample is represented by a row and each feature by a column. For a collection of features $\mathbf{X}$ and labels $y \in \mathbb{R^n}$, the multiple linear regression function can be expressed as the matrix-vector product:$${\hat{\mathbf{y}}} = \mathbf{X} \mathbf{w} + b.$$The goal of multiple linear regression is to find the weight vector $\mathbf{w}$ and the bias term $b$ that results in the lowest prediction error. Notice that the goal is basically the same as single-variable linear regression except multiple weights are learned instead of one.The figure below illustrates a single-layer multiple linear regression neural network. The input layer consists of $d$ *neurons* each corresponding to a feature (from $\mathbf{x}$) and $d$ *connections* each corresponding to the *weight* between the output neuron an input neuron. Linear Regression: a multiple linear regression neural network Multiple Linear Regression: Can we predict life expectancy?Now that we know a bit about the multiple linear regression method, it's time to apply it to a real-world problem--predicting life expectancy given other health statistics. 
Our goal is to build a *single-layer fully-connected neural network* (i.e., a mutliple linear regression model) to predict life expectancy using the WHO's [Life Expectancy](https://www.kaggle.com/kumarajarshi/life-expectancy-who) dataset.Like single-variable linear regression, we will perform the following steps:1. Explore and prepare the dataset.2. Build the model.3. Train the model.4. Evaluate the model.5. Draw conclusions. ###Code # Import the libraries we be need. import pandas as pd import numpy as np import matplotlib.pyplot as plt # Import PyTorch. import torch import torch.nn as nn ###Output _____no_output_____ ###Markdown 1. Explore and prepare the dataset ###Code # load the dataset into a dataframe data_url = 'https://raw.githubusercontent.com/BreakoutMentors/Data-Science-and-Machine-Learning/main/datasets/Life%20Expectancy%20Data.csv' df = pd.read_csv(data_url) df.columns = [col_name.strip() for col_name in df.columns] # used to fix weird spaces in the column names df.head() # view the first 5 rows of the data # Looking at null values df.info() # First action of cleaning is to remove the rows with life expectancy nans df = df.loc[~df['Life expectancy'].isna(), :] print('The amount of columns that will be deleted if we deleted all rows with Nans:', df.shape[0] - df.dropna().shape[0]) print('That is a really high number! We should replace those values') # Second action is to fill all other Nans fillers = {'Alcohol':df['Alcohol'].mean(), 'Hepatitis B':df['Hepatitis B'].mean(), 'BMI':df['BMI'].mean(), 'Polio':df['Polio'].mean(), 'Total expenditure':df['Total expenditure'].mean(), 'Diphtheria':df['Diphtheria'].mean(), 'GDP':df['GDP'].mean(), 'Population':df['Population'].mean(), 'thinness 1-19 years':df['thinness 1-19 years'].mean(), 'thinness 5-9 years':df['thinness 5-9 years'].mean(), 'Income composition of resources':df['Income composition of resources'].mean(), 'Schooling':df['Schooling'].mean()} df = df.fillna(value=fillers) print('The amount of Nans still left:', df.isna().sum().sum()) ###Output The amount of columns that will be deleted if we deleted all rows with Nans: 1279 That is a really high number! We should replace those values The amount of Nans still left: 0 ###Markdown There are three variables we don't want to use in our model: Country, Year, Status. The reason why we do not want these variables is because they do not describe any information about health or wealth, and we do not want the location and year to have influence in the model. In the below cell, we will remove those variables and prepare the dataset for the model. Remember this process involves defining the features ($\mathbf{x}$) and the labels ($y$), splitting the dataset into a training and test set, and separating the features and the labels in both sets. In this case, the features will be all the other variables except life expectancy, which is our labels. 
###Code # Dropping Country, Year and Status from dataframe df = df.drop(columns=['Country', 'Year', 'Status']) # define the x (features) and y (labels) variables x_cols = df.drop(columns=['Life expectancy']).columns.tolist() y_col = 'Life expectancy' print('x features: ', x_cols) print('y labels: ', y_col) # Calculating means and stds to normalize data means = df.loc[:, x_cols].mean().values stds = df.loc[:, x_cols].std().values # split the dataset into train/test datasets train = df.sample(frac=0.8, random_state=0) test = df.drop(train.index) # Splitting training data into validation data valid = train.sample(frac=0.1, random_state=0) train = train.drop(valid.index) # Deleting rows sampled for validation data # separate the x (features) and y (labels) in the train/valid/test datasets and normalizing them train_features = torch.tensor((train[x_cols].values-means)/stds, dtype=torch.float) test_features = torch.tensor((test[x_cols].values-means)/stds, dtype=torch.float) valid_features = torch.tensor((valid[x_cols].values-means)/stds, dtype=torch.float) train_labels = torch.tensor(train[y_col].values.reshape(-1, 1), dtype=torch.float) test_labels = torch.tensor(test[y_col].values.reshape(-1, 1), dtype=torch.float) valid_labels = torch.tensor(valid[y_col].values.reshape(-1, 1), dtype=torch.float) print('train features shape:', train_features.shape) print('train labels shape:', train_labels.shape) print('validation features shape:', valid_features.shape) print('validation labels shape:', valid_labels.shape) print('test features shape:', test_features.shape) print('test labels shape:', test_labels.shape) print('first 5 test labels:\n', test_labels[:5]) ###Output x features: ['Adult Mortality', 'infant deaths', 'Alcohol', 'percentage expenditure', 'Hepatitis B', 'Measles', 'BMI', 'under-five deaths', 'Polio', 'Total expenditure', 'Diphtheria', 'HIV/AIDS', 'GDP', 'Population', 'thinness 1-19 years', 'thinness 5-9 years', 'Income composition of resources', 'Schooling'] y labels: Life expectancy train features shape: torch.Size([2108, 18]) train labels shape: torch.Size([2108, 1]) validation features shape: torch.Size([234, 18]) validation labels shape: torch.Size([234, 1]) test features shape: torch.Size([586, 18]) test labels shape: torch.Size([586, 1]) first 5 test labels: tensor([[65.0000], [59.5000], [58.1000], [76.2000], [75.9000]]) ###Markdown The above code returns a training, validation, and test datasets. The `x features:` printed line above shows the 18 variables chosen as our *features*, such as Alcohol usage, BMI, and GDP. And the Life expectancy variable represents the *labels*. There are three datasets--a *training dataset*, a *validation dataset* and a *test dataset*. The `train_features` and `train_labels` arrays represent the features and labels of the training dataset, each containing 2108 samples. The `valid_features` and `valid_labels` arrays represent the features and labels of the validation dataset, each containing 234 samples. The `test_features` and `test_labels` arrays represent the features and labels of the test dataset, each containing 586 samples. Now that we have the *features* and *labels* separated, we are ready to build our model! 2. Build the modelNow that the data is ready, we can build the model! Before we define the model in Python code, let's write out its function given one input sample:$$\hat{\text{Life Expectancy}} = w_1 x_1 + w_2 x_2 + ... 
$$\hat{\text{Life Expectancy}} = w_1 x_1 + w_2 x_2 + ... + w_{16} x_{16} + w_{17} x_{17} + w_{18} x_{18} + b,$$where the indices 1-18 correspond to the feature columns we defined earlier (i.e., GDP, BMI, etc.). The function can be compressed using vector notation as:$$ \hat{\text{Life Expectancy}} = \mathbf{w}^\top \mathbf{x} + b.$$Given the entire training set, we write the matrix-vector equation for our linear model as:$${\hat{\textbf{Life Expectancy}}} = \mathbf{X} \mathbf{w} + b.$$The model we build and then train will try to find the optimal *weights* ($\mathbf{w}$) and *bias* ($b$) that minimize the difference between the real labels ($\textbf{Life Expectancy}$) and the predictions ($\hat{\textbf{Life Expectancy}}$). Now that we know the function we want to estimate, let's use [PyTorch](https://pytorch.org/) to build a linear regression model, just like we did in the last lesson. However, we add one parameter to the constructor of the class, `num_features` (the number of feature variables), to make the construction of our model more flexible.

###Code
# Build the linear model
class LR_Model(nn.Module):
    """
    @params num_features(int): The number of features to construct the input layer of the NN
    """
    # Defining Constructor
    def __init__(self, num_features):
        super(LR_Model, self).__init__()

        # Defining Layers
        self.fc1 = nn.Linear(num_features, 1)

    def forward(self, x):
        return self.fc1(x)

# Initializing model
num_features = train_features.shape[1] # Getting number of features
model = LR_Model(num_features)

print('Model Summary')
print(model)

###Output
Model Summary
LR_Model(
  (fc1): Linear(in_features=18, out_features=1, bias=True)
)

###Markdown
The model we defined above is a multiple linear model that could also be called a *single-layer fully-connected neural network*. We defined it using the [`torch.nn.Linear`](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html#torch.nn.Linear) class. Note that we passed two arguments into the `torch.nn.Linear` class. The first one specifies the input feature dimension, which is 18 (corresponding to the number of x features in `train_features`), and the second one is the output feature dimension, which is a single scalar and therefore 1. Each input *feature* has a corresponding *weight* and there is one bias term. The *weights* and *bias*, or *parameters*, are connected to the single output *neuron*.

Define the loss function and optimization algorithm
After defining the model, we need to configure the *loss function* and the *optimization algorithm* for the model. We will use mean squared error for the loss function and stochastic gradient descent (SGD) for the optimization algorithm.

###Code
# Defining Loss Function
loss_fn = nn.MSELoss()

# Defining Optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

###Output
_____no_output_____

###Markdown
3. Train the model
Now that we have a model, it's time to train it. We will train the model for 100 *epochs* (i.e., iterations), and record the training losses and validation losses for every epoch.
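For reference, the mean squared error computed by `nn.MSELoss` (with its default mean reduction) over $n$ samples is

$$\mathrm{MSE} = \frac{1}{n} \sum_{i=1}^{n} \left( \hat{y}_i - y_i \right)^2,$$

so the loss values printed below are in units of squared years of life expectancy.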
###Code
# Train the model for 100 epochs, recording the losses at every epoch.
epochs = 100
train_losses = []
valid_losses = []

for epoch in range(1, epochs+1):

    ###################
    # train the model #
    ###################
    # Setting model to train mode
    model.train()

    # Setting all gradients to zero
    optimizer.zero_grad()

    # Calculate Output
    train_predictions = model(train_features)

    # Calculate Loss
    train_loss = loss_fn(train_predictions, train_labels)

    # Calculate Gradients
    train_loss.backward()

    # Perform Gradient Descent Step
    optimizer.step()

    ######################
    # validate the model #
    ######################
    # Setting model to evaluation mode, no parameters will change
    model.eval()

    # No gradients are needed for validation
    with torch.no_grad():
        # Calculate Output
        valid_predictions = model(valid_features)

        # Calculate Loss
        valid_loss = loss_fn(valid_predictions, valid_labels)

    # Saving Losses as plain Python floats
    train_losses.append(train_loss.item())
    valid_losses.append(valid_loss.item())

    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(epoch, train_loss, valid_loss))

###Output
Epoch: 1 Training Loss: 4927.366699 Validation Loss: 3122.039062 Epoch: 2 Training Loss: 3108.438721 Validation Loss: 2013.896606 Epoch: 3 Training Loss: 1992.411743 Validation Loss: 1313.419434 Epoch: 4 Training Loss: 1280.363037 Validation Loss: 862.721375 Epoch: 5 Training Loss: 825.485901 Validation Loss: 572.756470 Epoch: 6 Training Loss: 534.722473 Validation Loss: 385.774933 Epoch: 7 Training Loss: 348.769775 Validation Loss: 264.890045 Epoch: 8 Training Loss: 229.793015 Validation Loss: 186.468887 Epoch: 9 Training Loss: 153.635147 Validation Loss: 135.362579 Epoch: 10 Training Loss: 104.863373 Validation Loss: 101.853638 Epoch: 11 Training Loss: 73.613876 Validation Loss: 79.704132 Epoch: 12 Training Loss: 53.579807 Validation Loss: 64.906311 Epoch: 13 Training Loss: 40.727196 Validation Loss: 54.882664 Epoch: 14 Training Loss: 32.474895 Validation Loss: 47.973129 Epoch: 15 Training Loss: 27.170683 Validation Loss: 43.107002 Epoch: 16 Training Loss: 23.756796 Validation Loss: 39.592064 Epoch: 17 Training Loss: 21.555569 Validation Loss: 36.979801 Epoch: 18 Training Loss: 20.133001 Validation Loss: 34.978226 Epoch: 19 Training Loss: 19.210672 Validation Loss: 33.396645 Epoch: 20 Training Loss: 18.610186 Validation Loss: 32.109562 Epoch: 21 Training Loss: 18.216948 Validation Loss: 31.033894 Epoch: 22 Training Loss: 17.957478 Validation Loss: 30.113962 Epoch: 23 Training Loss: 17.784489 Validation Loss: 29.312050 Epoch: 24 Training Loss: 17.667620 Validation Loss: 28.602179 Epoch: 25 Training Loss: 17.587290 Validation Loss: 27.966141 Epoch: 26 Training Loss: 17.530901 Validation Loss: 27.390879 Epoch: 27 Training Loss: 17.490292 Validation Loss: 26.866753 Epoch: 28 Training Loss: 17.460178 Validation Loss: 26.386568 Epoch: 29 Training Loss: 17.437141 Validation Loss: 25.944696 Epoch: 30 Training Loss: 17.418934 Validation Loss: 25.536690 Epoch: 31 Training Loss: 17.404089 Validation Loss: 25.158937 Epoch: 32 Training Loss: 17.391651 Validation Loss: 24.808403 Epoch: 33 Training Loss: 17.380962 Validation Loss: 24.482571 Epoch: 34 Training Loss: 17.371595 Validation Loss: 24.179182 Epoch: 35 Training Loss: 17.363253 Validation Loss: 23.896332 Epoch: 36 Training Loss: 17.355715 Validation Loss: 23.632362 Epoch: 37 Training Loss: 17.348846 Validation Loss: 23.385727 Epoch: 38 Training Loss: 17.342541 Validation Loss: 23.155079 Epoch: 39 Training Loss: 17.336708 Validation Loss: 22.939215 Epoch: 40 Training Loss: 17.331297 Validation Loss: 22.737017 Epoch: 41 Training Loss: 17.326252 Validation Loss: 22.547482 Epoch: 42 Training Loss: 17.321543
Validation Loss: 22.369717 Epoch: 43 Training Loss: 17.317122 Validation Loss: 22.202860 Epoch: 44 Training Loss: 17.312979 Validation Loss: 22.046158 Epoch: 45 Training Loss: 17.309080 Validation Loss: 21.898909 Epoch: 46 Training Loss: 17.305391 Validation Loss: 21.760454 Epoch: 47 Training Loss: 17.301922 Validation Loss: 21.630203 Epoch: 48 Training Loss: 17.298635 Validation Loss: 21.507608 Epoch: 49 Training Loss: 17.295521 Validation Loss: 21.392174 Epoch: 50 Training Loss: 17.292582 Validation Loss: 21.283403 Epoch: 51 Training Loss: 17.289780 Validation Loss: 21.180870 Epoch: 52 Training Loss: 17.287111 Validation Loss: 21.084177 Epoch: 53 Training Loss: 17.284580 Validation Loss: 20.992945 Epoch: 54 Training Loss: 17.282158 Validation Loss: 20.906820 Epoch: 55 Training Loss: 17.279856 Validation Loss: 20.825483 Epoch: 56 Training Loss: 17.277651 Validation Loss: 20.748632 Epoch: 57 Training Loss: 17.275530 Validation Loss: 20.675991 Epoch: 58 Training Loss: 17.273514 Validation Loss: 20.607296 Epoch: 59 Training Loss: 17.271576 Validation Loss: 20.542303 Epoch: 60 Training Loss: 17.269697 Validation Loss: 20.480787 Epoch: 61 Training Loss: 17.267902 Validation Loss: 20.422537 Epoch: 62 Training Loss: 17.266174 Validation Loss: 20.367361 Epoch: 63 Training Loss: 17.264507 Validation Loss: 20.315060 Epoch: 64 Training Loss: 17.262899 Validation Loss: 20.265465 Epoch: 65 Training Loss: 17.261339 Validation Loss: 20.218410 Epoch: 66 Training Loss: 17.259832 Validation Loss: 20.173769 Epoch: 67 Training Loss: 17.258371 Validation Loss: 20.131371 Epoch: 68 Training Loss: 17.256947 Validation Loss: 20.091108 Epoch: 69 Training Loss: 17.255575 Validation Loss: 20.052847 Epoch: 70 Training Loss: 17.254234 Validation Loss: 20.016470 Epoch: 71 Training Loss: 17.252928 Validation Loss: 19.981890 Epoch: 72 Training Loss: 17.251661 Validation Loss: 19.948965 Epoch: 73 Training Loss: 17.250423 Validation Loss: 19.917622 Epoch: 74 Training Loss: 17.249207 Validation Loss: 19.887779 Epoch: 75 Training Loss: 17.248028 Validation Loss: 19.859341 Epoch: 76 Training Loss: 17.246866 Validation Loss: 19.832235 Epoch: 77 Training Loss: 17.245731 Validation Loss: 19.806391 Epoch: 78 Training Loss: 17.244625 Validation Loss: 19.781731 Epoch: 79 Training Loss: 17.243525 Validation Loss: 19.758188 Epoch: 80 Training Loss: 17.242462 Validation Loss: 19.735712 Epoch: 81 Training Loss: 17.241404 Validation Loss: 19.714241 Epoch: 82 Training Loss: 17.240372 Validation Loss: 19.693697 Epoch: 83 Training Loss: 17.239349 Validation Loss: 19.674082 Epoch: 84 Training Loss: 17.238346 Validation Loss: 19.655319 Epoch: 85 Training Loss: 17.237356 Validation Loss: 19.637350 Epoch: 86 Training Loss: 17.236387 Validation Loss: 19.620159 Epoch: 87 Training Loss: 17.235420 Validation Loss: 19.603672 Epoch: 88 Training Loss: 17.234472 Validation Loss: 19.587877 Epoch: 89 Training Loss: 17.233534 Validation Loss: 19.572739 Epoch: 90 Training Loss: 17.232601 Validation Loss: 19.558214 Epoch: 91 Training Loss: 17.231688 Validation Loss: 19.544283 Epoch: 92 Training Loss: 17.230780 Validation Loss: 19.530903 Epoch: 93 Training Loss: 17.229885 Validation Loss: 19.518057 Epoch: 94 Training Loss: 17.228991 Validation Loss: 19.505716 Epoch: 95 Training Loss: 17.228111 Validation Loss: 19.493839 Epoch: 96 Training Loss: 17.227232 Validation Loss: 19.482431 Epoch: 97 Training Loss: 17.226366 Validation Loss: 19.471437 Epoch: 98 Training Loss: 17.225506 Validation Loss: 19.460869 Epoch: 99 Training Loss: 17.224655 Validation Loss: 
19.450682 Epoch: 100 Training Loss: 17.223806 Validation Loss: 19.440872

###Markdown
We get to ~19 validation mean squared error after training for 100 epochs on the training dataset. For reference, the same metric was ~172 in the single-variable linear regression model (from the previous lesson). Let's visualize the model's training progress.

###Code
plt.plot(valid_losses)
plt.xlabel('epoch')
plt.ylabel('Mean Squared Error')
plt.title('Validation Loss')
plt.show()

###Output
_____no_output_____

###Markdown
From the plot we can see that our model *converges* smoothly: the validation loss decreases steadily and has nearly flattened out by epoch 100. In other words, parameters (weights and bias) close to their optimal values are found after about 100 training iterations.

4. Evaluate the model
Now that we trained our model, it's time to evaluate it using the *test* dataset, which we did not use when training the model. This gives us a sense of how well our model predicts unseen data, which is the case when we use it in the real world.

###Code
# Setting model to evaluation mode
model.eval()

# Getting Predictions (no gradients are needed for evaluation)
with torch.no_grad():
    test_predictions = model(test_features)

    # Calculating the loss, which is the Mean Squared Error
    loss = loss_fn(test_predictions, test_labels)

print('Test set Mean Squared Error: ', round(loss.item(), 4))

###Output
Test set Mean Squared Error:  16.676

###Markdown
The mean squared error is around ~16, which is better than the single-variable linear model's ~134. Is this good? We'll leave that decision up to you. Let's also visualize the predicted and real life expectancy values using data in the test set.

###Code
# Flattening test predictions
test_predictions = test_predictions.flatten().detach().numpy()

# Getting the higher boundary
if max(test_labels) < max(train_labels):
    higher_boundary_lim = int(max(train_labels))
else:
    higher_boundary_lim = int(max(test_labels))

ax = plt.axes(aspect='equal')
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [Life Expectancy]')
plt.ylabel('Predictions [Life Expectancy]')
lims = [0, higher_boundary_lim + 1] # [0, 90]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)

###Output
_____no_output_____

###Markdown
It looks like our model predicts reasonably well. Let's take a look at the error distribution.

###Code
error = test_predictions - test_labels.flatten().detach().numpy()
plt.hist(error, bins = 10)
plt.xlabel("Prediction Error [Life Expectancy]")
_ = plt.ylabel("Count")

###Output
_____no_output_____

###Markdown
The histogram shows that the errors are roughly *normally distributed* (also called *Gaussian*), since we see that most errors are close to 0.

5. Draw Conclusions
We built a single-layer fully-connected neural network model (multiple linear regression model) to predict life expectancy given a country's features. The model converged after about 100 epochs of training, and it achieved a test mean squared error of ~16. Including more features in the model outperformed the single-variable linear regression model, confirming the hypothesis we made last lesson that more features could improve performance. That said, we expect that a *deeper* model (more layers and neurons) and more data samples could improve performance further.

Summary
In this lesson we took a deeper dive into multiple regression, from the perspective of neural networks. We built a single-layer fully-connected neural network and demonstrated how to train and evaluate it. We covered several important techniques, most importantly: using multiple features to train a linear model.
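###Markdown
As a closing illustration, here is a minimal inference sketch (not executed here) showing how the trained model could be used to predict life expectancy for a single new sample; it assumes that `model`, `means`, `stds`, `x_cols`, and the `test` dataframe defined above are still in scope.

###Code
# A minimal sketch: predict life expectancy for one raw sample.
# Assumes `model`, `means`, `stds`, `x_cols`, and `test` from above.
model.eval()
with torch.no_grad():
    raw_sample = test[x_cols].iloc[0].values          # one raw feature vector
    x = torch.tensor((raw_sample - means) / stds,     # apply the same normalization as in training
                     dtype=torch.float).unsqueeze(0)  # add a batch dimension
    prediction = model(x)
print('Predicted life expectancy:', prediction.item())

###Output
_____no_output_____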
###Code ###Output _____no_output_____
data_handling.ipynb
###Markdown
Introduction to Deep Learning with Keras
Tiago Vale
Adapted from a tutorial by Michela Paganini

This is the first of two notebooks for today's tutorial. You will find the second one here.

What is Deep Learning?
> Deep learning is a particular kind of machine learning that achieves great power and flexibility by learning to represent the world as a nested hierarchy of concepts, with each concept defined in relation to simpler concepts, and more abstract representations computed in terms of less abstract ones. I. Goodfellow, Y. Bengio, and A. Courville, "Deep Learning", MIT Press, 2016. http://www.deeplearningbook.org/

Representation Learning
> Use machine learning to discover not only the mapping from representation to output but also the representation itself. I. Goodfellow, Y. Bengio, and A. Courville, "Deep Learning", MIT Press, 2016. http://www.deeplearningbook.org/

*e.g.*:
* What’s 1753 + 131?
* What’s MMXVII - MCMXCI?

Our goal:
> Learn a function that maps a set of raw electrical signals from the detector all the way to particle identification

From D. Whiteson:
Deep learning tackles this by breaking down the task into a series of nested and simpler mappings, which can be described by each different layer of the model. Deeper nets give us the flexibility to sequentially learn more and more abstract features from iterative non-linear transformations of the input data. Higher layers of representation amplify aspects of the input that are important for the given task and suppress irrelevant variations.

Learning Recipe
In parametric, supervised learning:
- **Objective**: in the form of a differentiable loss function, to calculate how far your current prediction is from the target
- **Model**: parametrized by learnable parameters
- **Training strategy**: how to update the parameters
- **Data**: enough to learn from & test your performance

---

Practical Example
From CMS Software Tutorial, developed by Christian Sander and Alexander Schmidt, and available on . The samples come from a $t\bar{t}$ analysis and they are described in a very clear and accessible way in the documentation. This was chosen to provide you with a full demo of my typical workflow, going from a `ROOT` file to a fully trained Keras model.

**Disclaimer: my applications won't make 100% physical sense -- please focus on the tools!**

Before diving into the Deep Learning world, I want to spend a few minutes discussing some **data handling techniques** I use whenever I get started prototyping my applications.

NumPy

###Code
import numpy as np
np.array([[0,1,2],
          [0,0,0],
          [1,2,-1]]) #+1 /2 **2 .ravel() etc.

###Output
_____no_output_____

###Markdown
For a nice `numpy` intro, check out the CERN tutorial Loose your Loops with NumPy (and tons of online material).

Idea: What if those columns represented various branches and every line represented an event/physics object?

From ROOT to numpy: `uproot`
In this adapted version we use `uproot` (imported below as `uproot3`) rather than `root_numpy`: it makes it just as easy to turn your `.root` files into machine-learning-compliant inputs using `numpy`. Let's take a look at the MC signal sample from our CMS open dataset:

###Code
import uproot3
ttbar = uproot3.open('files/ttbar.root')

# -- display your newly created object
ttbar['events'].keys()

# -- what data type is it?
type(ttbar)

# -- how many events are present?
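# (added sketch, assuming the uproot3 API) a TTree in uproot3 exposes a
# `numentries` attribute, which is one way to answer the question above:
ttbar['events'].numentries

# -- list the branches and their types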
ttbar['events'].show()

ttbar['events'].array('Jet_Px')

###Output
_____no_output_____

###Markdown
Pandas Dataframes

###Code
import pandas as pd

###Output
_____no_output_____

###Markdown
One way of manipulating your data (slicing, filtering, removing variables, creating new features, taking operations of branches) in a simple, visually appealing way is to use `pandas` dataframes, a beautiful and efficient Python data structure library. Recommended for exploratory data analysis, probably not for high performance applications.

###Code
# -- how to turn the tree into a pandas dataframe
df = ttbar['events'].pandas.df(["Jet*","Electron*","Muon*","Photon*"],flatten=False)

# -- better way of displaying your data
df.head() # print the first few entries

# -- ... or the last few
df.tail()

# -- check the shape: it should be [nb_events, nb_variables]
df.shape

df.info()

df.keys() #df.columns

###Output
_____no_output_____

###Markdown
Now, let's create a new dataframe that contains only jet-related branches by slicing our pre-existing ttbar dataframe

###Code
# slice the dataframe
jet_df = df[[key for key in df.keys() if key.startswith('Jet')]]
jet_df.head()

###Output
_____no_output_____

###Markdown
This would be useful if you wanted to classify your events only by using the properties of jets in each event. What if your application involved classifying jets, instead of events? In this case, you might want to turn your dataset from event-flat to jet-flat, i.e. a dataframe in which every row represents a jet and every column is a property of this jet. This is extremely easy to do using `pandas` and `numpy`:

###Code
def flatten(column):
    '''
    Args:
    -----
        column: a column of a pandas df whose entries are lists (or regular entries -- in which case nothing is done)
                e.g.: my_df['some_variable']

    Returns:
    --------
        flattened out version of the column.
        For example, it will turn:
        [1791, 2719, 1891]
        [1717, 1, 0, 171, 9181, 537, 12]
        [82, 11]
        ...
        into:
        1791, 2719, 1891, 1717, 1, 0, 171, 9181, 537, 12, 82, 11, ...
    '''
    try:
        return np.array([v for e in column for v in e])
    except (TypeError, ValueError):
        return column

# -- ok, let's try it out!
df_flat = pd.DataFrame({k: flatten(c) for k, c in jet_df.iteritems()})
df_flat.head()

###Output
_____no_output_____

###Markdown
Plot your data using Matplotlib
Using `pandas` in conjunction with `matplotlib`, you can also inspect your variables super quickly. Check out the following cells for a quick example.

###Code
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline

# iterate through the columns
for key in df_flat.keys():
    # plotting settings
    matplotlib.rcParams.update({'font.size': 16})
    fig = plt.figure(figsize=(5, 5), dpi=100)
    bins = np.linspace(min(df_flat[key]), max(df_flat[key]), 30)

    # plot!
    _ = plt.hist(df_flat[key], bins=bins, histtype='step', label=r'$t\overline{t}$')

    # decorate
    plt.xlabel(key)
    plt.ylabel('Number of Jets')
    plt.legend()
    plt.plot()

###Output
<string>:6: RuntimeWarning: Converting input from bool to <class 'numpy.uint8'> for compatibility.

###Markdown
It's really easy and intuitive to add new columns to a dataframe. You can also define them as functions of other columns. This is great if you need to build your own hand-crafted variables.
###Code
df['Jet_P'] = (df['Jet_Px']**2 + df['Jet_Py']**2 + df['Jet_Pz']**2)**(0.5)

# -- again, you can easily slice dataframes by specifying the names of the branches you would like to select
df[['Jet_P', 'Jet_Px', 'Jet_Py', 'Jet_Pz', 'Jet_E']].head()

import uproot_methods

# -- you can also build four vectors and store them in a new column in 1 line of code
from uproot_methods import TLorentzVector
df['Jet_4V'] = [map(lambda args: TLorentzVector(*args), zip(px, py, pz, e))
                for (_, (px, py, pz, e)) in df[['Jet_Px', 'Jet_Py', 'Jet_Pz', 'Jet_E']].iterrows()]

# -- look at the 4-vectors of the jets in the first 5 events
[_ for _ in df['Jet_4V'].head()]

# -- calculate the mass (or any other property) of all the jets in the first event
[jet.mass for jet in df['Jet_4V'][0]]

###Output
_____no_output_____

###Markdown
CIFAR 10

###Code
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

# Applying transforms to an image. Some are mandatory, like conversion to a tensor
# and normalizing the image. Others are optional and used for data augmentation
# (like random crop, random jitter etc.)
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

###Output
_____no_output_____

###Markdown
torch.utils.data.Dataset is an abstract class inherited by all the built-in datasets of PyTorch. This class represents a dataset and, by implementing the __len__() and __getitem__() methods, allows clients to index and iterate through datasets. Custom implementations should inherit this class and provide the standard interface to its clients.

###Code
trainset = torchvision.datasets.CIFAR10(root = './data', train = True,
                                        download = True, transform = transform)

len(trainset.train_data)

plt.imshow(trainset.train_data[1])
print(trainset.train_labels[1]) # happens to be the truck class

###Output
_____no_output_____

###Markdown
torch.utils.data.DataLoader can load multiple samples in parallel. It provides an iterator to iterate over batches, with the batch size specified. It helps in
- Batching the data
- Shuffling the data
- Loading the data in parallel using multiprocessing workers.
###Code
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10,
                                          shuffle=True, num_workers=2)

###Output
_____no_output_____

###Markdown
training loop

###Code
for i,data in enumerate(trainloader):
    data, labels = data
    print("iteration",i)
    print("")
    print("type of data:",type(data))
    print("size of data:",data.size())
    print("")
    print("type of labels:",type(labels))
    print("size of labels:",labels.size())
    # model is trained here
    break

###Output
iteration 0 type of data: <class 'torch.FloatTensor'> size of data: torch.Size([10, 3, 32, 32]) type of labels: <class 'torch.LongTensor'> size of labels: torch.Size([10])

###Markdown
Custom Datasets
For reading custom datasets we need to create our own custom Dataset class, which inherits from torch.utils.data.Dataset. Then we use torch.utils.data.DataLoader to iterate through the data as well as for shuffling. We need to override the __len__ and __getitem__ methods to provide our own functionality in the dataset class.

###Code
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils

# Ignore warnings
import warnings
warnings.filterwarnings("ignore")

plt.ion()   # interactive mode

landmarks_frame = pd.read_csv('faces/face_landmarks.csv')
landmarks_frame.head()

n = 65
img_name = landmarks_frame.iloc[n,0]
landmarks = landmarks_frame.iloc[n, 1:].to_numpy()  # as_matrix() is deprecated in recent pandas
landmarks = landmarks.astype('float').reshape(-1,2)

landmarks.shape
landmarks[:4]

def show_landmarks(image, landmarks):
    "Show image with landmarks"
    plt.imshow(image)
    plt.scatter(landmarks[:,0], landmarks[:,1], s=10, marker='.', c='r')
    plt.pause(0.001)

plt.figure()
show_landmarks(io.imread(os.path.join('faces/',img_name)), landmarks)
plt.show()

###Output
_____no_output_____

###Markdown
Writing Custom Dataset Class
We need to override the following methods:
- \_\_len\_\_
- \_\_getitem\_\_ to support indexing, such that dataset[i] returns the i-th sample

###Code
class FaceLandmarksDataset(Dataset):
    """Face Landmarks Dataset"""

    def __init__(self, csv_file, root_dir, transform = None):
        self.landmarks_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, self.landmarks_frame.iloc[idx,0])
        image = io.imread(img_name)
        landmarks = self.landmarks_frame.iloc[idx, 1:].to_numpy()  # as_matrix() is deprecated
        landmarks = landmarks.astype('float').reshape(-1,2)
        sample = {'image': image, 'landmarks': landmarks}

        if self.transform:
            sample = self.transform(sample)

        return sample

face_dataset = FaceLandmarksDataset(csv_file="faces/face_landmarks.csv",
                                    root_dir="faces/")

fig = plt.figure()

for i in range(len(face_dataset)):
    sample = face_dataset[i]
    print(i, sample['image'].shape, sample['landmarks'].shape)

    ax = plt.subplot(1, 4, (i + 1))
    plt.tight_layout()
    ax.set_title('Sample #{}'.format(i))
    ax.axis('off')
    show_landmarks(**sample)

    if i == 3:
        plt.show()
        break

###Output
0 (324, 215, 3) (68, 2)

###Markdown
Transform the images for Neural Net processing and data augmentation
- Rescale: to scale the image
- RandomCrop: crop the image randomly. This is data augmentation
- ToTensor: convert numpy images to torch images

Making the transforms callable so that their parameters need not be passed again and again.
###Code class Rescale(object): def __init__(self, output_size): assert isinstance(output_size, (int, tuple)) self.output_size = output_size def __call__(self, sample): image, landmarks = sample['image'], sample['landmarks'] h, w = image.shape[:2] if isinstance(self.output_size, int): if h>w: new_h, new_w = self.output_size * h / w, self.output_size else: new_h, new_w = self.output_size, self.output_size*w/h else: new_h, new_w = self.output_size new_h, new_w = int(new_h), int(new_w) img = transform.resize(image, (new_h, new_w)) landmarks = landmarks * [new_w / w, new_h / h] return {'image': img, 'landmarks': landmarks} class RandomCrop(object): def __init__(self, output_size): assert isinstance(output_size, (int, tuple)) if isinstance(output_size, int): self.output_size = (output_size, output_size) else: assert len(output_size) == 2 self.output_size = output_size def __call__(self, sample): image, landmarks = sample['image'], sample['landmarks'] h, w = image.shape[:2] new_h, new_w = self.output_size top = np.random.randint(0, h - new_h) left = np.random.randint(0, w - new_w) image = image[top: top + new_h, left: left + new_w] landmarks = landmarks - [left, top] return {'image': image, 'landmarks': landmarks} class ToTensor(object): def __call__(self, sample): image, landmarks = sample['image'], sample['landmarks'] image = image.transpose((2,0,1)) return {'image': torch.from_numpy(image), 'landmarks': torch.from_numpy(landmarks)} ###Output _____no_output_____ ###Markdown Compose and apply transforms ###Code scale = Rescale(256) crop = RandomCrop(128) composed = transforms.Compose([Rescale(256), RandomCrop(224)]) # Apply each of the above transforms on sample. fig = plt.figure() sample = face_dataset[65] for i, tsfrm in enumerate([scale, crop, composed]): transformed_sample = tsfrm(sample) ax = plt.subplot(1, 3, i + 1) plt.tight_layout() ax.set_title(type(tsfrm).__name__) show_landmarks(**transformed_sample) plt.show() transformed_dataset = FaceLandmarksDataset(csv_file="faces/face_landmarks.csv", root_dir="faces/", transform = transforms.Compose([Rescale(256), RandomCrop(224), ToTensor()])) for i in range(len(transformed_dataset)): sample = transformed_dataset[i] print(i, sample['image'].size(), sample['landmarks'].size()) if i == 3: break ###Output 0 torch.Size([3, 224, 224]) torch.Size([68, 2]) 1 torch.Size([3, 224, 224]) torch.Size([68, 2]) 2 torch.Size([3, 224, 224]) torch.Size([68, 2]) 3 torch.Size([3, 224, 224]) torch.Size([68, 2]) ###Markdown Still missing:- Batching the data- Shuffling the data- Load the data in parallel using multiprocessing workers. we will use torch.utils.data.DataLoader that provides all these features. 
###Code
data_loader = DataLoader(transformed_dataset, batch_size=4,
                         shuffle=True, num_workers=4)

# Helper function to show a batch
def show_landmarks_batch(sample_batched):
    images_batch, landmarks_batch = sample_batched['image'], sample_batched['landmarks']
    batch_size = len(images_batch)
    im_size = images_batch.size(2)

    grid = utils.make_grid(images_batch)
    plt.imshow(grid.numpy().transpose((1,2,0)))

    for i in range(batch_size):
        plt.scatter(landmarks_batch[i,:,0].numpy() + i * im_size,
                    landmarks_batch[i, :, 1].numpy(),
                    s=10, marker='.', c='r')

    plt.title('Batch from dataloader')

for i_batch, sample_batched in enumerate(data_loader):
    print(i_batch, sample_batched['image'].size(), sample_batched['landmarks'].size())

    if i_batch == 3:
        plt.figure()
        show_landmarks_batch(sample_batched)
        plt.axis('off')
        plt.ioff()
        plt.show()
        break

###Output
0 torch.Size([4, 3, 224, 224]) torch.Size([4, 68, 2])
1 torch.Size([4, 3, 224, 224]) torch.Size([4, 68, 2])
2 torch.Size([4, 3, 224, 224]) torch.Size([4, 68, 2])
3 torch.Size([4, 3, 224, 224]) torch.Size([4, 68, 2])

###Markdown
Introduction to Deep Learning with Keras and HEP Data Exploration in Python
Michela Paganini - Yale University
High Energy Phenomenology, Experiment and Cosmology Seminar Series

This is the first of two notebooks for today's tutorial. You will find the second one here.

What is Deep Learning?
> Deep learning is a particular kind of machine learning that achieves great power and flexibility by learning to represent the world as a nested hierarchy of concepts, with each concept defined in relation to simpler concepts, and more abstract representations computed in terms of less abstract ones. I. Goodfellow, Y. Bengio, and A. Courville, "Deep Learning", MIT Press, 2016. http://www.deeplearningbook.org/

Representation Learning
> Use machine learning to discover not only the mapping from representation to output but also the representation itself. I. Goodfellow, Y. Bengio, and A. Courville, "Deep Learning", MIT Press, 2016. http://www.deeplearningbook.org/

*e.g.*:
* What’s 1753 + 131?
* What’s MMXVII - MCMXCI?

Our goal:
> Learn a function that maps a set of raw electrical signals from the detector all the way to particle identification

From D. Whiteson:
Deep learning tackles this by breaking down the task into a series of nested and simpler mappings, which can be described by each different layer of the model. Deeper nets give us the flexibility to sequentially learn more and more abstract features from iterative non-linear transformations of the input data. Higher layers of representation amplify aspects of the input that are important for the given task and suppress irrelevant variations.

Learning Recipe
In parametric, supervised learning:
- **Objective**: in the form of a differentiable loss function, to calculate how far your current prediction is from the target
- **Model**: parametrized by learnable parameters
- **Training strategy**: how to update the parameters
- **Data**: enough to learn from & test your performance

---

Practical Example
From CMS Software Tutorial, developed by Christian Sander and Alexander Schmidt, and available on . The samples come from a $t\bar{t}$ analysis and they are described in a very clear and accessible way in the documentation.
This was chosen to provide you with a full demo of my typical workflow, going from a `ROOT` file to a fully trained Keras model.

**Disclaimer: my applications won't make 100% physical sense -- please focus on the tools!**

Before diving into the Deep Learning world, I want to spend a few minutes discussing some **data handling techniques** I use whenever I get started prototyping my applications.

NumPy

###Code
import numpy as np
np.array([[0,1,2],
          [0,0,0],
          [1,2,-1]]) #+1 /2 **2 .ravel() etc.

###Output
_____no_output_____

###Markdown
For a nice `numpy` intro, check out the CERN tutorial Loose your Loops with NumPy (and tons of online material).

Idea: What if those columns represented various branches and every line represented an event/physics object?

From ROOT to numpy: `root_numpy`
Very easy to turn your `.root` files into machine-learning-compliant inputs using `numpy` and `root_numpy`.

###Code
from numpy.lib.recfunctions import stack_arrays
from root_numpy import root2array, root2rec
import glob

###Output
_____no_output_____

###Markdown
Using one single function from `root_numpy`, you can open your `.root` file and turn it into an `ndarray`, a Python object equivalent to an n-dimensional matrix. All you need to do is to pass it the name of the file you'd like to open. Other keyword arguments are specified below. Let's take a look at the MC signal sample from our CMS open dataset:

###Code
ttbar = root2array('files/ttbar.root')

# -- display your newly created object
ttbar

# -- what data type is it?
type(ttbar)

# -- how many events are present?
ttbar.shape

# -- what are the names of the branches?
ttbar.dtype.names

###Output
_____no_output_____

###Markdown
Pandas Dataframes

###Code
import pandas as pd

###Output
_____no_output_____

###Markdown
One way of manipulating your data (slicing, filtering, removing variables, creating new features, taking operations of branches) in a simple, visually appealing way is to use `pandas` dataframes, a beautiful and efficient Python data structure library. Recommended for exploratory data analysis, probably not for high performance applications.

###Code
# -- how to turn an ndarray into a pandas dataframe
df = pd.DataFrame(ttbar)

# -- better way of displaying your data
df.head() # print the first few entries

# -- ... or the last few
df.tail()

# -- check the shape: it should be [nb_events, nb_variables]
df.shape

df.info()

df.keys() #df.columns

###Output
_____no_output_____

###Markdown
To summarize, if you want to go directly from `.root` files to pandas dataframes, you can do so in 3 lines of Python code. I like to use this function below in all my applications whenever I load in data from a `ROOT` file. Feel free to copy it and use it!
###Code def root2pandas(files_path, tree_name, **kwargs): ''' Args: ----- files_path: a string like './data/*.root', for example tree_name: a string like 'Collection_Tree' corresponding to the name of the folder inside the root file that we want to open kwargs: arguments taken by root2array, such as branches to consider, start, stop, step, etc Returns: -------- output_panda: a pandas dataframe like allbkg_df in which all the info from the root file will be stored Note: ----- if you are working with .root files that contain different branches, you might have to mask your data in that case, return pd.DataFrame(ss.data) ''' # -- create list of .root files to process files = glob.glob(files_path) # -- process ntuples into rec arrays ss = stack_arrays([root2array(fpath, tree_name, **kwargs).view(np.recarray) for fpath in files]) try: return pd.DataFrame(ss) except Exception: return pd.DataFrame(ss.data) # -- usage of root2pandas singletop = root2pandas('./files/single_top.root', 'events') ###Output _____no_output_____ ###Markdown We just turned a HEP-specific `ROOT` file into a standard data format that can be used by any ML expert. You can now save your data out to widely accepted data formats such as `HDF5` or even `CSV`, share it with your collaborators from the ML community without them having to learn how to use the ROOT library or other CERN-specific analysis tools. ###Code # -- save a pandas df to hdf5 (better to first convert it back to ndarray, to be fair) import deepdish.io as io io.save('ttbar.h5', df) singletop.to_hdf('try.h5', 'branches') # -- let's load it back in to make sure it actually worked! new_df = io.load('ttbar.h5') new_df.head() # -- check the shape again -- nice check to run every time you create a df new_df.shape ###Output _____no_output_____ ###Markdown --- Now, let's create a new dataframe that contains only jet-related branches by slicing our pre-existing ttbar dataframe ###Code # slice the dataframe jet_df = df[[key for key in df.keys() if key.startswith('Jet')]] jet_df.head() ###Output _____no_output_____ ###Markdown This would be useful if you wanted to classify your events only by using the properties of jets in each event. What if your application involved classifying jets, instead of events? In this case, you might want to turn your dataset from event-flat to jet-flat, i.e. a dataframe in which every row represents a jet and every column is a property of this jet. This is extremely easy to do using `pandas` and `numpy`: ###Code def flatten(column): ''' Args: ----- column: a column of a pandas df whose entries are lists (or regular entries -- in which case nothing is done) e.g.: my_df['some_variable'] Returns: -------- flattened out version of the column. For example, it will turn: [1791, 2719, 1891] [1717, 1, 0, 171, 9181, 537, 12] [82, 11] ... into: 1791, 2719, 1891, 1717, 1, 0, 171, 9181, 537, 12, 82, 11, ... ''' try: return np.array([v for e in column for v in e]) except (TypeError, ValueError): return column # -- ok, let's try it out! df_flat = pd.DataFrame({k: flatten(c) for k, c in jet_df.iteritems()}) df_flat.head() ###Output _____no_output_____ ###Markdown Plot your data using Matplotlib Using `pandas` in conjunction with `matplotlib`, you can also inspect your variables super quickly. Check out the following cells for a quick example. 
###Code import matplotlib import matplotlib.pyplot as plt %matplotlib inline # iterate through the columns for key in df_flat.keys(): # plotting settings matplotlib.rcParams.update({'font.size': 16}) fig = plt.figure(figsize=(5, 5), dpi=100) bins = np.linspace(min(df_flat[key]), max(df_flat[key]), 30) # plot! _ = plt.hist(df_flat[key], bins=bins, histtype='step', label=r'$t\overline{t}$') # decorate plt.xlabel(key) plt.ylabel('Number of Jets') plt.legend() plt.plot() ###Output _____no_output_____ ###Markdown It's really easy and intuitive to add new columns to a dataframe. You can also define them as functions of other columns. This is great if you need to build your own hand-crafted variables. ###Code df['Jet_P'] = (df['Jet_Px']**2 + df['Jet_Py']**2 + df['Jet_Pz']**2)**(0.5) # -- again, you can easily slice dataframes by specifying the names of the branches you would like to select df[['Jet_P', 'Jet_Px', 'Jet_Py', 'Jet_Pz', 'Jet_E']].head() # -- you can also build four vectors and store them in a new column in 1 line of code from rootpy.vector import LorentzVector df['Jet_4V'] = [map(lambda args: LorentzVector(*args), zip(px, py, pz, e)) for (_, (px, py, pz, e)) in df[['Jet_Px', 'Jet_Py', 'Jet_Pz', 'Jet_E']].iterrows()] # -- look at the 4-vectors of the jets in the first 5 events [_ for _ in df['Jet_4V'].head()] # -- calculate the mass (or any other property) of all the jets in the first event [jet.M() for jet in df['Jet_4V'][0]] ###Output _____no_output_____
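###Markdown
As a quick follow-up sketch (hedged: it assumes rootpy's `LorentzVector` supports addition and `.M()` like the ROOT `TLorentzVector` it wraps), you could combine two of these four-vectors, e.g. to compute the invariant mass of the first two jets in an event:

###Code
# A minimal sketch: invariant mass of the first two jets in event 0.
# The four-vectors are rebuilt from the raw components so the cell is self-contained.
row = df.iloc[0]
jets = [LorentzVector(px, py, pz, e)
        for px, py, pz, e in zip(row['Jet_Px'], row['Jet_Py'],
                                 row['Jet_Pz'], row['Jet_E'])]
if len(jets) >= 2:
    print((jets[0] + jets[1]).M())

###Output
_____no_output_____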
hdf5.ipynb
###Markdown
| Author | Affiliation |
| ---- | ---- |
| [Giuseppe La Rocca](mailto:[email protected]) | EGI Foundation |
| [Enol Fernández](mailto:[email protected]) | EGI Foundation |
| [Marco De Simone](mailto:[email protected]) | CERIC-ERIC |
| [Michal Orzechowski](mailto:[email protected]) | CYFRONET |
| [Łukasz Opioła](mailto:[email protected]) | CYFRONET |
| [Bartosz Kryza](mailto:[email protected]) | CYFRONET |
| [Miroslav Ruda](mailto:[email protected]) | CESNET-MCC |
| [Andrei Kirushchanka](mailto:[email protected]) | CESNET-MCC |
| [Christos Kanellopoulos](mailto:[email protected]) | GEANT |

| Created | Last updated | License |
| ---- | ---- | ---- |
| 01 April 2020 | 09 April 2020 | Apache v2.0 |

---

About this notebook
- This notebook is used to demonstrate how a scientist from one of the PaNOSC RIs can use the resources provided by EGI to perform analysis on the data sets obtained during an experiment.
- The PaNOSC RI involved in this data transfer pilot is CERIC-ERIC from Trieste. For this pilot use case CERIC-ERIC provided access to hdf5 data sets.
- CESNET-MCC is the cloud provider of the EGI Federation supporting this pilot use case with computing and storage resources.
- To federate the hdf5 data sets, and enable data replication across different RI providers, the Onedata software stack will be used.
- The EGI Notebooks service, based on JupyterHub technology, is used as an agile environment to access and process data sets.
- Federated credentials released by UmbrellaID are used to access the services and resources of this pilot.
- To read and analyse hdf5 data sets we used the H5Glance library developed by the European-XFEL and the h5py Python library.

---

High-level architecture of the PaNOSC Data Transfer Pilot
![title](imgs/Architecture.png)

Install additional libraries
Libraries can also be installed from a Linux terminal

###Code
%pip install h5glance

###Output
Requirement already satisfied: h5glance in /opt/conda/lib/python3.7/site-packages (0.6)
Requirement already satisfied: h5py in /opt/conda/lib/python3.7/site-packages (from h5glance) (2.10.0)
Requirement already satisfied: htmlgen in /opt/conda/lib/python3.7/site-packages (from h5glance) (1.2.2)
Requirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from h5py->h5glance) (1.14.0)
Requirement already satisfied: numpy>=1.7 in /opt/conda/lib/python3.7/site-packages (from h5py->h5glance) (1.18.1)
Note: you may need to restart the kernel to use updated packages.
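###Markdown
For example (a sketch; the exact command depends on the environment), installing from a Linux terminal instead of the notebook would look something like `pip install --user h5glance h5py`; after installing this way, the notebook kernel may need to be restarted to pick up the new packages.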
###Markdown Import the necessary libraries ###Code import h5py from h5glance import H5Glance # Import auxiliary libraries import os, io import requests import matplotlib.pyplot as plt from PIL import Image import os.path from os import path # Import additional Onedata library from fs.onedatafs import OnedataFS ###Output _____no_output_____ ###Markdown Get the .h5 datasets location in the shared volume space ###Code folder_name="marco.desimone/samples/twinmic/" #folder_name="marco.desimone/samples/" # Listing all volume space headers = {'X-Auth-Token': os.environ['ONECLIENT_ACCESS_TOKEN']} url = "https://%s/api/v3/oneprovider/spaces/" %os.environ['ONEPROVIDER_HOST'] r = requests.get(url=url, headers=headers) print("Response from: %s" %os.environ['ONEPROVIDER_HOST']) print(r.json()) spaceId = r.json()[0]['spaceId'] space_name = r.json()[0]['name'] print("\nSpaceID = %s" %spaceId) print("Space Name = %s" %space_name) # Create the path in OZ where the .h5 datasets is stored OZ_path = os.path.join('/', space_name, folder_name) print("\nDatasets is available at: %s" % OZ_path) ###Output Response from: oneprovider-pn.onezone-panosc.egi.eu [{'spaceId': '37b5b4f6a9df5fe38500273755b37996', 'name': 'CERIC-ERIC'}] SpaceID = 37b5b4f6a9df5fe38500273755b37996 Space Name = CERIC-ERIC Datasets is available at: /CERIC-ERIC/marco.desimone/samples/twinmic/ ###Markdown Listing datasets in the volume space ###Code # Create connection to Oneprovider odfs = OnedataFS(os.environ['ONEPROVIDER_HOST'], os.environ['ONECLIENT_ACCESS_TOKEN'], force_proxy_io=True) # Open remote dir s1 = odfs.opendir(OZ_path) # List contents from remote dir s1.listdir('') ###Output _____no_output_____ ###Markdown Reading the specific .h5 datasets ###Code #file_name = "simulated.sample3d-2gb-1.h5" #file_name = "simulated.sample2d-10gb-001.h5" file_name = "twinmic_scan_41.h5" h5file_name = os.path.join(OZ_path, file_name) print("Loading datasets: [%s] from the volume space" %h5file_name) f = odfs.openbin(h5file_name, mode='r') os.environ['H5PY_DEFAULT_READONLY'] = '1' fh5py = h5py.File(f) H5Glance(fh5py) ###Output Loading datasets: [/CERIC-ERIC/marco.desimone/samples/twinmic/twinmic_scan_41.h5] from the volume space ###Markdown Plotting the datasets ###Code # This code snippet simulates the processing of datasets read from the volume space. # As result of this simulation, the PNG file will be uplodaded from the notebooks space # in the default OneProvider (CESNET-MCC). print("Available keys: %s" % fh5py.keys()) plt_file="result.png" if ("triggers" in fh5py.keys()): # Getting datasets triggers = fh5py['triggers'] timestamp = fh5py['triggers_timestamp'] sample_stage_x = fh5py['sample_motors/sample_stage_x'] sample_x_pos = fh5py['sample_motors/sample_x_pos'] sample_y_pos = fh5py['sample_motors/sample_y_pos'] sample_stage_y = fh5py['sample_motors/sample_stage_y'] print("- triggers: %s" %triggers[:]) print("- timestamp: %s" %timestamp[:]) print("- sample_stage_x: %s" %sample_stage_x[:]) print("- sample_x_pos: %s" %sample_x_pos[:]) print("- sample_y_pos: %s" %sample_y_pos[:]) print("- sample_stage_y: %s" %sample_stage_y[:]) # Define data, grid, legend, and save the plot plt.plot(triggers[:], sample_stage_x[:], label='triggers vs. sample_stage_x') plt.plot(triggers[:], sample_stage_y[:], label='triggers vs. 
    plt.plot(triggers[:], sample_stage_y[:], label='triggers vs. sample_stage_y')
    plt.grid()
    plt.legend()
    plt.savefig(plt_file, dpi = (100))
else:
    print("Work in progress..")

#fh5py.close()
#f.close()

###Output
Available keys: <KeysViewHDF5 ['andor', 'dante', 'sample_motors', 'triggers', 'triggers_timestamp']>
- triggers: [   1    2    3 ... 2599 2600 2601]
- timestamp: [1.57496154e+09 1.57496154e+09 1.57496154e+09 ... 1.57496179e+09 1.57496179e+09 1.57496179e+09]
- sample_stage_x: [26200. 26250. 26300. ... 28600. 28650. 28700.]
- sample_x_pos: [0. 0. 0. ... 0. 0. 0.]
- sample_y_pos: [0. 0. 0. ... 0. 0. 0.]
- sample_stage_y: [25800. 25800. 25800. ... 28300. 28300. 28300.]

###Markdown
Transferring the result of the simulation
This code snippet is used as an example to:
- Simulate the processing of datasets from the volume space, and
- Trigger the data transfer in the back-up provider

###Code
if path.exists(plt_file):

    # Define the absolute path in the specific oneprovider
    replica = OZ_path + plt_file

    # Reading the local .PNG file
    im = Image.open(plt_file)
    byte_array = io.BytesIO()

    # Create the absolute path in the oneprovider and the file
    f1 = odfs.create(replica)
    f1 = odfs.openbin(replica, mode='w+')

    # Store bytes in the volume space of the os.environ['ONEPROVIDER_HOST'] provider
    im.save(byte_array, format='PNG')
    f1.write(byte_array.getvalue())

    # Close remote file
    f1.close()

    # Replicate a file specified by absolute path to a specified provider.
    # This operation is asynchronous and it can take a long time depending
    # on the size of the data to move.
    # If the path parameter specifies a folder, the entire folder is replicated
    # to the requested provider.
    #
    # API documentation here:
    # https://onedata.org/#/home/api/stable/oneprovider?anchor=operation/replicate_file

    REPLICA_ONEPROVIDER_HOST="oneprovider-pn-ceric.onezone-panosc.egi.eu"

    headers = {'X-Auth-Token': os.environ['ONECLIENT_ACCESS_TOKEN']}
    url = "https://%s/api/v3/oneprovider/replicas%s" %(REPLICA_ONEPROVIDER_HOST, replica)
    r = requests.get(url=url, headers=headers)
    print(r.json())
else:
    print("Nothing to do!")

###Output
[{'totalBlocksSize': 65616, 'providerId': '02f4a17f6010dd1d0d9a9705f696b66b', 'blocks': [[0, 65616]]}]

###Markdown
Local data (HDF5 Example)
This notebook walks through using Neuroglancer to view and interact with local data sources.
###Code
import neuroglancer
import h5py
import requests
import numpy as np
import itertools
from tqdm import tqdm
from IPython.display import display, HTML

# Download one of the CREMI datasets (https://cremi.org/)
url = "https://cremi.org/static/data/sample_A_20160501.hdf" # 175M
r = requests.get(url, stream=True)
with open("sampleA.hdf", "wb") as f:
    for data in tqdm(r.iter_content(chunk_size=2**20), unit='MB', mininterval=1,
                     total=int(r.headers.get('content-length', 0))//2**20):
        f.write(data)

import os
from urllib.parse import urlparse

import neuroglancer
from IPython.display import display, HTML

# Uncomment the following if you intend to run from a remote server:
# neuroglancer.set_server_bind_address('0.0.0.0')
neuroglancer.set_static_content_source(url="https://neuroglancer-demo.appspot.com/python")
viewer = neuroglancer.Viewer()

if "BINDER_URL" in os.environ:
    viewer_url = "https://{0}{1}proxy/{2}{3}".format(
        "hub.gke.mybinder.org",
        os.environ["JUPYTERHUB_SERVICE_PREFIX"],
        urlparse(viewer.get_viewer_url()).port,
        urlparse(viewer.get_viewer_url()).path)
else:
    viewer_url = viewer.get_viewer_url()
display(HTML('Neuroglancer link: <a href="{0}">{0}</a>'.format(viewer_url)))

# Open the HDF file and create local layers for image data and segmentation
h5f = h5py.File("sampleA.hdf", 'r')
with viewer.txn() as s:
    # Raw data
    s.layers['raw'] = neuroglancer.ImageLayer(
        source = neuroglancer.LocalVolume(data=h5f['volumes/raw'], voxel_size=(4, 4, 40))
    )
    # Segmentation data
    s.layers['neuron_ids'] = neuroglancer.SegmentationLayer(
        source = neuroglancer.LocalVolume(data=h5f['volumes/labels/neuron_ids'], voxel_size=(4, 4, 40))
    )

# Create and populate layers for pre-, post- and connectors
# Unique ID for neuroglancer objects
ngid = itertools.count(start=1)

(pre_sites, post_sites, connectors) = ([], [], [])
locations = []
#for location in h5f['annotations/locations']:
#    locations.append(np.flip(location))

# Result: a map from {id} -> {x,y,z}
locations = dict(zip(h5f['annotations/ids'], h5f['annotations/locations']))

for (pre, post) in h5f['annotations/presynaptic_site/partners']:
    pre_site = np.flip(locations[pre])
    post_site = np.flip(locations[post])

    pre_sites.append(neuroglancer.EllipsoidAnnotation(center=pre_site, radii=(40,40,40), id=next(ngid)))
    post_sites.append(neuroglancer.EllipsoidAnnotation(center=post_site, radii=(40,40,40), id=next(ngid)))
    connectors.append(neuroglancer.LineAnnotation(point_a=pre_site, point_b=post_site, id=next(ngid)))

with viewer.txn() as s:
    s.layers['connectors'] = neuroglancer.AnnotationLayer(
        voxel_size=(1,1,1),
        filter_by_segmentation=False,
        annotation_color='#ffff00',
        annotations=connectors,
    )
    s.layers['pre_sites'] = neuroglancer.AnnotationLayer(
        voxel_size=(1,1,1),
        filter_by_segmentation=False,
        annotation_color='#00ff00',
        annotations=pre_sites,
    )
    s.layers['post_sites'] = neuroglancer.AnnotationLayer(
        voxel_size=(1,1,1),
        filter_by_segmentation=False,
        annotation_color='#ff00ff',
        annotations=post_sites,
    )

###Output
_____no_output_____

###Markdown
| Author | Affiliation |
| ---- | ---- |
| [Giuseppe La Rocca](mailto:[email protected]) | EGI Foundation |
| [Enol Fernández](mailto:[email protected]) | EGI Foundation |
| [Marco De Simone](mailto:[email protected]) | CERIC-ERIC |
| [Michal Orzechowski](mailto:[email protected]) | CYFRONET |
| [Łukasz Opioła](mailto:[email protected]) | CYFRONET |
| [Bartosz Kryza](mailto:[email protected]) | CYFRONET |
| [Miroslav Ruda](mailto:[email protected]) | CESNET-MCC |
| [Andrei Kirushchanka](mailto:[email protected]) | CESNET-MCC |
| [Christos Kanellopoulos](mailto:[email protected]) | GEANT |

| Created | Last updated | License |
| ---- | ---- | ---- |
| 01 April 2020 | 13 May 2020 | Apache v2.0 |

---

About this notebook
- This notebook is used to demonstrate how a scientist from one of the PaNOSC RIs can use the resources provided by EGI to perform analysis on the data sets obtained during an experiment.
- The PaNOSC RI involved in this data transfer pilot is CERIC-ERIC from Trieste. For this pilot use case CERIC-ERIC provided access to hdf5 data sets.
- CESNET-MCC is the cloud provider of the EGI Federation supporting this pilot use case with computing and storage resources.
- To federate the hdf5 data sets, and enable data replication across different RI providers, the Onedata software stack will be used.
- The EGI Notebooks service, based on JupyterHub technology, is used as an agile environment to access and process data sets.
- Federated credentials released by UmbrellaID are used to access the services and resources of this pilot.
- To read and analyse hdf5 data sets we used the H5Glance library developed by the European-XFEL and the h5py Python library.

---

High-level architecture of the PaNOSC Data Transfer Pilot
![title](imgs/Architecture.png)

Install additional libraries
Libraries can also be installed from a Linux terminal

###Code
%pip install h5glance

###Output
Collecting h5glance
  Using cached h5glance-0.6-py3-none-any.whl (32 kB)
Collecting htmlgen
  Using cached htmlgen-1.2.2-py3-none-any.whl (44 kB)
Requirement already satisfied: h5py in /opt/conda/lib/python3.7/site-packages (from h5glance) (2.10.0)
Requirement already satisfied: numpy>=1.7 in /opt/conda/lib/python3.7/site-packages (from h5py->h5glance) (1.18.1)
Requirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from h5py->h5glance) (1.14.0)
Installing collected packages: htmlgen, h5glance
Successfully installed h5glance-0.6 htmlgen-1.2.2
Note: you may need to restart the kernel to use updated packages.

###Markdown
Import the necessary libraries

###Code
import h5py
from h5glance import H5Glance

# Import auxiliary libraries
import os, io
import requests
import matplotlib.pyplot as plt
from PIL import Image
import os.path
from os import path
import time

# Import additional Onedata library
from fs.onedatafs import OnedataFS

###Output
_____no_output_____

###Markdown
Get the .h5 datasets location in the shared volume space

###Code
# Set the env. variable with the default Oneprovider host
#ONEPROVIDER_HOST="cesnet-op.onezone-panosc.egi.eu"

# Listing all volume space
headers = {'X-Auth-Token': os.environ['ONECLIENT_ACCESS_TOKEN']}
url = "https://%s/api/v3/oneprovider/spaces/" %os.environ['ONEPROVIDER_HOST']
r = requests.get(url=url, headers=headers)

print("Response from the provider: %s" %os.environ['ONEPROVIDER_HOST'])
print(r.json())

spaceId = r.json()[0]['spaceId']
space_name = r.json()[0]['name']
#print("\nSpaceID = %s" %spaceId)
#print("Space Name = %s" %space_name)

# Create the path in OZ where the .h5 dataset is stored
#OZ_path = os.path.join('/', space_name, folder_name)
#print("\nDatasets path at: %s" % OZ_path)

###Output
Response from the provider: cesnet-op.onezone-panosc.egi.eu
[{'spaceId': '37b5b4f6a9df5fe38500273755b37996', 'name': 'PaNOSC'}]

###Markdown
Listing datasets in the volume space

###Code
# Create connection to ONEPROVIDER_HOST
odfs = OnedataFS(os.environ['ONEPROVIDER_HOST'],
                 os.environ['ONECLIENT_ACCESS_TOKEN'],
                 force_proxy_io=True)

folder_name="samples/twinmic/"

# Set the path in OZ where the .h5 datasets are stored
OZ_path = os.path.join('/', space_name, folder_name)
print("\nDatasets path: %s" % OZ_path)

# Open remote dir
s1 = odfs.opendir(OZ_path)

# List contents from remote dir
s1.listdir('')

###Output
Datasets path: /PaNOSC/samples/twinmic/

###Markdown
Reading the specific .h5 datasets

###Code
file_name = "twinmic_scan_41.h5"

h5file_name = os.path.join(OZ_path, file_name)
print("Loading datasets: [%s] from the volume space: [%s]" %(h5file_name,space_name) )

f = odfs.openbin(h5file_name, mode='r')

os.environ['H5PY_DEFAULT_READONLY'] = '1'
fh5py = h5py.File(f)
H5Glance(fh5py)

###Output
Loading datasets: [/PaNOSC/samples/twinmic/twinmic_scan_41.h5] from the volume space: [PaNOSC]

###Markdown
Plotting the datasets

###Code
# This code snippet simulates the processing of datasets read from the volume space.
# As a result of this simulation, the PNG file will be uploaded from the notebooks space
# into the default OneProvider (CESNET-MCC).
get_millisecs = lambda: int(round(time.time() * 1000))
get_millisecs()

print("Available keys: %s" % fh5py.keys())
plt_file="result_%s.png" %get_millisecs()

if ("triggers" in fh5py.keys()):
    # Getting datasets
    triggers = fh5py['triggers']
    timestamp = fh5py['triggers_timestamp']
    sample_stage_x = fh5py['sample_motors/sample_stage_x']
    sample_x_pos = fh5py['sample_motors/sample_x_pos']
    sample_y_pos = fh5py['sample_motors/sample_y_pos']
    sample_stage_y = fh5py['sample_motors/sample_stage_y']

    print("- triggers: %s" %triggers[:])
    print("- timestamp: %s" %timestamp[:])
    print("- sample_stage_x: %s" %sample_stage_x[:])
    print("- sample_x_pos: %s" %sample_x_pos[:])
    print("- sample_y_pos: %s" %sample_y_pos[:])
    print("- sample_stage_y: %s" %sample_stage_y[:])

    # Define data, grid, legend, and save the plot
    plt.plot(triggers[:], sample_stage_x[:], label='triggers vs. sample_stage_x')
    plt.plot(triggers[:], sample_stage_y[:], label='triggers vs. sample_stage_y')
    plt.grid()
    plt.legend()
    plt.savefig(plt_file, dpi = (100))
else:
    print("Work in progress..")

#fh5py.close()
#f.close()

###Output
Available keys: <KeysViewHDF5 ['andor', 'dante', 'sample_motors', 'triggers', 'triggers_timestamp']>
- triggers: [   1    2    3 ... 2599 2600 2601]
- timestamp: [1.57496154e+09 1.57496154e+09 1.57496154e+09 ... 1.57496179e+09 1.57496179e+09 1.57496179e+09]
- sample_stage_x: [26200. 26250. 26300. ... 28600. 28650. 28700.]
- sample_x_pos: [0. 0. 0. ... 0. 0. 0.]
- sample_y_pos: [0. 0. 0. ... 0. 0. 0.]
- sample_stage_y: [25800. 25800. 25800. ... 28300. 28300. 28300.]

###Markdown
Transferring the result of the simulation
This code snippet is used as an example to:
- Simulate the processing of datasets from the volume space, and
- Trigger the data transfer in the back-up provider

###Code
if path.exists(plt_file):

    # Define the path in the specific oneprovider where the results of the simulation are stored
    replica = OZ_path + plt_file

    # Reading the local .PNG file
    im = Image.open(plt_file)
    byte_array = io.BytesIO()

    # Create the absolute path in the oneprovider and the file
    f1 = odfs.create(replica)
    f1 = odfs.openbin(replica, mode='w+')

    # Store bytes in the volume space of the os.environ['ONEPROVIDER_HOST'] provider
    im.save(byte_array, format='PNG')
    f1.write(byte_array.getvalue())

    # Close remote file
    f1.close()

    # Replicate a file specified by absolute path to a specified provider.
    # This operation is asynchronous and it can take a long time depending
    # on the size of the data to move.
    # If the path parameter specifies a folder, the entire folder is replicated
    # to the requested provider.
    #
    # API documentation here:
    # https://onedata.org/#/home/api/stable/oneprovider?anchor=operation/replicate_file

    REPLICA_ONEPROVIDER_HOST="ceric-cache.onezone-panosc.egi.eu"

    # Create connection to the REPLICA_ONEPROVIDER_HOST
    odfs = OnedataFS(REPLICA_ONEPROVIDER_HOST,
                     os.environ['ONECLIENT_ACCESS_TOKEN'],
                     force_proxy_io=True)

    # Open remote dir
    s1 = odfs.opendir(OZ_path)

    headers = {'X-Auth-Token': os.environ['ONECLIENT_ACCESS_TOKEN']}
    url = "https://%s/api/v3/oneprovider/replicas%s" %(REPLICA_ONEPROVIDER_HOST, replica)
    #print(url)
    r = requests.get(url=url, headers=headers)

    if (r.status_code == 200):
        print("[%s] - The replication request was accepted!" %r.status_code)
        print("Replica file = %s" %replica)
        print(r.json())
    else:
        print(r.json())
else:
    print("Nothing to do!")

###Output
[200] - The replication request was accepted!
Replica file = /PaNOSC/samples/twinmic/result_1589441072907.png
[{'totalBlocksSize': 65616, 'providerId': '1c6d39371ecb7652f2fd143aabfeb7cfch4629', 'blocks': [[0, 65616]]}, {'totalBlocksSize': 65616, 'providerId': 'fa70d48fc7e2af88d68e30191d8950b9ch20d9', 'blocks': [[0, 65616]]}]

###Markdown
| Author | Affiliation |
| ---- | ---- |
| [Giuseppe La Rocca](mailto:[email protected]) | EGI Foundation |
| [Enol Fernández](mailto:[email protected]) | EGI Foundation |
| [Andrea Manzi](mailto:[email protected]) | EGI Foundation |
| [Marco De Simone](mailto:[email protected]) | CERIC-ERIC |
| [Michal Orzechowski](mailto:[email protected]) | CYFRONET |
| [Łukasz Opioła](mailto:[email protected]) | CYFRONET |
| [Bartosz Kryza](mailto:[email protected]) | CYFRONET |
| [Miroslav Ruda](mailto:[email protected]) | CESNET-MCC |
| [Andrei Kirushchanka](mailto:[email protected]) | CESNET-MCC |
| [Christos Kanellopoulos](mailto:[email protected]) | GEANT |

| Created | Last updated | License |
| ---- | ---- | ---- |
| 01 April 2020 | 02 Nov 2020 | Apache v2.0 |

---

About this notebook
- This notebook is used to demonstrate how a scientist from one of the PaNOSC RIs can use the resources provided by EGI to perform analysis on the data sets obtained during an experiment.
- The PaNOSC RI involved in this data transfer pilot is CERIC-ERIC from Trieste. For this pilot use case CERIC-ERIC provided access to hdf5 data sets.
- CESNET-MCC is the cloud provider of the EGI Federation supporting this pilot use case with computing and storage resources.
- To federate the hdf5 data sets, and enable data replication across different RI providers, the Onedata software stack will be used.- The EGI Notebooks service, based on JupyterHub technology, is used as an agile environment to access and process data sets.- Federated credentials released by UmbrellaID are used to access the services and resources of this pilot.- To read and analyse hdf5 data sets we used the H5Glance library developed by the European-XFEL and the h5py Python library.--- Architecture of the PaNOSC Data Transfer Pilot (WP6) Import the necessary libraries ###Code import h5py from h5glance import H5Glance # Import auxiliary libraries import os, io import requests import matplotlib.pyplot as plt from PIL import Image import os.path from os import path import time # Import additional Onedata library from fs.onedatafs import OnedataFS ###Output _____no_output_____ ###Markdown Get the .h5 dataset location in the shared volume space ###Code # List all volume spaces headers = {'X-Auth-Token': os.environ['ONECLIENT_ACCESS_TOKEN']} url = "https://%s/api/v3/oneprovider/spaces/" %os.environ['ONEPROVIDER_HOST'] r = requests.get(url=url, headers=headers) print("Response from the provider: %s" %os.environ['ONEPROVIDER_HOST']) print(r.json()) spaceId = r.json()[1]['spaceId'] space_name = r.json()[1]['name'] # Select the space to use (index 1 is the PaNOSC-WP6 space, as the output below shows) print("\nSpaceID = %s" %spaceId) print("Space Name = %s" %space_name) ###Output Response from the provider: cesnet-op.onezone-panosc.egi.eu [{'spaceId': '68eb4419ef3e5c6a9f4269d1ee1fbcb4ch3eee', 'name': 'CESNET-space'}, {'spaceId': 'd5296fa3a10e921a985f23777efdaa87chc36d', 'name': 'PaNOSC-WP6'}] SpaceID = d5296fa3a10e921a985f23777efdaa87chc36d Space Name = PaNOSC-WP6 ###Markdown Listing available datasets in the PaNOSC space ###Code # Create connection to ONEPROVIDER_HOST odfs = OnedataFS(os.environ['ONEPROVIDER_HOST'], os.environ['ONECLIENT_ACCESS_TOKEN'], force_proxy_io=True) folder_name = "samples/twinmic/" # Set the path in OZ where the .h5 dataset is stored OZ_path = os.path.join('/', space_name, folder_name) print("\nDatasets path: %s" % OZ_path) # Open remote dir s1 = odfs.opendir(OZ_path) # List contents from remote dir s1.listdir('') ###Output Datasets path: /PaNOSC-WP6/samples/twinmic/ ###Markdown Reading the specific .h5 dataset from the PaNOSC space ###Code file_name = "twinmic_scan_41.h5" h5file_name = os.path.join(OZ_path, file_name) print("Loading datasets: [%s] from the volume space: [%s]" %(h5file_name,space_name) ) %time f = odfs.openbin(h5file_name, mode='r') os.environ['H5PY_DEFAULT_READONLY'] = '1' fh5py = h5py.File(f) H5Glance(fh5py) ###Output Loading datasets: [/PaNOSC-WP6/samples/twinmic/twinmic_scan_41.h5] from the volume space: [PaNOSC-WP6] CPU times: user 6 µs, sys: 1e+03 ns, total: 7 µs Wall time: 15.7 µs ###Markdown Analyse the dataset and plot results ###Code # This code snippet simulates the processing of datasets read from the volume space. # As a result of this simulation, the PNG file will be uploaded from the notebooks space # in the default OneProvider (CESNET-MCC).
get_millisecs = lambda: int(round(time.time() * 1000)) get_millisecs() print("Available keys: %s" % fh5py.keys()) plt_file="result_%s.png" %get_millisecs() if ("triggers" in fh5py.keys()): # Getting datasets triggers = fh5py['triggers'] timestamp = fh5py['triggers_timestamp'] sample_stage_x = fh5py['sample_motors/sample_stage_x'] sample_x_pos = fh5py['sample_motors/sample_x_pos'] sample_y_pos = fh5py['sample_motors/sample_y_pos'] sample_stage_y = fh5py['sample_motors/sample_stage_y'] print("- triggers: %s" %triggers[:]) print("- timestamp: %s" %timestamp[:]) print("- sample_stage_x: %s" %sample_stage_x[:]) print("- sample_x_pos: %s" %sample_x_pos[:]) print("- sample_y_pos: %s" %sample_y_pos[:]) print("- sample_stage_y: %s" %sample_stage_y[:]) # Define data, grid, legend, and save the plot plt.plot(triggers[:], sample_stage_x[:], label='triggers vs. sample_stage_x') plt.plot(triggers[:], sample_stage_y[:], label='triggers vs. sample_stage_y') plt.grid() plt.legend() plt.savefig(plt_file, dpi = (100)) else: print ("Work in progress..") #fh5py.close() #f.close() ###Output Available keys: <KeysViewHDF5 ['andor', 'dante', 'sample_motors', 'triggers', 'triggers_timestamp']> - triggers: [ 1 2 3 ... 2599 2600 2601] - timestamp: [1.57496154e+09 1.57496154e+09 1.57496154e+09 ... 1.57496179e+09 1.57496179e+09 1.57496179e+09] - sample_stage_x: [26200. 26250. 26300. ... 28600. 28650. 28700.] - sample_x_pos: [0. 0. 0. ... 0. 0. 0.] - sample_y_pos: [0. 0. 0. ... 0. 0. 0.] - sample_stage_y: [25800. 25800. 25800. ... 28300. 28300. 28300.] ###Markdown Transferring results back to the PaNOSC spaceThis code snippet is used as an example to:- Simulate the processing of datasets from the volume space, and - Trigger the data transfer in the back-up provider ###Code if path.exists(plt_file): # Reading local .PNG file im = Image.open(plt_file) byte_array = io.BytesIO() # Define the path in the specific oneprovider where the results of the simulation are stored replica = OZ_path + plt_file # Create the absolute path in the oneprovider and the file f1 = odfs.create(replica) f1 = odfs.openbin(replica, mode='w+') # Store bytes in the volume space of the os.environ['ONEPROVIDER_HOST'] provider im.save(byte_array, format='PNG') f1.write(byte_array.getvalue()) # Close remote file f1.close() # Replicate a file specified by absolute path to a specified provider. # This operation is asynchronous and it can take a long time depending # on the size of the data to move. # If the path parameter specifies a folder, the entire folder is replicated # to the requested provider.
# # API documentation here: # https://onedata.org/#/home/api/stable/oneprovider?anchor=operation/replicate_file REPLICA_ONEPROVIDER_HOST = "op-desy.onezone-panosc.egi.eu" # Retrieve the REPLICA_ONEPROVIDER_HOST id headers = {'X-Auth-Token': os.environ['ONECLIENT_ACCESS_TOKEN']} url = "https://%s/api/v3/oneprovider/configuration" %REPLICA_ONEPROVIDER_HOST r = requests.get(url=url, headers=headers) providerId = r.json()['providerId'] # Replicate file by path to the REPLICA_ONEPROVIDER_HOST headers = {'X-Auth-Token': os.environ['ONECLIENT_ACCESS_TOKEN']} url = "https://%s/api/v3/oneprovider/replicas%s?provider_id=%s" %(os.environ['ONEPROVIDER_HOST'], replica, providerId) r = requests.post(url=url, headers=headers) print(r.json()) transferId = r.json()['transferId'] # Get status of the specific data transfer process headers = {'X-Auth-Token': os.environ['ONECLIENT_ACCESS_TOKEN']} url = "https://%s/api/v3/oneprovider/transfers/%s" %(os.environ['ONEPROVIDER_HOST'], transferId) r = requests.get(url=url, headers=headers) if (r.json()['transferStatus'] == "completed"): print("Data transfer process has COMPLETED!") print("- Status code \t= %s" %r.status_code) print("- TransferId \t= %s" %transferId) print("- Replica file \t= %s" %replica) print("- Replica host \t= %s" %REPLICA_ONEPROVIDER_HOST) print("") print(r.json()) else: print("\n[%s] - Transfer request SUBMITTED" %r.status_code) print(r.json()) else: print("Nothing to do!") ###Output {'transferId': '3a07dbb530344c25937480058fcd1113ch7a58'} [200] - Transfer request SUBMITTED {'userId': 'cc9d00d334c27e9eb55fd44a9abe91e2', 'transferStatus': 'scheduled', 'targetProviderId': 'ba3a0df5f932a846997c5f77ef4c83aecheda2', 'startTime': 0, 'scheduleTime': 1591622922, 'rerunId': None, 'replicationStatus': 'scheduled', 'replicatingProviderId': 'ba3a0df5f932a846997c5f77ef4c83aecheda2', 'replicaEvictionStatus': 'skipped', 'path': '/PaNOSC-WP6/samples/twinmic/result_1591622918738.png', 'mthHist': {}, 'minHist': {}, 'lastUpdate': 0, 'invalidationStatus': 'skipped', 'hrHist': {}, 'finishTime': 0, 'filesTransferred': 0, 'filesToProcess': 0, 'filesReplicated': 0, 'filesProcessed': 0, 'filesInvalidated': 0, 'fileReplicasEvicted': 0, 'fileId': '000000000052CFEE67756964233432383633663234623463643966363133613734623536343861623037666233636837363931236435323936666133613130653932316139383566323337373765666461613837636863333664', 'failedFiles': 0, 'evictingProviderId': None, 'dyHist': {}, 'callback': None, 'bytesTransferred': 0, 'bytesReplicated': 0} ###Markdown Working with HDF5 FilesIn this notebook we show how to load the North Su HDF5 file and plot a single 30-s trace. Setup ###Code import matplotlib as mpl mpl.rc("savefig", dpi=300) mpl.rcParams['axes.linewidth'] = 0.5 mpl.rcParams['font.size'] = 7.5 %matplotlib inline %config InlineBackend.figure_format = 'svg' ###Output _____no_output_____ ###Markdown Load HDF5 File ###Code import h5py f = h5py.File('suzette_2015.h5', 'r') f.items() ###Output _____no_output_____ ###Markdown Plot Raw A/D Trace ###Code import numpy as np import matplotlib.pyplot as plt gain08_ad = f['gain08_ad'][1,:] fig = plt.figure(frameon = False, figsize=(7,4.5)) ax = plt.subplot() pl1 = plt.plot(gain08_ad) plt.xlim([0, len(gain08_ad)]) ax.set_xlabel('Sample Number') ax.set_ylabel('A/D Value'); ###Output _____no_output_____ ###Markdown Working with HDF5 FilesIn this notebook we show how to load an HDF5 file and plot a single gather. 
Setup ###Code import matplotlib as mpl mpl.rc("savefig", dpi=300) mpl.rcParams['axes.linewidth'] = 0.5 mpl.rcParams['font.size'] = 7.5 %matplotlib inline %config InlineBackend.figure_format = 'svg' ###Output _____no_output_____ ###Markdown Load HDF5 File ###Code import h5py f = h5py.File('seq024shot2102.h5', 'r') f.items() ###Output _____no_output_____ ###Markdown Plot Raw Traces ###Code import numpy as np import matplotlib.pyplot as plt traces = f['traces'][:] fs = 2000.0 # sample rate (Hz) nsamp = traces.shape[1] # number of samples in each trace fig = plt.figure(frameon = False, figsize=(7,3.5)) ax = plt.subplot() im1 = plt.imshow(traces.transpose(), aspect='auto', vmin=-10, vmax=10, cmap='bwr', interpolation='nearest', extent=[0, 240, nsamp/fs, 0]) plt.ylim([4, 0]) plt.xticks(np.arange(0,241,20)) ax.set_xlabel('Channel Number') ax.set_ylabel('Time (s)') cbar = plt.colorbar() cbar.set_label('Pressure (Pa)') for i in np.arange(0.5, 4, 0.5): plt.plot([0, 240], [i, i], color='black', linestyle=':', linewidth=0.25); ###Output _____no_output_____
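###Markdown A small hygiene step (an addition, not part of the original notebook): since `traces` was copied into a NumPy array with `f['traces'][:]`, the HDF5 file handle can be released once plotting is done; a minimal sketch: ###Code # Close the HDF5 file now that the trace data has been copied into memory f.close() ###Output _____no_output_____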
dev_nb/x_003a_rect_images.ipynb
###Markdown Caltech 101 Create validation set The first step will be to create a dataset from our files. We need to set aside a portion of the files to be used as our validation set. We will do this randomly by setting a percentage apart, in this case 0.2. ###Code classes = ["airplanes", "Motorbikes", "BACKGROUND_Google", "Faces", "watch", "Leopards", "bonsai", "car_side", "ketch", "chandelier", "hawksbill", "grand_piano", "brain", "butterfly", "helicopter", "menorah", "trilobite", "starfish", "kangaroo", "sunflower", "ewer", "buddha", "scorpion", "revolver", "laptop", "ibis", "llama", "minaret", "umbrella", "electric_guitar", "crab", "crayfish",] np.random.seed(42) train_ds,valid_ds = FilesDataset.from_folder(PATH, test_pct=0.2) x = train_ds[1114][0] def xi(): return Image(train_ds[1114][0]) classes = train_ds.classes c = len(classes) len(train_ds),len(valid_ds),c ###Output _____no_output_____ ###Markdown Rectangular affine fix ###Code show_image(x, figsize=(6,3), hide_axis=False) print(x.shape) rot_m = np.array(rotate.func(40.)); rot_m rotate(xi(), 40.).show(figsize=(6,3)) #export def affine_mult(c,m): if m is None: return c size = c.size() _,h,w,_ = size m[0,1] *= h/w m[1,0] *= w/h c = c.view(-1,2) c = torch.addmm(m[:2,2], c, m[:2,:2].t()) return c.view(size) nb_002.affine_mult = affine_mult rotate(xi(), 40.).show(figsize=(6,3)) ###Output _____no_output_____ ###Markdown Crop with padding Now we are going to add padding or crop automatically according to a desired final size. The best way to do this is to integrate both transforms into the same function. We will do the padding necessary to achieve a _size x size_ (square) image. If _size_ is greater than either the height or width dimension of our image, we know we will need to add padding. If _size_ is smaller than either the _height_ or _width_ dimension of our image, we will have to crop. We might have to do one, the other, both or neither. In this example we are only adding padding since both our _height_ and _width_ are smaller than 300, our desired dimension for the new _height_ and _width_.As is the case with our original function, we can add a *row_pct* or *col_pct* to our transform to focus on different parts of the image instead of the center, which is our default.**Crop_pad**Crop_pad crops and pads our image to create an output image according to a given target size._Parameters_1. **Size** The target size of each side in pixels. If only one number *s* is specified, the image is made square with dimensions *s* \* *s*. Domain: Positive integers. 2. **Padding_mode** The type of padding used in the transform. Domain: 'reflect', 'zeros', 'border' 3. **Row_pct** Determines where to cut our image vertically at the bottom and top when cropping (which rows are left out). If <0.5, more rows will be cut at the top than at the bottom and vice versa (varies linearly). Domain: Real numbers between 0 and 1. 4. **Col_pct** Determines where to cut our image horizontally on the left and right when cropping (which columns are left out). If <0.5, more columns will be cut on the left than on the right and vice versa (varies linearly). Domain: Real numbers between 0 and 1.Note: While experimenting, take into account that this example image contains a thin black border in the original. This affects our transforms and can be seen when we use reflect padding.
###Code class TfmCrop(TfmPixel): order=99 @TfmCrop def crop_pad(x, size, padding_mode='reflect', row_pct:uniform = 0.5, col_pct:uniform = 0.5): size = listify(size,2) rows,cols = size if x.size(1)<rows or x.size(2)<cols: row_pad = max((rows-x.size(1)+1)//2, 0) col_pad = max((cols-x.size(2)+1)//2, 0) x = F.pad(x[None], (col_pad,col_pad,row_pad,row_pad), mode=padding_mode)[0] row = int((x.size(1)-rows+1)*row_pct) col = int((x.size(2)-cols+1)*col_pct) x = x[:, row:row+rows, col:col+cols] return x.contiguous() # without this, get NaN later - don't know why crop_pad(xi(), 300, row_pct=0.,col_pct=0., padding_mode='constant').show() crop_pad(xi(), 150).show() crop_pad(xi(), 150, row_pct=0.,col_pct=0.98, padding_mode='constant').show() tfm = crop_pad(size=100, row_pct=(0,1.), col_pct=(0,1.)) _,axes = plt.subplots(1,4, figsize=(12,3)) for ax in axes.flat: tfm.resolve() tfm(xi()).show(ax) ###Output _____no_output_____ ###Markdown Combine crop/resize Next, we are going to combine our cropping and padding with the resize operation. In other words, we will get a picture, and crop/pad it in such a way that we get our desired size. It is similar to our previous transform, only this time the final dimensions don't have to be square. This gives us more flexibility since our network architecture might take rectangular pictures as input.First, we will get the target dimensions. For this we have built *get_crop_target*. This function takes three arguments: a target_px, a target_aspect and a multiple. *target_px* is our base dimension, *target_aspect* is the relation between width and height, and _mult_ is the number our dimensions need to be a multiple of. To understand this better, let's take our example where our values are *target_px*=220, *target_aspect*=2., _mult_=32 (default). In plain terms, we are telling our function: return the dimensions of an image with an area of roughly 220\*220, a width twice as long as the height, and a height and width that are multiples of 32. We are now going to transform our image to our desired dimensions by using crop or padding. Before we crop or pad, we will make an intermediate transform that will allow us to later get our output image with the desired dimensions. Let's call our initial dimensions h_i, w_i, our intermediate dimensions h_m, w_m and our output dimensions h_o, w_o. Our objective will be to get our output image by either cropping or padding, but not both. We will first enlarge or reduce our original image. *get_resize_target* will enlarge or reduce our input image (keeping the shape or h_i/w_i constant) until one of the dimensions is equal to the corresponding final output dimension (i.e. h_m=h_o or w_m=w_o). But how does it know which dimension to equate? Let's think about this in detail. If we intend to crop, our intermediate image's area has to be larger than our output image (since we are going to crop out some pixels) and if we intend to pad, our intermediate image's area has to be smaller than our output image (since we will add some pixels). This means that the dimension we choose to equate will depend on the relationship between the ratios h_i/h_o and w_i/w_o. If we want to **crop** we will want to equate the dimension with **the smallest ratio** since that would mean that (h_m, w_m) >= (h_o, w_o) which is exactly what we want (a larger area).
Conversely if we want to **pad**, we will equate the dimension with **the largest ratio** since that will guarantee that (h_m, w_m) <= (h_o, w_o) (a smaller area).As an example say we have our image with dimensions h_i = 192 and w_i = 128 and our target dimensions are h_o=160 w_o=320. That is, we have to turn a vertical rectangle into a horizontal rectangle. We can do this in two ways:1. Padding the borders so we make our image wider2. Cropping the top and bottom so we squash our image and make it widerIf we intend to crop, our intermediate dimensions will be (h_m, w_m) = (480, 320). If we intend to pad, (h_m, w_m) = (160, 107). Note that 480/320 ≈ 160/107 ≈ 192/128, that is, our intermediate image's aspect ratio is always equal to our input image's aspect ratio. ###Code #export def round_multiple(x, mult): return (int(x/mult+0.5)*mult) def get_crop_target(target_px, target_aspect=None, mult=32): target_px = listify(target_px, 2) target_r,target_c = target_px if target_aspect: target_r = math.sqrt(target_r*target_c/target_aspect) target_c = target_r*target_aspect return round_multiple(target_r,mult),round_multiple(target_c,mult) get_crop_target(220) get_crop_target((220,110)) crop_target = get_crop_target(220, 2.); target_r,target_c = crop_target crop_target, target_r*target_c _,r,c = x.shape; x.shape #export @partial(Transform, order=99) def crop_pad(img, size=None, mult=32, padding_mode=None, row_pct:uniform = 0.5, col_pct:uniform = 0.5): aspect = img.aspect if hasattr(img, 'aspect') else 1. if not size and hasattr(img, 'size'): size = img.size if not padding_mode: if hasattr(img, 'sample_kwargs') and ('padding_mode' in img.sample_kwargs): padding_mode = img.sample_kwargs['padding_mode'] else: padding_mode='reflect' if padding_mode=='zeros': padding_mode='constant' rows,cols = get_crop_target(size, aspect, mult=mult) x = img.px if x.size(1)<rows or x.size(2)<cols: row_pad = max((rows-x.size(1)+1)//2, 0) col_pad = max((cols-x.size(2)+1)//2, 0) x = F.pad(x[None], (col_pad,col_pad,row_pad,row_pad), mode=padding_mode)[0] row = int((x.size(1)-rows+1)*row_pct) col = int((x.size(2)-cols+1)*col_pct) x = x[:, row:row+rows, col:col+cols] img.px = x.contiguous() # without this, get NaN later - don't know why return img img = xi() img.aspect = 2 img = crop_pad(img, 220) img.show(figsize=(9,3)) img.shape ###Output _____no_output_____ ###Markdown We are now going to transform our image to our desired dimensions by using crop or padding. Before we crop or pad, we will make an intermediate transform that will allow us to later get our output image with the desired dimensions. Let's call our initial dimensions h_i, w_i, our intermediate dimensions h_m, w_m and our output dimensions h_o, w_o.Our objective will be to get our output image by cropping or padding but not both. To achieve this, we will first enlarge or reduce our original image. **get_resize_target will enlarge or reduce our input image (keeping the shape or h_i/w_i constant) until one of the dimensions is equal to the corresponding final output dimension (i.e. h_m=h_o or w_m=w_o)**. But how does it know which dimension to equate? We can figure this out intuitively. If we intend to crop, our intermediate image's area has to be larger than our output image (since we are going to crop out some pixels) and if we intend to pad, our intermediate image's area has to be smaller than our output image (since we will add some pixels). This means that the dimension we choose to equate will depend on the relationship between the ratios h_i/h_o and w_i/w_o. If we want to **crop** we will want to equate the dimension with **the smallest ratio** since that would mean that (h_m, w_m) >= (h_o, w_o) which is exactly what we want (a larger area). Conversely if we want to **pad**, we will equate the dimension with **the largest ratio** since that will guarantee that (h_m, w_m) <= (h_o, w_o) (a smaller area).As an example say we have our image with dimensions h_i = 192 and w_i = 128 and our target dimensions are h_o=160 w_o=320. That is, we have to turn a vertical rectangle into a horizontal rectangle. We can do this in two ways:1. Padding the borders so we make our image wider2. Cropping the top and bottom so we squash our image and make it widerIf we intend to crop, our intermediate dimensions will be (h_m, w_m) = (480, 320). If we intend to pad, (h_m, w_m) = (160, 107). Note that 480/320 ≈ 160/107 ≈ 192/128. ###Code r_ratio = r/target_r c_ratio = c/target_c # min -> crop; max -> pad ratio = max(r_ratio,c_ratio) r_ratio,c_ratio,ratio r2,c2 = round(r/ratio),round(c/ratio); r2,c2 #export def get_resize_target(img, crop_target, do_crop=False): if crop_target is None: return None ch,r,c = img.shape target_r,target_c = crop_target ratio = (min if do_crop else max)(r/target_r, c/target_c) return ch,round(r/ratio),round(c/ratio) get_resize_target(x, crop_target, False) get_resize_target(x, crop_target, True) #export @partial(Transform, order=TfmAffine.order-2) def resize_image(x, *args, **kwargs): return x.resize(*args, **kwargs) def _resize(self, size=None, do_crop=False, mult=32): assert self._flow is None if not size and hasattr(self, 'size'): size = self.size aspect = self.aspect if hasattr(self, 'aspect') else None crop_target = get_crop_target(size, aspect, mult=mult) target = get_resize_target(self, crop_target, do_crop) self.flow = affine_grid(target) return self Image.resize=_resize img = xi() img.aspect = 2 img.resize(220) img.show(figsize=(9,3)) img.shape img = xi() img.aspect = 2 img.resize(220, do_crop=True) img.show(figsize=(9,3)) img.shape #export def is_listy(x)->bool: return isinstance(x, (tuple,list)) def apply_tfms(tfms, x, do_resolve=True, xtra=None, aspect=None, size=None, padding_mode='reflect', **kwargs): if not tfms: return x if not xtra: xtra={} tfms = sorted(listify(tfms), key=lambda o: o.tfm.order) if do_resolve: resolve_tfms(tfms) x = Image(x.clone()) x.set_sample(padding_mode=padding_mode, **kwargs) x.aspect = aspect x.size = size for tfm in tfms: if tfm.tfm in xtra: x = tfm(x, **xtra[tfm.tfm]) x = tfm(x) return x.px nb_002.apply_tfms = apply_tfms import nb_002b nb_002b.apply_tfms = apply_tfms tfms = [resize_image(size=crop_target), rotate(degrees=(40.,40.))] img = apply_tfms(tfms, x) show_image(img, figsize=(6,3)) crop_target,img.shape tfms = [resize_image(size=crop_target, do_crop=True), rotate(degrees=(40.,40.))] img = apply_tfms(tfms, x, aspect=2) show_image(img, figsize=(6,3)) img.shape tfms = [resize_image(size=220), rotate(degrees=(40.,40.))] img = apply_tfms(tfms, x, aspect=2) show_image(img, figsize=(6,3)) get_crop_target(220, 2),img.shape tfms = [rotate(degrees=(40.,40.)), crop_pad(size=220)] img = apply_tfms(tfms, x, aspect=2) show_image(img, figsize=(6,3)) img.shape tfms = [rotate(degrees=(40.,40.)), resize_image(), crop_pad()] img = apply_tfms(tfms, x, aspect=2, size=220) show_image(img, figsize=(6,3)) get_crop_target(220,2), img.shape ###Output _____no_output_____ ###Markdown 1 ###Code def resize_crop(size=None, do_crop=False, mult=32, rand_crop=False): crop_kw = {'row_pct':(0,1.),'col_pct':(0,1.)} if rand_crop
else {} return [resize_image(size=size, do_crop=do_crop, mult=mult), crop_pad(size=size, mult=mult, **crop_kw)] tfms = [rotate(degrees=(40.,40.)), *resize_crop()] img = apply_tfms(tfms, x, aspect=2, size=220) show_image(img, figsize=(6,3)) get_crop_target(220,2), img.shape tfms = [rotate(degrees=(40.,40.)), *resize_crop(do_crop=True)] img = apply_tfms(tfms, x, size=220, aspect=2) show_image(img, figsize=(6,3)) img.shape tfms = [rotate(degrees=(40.,40.)), *resize_crop(do_crop=False)] img = apply_tfms(tfms, x, size=220, aspect=2, padding_mode='zeros') show_image(img, figsize=(6,3)) img.shape ###Output _____no_output_____ ###Markdown Fit Let's see how our transforms look for different values of zoom, rotate and crop_pad. Transform ###Code #export def rand_zoom(*args, **kwargs): return zoom(*args, row_pct=(0,1), col_pct=(0,1), **kwargs) def rand_crop(*args, **kwargs): return crop_pad(*args, row_pct=(0,1), col_pct=(0,1), **kwargs) def zoom_crop(scale, do_rand=False, p=1.0): zoom_fn = rand_zoom if do_rand else zoom crop_fn = rand_crop if do_rand else crop_pad return [zoom_fn(scale=scale, p=p), crop_fn()] tfms = [ rotate(degrees=(-20,20.)), rand_zoom(scale=(1.,1.95)), *resize_crop(size=100, rand_crop=True, do_crop=False) ] _,axes = plt.subplots(1,4, figsize=(12,3)) for ax in axes.flat: show_image(apply_tfms(tfms, x, padding_mode='zeros'), ax) tfms = [ rotate(degrees=(-20,20.)), rand_zoom(scale=(1.,1.95)), *resize_crop(size=100, rand_crop=True, do_crop=True) ] _,axes = plt.subplots(1,4, figsize=(12,3)) for ax in axes.flat: show_image(apply_tfms(tfms, x), ax) ###Output _____no_output_____ ###Markdown Fit Finally, with our choice of transforms and parameters we are going to fit our Darknet model and check our results. To fit our model we will need to resize our images to have the same size so we can feed them in batches to our model. We face the same decisions as before. In this case we chose to pad our images (since in \_apply_affine do_crop default is False). If we wanted to crop instead, we can easily add do_crop=True to train_tds. We also decided to make our images square, with dimension size x size. If we wanted a rectangle with width to height ratio *a* we could have added aspect=*a* to train_ds. 
###Code [PIL.Image.open(fn).size for fn in np.random.choice(train_ds.x, 5)] size = 150 train_tfms = [ rotate(degrees=(-20,20.)), rand_zoom(scale=(1.,1.5)), *resize_crop(size=size, rand_crop=True, do_crop=True) ] valid_tfms = [ *resize_crop(size=size, rand_crop=False, do_crop=True) ] _,axes = plt.subplots(1,4, figsize=(10,5)) for ax in axes.flat: show_image(apply_tfms(train_tfms, x), ax) show_image(apply_tfms(valid_tfms, x, size=size)) bs = 128 valid_tds = DatasetTfm(valid_ds, valid_tfms, padding_mode='zeros') data = DataBunch(valid_tds, valid_tds, bs=bs, num_workers=0) xb,yb = next(iter(data.train_dl)) b = xb.transpose(1,0).reshape(3,-1) data_mean=b.mean(1).cpu() data_std=b.std(1).cpu() data_mean,data_std show_image_batch(data.train_dl, train_ds.classes, 4) valid_tds = DatasetTfm(valid_ds, valid_tfms, padding_mode='zeros') train_tds = DatasetTfm(train_ds, train_tfms, padding_mode='zeros') norm,denorm = normalize_funcs(data_mean,data_std) data = DataBunch(train_tds, valid_tds, bs=bs, num_workers=12, tfms=norm) len(data.train_dl),len(data.valid_dl) model = Darknet([1, 2, 4, 4, 2], num_classes=c, nf=16) learn = Learner(data, model) opt_fn = partial(optim.SGD, momentum=0.9) learn.fit(1, 0.1, opt_fn=opt_fn) learn.fit(1, 0.2, opt_fn=opt_fn) learn.fit(5, 0.4, opt_fn=opt_fn) learn.fit(5, 0.1, opt_fn=opt_fn) learn.fit(5, 0.01, opt_fn=opt_fn) ###Output _____no_output_____
docs/source/include/notebooks/database creation.ipynb
###Markdown Database creation from GFFs downloaded from pseudomonas.com ###Code from gffutils import create_db ifile="/home/grotec/repos/SBW25DB/data/pseudomonas.com/Pseudomonas_fluorescens_SBW25_116.gff" db = create_db(ifile, ":memory:", id_spec="ID", force=True, gtf_gene_key="gene_id", gtf_transcript_key="transcript_id", verbose=True, checklines=0, merge_strategy='error', _keep_tempfiles=True, ) db.conn() db['gene1458068'] list(db.all_features()) import sqlite3 connection = sqlite3.connect("/home/grotec/repos/SBW25DB/sqlite/Pseudomonas_fluorescens_SBW25_116_annotations.sqlite.db") cursor = connection.cursor() # Note: ".tables" is a sqlite3 CLI dot-command, not SQL; query sqlite_master instead cursor.execute("select name from sqlite_master where type='table';") tables=cursor.fetchall() tables # SQLite has no information_schema; PRAGMA table_info lists the columns of a table cursor.execute("PRAGMA table_info('features');") cursor.fetchall()[1] %load_ext sql %sql sqlite:////home/grotec/repos/SBW25DB/sqlite/Pseudomonas_fluorescens_SBW25_116_annotations.sqlite.db %sql .headers on %%sql mode csv ###Output * sqlite:////home/grotec/repos/SBW25DB/sqlite/Pseudomonas_fluorescens_SBW25_116_annotations.sqlite.db (sqlite3.OperationalError) near "mode": syntax error [SQL: mode csv] (Background on this error at: http://sqlalche.me/e/e3q8)
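###Markdown A short note (an addition, not part of the original notebook): `mode csv` and `.headers on` are sqlite3 command-line dot-commands, not SQL, which is why the `%%sql` cell above raises an OperationalError. As an alternative sketch, the `features` table created by gffutils can be pulled straight into pandas over the same connection: ###Code import pandas as pd # Load a sample of the gffutils 'features' table into a DataFrame for inspection features = pd.read_sql("select * from features limit 5;", connection) features ###Output _____no_output_____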
basic/basic.ipynb
###Markdown Basic Exercises 1. Read an arbitrary string (xxx) from standard input and print "Hello, xxx!". 2. Read an arbitrary integer from standard input and print its sum, difference, product, and quotient with 100. 3. Create a list of the numbers 1 to 50 and print its maximum, minimum, sum, and mean. 4. Append a list of the numbers 51 to 100 to the list created in exercise 3. Also, print only the even elements between 30 and 70 of that list. 5. Create a dictionary with the course participants' family names as keys and their school years as values, and print every element using a for loop. 6. For the numbers 1 to 100, print "Fizz" if the number is divisible by 3, "Buzz" if it is divisible by 5, "FizzBuzz" if it is divisible by both, and the number itself otherwise. Example output```bash1, 2, "Fizz", 4, "Buzz", "Fizz", ..., 14, "FizzBuzz", 16, ...``` 7. Implement a function that computes the greatest common divisor of two natural numbers. 8. Read `address-tab.txt` and print only the first 5 lines. Also, convert the tabs to spaces and write the result to `address-space.txt`. 9. Compute an approximate value of pi using the Monte Carlo method (10,000 points). Use `NumPy`.> Reference: [NumPy official site](https://docs.scipy.org/doc/numpy/) 10. Plot the data **(freq, spec)** obtained by running the cell below using `Matplotlib`. Include a title, axis labels, and a legend.> Reference: [Matplotlib official site](https://matplotlib.org/) ###Code import numpy as np SAMP_NUM = 256 SAMP_INT = 0.01 f1, f2 = 10, 20 t = np.arange(0, SAMP_NUM * SAMP_INT, SAMP_INT) signal = np.sin(2*np.pi*f1*t) + np.sin(2*np.pi*f2*t) + np.random.randn(SAMP_NUM) freq = np.linspace(0, 1.0/SAMP_INT, SAMP_NUM) spec = np.abs(np.fft.fft(signal)) ###Output _____no_output_____
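###Markdown Possible solution sketches for exercises 9 and 10 (these cells are additions, not part of the original exercise sheet; variable names are illustrative). For exercise 9, we sample 10,000 uniform points in the unit square and estimate pi from the fraction that falls inside the quarter circle; for exercise 10, we plot the spectrum computed in the cell above with a title, labels, and a legend. ###Code # Exercise 9: Monte Carlo estimate of pi with NumPy points = np.random.rand(10000, 2) inside = (points**2).sum(axis=1) <= 1.0 pi_approx = 4 * inside.mean() print(pi_approx) ###Output _____no_output_____ ###Code # Exercise 10: plot the amplitude spectrum (freq, spec) with Matplotlib import matplotlib.pyplot as plt plt.plot(freq, spec, label='amplitude spectrum') plt.title('FFT of the sampled signal') plt.xlabel('Frequency (Hz)') plt.ylabel('|FFT|') plt.legend() plt.show() ###Output _____no_output_____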
group_files/cpompa/notebooks/5-nba-filter-5-years-graphs.ipynb
###Markdown ------------- Open Saved JSON ###Code with open('./_players_all_data.json') as json_file: data = json.load(json_file) # Player positions player_positions = pd.read_csv('../../datasets/nba_positions_no_team.csv') five_year_all_players = pd.read_json(data) twenty_years_all_players_raw = five_year_all_players[five_year_all_players['SEASON_ID'] > '1999-00'] twenty_years_all_players_raw = twenty_years_all_players_raw.join(player_positions.set_index('PLAYER_NAME'), on='PLAYER_NAME') twenty_years_all_players_raw = twenty_years_all_players_raw.dropna(how='any').sort_values(by=['PTS'], ascending=False).drop_duplicates( keep='first').reset_index(drop=True) twenty_years_all_players_raw['PPGP'] = round(twenty_years_all_players_raw['PTS'] / twenty_years_all_players_raw['GP'], 2) twenty_years_all_players = twenty_years_all_players_raw.copy() twenty_years_all_players ### DF for each year, descending first_year = twenty_years_all_players[twenty_years_all_players['YIL'] == 1] second_year = twenty_years_all_players[twenty_years_all_players['YIL'] == 2] third_year = twenty_years_all_players[twenty_years_all_players['YIL'] == 3] fourth_year = twenty_years_all_players[twenty_years_all_players['YIL'] == 4] fifth_year = twenty_years_all_players[twenty_years_all_players['YIL'] == 5] ###Output _____no_output_____ ###Markdown Plot YIL to Total Points First Year Total Points per Position ###Code import plotly.express as px fig = px.scatter(twenty_years_all_players, x="MIN", y="PPGP", log_x=True, hover_name="PLAYER_NAME", hover_data=["PTS", 'PPGP', 'SEASON_ID', 'TEAM_ABBREVIATION'], color="YIL", size='PTS', title="First Year Total Points per Position") fig.show() ###Output _____no_output_____ ###Markdown Second Year Total Points per Position ###Code import plotly.express as px fig = px.scatter(second_year, x="PTS", y="POSITION", log_x=True, hover_name="PLAYER_NAME", hover_data=["PTS"], color="PTS", size='PTS', title="Second Year Total Points per Position") fig.show() ## Remove NaN. Will sway values greatly # first_year = first_year.dropna().sort_values(by=['PTS'], ascending=False).drop_duplicates( keep='first') # first_year test = list(first_year.columns.values) fig = px.scatter_polar(first_year, r="PTS", theta="POSITION", color="PTS", template="plotly_dark", title="First Year Total Points per Position") fig.show() twenty_years_all_players[['YIL','PTS','STL','BLK','REB','MIN','AST','TOV']].columns import plotly.express as px fig = px.line(twenty_years_all_players, x="MIN", y=twenty_years_all_players[['YIL','PTS','STL','BLK','REB','MIN','AST','TOV']].columns, hover_data={"PLAYER_NAME"}, title='Season stats vs minutes played', color="YIL") # fig.update_xaxes( # dtick="M1", # tickformat="%b\n%Y") fig.show() ###Output _____no_output_____ ###Markdown https://plotly.com/python/polar-chart/ ###Code # import plotly.express as px # df = px.data.wind() # fig = px.line_polar(twenty_years_all_players, r="PTS", theta="POSITION", color="YIL", line_close=True, # color_discrete_sequence=px.colors.sequential.Plasma_r, # template="plotly_dark",) # fig.show() # df = px.data.wind() # df ###Output _____no_output_____
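###Markdown As a follow-up sketch (an addition, not part of the original analysis): the per-year DataFrames above can also be summarized numerically, for example by averaging points per game played for each year in the league: ###Code # Average points per game played (PPGP) for each year in the league (YIL) ppgp_by_year = twenty_years_all_players.groupby('YIL')['PPGP'].mean().round(2) ppgp_by_year ###Output _____no_output_____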
docs/notebooks/point_cloud_voxelization.ipynb
###Markdown PointCloud Voxelization 0. Import libraries ###Code import os import topogenesis as tg import numpy as np import pyvista as pv ###Output _____no_output_____ ###Markdown 1. Read point cloud from CSV file ###Code pc_path = os.path.relpath('../../data/rdam_cloud.csv') pc = tg.cloud_from_csv(pc_path) print(type(pc)) print(pc.bounds) print(pc) ###Output <class 'topogenesis.datastructures.datastructures.cloud'> [[ 9.3198970e+04 4.3618254e+05 -1.7400000e+00] [ 9.3361080e+04 4.3631357e+05 2.6570000e+01]] [[9.3340350e+04 4.3631074e+05 3.1100000e+00] [9.3340650e+04 4.3631029e+05 3.1300000e+00] [9.3340950e+04 4.3630982e+05 3.1300000e+00] ... [9.3360940e+04 4.3624841e+05 3.5800000e+00] [9.3360720e+04 4.3624877e+05 3.4200000e+00] [9.3360970e+04 4.3624895e+05 3.5400000e+00]] ###Markdown 2. Regularize random points into a lattice ###Code l = pc.voxelate([1, 1, 1]) print(type(l)) print(l.unit) print(l.bounds) ###Output <class 'topogenesis.datastructures.datastructures.lattice'> [1 1 1] [[ 93198 436182 -3] [ 93362 436315 28]] ###Markdown 3. Plot the point cloud and the lattice ###Code # initiating the plotter p = pv.PlotterITK() # ITK plotter for interactivity within the python notebook (itkwidgets library is required) # fast visualization of the point cloud pc.fast_notebook_vis(p) # fast visualization of the lattice l.fast_notebook_vis(p, show_outline=True, show_centroids=True) # Set a camera position p.camera_position = [(0.25, 0.18, 0.5), (0, .1, 0), (0, 1, 0)] # plotting p.show() ###Output _____no_output_____
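###Markdown A small follow-up sketch (an addition, not in the original notebook), assuming the lattice behaves as an occupancy array the way `voxelate` produces it (topogenesis lattices subclass NumPy's ndarray): basic NumPy reductions can then be applied directly, for example to count how many voxels the point cloud occupies. ###Code # Count occupied voxels and the lattice's total voxel count via NumPy reductions print("occupied voxels:", np.count_nonzero(l)) print("total voxels:", l.size) ###Output _____no_output_____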
CUDA10.ipynb
###Markdown ###Code !nvidia-smi !pip install torchvision # Use PyTorch to check versions, CUDA version and cuDNN import torch print("PyTorch version: ") print(torch.__version__) print("CUDA Version: ") print(torch.version.cuda) print("cuDNN version is: ") print(torch.backends.cudnn.version()) !git clone https://github.com/vladimirwest/deepspeech.pytorch.git !apt-get install sox %cd /content !python3 deepspeech.pytorch/data/voxforge.py !cat voxforge_dataset/txt/1-20121125-pgp_ru_0022.txt %cd /content !apt-get install cmake !git clone https://github.com/SeanNaren/warp-ctc.git # %env persists the variable across cells; a plain !export only sets it in a subshell %env CUDA_HOME=/usr/local/cuda !mkdir warp-ctc/build import os os.chdir('/content/warp-ctc/build') !cmake .. !make !pip3 install cffi %cd ../pytorch_binding !python setup.py install %cd /content !apt-get install sox libsox-dev libsox-fmt-all !git clone https://github.com/pytorch/audio.git %cd audio !python setup.py install # !cd only changes directory in a subshell, so use %cd instead %cd /content !git clone --recursive https://github.com/NVIDIA/apex.git !cd apex && pip install . %cd /content/deepspeech.pytorch !pip install -r requirements.txt %cd /content/deepspeech.pytorch !python train.py --help from google.colab import drive drive.mount('/content/gdrive') !cp "/content/gdrive/My Drive/train.csv" /content !cp "/content/gdrive/My Drive/val.csv" /content !cp "/content/gdrive/My Drive/test_voxforge_kek.csv" /content #!cp "/content/gdrive/My Drive/Models_aug/final.pth" /content %cd /content/deepspeech.pytorch !python3 train.py --train-manifest "/content/train.csv" --val-manifest "/content/val.csv" --cuda --num-workers 0 --augment --checkpoint --save-folder "/content" --model-path "/content/test.pth" ###Output _____no_output_____ ###Markdown Math-related commands Find the roots of the equation $2x^5 - 3x^3 + 71x^2 - 9x + 13 = 0$ ###Code p = [2,0,-3,71,-9,13]; x = roots(p) ###Output ans = '9.6.0.1072779 (R2019a)' x = -3.4914 + 0.0000i 1.6863 + 2.6947i 1.6863 - 2.6947i 0.0594 + 0.4251i 0.0594 - 0.4251i ###Markdown Solve the linear system $$\begin{cases}2x_1 + 3x_2 - x_3 &= 2 \\8x_1 + 2x_2 + 3x_3 &= 4 \\45x_1 + 3x_2 + 9x_3 &= 23 \\ \end{cases}$$ ###Code a = [2,3,-1; 8,2,3; 45,3,9]; b = [2;4;23]; x = inv(a)*b ###Output x = 0.5531 0.2051 -0.2784 ###Markdown Compute the definite integral $\int_0^1 x \log(1+x) dx$ ###Code f = @(x)x.*log(1+x) integral(f,0,1) ###Output f = function_handle with value: @(x)x.*log(1+x) ans = 0.2500 ###Markdown Matrix operations Matrix concatenation ###Code a = [1,2,3; 4,5,6; 7,8,9]; b = [ -1,-2,-3; -4,-5,-6; -7,-8,-9; ]; c = [ a,b; b,a ] ###Output c = 1 2 3 -1 -2 -3 4 5 6 -4 -5 -6 7 8 9 -7 -8 -9 -1 -2 -3 1 2 3 -4 -5 -6 4 5 6 -7 -8 -9 7 8 9 ###Markdown Colon expressions and linspace ###Code 1:10:100 linspace(1,100,10) % 10 numbers, 9 intervals, (100-1)/9 = 11 ###Output ans = 1 12 23 34 45 56 67 78 89 100 ###Markdown The statement above is equivalent to ###Code 1:11:100 ###Output ans = 1 12 23 34 45 56 67 78 89 100 ###Markdown Matrix indexing ###Code a a(3,2) = 200; a ###Output a = 1 2 3 4 5 6 7 200 9 ###Markdown MATLAB stores matrices in column-major order, so linear indices correspond one-to-one with subscripts; for an $m \times n$ matrix, the linear index of element (i,j) is $(j-1)m+i$ ###Code a(4) ###Output ans = 2 ###Markdown Submatrices ###Code a(:,1) a(1:2,1) a(:) ###Output ans = 1 4 7 2 5 200 3 6 9
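###Markdown For cross-reference, a Python/NumPy sketch of the same column-major index formula (an addition, not part of the original notebook): NumPy can reproduce MATLAB-style linear indexing with Fortran (column-major) order. ###Code import numpy as np # Linear index of element (i, j) in an m-by-n column-major matrix; (j-1)*m + i in MATLAB's 1-based terms m, n = 3, 3 i, j = 0, 1 # zero-based equivalent of MATLAB's subscripts (1, 2) print(np.ravel_multi_index((i, j), (m, n), order='F') + 1) # prints 4, matching a(4) above ###Output _____no_output_____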
machine_learning/lesson 2 - logistic regression/Classification_Logistic_Regression.ipynb
###Markdown > Note: Always open in Colab for the best learning experience. Classification: Logistic RegressionIn the previous lessons, we learned about linear regression and how we can use it to construct a single layer linear neural network to predict a numeric value (i.e., how powerful a Pokemon is given its features). Regression is great when we want to answer *how much?* or *how many?* questions. In practice, we are often more interested in *classification*: asking *which one?* not *how much?*- Is this customer more likely to *sign up* or *not* for a subscription service?- Does this image contain one of the following, a cat or a dog?- Is this song in the genre of hip hop, pop, or funk?When we want to distinguish two classes (called *binary classification*), we can use a classification technique called logistic regression.In this notebook, we will learn the foundations of logistic regression and demonstrate how to solve binary classification problems using an example--building a logistic regression model to predict whether it will rain the next day or not. The ideas we introduce here will build on previous material and continue to lay out the fundamental concepts used in deep learning and neural networks, which we will cover in future lessons. Here is the lesson roadmap:1. Introduction to logistic regression2. From linear to logistic regression3. Building a logistic regression classifier: predicting if it rains tomorrow4. Summary Representing categorical data Representing data: a Shiba Inu, Retriever, and LabBefore we dive into logistic regression, let's consider how machine learning problems generally represent categorical data. Categorical features represent types of data which may be divided into groups. Examples of categorical features are dog breed, game genre, and educational level. While the latter feature may also be considered in a numerical manner by using exact values for highest grade completed, it is often more informative to categorize such variables into a relatively small number of groups.Consider an example where we want to distinguish 3 different dog breeds--(golden) retrievers, labs, and shiba inus, given 3 features about each dog: height, weight, and fur color. The numeric features are height ($x_1$) and weight ($x_2$), while the categorical feature is fur color ($x_3$), which we determined has 3 colors: black, red, yellow (golden/light gold). To make this categorical feature useful, we need to convert it into a numerical representation. There are two general ways to represent categorical data in numeric terms. Perhaps the most natural choice is to choose $x_3 \in \{1, 2, 3\}$, where the integers represent the fur colors {black, red, yellow} respectively. This is a great way to compress and store info on a computer, but it's not great for machine learning. Fortunately, great minds got together long ago and invented a simple method to represent categorical data called *one-hot encoding*. A one-hot encoding is a vector with as many components as we have categories. The component corresponding to a particular sample's category is set to 1 and all other components are set to 0. So in our case, this translates to:$$x_3 \in \{ (1, 0, 0), (0, 1, 0), (0, 0, 1) \},$$where $x_3$ would be a three-dimensional vector representing the fur color feature with $(1, 0, 0)$ corresponding to "black", (0, 1, 0) to "red", and (0, 0, 1) to "yellow" fur. Challenge: Representing categorical dataNow that you know how to represent categorical data, consider the dog breed example above.
We one-hot encoded the fur color feature $x_3$ so that all the features $x_1, x_2, x_3$ were represented by numeric values. Thus, the features ($\mathbf{x}$) were ready to be passed as input to a machine learning model. On the other hand, are the labels $y$ (the dog breed) ready? Are they in the proper format? How should $y$ be *encoded*? Write your answer in the text cell below. Hint: currently, $y \in \{\ \text{retrievers}, \text{labs}, \text{shiba inus} \}$ is a one-dimensional vector with categorical values. Intro to logistic regression Classification: Cat vs DogWith a healthy understanding of categorical encoding, let's dive into the logistic regression method.Logistic regression is perhaps the simplest and most common machine learning algorithm for binary classification tasks. It is closely related to linear regression, adapted for the case where the label variable ($y$) is categorical in nature. It is called "logistic" regression because it uses a logistic function, called the *sigmoid* function, to estimate the probability of a given class.To motivate logistic regression, let's consider a simple image classification problem--distinguish between cat and dog photos. Here, each sample consists of a $2 \times 2$ grayscale image. We can represent each pixel value with a single scalar (number), giving us four features $x_1,x_2,x_3,x_4$. Further, let's assume that each image belongs to one of the categories "cat" and "dog". However, as we demonstrated in the previous section, we can't use the labels $y$ in their current format ("cat" and "dog"). We need to convert the labels to discrete numerical values (i.e., 0 and 1). To this end, we map each category to an integer, making $y \in \{0,1\}$, where the integers represent $\{\text{cat}, \text{dog}\}$ respectively. Notice that this is not exactly like *one-hot encoding*, where the one-dimensional vector is converted into a multi-dimensional vector with dimensions equivalent to the number of classes in the labels $y$. Instead, we used the simpler (first) method we discussed in the previous section: encoding each category as a numerical value, in this case $\{0, 1\}$ corresponding to $\{\text{cat}, \text{dog}\}$. When we only need to encode two categories (called binary categorization), we don't have to use one-hot encoding. However, we do need to encode the data numerically. Specifically, among the category labels, we need to assign 0 to one category and 1 to the other. From linear to logistic regression Linear vs Logistic Regression | Source: DatacampNow that we know how labels are properly *encoded*, let's demonstrate the connection between linear and logistic regression.When we are doing linear regression, the equation is as follows:$$\hat{\mathbf{y}} = \mathbf{w} \mathbf{X} + b,\tag{1}$$where the linear model learns the *optimal* parameter values for the *weights* ($\mathbf{w}$) and *bias* term ($b$). The linear regression method is great when we want to predict continuous numerical data, but not so good when we need to distinguish between classes. To make a binary logistic classifier that distinguishes between cat and dog photos, we need to convert the predictions ($\hat{\mathbf{y}}$) into probabilities ($\hat{\mathbf{p}}$). Here, each sample is assigned a corresponding probability $\hat{p}$ that indicates the model's degree of *certainty* that it belongs to a particular class (in our case, cat or dog). Further, we set a threshold, usually 0.5, that the model will use to determine the final class prediction.
For our cat ($y=0$) and dog ($y=1$) problem, a sample with a $\hat{p}$ value greater than 0.5 would receive the "dog" label for example. In order to predict classes, logistic regression maps predictions ($\hat{\mathbf{y}}$) to probabilities ($\hat{\mathbf{p}}$) via the *sigmoid* logit function:$$\tag{2}p = \sigma(y) = \frac{1}{1 + e^{-y}},$$which leads us to the equation for logistic regression: $$\tag{3}\hat{\mathbf{p}} = \sigma(\hat{\mathbf{y}}) = \frac{1}{1 + e^{-(\mathbf{w} \mathbf{X} + b)}}, $$ where the logistic model (binary classifier) learns the *optimal* parameter values ($\mathbf{w}$ and $b$) by producing probabilities ($\hat{\mathbf{p}}$) that *maximize the likelihood* of predicting the observed data. Generally, the logistic regression equation from $(3)$ is compressed:$$\tag{4}\hat{\mathbf{p}} = \sigma(\hat{\mathbf{y}}) = \sigma(\mathbf{w} \mathbf{X} + b),$$where $\sigma$ represents the sigmoid function (eq. $2$) in this case. Does this equation look similar to linear regression yet?To summarize logistic regression:- Category labels are converted to discrete integer values (e.g., 0 and 1).- The *sigmoid* logit function maps input features ($\mathbf{x}$) to probabilities (i.e., a number between 0 and 1).- A category prediction is determined by the threshold value (usually 0.5) and the probability (i.e., in our cat/dog example, a sample with a probability greater than 0.5 is classified as a dog image). - Logistic regression classifiers try to maximize *certainty*: predict a particular class with high confidence ($\hat{p}$ closer to 1) and be correct (after thresholding, $\hat{p} = y$), most of the time. Logistic Regression: predicting if it rains tomorrowNow that we know about the fundamentals of logistic regression, let's apply this method to a real-world problem--predicting if it rains the next day in Australia based on daily data. In this section, we will demonstrate in an end-to-end fashion the process of creating a logistic regression classifier: from building, to training, and finally evaluating the model. This process involves several steps:1. Find a dataset related to our question.2. Explore the dataset and prepare it for the model.3. Build the model.4. Train the model using an algorithm such as stochastic gradient descent.5. Evaluate the quality of our model.6. Draw conclusions.For step 1, we found the [Rain in Australia](https://www.kaggle.com/jsphyg/weather-dataset-rattle-package) dataset. The dataset contains approximately 145,000 samples when uncleaned, each representing daily weather data and whether it rained the next day. It provides data like location, temperature, amount of rainfall, windspeed and so much more! ###Code # import the libraries we need import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt # importing PyTorch import torch import torch.nn as nn ###Output _____no_output_____ ###Markdown 2. Explore the dataset and prepare it for our modelIn this section we will focus on defining the *features* ($\mathbf{x}$) and *labels* ($\mathbf{y}$) that we will use in our logistic regression classifier to predict next-day rain. As you will see, this requires us to do some data cleaning and preprocessing.
###Code data_url = 'https://raw.githubusercontent.com/BreakoutMentors/Data-Science-and-Machine-Learning/main/datasets/weatherAUS.csv' df = pd.read_csv(data_url) df.head() # check the column types and get basic info df.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 145460 entries, 0 to 145459 Data columns (total 23 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Date 145460 non-null object 1 Location 145460 non-null object 2 MinTemp 143975 non-null float64 3 MaxTemp 144199 non-null float64 4 Rainfall 142199 non-null float64 5 Evaporation 82670 non-null float64 6 Sunshine 75625 non-null float64 7 WindGustDir 135134 non-null object 8 WindGustSpeed 135197 non-null float64 9 WindDir9am 134894 non-null object 10 WindDir3pm 141232 non-null object 11 WindSpeed9am 143693 non-null float64 12 WindSpeed3pm 142398 non-null float64 13 Humidity9am 142806 non-null float64 14 Humidity3pm 140953 non-null float64 15 Pressure9am 130395 non-null float64 16 Pressure3pm 130432 non-null float64 17 Cloud9am 89572 non-null float64 18 Cloud3pm 86102 non-null float64 19 Temp9am 143693 non-null float64 20 Temp3pm 141851 non-null float64 21 RainToday 142199 non-null object 22 RainTomorrow 142193 non-null object dtypes: float64(16), object(7) memory usage: 25.5+ MB ###Markdown Removing Null ValuesI will take two steps to remove null values1. Remove columns that have more than $\frac{1}{4}$ of their data missing2. Remove rows that have any missing data ###Code # Find the columns where more than 1/4 of the values are null so we can remove them nan_cols = df.isna().sum() > (0.25 * df.shape[0]) nan_cols = nan_cols[nan_cols].index.tolist() print("These are columns that have more than 1/4 of the column missing") print(nan_cols) # Removing those columns df = df.drop(columns=nan_cols) # For easy cleaning, will be removing all rows with missing data df = df.dropna() print('Number of missing values: ', df.isna().sum().sum()) ###Output Number of missing values:  0 ###Markdown Feature Extraction from DatesThe 'Date' column contains the date when the data was collected. Since we plan to predict whether it will rain tomorrow, it is reasonable to believe the month can have an effect on when it rains. Therefore, a new column named 'month' is created which will be used for the model.The 'Date' column is originally an *object* datatype column, but we will use `pd.to_datetime()` to convert the column data type into the *datetime64[ns]* data type so pandas can understand the column as actual dates, not strings. Then, using the `dt.month` attribute of `df['Date']`, you can get the month of every entry as a pandas Series and save that Series to a new column in the dataframe.After saving the month, we can remove the 'Date' column since we cannot input it into our model. ###Code # Changing column Date from object data type to datetime64[ns] df['Date'] = pd.to_datetime(df['Date']) # Creating a new column called 'month' df['month'] = df.Date.dt.month # look at the head of 'month' df.loc[:, ['Date', 'month']].head() df = df.drop(columns=['Date']) ###Output _____no_output_____ ###Markdown Translating string binary columns to [0, 1]Before translating these columns, we should look at the two distributions. Just to prevent confusion, the 'RainToday' column will be used as a feature variable, and 'RainTomorrow' will be used as the dependent variable which we will be predicting.
###Code df.RainToday.hist() plt.title('Distribution of RainToday') plt.show() df.RainTomorrow.hist() plt.title('Distribution of RainTomorrow') plt.show() ###Output _____no_output_____ ###Markdown You can see that these two distributions are very similar and that they are very unbalanced. When we separate the data into training and test sets, we will balance the class distribution in the 'RainTomorrow' column. Now let's convert the 'No' and 'Yes' values to 0's and 1's. ###Code def binary_conversion(rain): # returns 1 if it rains if rain == 'Yes': return 1 # returns 0 if it did not rain else: return 0 # Converting those two columns df['RainToday'] = df['RainToday'].map(binary_conversion) df['RainTomorrow'] = df['RainTomorrow'].map(binary_conversion) ###Output _____no_output_____ ###Markdown One-Hot encoding all categorical dataThe way this is done is by one-hot encoding all columns with the 'object' data type. ###Code # Getting columns with the object data type object_cols = df.select_dtypes(include=object).columns.tolist() # Saving one-hot encoded dataframes to concatenate later one_hot_dfs = [df.copy()] for col in object_cols: one_hot_df = pd.get_dummies(df[col], prefix=col) one_hot_dfs.append(one_hot_df) # Concatenating all dataframes into df again df = pd.concat(one_hot_dfs, axis=1) # Dropping the columns used to one-hot encode df = df.drop(columns=object_cols) ###Output _____no_output_____ ###Markdown Balancing the DatasetThe reason we balance the dataset by the variable we are trying to predict is to prevent the model from overfitting to the most frequent class, which in this situation is 'No' in the 'RainTomorrow' column. In a statistical sense, the model learns the distribution of the training data to make predictions, so if the distribution is highly skewed toward one class, it will just predict the most frequent class.
###Code def balance_dataset(df): df_copy = df.copy() df_rain = df_copy.loc[df['RainTomorrow']==1.0, :] df_dry = df_copy.loc[df['RainTomorrow']==0.0, :].sample(df_rain.RainTomorrow.value_counts()[1]) df_copy = pd.concat([df_rain, df_dry]).sample(frac=1, random_state=0) return df_copy df = balance_dataset(df) df.head() ###Output _____no_output_____ ###Markdown Separating data into Training and Test sets ###Code # define the x (features) and y (labels) variables y_col = 'RainTomorrow' x_cols = df.drop(columns=['RainTomorrow']).columns.tolist() # split the dataset into train/test datasets train = df.sample(frac=0.8, random_state=0) test = df.drop(train.index) # Min-max normalizing the data sets (scaling each column to [0, 1]) train = (train - train.min())/(train.max() - train.min()) test = (test - test.min())/(test.max() - test.min()) # Splitting training data into validation data valid = train.sample(frac=0.1, random_state=0) train = train.drop(valid.index) # Deleting rows sampled for validation data # separate the x (features) and y (labels) in the train/valid/test datasets and convert them to tensors train_features = torch.tensor(train[x_cols].values, dtype=torch.float) test_features = torch.tensor(test[x_cols].values, dtype=torch.float) valid_features = torch.tensor(valid[x_cols].values, dtype=torch.float) train_labels = torch.tensor(train[y_col].values.reshape(-1, 1), dtype=torch.float) test_labels = torch.tensor(test[y_col].values.reshape(-1, 1), dtype=torch.float) valid_labels = torch.tensor(valid[y_col].values.reshape(-1, 1), dtype=torch.float) print('train features shape:', train_features.shape) print('train labels shape:', train_labels.shape) print('validation features shape:', valid_features.shape) print('validation labels shape:', valid_labels.shape) print('test features shape:', test_features.shape) print('test labels shape:', test_labels.shape) print('first 5 test labels:\n', test_labels[:5]) ###Output train features shape: torch.Size([36027, 106]) train labels shape: torch.Size([36027, 1]) validation features shape: torch.Size([4003, 106]) validation labels shape: torch.Size([4003, 1]) test features shape: torch.Size([10008, 106]) test labels shape: torch.Size([10008, 1]) first 5 test labels: tensor([[1.], [1.], [0.], [1.], [1.]]) ###Markdown 3. Build the modelNow that the data is ready, we can build a model. We will use PyTorch to define a simple logistic regression model (single-layer neural network) to predict if it will rain the next day given the weather data of the current day. Given a sample whose predicted probability is above 0.5, the model will assign the "rain" (1) category to it, otherwise it is categorized as "no rain". We also define the loss function and optimization algorithm. We will use *binary cross-entropy* loss, *stochastic gradient descent*, and track the *accuracy* metric.
###Code
# building logistic model
class Logistic_Model(nn.Module):
    """
    @params
        num_features(int): The number of features to construct the input layer of the NN
    """
    # Defining Constructor
    def __init__(self, num_features):
        super(Logistic_Model, self).__init__()
        # Defining Layers
        self.fc1 = nn.Linear(num_features, 1)
        # Define Sigmoid activation function
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.fc1(x)
        x = self.sigmoid(x)
        return x

# Initializing model
num_features = train_features.shape[1]
model = Logistic_Model(num_features)

# Moving model to use a GPU if available
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
model

# Defining Loss Function
criterion = nn.BCELoss()
# Defining optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.2)
###Output
_____no_output_____
###Markdown
4. Train the model
Now it's time to train the model. We will train it for 100 *epochs* (full passes through the training data) with a *batch size* of 1024 (the number of training examples evaluated before each gradient descent step). To use batches, we will load the data into PyTorch [`Datasets`](https://pytorch.org/docs/stable/data.html#dataset-types) and [`Dataloaders`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). We use the [`TensorDataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.TensorDataset) class, which creates a PyTorch dataset from the features and labels we already have.
###Code
batch_size = 1024

# Defining datasets
train_dataset = torch.utils.data.TensorDataset(train_features, train_labels)
test_dataset = torch.utils.data.TensorDataset(test_features, test_labels)
valid_dataset = torch.utils.data.TensorDataset(valid_features, valid_labels)

# Loading datasets into dataloaders
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
valid_dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)

# Function that takes output and returns predictions
def get_predictions(output, threshold=0.5):
    predictions = torch.zeros(output.shape)
    for i in range(len(output)):
        if output[i] < threshold:
            predictions[i] = 0
        else:
            predictions[i] = 1
    return predictions

epochs = 100

train_losses = []
train_accuracies = []
valid_losses = []
valid_accuracies = []

for epoch in range(1, epochs+1):
    train_loss = 0.0
    valid_loss = 0.0
    train_counts = 0
    valid_counts = 0

    ###################
    # train the model #
    ###################
    # Setting model to train mode
    model.train()
    for train_features, train_labels in train_dataloader:
        # Moving data to GPU if available
        train_features, train_labels = train_features.to(device), train_labels.to(device)
        # Setting all gradients to zero
        optimizer.zero_grad()
        # Calculate Output
        output = model(train_features)
        # Calculate Loss
        loss = criterion(output, train_labels)
        # Calculate Gradients
        loss.backward()
        # Perform Gradient Descent Step
        optimizer.step()
        # Saving loss
        train_loss += loss.item()
        # Get Predictions
        train_preds = get_predictions(output)
        # Saving number of right predictions for accuracy
        train_counts += train_preds.to(device).eq(train_labels).sum().item()

    ######################
    # validate the model #
    ######################
    # Setting model to evaluation mode, no parameters will change
    model.eval()
    for valid_features, valid_labels in valid_dataloader:
        # Moving data to GPU if available
        valid_features, valid_labels = valid_features.to(device), valid_labels.to(device)
        # Calculate Output
        output = model(valid_features)
        # Calculate Loss
        loss = criterion(output, valid_labels)
        # Saving loss
        valid_loss += loss.item()
        # Get Predictions
        valid_preds = get_predictions(output)
        # Saving number of right predictions for accuracy
        valid_counts += valid_preds.to(device).eq(valid_labels).sum().item()

    # Averaging and Saving Losses
    train_loss/=len(train_dataset)
    valid_loss/=len(valid_dataset)
    train_losses.append(train_loss)
    valid_losses.append(valid_loss)

    # Getting accuracies and saving them
    train_acc = train_counts/len(train_dataset)
    valid_acc = valid_counts/len(valid_dataset)
    train_accuracies.append(train_acc)
    valid_accuracies.append(valid_acc)

    print('Epoch: {} \tTraining Loss: {:.6f} \tTraining Accuracy: {:.2f}% \tValidation Loss: {:.6f} \tValidation Accuracy: {:.2f}%'.format(epoch, train_loss, train_acc*100, valid_loss, valid_acc*100))

plt.plot(valid_accuracies)
plt.xlabel('epoch')
plt.ylabel('Accuracy')
plt.title('Validation Accuracy')
plt.show()
###Output
_____no_output_____
###Markdown
As the above plot suggests, our model converges to a validation accuracy close to 78%.

5. Evaluate the model
Now that we have trained our model, it's time to evaluate it using the test dataset, which we did not use when training the model. This gives us a sense of how well our model predicts unseen data, which is the case when we use it in the real world.
###Code
test_loss = 0.0
test_counts = 0

# Setting model to evaluation mode, no parameters will change
model.eval()
for test_features, test_labels in test_dataloader:
    # Moving data to GPU if available
    test_features, test_labels = test_features.to(device), test_labels.to(device)
    # Calculate Output
    output = model(test_features)
    # Calculate Loss
    loss = criterion(output, test_labels)
    # Saving loss
    test_loss += loss.item()
    # Get Predictions
    test_preds = get_predictions(output)
    # Saving number of right predictions for accuracy
    test_counts += test_preds.to(device).eq(test_labels).sum().item()

# Calculating test accuracy
test_acc = test_counts/len(test_dataset)
print('Test Loss: {:.6f} \tTest Accuracy: {:.2f}%'.format(test_loss, test_acc*100))
###Output
Test Loss: 4.569631 	Test Accuracy: 78.55%
###Markdown
Our logistic regression model fit the data fairly well, correctly predicting the next day's weather around 77% to 78% of the time. The distribution of rainy days and non-rainy days in our datasets was originally not balanced, but the data was balanced before training. To give our results more context, we should check the *confusion matrix* to see how the model's predictions were distributed.

A confusion matrix indicates the number of correct and incorrect predictions for each class. It is particularly useful whenever the data has an imbalanced representation of the classes. The diagonal of a confusion matrix holds the correct predictions for each class, while the off-diagonal entries hold the misclassified predictions. Below is an example of a binary classification confusion matrix.
A basic confusion matrix
###Code
from sklearn import metrics

# Getting test features and labels
test_features = test_dataset.tensors[0].to(device)
test_labels = test_dataset.tensors[1].to(device)

# Converting labels and predictions to numpy arrays
test_predictions = get_predictions(model(test_features)).numpy()
test_labels = test_labels.cpu().numpy()

# measure the accuracy
model_acc = metrics.accuracy_score(test_labels, test_predictions)
print(f'logistic regression model accuracy: {round(model_acc*100, 2)}%')

# plot confusion matrix
cm = metrics.confusion_matrix(test_labels, test_predictions)
print('confusion matrix:\n', cm)

plt.imshow(cm, cmap=plt.cm.Blues)
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.xticks([0, 1], [0, 1])
plt.yticks([0, 1], [0, 1])
plt.title('Confusion matrix')
plt.colorbar()
plt.show()
###Output
logistic regression model accuracy: 78.55%
confusion matrix:
 [[3992 1011]
 [1136 3869]]
###Markdown
From the confusion matrix, we can deduce that the model performed relatively well. The numbers of misclassified samples of rainy and non-rainy days were close. Nonetheless, it's important to be aware of misleading model results, and the confusion matrix is a very important tool for catching them.

Let's take a look at a more comprehensive set of evaluation metrics: accuracy, precision, and recall. Precision indicates the model's ability to return only relevant instances, while recall indicates its ability to identify all relevant instances; depending on our data, we may prefer higher precision or higher recall. If you're curious, here is an in-depth discussion of these metrics: [Beyond Accuracy: Precision and Recall](https://towardsdatascience.com/beyond-accuracy-precision-and-recall-3da06bea9f6c).
###Code
print("Accuracy: {}%".format(round(model_acc*100, 2)))
print("Precision:", metrics.precision_score(test_labels, test_predictions, zero_division=True))
print("Recall:", metrics.recall_score(test_labels, test_predictions, zero_division=True))
###Output
Accuracy: 78.55%
Precision: 0.792827868852459
Recall: 0.773026973026973
###Markdown
What's the takeaway from all this? **Always, always contextualize the model's results.**

Summary
- We use *one-hot encoding* to represent categorical data.
- Logistic regression is a popular and foundational algorithm for classification in machine learning and deep learning (neural networks).
- The *sigmoid* function maps the model's linear combination of the input features to a probability.
- Linear and logistic regression are very similar; they differ in two ways. First, the labels are continuous numerical values in linear regression, while in logistic regression they are discrete numerical values (0 and 1), each representing a particular category. Second, logistic regression uses the sigmoid function to transform the input features into a probability space, and the model learns the optimal parameters to maximize the probability of confidently predicting the correct class.
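###Markdown
As a final sanity check, the precision and recall reported above can be recomputed by hand from the printed confusion matrix, treating 'rain' (1) as the positive class.
###Code
# Entries of the confusion matrix above (rows = true labels, columns = predictions)
tn, fp, fn, tp = 3992, 1011, 1136, 3869

precision = tp / (tp + fp)                   # 3869 / 4880 ~ 0.7928
recall = tp / (tp + fn)                      # 3869 / 5005 ~ 0.7730
accuracy = (tp + tn) / (tp + tn + fp + fn)   # 7861 / 10008 ~ 0.7855

print(precision, recall, accuracy)
###Output
_____no_output_____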
pandas_viz_histogram.ipynb
###Markdown read data
###Code
# imports needed by the cells below
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

df=pd.read_csv('https://github.com/prasertcbs/tutorial/raw/master/mpg.csv')
df.head()
###Output
_____no_output_____
###Markdown explore data
###Code
df.cty.plot(kind='box');
df.hist();
df.cty.hist();
df.cty.hist(bins=20);
df[['cty', 'hwy']].hist(grid=False, color='orange', sharex=True, sharey=True);
df[['cty', 'hwy']].plot.hist(alpha=.5)
df[['cty', 'hwy']].plot.hist();
df.cty.plot.density();
df.cty.plot.kde();  # kde() is an alias for density()
df.sample(10)
df['class'].value_counts()
df['class'].value_counts().plot.bar()
df['class'].value_counts().plot.barh(color='orange')
df.cty.mean()
df['class'].value_counts().plot.barh(color='.7')
df['class'].value_counts().sort_values().plot.barh(color='.7')
###Output
_____no_output_____
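###Markdown
The `alpha=.5` overlay above relies on pandas' matplotlib backend. An equivalent explicit-matplotlib sketch (using the same `df`) makes the shared bins explicit, so the two distributions are directly comparable.
###Code
import numpy as np

# common bin edges computed over both columns
bins = np.histogram_bin_edges(np.concatenate([df.cty, df.hwy]), bins=20)

plt.hist(df.cty, bins=bins, alpha=.5, label='cty')
plt.hist(df.hwy, bins=bins, alpha=.5, label='hwy')
plt.legend()
plt.show()
###Output
_____no_output_____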
tutorials/TIDY_tutorial.ipynb
###Markdown Finding Mislabelled Samples through ResNet MNIST Training Process
This notebook trains a ResNet model on the MNIST dataset and employs the TrainIng Data analYzer (TIDY) method, based on the Forgetting Events algorithm (specifically `ForgettingEventsInterpreter`), to investigate the training process by recording the predictions made along the way. Some samples are manually mislabelled, and we are able to find them by looking at the predictions during training.
###Code
import paddle
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

import interpretdl as it
###Output
_____no_output_____
###Markdown
Define a ResNet architecture for MNIST; the code is borrowed from the [PaddlePaddle Official Documentation](https://www.paddlepaddle.org.cn/tutorials/projectdetail/1516124).
###Code
import paddle.nn as nn
import paddle.nn.functional as F

class ConvBNLayer(paddle.nn.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None):
        super(ConvBNLayer, self).__init__()

        self._conv = nn.Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            bias_attr=False)

        self._batch_norm = paddle.nn.BatchNorm2D(num_filters)
        self.act = act

    def forward(self, inputs):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        if self.act == 'leaky':
            y = F.leaky_relu(x=y, negative_slope=0.1)
        elif self.act == 'relu':
            y = F.relu(x=y)
        return y

class BottleneckBlock(paddle.nn.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True):
        super(BottleneckBlock, self).__init__()

        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act='relu')
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu')
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None)

        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 4,
                filter_size=1,
                stride=stride)

        self.shortcut = shortcut
        self._num_channels_out = num_filters * 4

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)

        y = paddle.add(x=short, y=conv2)
        y = F.relu(y)
        return y

class ResNet(paddle.nn.Layer):
    def __init__(self, layers=50, class_dim=1):
        super(ResNet, self).__init__()

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(
            num_channels=1,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu')
        self.pool2d_max = nn.MaxPool2D(
            kernel_size=3,
            stride=2,
            padding=1)

        self.bottleneck_block_list = []
        num_channels = 64
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        num_channels=num_channels,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,  # stages c3, c4, c5 use stride=2 in their first residual block; all other residual blocks use stride=1
                        shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = paddle.nn.AdaptiveAvgPool2D(output_size=1)

        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self.out = nn.Linear(in_features=2048, out_features=class_dim,
                             weight_attr=paddle.ParamAttr(
                                 initializer=paddle.nn.initializer.Uniform(-stdv, stdv)))

    def forward(self, inputs):
        y = self.conv(inputs)
        y = self.pool2d_max(y)
        for bottleneck_block in self.bottleneck_block_list:
            y = bottleneck_block(y)
        y = self.pool2d_avg(y)
        y = paddle.reshape(y, [y.shape[0], -1])
        y = self.out(y)
        return y
###Output
_____no_output_____
###Markdown
Use the MNIST dataset generator from **paddle.vision** to get the labels and manually mislabel 1% of the samples.
###Code
from paddle.vision.transforms import ToTensor, Resize, Compose
from paddle.vision.datasets import MNIST

train_dataset = MNIST(mode='train', transform=Compose([Resize(size=32), ToTensor()]))

# Prepare manually mislabelled samples
labels = []
for i in range(0, 60000, 100):
    labels.append(np.random.choice(np.delete(np.arange(10), train_dataset[i][-1])))
###Output
_____no_output_____
###Markdown
Initialize the model.
###Code
model = ResNet(class_dim=10)
###Output
_____no_output_____
###Markdown
Define a new data generator based on the MNIST data generator. It replaces 1% of the true labels with wrong ones. **Important:** the data generator should yield the index of each sample as the first element, so that each sample's behavior can be recorded according to its index.
###Code
def reader_prepare(dataset, new_labels):
    def reader():
        idx = 0
        for data, label in dataset:
            if idx % 100 == 0:
                label = new_labels[idx // 100]
            yield idx, data, int(label)
            idx += 1
    return reader
###Output
_____no_output_____
###Markdown
Set up a data loader with a batch size of 128, and a Momentum optimizer for training.
###Code
BATCH_SIZE = 128
train_reader = paddle.batch(
    reader_prepare(train_dataset, labels),
    batch_size=BATCH_SIZE)
optimizer = paddle.optimizer.Momentum(learning_rate=0.001,
                                      momentum=0.9,
                                      parameters=model.parameters())
###Output
_____no_output_____
###Markdown
First initialize the `ForgettingEventsInterpreter` and then start `interpret`ing the training process by training for 100 epochs. *stats* is a dictionary that maps each image index to its predictions during training and whether they are correct; *noisy_samples* is a list of mislabelled image ids. *stats* is saved at "assets/stats.pkl".
###Code
fe = it.ForgettingEventsInterpreter(model, True)

epochs = 100
print('Training %d epochs. This may take some time.' % epochs)
stats, noisy_samples = fe.interpret(
    train_reader,
    optimizer,
    batch_size=BATCH_SIZE,
    epochs=epochs,
    noisy_labels=True,
    save_path='assets')
###Output
Training 100 epochs. This may take some time.
| Epoch [  1/100] Iter[  2]		Loss: 2.5311 Acc@1: 10.938%
###Markdown
Calculate the recall, precision, and F1 score for the noisy samples we found: 88.7% of the mislabelled samples have been found, and among the samples flagged, 80.1% are indeed mislabelled.
###Code
recall = np.sum([id_ % 100 == 0 for id_ in noisy_samples]) / (60000 / 100)
precision = np.sum([id_ % 100 == 0 for id_ in noisy_samples]) / len(noisy_samples)
print('Recall: ', recall)
print('Precision: ', precision)
print('F1 Score: ', 2 * (recall * precision) / (recall + precision))
###Output
Recall:  0.8866666666666667
Precision:  0.8012048192771084
F1 Score:  0.8417721518987342
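###Markdown
The per-sample records in *stats* can be mined further. The sketch below is illustrative only: it assumes each entry of `stats` exposes a per-step sequence of correctness flags for one sample (the exact structure should be verified against the saved "assets/stats.pkl"), and counts forgetting events as correct-to-incorrect transitions. Frequently forgotten samples are the prime mislabelling suspects.
###Code
def count_forgetting_events(correct_flags):
    # a forgetting event: the sample was classified correctly at one step
    # and misclassified at the next
    return sum(1 for prev, cur in zip(correct_flags, correct_flags[1:])
               if prev and not cur)

# hypothetical usage, assuming stats[idx] yields such a correctness sequence:
# suspects = sorted(stats, key=lambda idx: count_forgetting_events(stats[idx]), reverse=True)
###Output
_____no_output_____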
samples/kmeans_sample.ipynb
###Markdown K-means Clustering
`k-means` is the most widely-used `centroid-based clustering` algorithm.

Import all necessary libraries
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn import metrics
from sklearn.cluster import KMeans

%matplotlib inline
###Output
_____no_output_____
###Markdown Read data
###Code
data = np.genfromtxt("../data/kmeans_sample_dataset.csv", delimiter=',')
###Output
_____no_output_____
###Markdown Plot the data
###Code
plt.figure(figsize=(15, 8))
plt.scatter(data[:, 0], data[:, 1])
###Output
_____no_output_____
###Markdown K-means
- [sklearn.cluster.KMeans](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html)
    - cluster_centers_ (array, [n_clusters, n_features]): Coordinates of cluster centers. If the algorithm stops before fully converging (see tol and max_iter), these will not be consistent with labels_.
    - labels_: Labels of each point
    - inertia_ (float): Sum of squared distances of samples to their closest cluster center.
    - n_iter_ (int): Number of iterations run.
###Code
kmeans = KMeans(n_clusters=3).fit(data)
cluster_labels = kmeans.labels_
cluster_centers = kmeans.cluster_centers_

# plot
plt.figure(figsize=(15, 8))
plt.scatter(data[:, 0], data[:, 1], s=40, c=cluster_labels, cmap=plt.cm.prism)
plt.title('K-Means Clustering Results with K=3')
plt.scatter(cluster_centers[:, 0], cluster_centers[:, 1], marker='+', c='k', s=100, linewidth=2)
###Output
_____no_output_____
###Markdown Optimal number of clusters
Elbow Method
- WCSS (within-cluster sum of squares)
###Code
wcss = []
for i in range(1, 9):
    kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=10).fit(data)
    wcss.append(kmeans.inertia_)

plt.subplots(figsize=(15, 7))
plt.plot(range(1, 9), wcss, 'bo-')
plt.title('The elbow Method', fontsize=15)
plt.xlabel('Number of clusters', fontsize=15)
plt.ylabel('wcss', fontsize=15)
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown [Optional] Implementing the K-means clustering algorithm
The K-means algorithm is a method to automatically cluster similar data examples together.

Description
Given a set of observations $\{x_{1}, x_{2}, \cdots, x_{n}\}$, we want to group the data into $k \, (\leq n)$ sets $S = \{S_{1}, S_{2}, \cdots, S_{k}\}$ so as to minimize the `within-cluster sum of squares (WCSS)`. Formally, the objective is to find:
$$ \underset{S}{\operatorname{arg\,min}} \sum_{i=1}^{k} \sum_{x\in S_{i}} \lVert x - \mu_i \rVert^2 $$
where $\mu_{i}$ is the mean of the points in $S_{i}$ (the centroid of set $S_i$).

Algorithm
Standard algorithm (naive k-means)
The K-means algorithm is as follows:
1. Initialize centroids
2. Assign examples to the closest centroids
3. Recompute the centroids based on the assignments
4. Repeat steps 2 and 3
###Code
# Settings for running K-Means
K = 3
MaxIters = 10

# Here, we set centroids to specific values first
initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])
###Output
_____no_output_____
###Markdown Finding closest centroids
In the “Assignment step”, the algorithm assigns every example $x_{i}$ to its closest centroid, given the current positions of the centroids. Specifically, for every example $i$ we set
$$ c_{i} = \underset{j \in \{1,\dots,k\}}{\operatorname{arg\,min}} \; \lVert x_{i} - \mu_j \rVert^2 $$
- $c_{i}$ is the index of the centroid that is closest to $x_{i}$
* Note that $c_{i}$ corresponds to idx[i] in the code.
###Code
def find_closest_centroids(X, centroids):
    """
    Computes the centroid memberships for every example.
    @Parameters
    X: (n, m) -> n examples each of m dimensions.
    centroids: (K, m) -> K is the number of clusters.

    @Returns
    idx: (n,) -> The centroid assignment for each example.
    """
    # Initialize values
    K = centroids.shape[0]
    n = X.shape[0]
    idx = np.zeros(n, dtype=int)
    distance = np.zeros(K)

    # Compute centroid memberships
    for i in range(n):
        for k in range(K):
            distance[k] = np.sum(np.square(X[i, :] - centroids[k, :]), axis=0)
        idx[i] = np.argmin(distance)

    return idx

# Find the closest centroids for the examples using the initial_centroids
idx = find_closest_centroids(data, initial_centroids)

print('Closest centroids for the first 3 examples:')
print(idx[:3])
print('(the closest centroids should be 0, 2, 1 respectively)')
###Output
Closest centroids for the first 3 examples:
[0 2 1]
(the closest centroids should be 0, 2, 1 respectively)
###Markdown Computing centroid means
Given assignments of every point to a centroid, the second phase of the algorithm recomputes, for each centroid, the mean of the points that were assigned to it. Specifically, for every centroid $k$ we set
$$ \mu_k = \frac{1}{\left| S_k\right|} \sum_{i \in S_k} x^{(i)}$$
where $S_k$ is the set of examples that are assigned to centroid $k$. Concretely, if two examples, say $x^{(3)}$ and $x^{(5)}$, are assigned to centroid $k = 2$, then you should update $\mu_2 = \frac{1}{2} \left( x^{(3)} + x^{(5)} \right)$.
###Code
def compute_centroids(X, idx, K):
    """
    Returns the new centroids by computing the means of the data points assigned to each centroid.

    @Parameters
    X: (n, m) -> n examples each of m dimensions.
    idx: (n,) -> The centroid assignment for each example.
    K : int -> Number of clusters.

    @Returns
    centroids: (K, m) -> Each row is the mean of the data points assigned to it.
    """
    n, d = X.shape
    centroids = np.zeros((K, d))

    # Loop over k
    for k in range(K):
        centroids[k, :] = X[idx == k, :].mean(axis=0)

    return centroids

# Compute means based on the closest centroids found in the previous part.
centroids = compute_centroids(data, idx, 3)

print('Centroids computed after initial finding of closest centroids:')
print(centroids)
print('\nThe centroids should be')
print('   [ 2.428301 3.157924 ]')
print('   [ 5.813503 2.633656 ]')
print('   [ 7.119387 3.616684 ]')
###Output
Centroids computed after initial finding of closest centroids:
[[2.42830111 3.15792418]
 [5.81350331 2.63365645]
 [7.11938687 3.6166844 ]]

The centroids should be
   [ 2.428301 3.157924 ]
   [ 5.813503 2.633656 ]
   [ 7.119387 3.616684 ]
###Markdown Train K-means
###Code
def train_k_means(X, centroids, max_iters=10):  #, print_iteration=False
    """
    Runs the K-means algorithm.

    @Parameters
    X: (n, m) -> n examples each of m dimensions.
    centroids: (k, m) -> Initial centroid location for each cluster.
    max_iters: int -> Specifies the total number of iterations of K-Means to execute.

    @Returns
    centroids: (K, m) -> The computed (updated) centroids.
    idx: (n,) -> The centroid assignment for each example.
    """
    K = centroids.shape[0]
    idx = None
    #idx_history = []
    #centroid_history = []

    for i in range(max_iters):
        idx = find_closest_centroids(X, centroids)
        centroids = compute_centroids(X, idx, K)

    return centroids, idx

# Run K-Means algorithm
# (MaxIters controls how many assignment/update rounds are performed)
centroids, idx = train_k_means(data, initial_centroids, max_iters=MaxIters)

# plot
plt.figure(figsize=(15, 8))
plt.scatter(data[:, 0], data[:, 1], s=40, c=idx, cmap=plt.cm.prism)
plt.title('K-Means Clustering Results with K=3')
plt.scatter(centroids[:, 0], centroids[:, 1], marker='+', c='k', s=100, linewidth=2)
###Output
_____no_output_____
###Markdown Initialization method
- Forgy method: Randomly chooses k observations from the dataset and uses these as the initial means.
- Random Partition: First randomly assigns a cluster to each observation and then proceeds to the update step, thus computing the initial mean to be the centroid of the cluster's randomly assigned points.

In practice, a good strategy for initializing the centroids is to select random examples from the dataset.
###Code
def init_centroids(X, K):
    """
    This function initializes K centroids that are to be used in K-means on the dataset X.

    @Parameters
    X: (n, m) -> n examples each of m dimensions.
    K: int -> The number of clusters.

    @Returns
    centroids: (K, m) -> Centroids of the clusters
    """
    # Initialize values: number of rows and columns
    n, m = X.shape
    centroids = np.zeros((K, m))

    # Randomly reorder the indices of examples
    randidx = np.random.permutation(X.shape[0])
    # Take the first K examples as centroids
    centroids = X[randidx[:K], :]

    return centroids

K = 3
max_iters = 10

initial_centroids = init_centroids(data, K)
centroids, idx = train_k_means(data, initial_centroids, max_iters=10)

# plot
plt.figure(figsize=(15, 8))
plt.scatter(data[:, 0], data[:, 1], s=40, c=idx, cmap=plt.cm.prism)
plt.title('K-Means Clustering Results with K=3')
plt.scatter(centroids[:, 0], centroids[:, 1], marker='+', c='k', s=100, linewidth=2)
###Output
_____no_output_____
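###Markdown
Note that the scikit-learn cells earlier used `init='k-means++'`, a smarter seeding scheme than picking uniformly random examples: each new centroid is sampled with probability proportional to its squared distance from the nearest centroid chosen so far. Below is a minimal sketch of that seeding, written against the same `X` conventions as above (an illustration, not scikit-learn's exact implementation).
###Code
def init_centroids_kmeanspp(X, K):
    """
    k-means++ seeding: spreads the initial centroids out using D^2 sampling.
    """
    n = X.shape[0]
    # first centroid: uniform random pick
    centroids = [X[np.random.randint(n)]]
    for _ in range(1, K):
        # squared distance from each point to its nearest chosen centroid
        d2 = np.min([np.sum((X - c) ** 2, axis=1) for c in centroids], axis=0)
        # sample the next centroid proportionally to d2
        probs = d2 / d2.sum()
        centroids.append(X[np.random.choice(n, p=probs)])
    return np.array(centroids)

# usage mirrors init_centroids:
# centroids, idx = train_k_means(data, init_centroids_kmeanspp(data, K), max_iters=10)
###Output
_____no_output_____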
AR1/reduction/antenna_pointing/jupyter_analysis/Raster scan evaluation from Pointing Observation.ipynb
###Markdown RvR: Stolen useful script from Lindsay's notebooks
###Code
%pylab inline
import os
import katdal
import katpoint
import scape
###Output
_____no_output_____
###Markdown User Input
###Code
# Uncomment the observation file to analyse:
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/29/1456759234.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/27/1456557706.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/26/1456520379.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/26/1456505723.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/22/1456155415.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/22/1456149609.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/22/1456114745.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/17/1455726323.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/17/1455699514.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/16/1455651343.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/16/1455637289.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/09/1455059941.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/09/1455045090.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/08/1454948408.h5'
#filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/05/1454691824.h5'
filename = '/var/kat/archive/data/MeerKATAR1/telescope_products/2016/02/01/1454365764.h5'

#antenna_name='m062'
antenna_name='m063'
###Output
_____no_output_____
###Markdown Processing Script
###Code
nd_models = '/var/kat/katconfig/user/noise-diode-models/mkat/'

h5 = katdal.open(filename, centre_freq=1284.0e6)
h5.select(reset='T')
N = len(h5.compscan_indices)
#h5.select(scans='scan',channels=slice(1024,1024+2048))
h5.select(ants=antenna_name, scans='scan', channels=slice(1024,1024+2048))
ant = h5.ants[0]
for c in h5.compscans():
    try:
        d = scape.DataSet(h5, baseline="%s" % (ant.name,), nd_models=nd_models)
    except IOError:
        d = scape.DataSet(h5, baseline="%s" % (ant.name,),
                          nd_h_model='/var/kat/katconfig/user/noise-diode-models/mkat/rx.l.4.h.csv',
                          nd_v_model='/var/kat/katconfig/user/noise-diode-models/mkat/rx.l.4.v.csv')
    if d is not None:
        d = d.select(flagkeep='~nd_on')
        for i in range(len(d.scans)):
            d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data, axis=1, spike_width=3, outlier_sigma=5.)
        d.average()
        d.fit_beams_and_baselines()
        if d.compscans[0].beam is None:
            continue
        if d.compscans[0].beam.is_valid:  # if True: #d.compscans[0].beam.is_valid:
            figure(figsize=(15,5))
            scape.plot_compound_scan_in_time(d.compscans[0])
            title(str(d.compscans[0].target) + '. Compscan %d in %d'%(c[0],N) + '. Beamfit valid: %s'%str(d.compscans[0].beam.is_valid))
###Output
WARNING:katdal.sensordata:Last data point for sensor 'Antennas/m063/pos_actual_scan_azim' arrives 772.038 seconds before end of data set - extrapolation may lead to ridiculous values
WARNING:katdal.sensordata:Last data point for sensor 'Antennas/m063/pos_actual_scan_elev' arrives 771.534 seconds before end of data set - extrapolation may lead to ridiculous values
###Markdown Output and Report
###Code
# Default output file names are based on input file name
dataset_name = os.path.splitext(os.path.basename(filename))[0]
outfilebase = '%s_%s_point_source_raster_scans_beamfit_report' % (dataset_name, antenna_name)

from matplotlib.backends.backend_pdf import PdfPages
# Generate output report
with PdfPages(outfilebase+'.pdf') as pdf:
    pagetext = "\nPoint Observation"
    pagetext += "\n\nDescription: %s\nName: %s\nExperiment ID: %s" % (h5.description, h5.name, h5.experiment_id)
    pagetext = pagetext + "\n"
    pagetext += "\n\nTest Setup:"
    pagetext += "\nRaster Scan across bright source"
    pagetext += "\n\nAntenna %s" % antenna_name
    pagetext += "\n------------"
    pagetext = pagetext + "\n"

    plt.figure(None, figsize=(16,8))
    plt.axes(frame_on=False)
    plt.xticks([])
    plt.yticks([])
    plt.title("AR1 Report %s" % outfilebase, fontsize=14, fontweight="bold")
    plt.text(0, 0, pagetext, fontsize=12)
    pdf.savefig()
    plt.close()

    h5.select(reset='T')
    N = len(h5.compscan_indices)
    h5.select(ants=antenna_name, scans='scan', channels=slice(1024,1024+2048))
    ant = h5.ants[0]
    for c in h5.compscans():
        d = scape.DataSet(h5, baseline="%s" % (ant.name,), nd_models=nd_models)
        if d is not None:
            d = d.select(flagkeep='~nd_on')
            for i in range(len(d.scans)):
                d.scans[i].data = scape.stats.remove_spikes(d.scans[i].data, axis=1, spike_width=3, outlier_sigma=5.)
            d.average()
            d.fit_beams_and_baselines()
            if d.compscans[0].beam is None:
                continue
            if True:  # d.compscans[0].beam.is_valid:
                plt.figure(figsize=(15,5))
                scape.plot_compound_scan_in_time(d.compscans[0])
                plt.title(str(d.compscans[0].target) + '. Compscan %d in %d'%(c[0],N) + '. Beamfit valid: %s'%str(d.compscans[0].beam.is_valid))
                pdf.savefig()
                plt.close()

    d = pdf.infodict()
    import datetime
    d['Title'] = h5.description
    d['Author'] = 'AR1'
    d['Subject'] = 'AR1 Pointing Observation'
    d['CreationDate'] = datetime.datetime(2015, 8, 13)
    d['ModDate'] = datetime.datetime.today()
###Output
_____no_output_____
project-dog-classification/dog_app.ipynb
###Markdown Convolutional Neural Networks
Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.

> **Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.

The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.

---
Why We're Here

In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).

![Sample Dog Output](images/sample_dog_output.png)

In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!

The Road Ahead

We break the notebook into separate steps.
Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Import Datasets
* [Step 1](#step1): Detect Humans
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Write your Algorithm
* [Step 6](#step6): Test Your Algorithm

---
Step 0: Import Datasets

Make sure that you've downloaded the required human and dog datasets:

**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.**

* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`.
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.

*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*

In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("/data/lfw/*/*"))
dog_files = np.array(glob("/data/dog_images/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
There are 13233 total human images.
There are 8351 total dog images.
###Markdown
Step 1: Detect Humans

In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output
Number of faces detected: 1
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter.

In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face.
Each detected face is a 1D array with four entries that specify the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.

Write a Human Face Detector

We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector

__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?

Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.

__Answer:__ (You can print out your results and/or write your percentages in this cell)
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
def check_detector_accuracy(detector_name, species_name, file_paths, detector):
    percentage = sum([detector(path) for path in tqdm(file_paths)]) / len(file_paths) * 100
    print(detector_name, 'detector on', species_name, 'with', percentage, 'percent accuracy')
    return percentage

for detector_name, species, files in (('human', 'humans', human_files_short),
                                      ('human', 'dogs', dog_files_short)):
    check_detector_accuracy(detector_name, species, files, face_detector)
###Output
100%|██████████| 100/100 [00:02<00:00, 35.62it/s]
  0%|          | 0/100 [00:00<?, ?it/s]
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs

In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.

Obtain Pre-trained VGG-16 Model

The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks.
ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.

(IMPLEMENTATION) Making Predictions with a Pre-trained Model

In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.

Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

transform = transforms.Compose(
    [transforms.Resize(256),
     transforms.CenterCrop(224),
     transforms.ToTensor(),
     transforms.Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225]),
     ])

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    img = Image.open(img_path)
    t_img = Variable(torch.unsqueeze(transform(img), 0))
    if use_cuda:
        t_img = t_img.cuda()
    val = VGG16(t_img)
    _, index = torch.max(val, 1)
    return index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector

While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).

Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    index = VGG16_predict(img_path)
    if index >= 151 and index <= 268:
        return True
    return False
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector

__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
for detector_name, species, files in (('dog', 'humans', human_files_short),
                                      ('dog', 'dogs', dog_files_short)):
    check_detector_accuracy(detector_name, species, files, dog_detector)
###Output
100%|██████████| 100/100 [00:03<00:00, 29.09it/s]
  3%|▎         | 3/100 [00:00<00:03, 26.44it/s]
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)

Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany | Welsh Springer Spaniel
- | -

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever | American Water Spaniel
- | -

Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.

Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.

Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets

batch_size = 20
num_workers = 2
shuffle = True

train_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

loaders_scratch = {
    'train': torch.utils.data.DataLoader(datasets.ImageFolder('/data/dog_images/train', train_transform),
                                         batch_size=batch_size, shuffle=shuffle, num_workers=num_workers),
    'valid': torch.utils.data.DataLoader(datasets.ImageFolder('/data/dog_images/valid', transform),
                                         batch_size=batch_size, shuffle=shuffle, num_workers=num_workers),
    'test': torch.utils.data.DataLoader(datasets.ImageFolder('/data/dog_images/test', transform),
                                        batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
}

for batch_type in loaders_scratch:
    print('Dataset segment ' + batch_type)
    print('Classes ' + str(len(loaders_scratch[batch_type].dataset.classes)))
    print(loaders_scratch[batch_type].dataset)
###Output
Dataset segment train
Classes 133
Dataset ImageFolder
    Number of datapoints: 6680
    Root Location: /data/dog_images/train
    Transforms (if any): Compose(
                             Resize(size=256, interpolation=PIL.Image.BILINEAR)
                             RandomHorizontalFlip(p=0.5)
                             RandomRotation(degrees=(-10, 10), resample=False, expand=False)
                             RandomResizedCrop(size=(224, 224), scale=(0.08, 1.0), ratio=(0.75, 1.3333), interpolation=PIL.Image.BILINEAR)
                             ToTensor()
                             Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                         )
    Target Transforms (if any): None
Dataset segment valid
Classes 133
Dataset ImageFolder
    Number of datapoints: 835
    Root Location: /data/dog_images/valid
    Transforms (if any): Compose(
                             Resize(size=256, interpolation=PIL.Image.BILINEAR)
                             CenterCrop(size=(224, 224))
                             ToTensor()
                             Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                         )
    Target Transforms (if any): None
Dataset segment test
Classes 133
Dataset ImageFolder
    Number of datapoints: 836
    Root Location: /data/dog_images/test
    Transforms (if any): Compose(
                             Resize(size=256, interpolation=PIL.Image.BILINEAR)
                             CenterCrop(size=(224, 224))
                             ToTensor()
                             Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                         )
    Target Transforms (if any): None
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**: Images are resized to 256 and then center-cropped to 224 x 224 px, because VGG was trained on that input size. In most of the images I inspected, the dog sits near the center, so the resize followed by a center crop keeps the dog in the frame. For training I also added some augmentation transforms, such as small rotations and horizontal flips, to make the classifier more robust.

(IMPLEMENTATION) Model Architecture

Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        # convolutional layer (sees 224x224x3 image tensor)
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # convolutional layer (sees 112x112x16 tensor)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        # convolutional layer (sees 56x56x32 tensor)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        # convolutional layer (sees 28x28x64 tensor)
        self.conv4 = nn.Conv2d(64, 128, 3, padding=1)
        # convolutional layer (sees 14x14x128 tensor)
        self.conv5 = nn.Conv2d(128, 256, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (256 * 7 * 7 -> 512)
        self.fc1 = nn.Linear(256 * 7 * 7, 512)
        # linear layer (512 -> 133)
        self.fc2 = nn.Linear(512, 133)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = self.pool(F.relu(self.conv5(x)))
        # flatten image input
        x = x.view(-1, 256 * 7 * 7)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # add final classification layer (raw scores for the 133 classes)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()
print(model_scratch)

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
Net(
  (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv4): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv5): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (fc1): Linear(in_features=12544, out_features=512, bias=True)
  (fc2): Linear(in_features=512, out_features=133, bias=True)
  (dropout): Dropout(p=0.25)
)
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__ Initially I copied over the CNN from the CIFAR example and used that. I added 2 more convolutional layers to shrink the input to the first hidden (fully connected) layer, which was still pretty sizable. The output of the second hidden layer was reduced to the number of classes we had, which in our case was the 133 breeds.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim
from torch import nn

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()*data.size(0)

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # update average validation loss
            valid_loss += loss.item()*data.size(0)

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
                valid_loss_min,
                valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

# train the model (commented out; we load a model that was already trained on a GPU)
#model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch,
#                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
Test Loss: 3.091582

Test Accuracy: 27% (227/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)

You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
###Code
## TODO: Specify data loaders
loaders_transfer = loaders_scratch
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code
import torchvision.models as models
import torch.nn as nn

# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html#convnet-as-fixed-feature-extractor
model_transfer = models.resnet18(pretrained=True)

for param in model_transfer.parameters():
    param.requires_grad = False

num_ftrs = model_transfer.fc.in_features
model_transfer.fc = nn.Linear(num_ftrs, 133)

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ Following the example in https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html#convnet-as-fixed-feature-extractor, I froze all of the pretrained layers and replaced the final fully connected layer, setting its number of outputs to 133 to match the number of breeds, so that only this new layer is trained. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
from torch.optim import lr_scheduler

criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.fc.parameters(), lr=0.001, momentum=0.9)
# note: the train() helper above never calls exp_lr_scheduler.step(),
# so the learning rate stays fixed at 0.001 during training
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_transfer, step_size=7, gamma=0.1)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code
# train the model
n_epochs = 5
# commented out; using the model already trained on the GPU
#model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.798886


Test Accuracy: 78% (659/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# imports repeated here so the cell stands alone
import torch
import torchvision.transforms as transforms
from torch.autograd import Variable
from PIL import Image

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes]

def predict_breed_transfer(img_path):
    # make sure dropout/batch-norm layers run in inference mode
    model_transfer.eval()
    transform = transforms.Compose(
        [transforms.Resize(256),
         transforms.CenterCrop(224),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225]),
        ])
    img = Image.open(img_path)
    t_img = Variable(torch.unsqueeze(transform(img), 0))
    if use_cuda:
        t_img = t_img.cuda()
    val = model_transfer(t_img)
    _, index = torch.max(val, 1)
    return index
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.  Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above.  You are __required__ to use your CNN from Step 4 to predict dog breed.  Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def display_image(img_path):
    img = cv2.imread(img_path)
    width, height = 300, int(300 * img.shape[0]/ img.shape[1]) # width=300 with aspect-preserved height
    img = cv2.resize(img, (width, height))
    # convert BGR image to RGB for plotting
    cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(cv_rgb)
    plt.show()

def run_app(img_path):
    #print('processing ' + img_path)
    human = face_detector(img_path)
    dog = dog_detector(img_path)
    if human or dog:
        # parentheses keep the conditional expression inside the greeting
        print('Hello, ' + ('dog!' if dog else 'human!'))
        breed_index = predict_breed_transfer(img_path)
        print('You look like a ' + class_names[breed_index])
        display_image(img_path)
    else:
        print('Something went wrong. No human or dog found in image path: ' + img_path)
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like?  If you have a dog, does it predict your dog's breed accurately?  If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) It's better than I expected because I only trained it for 5 epochs but still got such high accuracy.For improvements:- more data for each of the types of breeds- augment the training data to allow for better validation/test results- maybe use other pretrained models to see if I could get better results- check whether the chosen criterion/optimizer was the best for this task
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3], ['profile_me.png'])):
    run_app(file)
###Output
Hello, human!
You look like a Chihuahua
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps.
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip).  Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip).  Unzip the folder and place it in the home directory, at location `/lfw`.  *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("/home/en/Git/deep-learning-datasets/lfw/*/*"))
dog_files = np.array(glob("/home/en/Git/deep-learning-datasets/dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
There are 13233 total human images.
There are 8351 total dog images.
###Markdown
Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images.  OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades).  We have downloaded one of these detectors and stored it in the `haarcascades` directory.  In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output
Number of faces detected: 1
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale.  The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter.  In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face.  Each detected face is a 1D array with four entries that specifies the bounding box of the detected face.
The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box.  The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise.  This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.  - What percentage of the first 100 images in `human_files` have a detected human face?  - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face.  You will see that our algorithm falls short of this goal, but still gives acceptable performance.  We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ A human face was detected in 99% (99/100) of the human images and in 7% (7/100) of the dog images — see the printed results below.
###Code
human_files_short = human_files[:100]
dog_files_short = dog_files[:100]
#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
faces_detected = 0
dog_faces_detected = 0
for f in human_files_short:
    faces_detected += face_detector(f)
for f in dog_files_short:
    dog_faces_detected += face_detector(f)
print("Human faces detected: {:.2f}, {:d}/{:d}".format(faces_detected/len(human_files_short), faces_detected, len(human_files_short)))
print("Dog faces detected: {:.2f}, {:d}/{:d}".format(dog_faces_detected/len(dog_files_short), dog_faces_detected, len(dog_files_short)))
###Output
Human faces detected: 0.99, 99/100
Dog faces detected: 0.07, 7/100
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :).  Please use the code cell below to design and test your own face detection algorithm.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks.  ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
VGG16.to(device)
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model.  The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn  how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    # set model to eval:
    VGG16.eval()
    # load image
    I = Image.open(img_path)
    # resize to 224 x 224
    I = I.resize((224, 224), Image.ANTIALIAS)
    # convert to np array, scaled to [0, 1]
    # note: ImageNet mean/std normalization is skipped here; pretrained torchvision
    # models usually expect it, but plain [0, 1] scaling proves accurate enough
    # for the dog detector below
    im = np.array(I)/255.0
    # transpose to 3 x 224 x 224
    im = np.transpose(im,(2,0,1))
    # add dimension to simulate batch for vgg model:
    im = np.expand_dims(im,0)
    # convert im data to tensor:
    t = torch.tensor(im)
    t = t.to(device)
    # convert to float
    t = t.float()
    # run model with softmax output to normalize output weights
    ps = torch.softmax(VGG16.forward(t),1)
    # get index of max weight:
    _, i = ps.max(1)

    return i # predicted class index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`.  Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    if 151 <= VGG16_predict(img_path) <= 268:
        return True # true/false
    else:
        return False
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.  - What percentage of the images in `human_files_short` have a detected dog?  - What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__ A dog was detected in 0% (0/100) of the human images and in 99% (99/100) of the dog images — see the printed results below.
###Code
dogs_in_human_imgs = 0
dogs_in_dog_imgs = 0
for f in human_files_short:
    dogs_in_human_imgs += dog_detector(f)
for f in dog_files_short:
    dogs_in_dog_imgs += dog_detector(f)
print("Dogs detected in human images using VGG16: {:.2f}, {:d}/{:d}".format(dogs_in_human_imgs/len(human_files_short), dogs_in_human_imgs, len(human_files_short)))
print("Dogs detected in dog images using VGG16: {:.2f}, {:d}/{:d}".format(dogs_in_dog_imgs/len(dog_files_short), dogs_in_dog_imgs, len(dog_files_short)))
###Output
Dogs detected in human images using VGG16: 0.00, 0/100
Dogs detected in dog images using VGG16: 0.99, 99/100
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc).  Please use the code cell below to test other pre-trained PyTorch models.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images.  In this step, you will create a CNN that classifies dog breeds.  You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.  In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging.  To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. *(Side-by-side example images: Brittany | Welsh Springer Spaniel.)* It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). *(Side-by-side example images: Curly-Coated Retriever | American Water Spaniel.)* Likewise, recall that labradors come in yellow, chocolate, and black.  Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. *(Side-by-side example images: Yellow Labrador | Chocolate Labrador | Black Labrador.)* We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning.  Experiment with many different architectures, and trust your intuition.  And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).  You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.  If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
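As a small illustration of such transforms, here is a sketch that applies a couple of them to a single training image (it reuses the `dog_files` paths loaded in Step 0; `p=1.0` is set only so the flip always fires in this demo):
###Code
from PIL import Image
import torchvision.transforms as transforms

# compose two of the augmentations mentioned above
demo_aug = transforms.Compose([
    transforms.RandomRotation(30),
    transforms.RandomHorizontalFlip(p=1.0),  # always flip, just for the demo
])
img = Image.open(dog_files[0])
augmented = demo_aug(img)  # a new, randomly rotated and flipped PIL image
###Output
_____no_output_____
###Markdown
The full loaders, with augmentations applied to the training set, are defined next.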
###Code
import os
import numpy as np
import torch
from torchvision import datasets, transforms

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
data_dir = '/home/en/Git/deep-learning-datasets/dogImages'
batch_size = 32
num_workers = 6
image_size = 224

# TODO: Define transforms for the training data and testing data
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(image_size),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])

test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(image_size),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])

# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
valid_data = datasets.ImageFolder(data_dir + '/valid', transform=test_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)

trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)

loaders_scratch = {'train':trainloader, 'valid':validloader,'test':testloader}

numClasses = len(train_data.classes)
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)?  What size did you pick for the input tensor, and why?- Did you decide to augment the dataset?  If so, how (through translations, flips, rotations, etc)?  If not, why not? **Answer**:1. The training images are resized to 224 x 224 with a random resized crop (the validation and test images are resized and center-cropped), similar to the required input for the VGG network.2. The input tensor is of size [32, 3, 224, 224], which gives a batch of 32 images with 3 color channels, resized to a square of 224x224, to mirror the architectures of VGG and others.3. The dataset was augmented to increase the dataset size and allow the network to better handle variations in input images. The augmentations used include a random rotation, random crop, and random horizontal flip. The input data was also normalized across the color channels. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed.  Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # convolutional layer (sees 224x224x3 image tensor)
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # convolutional layer (sees 112x112x16 tensor after pooling)
        self.conv2 = nn.Conv2d(self.conv1.out_channels, 32, 3, padding=1)
        # convolutional layer (sees 56x56x32 tensor after pooling)
        self.conv3 = nn.Conv2d(self.conv2.out_channels, 64, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (64 * 28 * 28 -> 1000)
        self.fc1 = nn.Linear(64 * 28 * 28, 1000)
        # linear layer (1000 -> numClasses)
        self.fc2 = nn.Linear(1000, numClasses)
        # dropout layer (p=0.25)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        ## Define forward behavior
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        # flatten image input
        x = x.view(-1, 64 * 28 * 28)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # add final layer (no activation; raw class scores)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
model_scratch.to(device)
print(model_scratch)
###Output
Net(
  (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (fc1): Linear(in_features=50176, out_features=1000, bias=True)
  (fc2): Linear(in_features=1000, out_features=133, bias=True)
  (dropout): Dropout(p=0.25)
)
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. **__Answer:__**The architecture is based on the one provided in the course material, with three banks of 2D conv filters, each with an increasing number of filters, joined with a 2x2 max-pool operation. This results in three banks of filters that are meant to train kernels for different sized features, with the first bank on the smallest features and the last bank on larger features.Following the conv layers, the net is flattened and passed through two reduction layers: one reduces all the outputs from the last conv layer to 1000 features, and the final layer further reduces the 1000 to the number of classes.Two dropout layers were added, one between the conv and linear layers and one between the linear layers, to improve the net's robustness. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.05)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below.
[Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def train(n_epochs, loaders, model, optimizer, criterion, device, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0
        correct = 0.
        total = 0.

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            data, target = data.to(device), target.to(device)
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update training loss
            train_loss += loss.item()*data.size(0)

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            data, target = data.to(device), target.to(device)
            ## update the average validation loss
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # update average validation loss
            valid_loss += loss.item()*data.size(0)
            pred = output.data.max(1, keepdim=True)[1]
            # compare predictions to true label
            correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
            total += data.size(0)

        # calculate average losses
        train_loss = train_loss/len(loaders['train'].dataset)
        valid_loss = valid_loss/len(loaders['valid'].dataset)

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))
        print('\nValidation Accuracy: %2d%% (%2d/%2d)' % (
            100. * correct / total, correct, total))

        ## TODO: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
            valid_loss_min,
            valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# train the model
model_scratch = train(30, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, device, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
Epoch: 1 	Training Loss: 4.866845 	Validation Loss: 4.811555
Validation Accuracy:  1% (13/835)
Validation loss decreased (inf --> 4.811555).  Saving model ...
Epoch: 2 	Training Loss: 4.754617 	Validation Loss: 4.605181
Validation Accuracy:  4% (34/835)
Validation loss decreased (4.811555 --> 4.605181).  Saving model ...
Epoch: 3 	Training Loss: 4.647744 	Validation Loss: 4.478679
Validation Accuracy:  3% (31/835)
Validation loss decreased (4.605181 --> 4.478679).  Saving model ...
Epoch: 4 	Training Loss: 4.563150 	Validation Loss: 4.411365
Validation Accuracy:  4% (40/835)
Validation loss decreased (4.478679 --> 4.411365).  Saving model ...
Epoch: 5 	Training Loss: 4.532567 	Validation Loss: 4.337705
Validation Accuracy:  5% (48/835)
Validation loss decreased (4.411365 --> 4.337705).  Saving model ...
Epoch: 6 	Training Loss: 4.492235 	Validation Loss: 4.427061
Validation Accuracy:  5% (47/835)
Epoch: 7 	Training Loss: 4.449287 	Validation Loss: 4.282720
Validation Accuracy:  6% (51/835)
Validation loss decreased (4.337705 --> 4.282720).  Saving model ...
Epoch: 8 	Training Loss: 4.390846 	Validation Loss: 4.175096
Validation Accuracy:  6% (57/835)
Validation loss decreased (4.282720 --> 4.175096).  Saving model ...
Epoch: 9 	Training Loss: 4.338155 	Validation Loss: 4.143253
Validation Accuracy:  6% (58/835)
Validation loss decreased (4.175096 --> 4.143253).  Saving model ...
Epoch: 10 	Training Loss: 4.295616 	Validation Loss: 4.182347
Validation Accuracy:  6% (52/835)
Epoch: 11 	Training Loss: 4.231279 	Validation Loss: 4.041329
Validation Accuracy: 10% (86/835)
Validation loss decreased (4.143253 --> 4.041329).  Saving model ...
Epoch: 12 	Training Loss: 4.190391 	Validation Loss: 4.037614
Validation Accuracy:  8% (73/835)
Validation loss decreased (4.041329 --> 4.037614).  Saving model ...
Epoch: 13 	Training Loss: 4.152363 	Validation Loss: 3.965266
Validation Accuracy:  8% (73/835)
Validation loss decreased (4.037614 --> 3.965266).  Saving model ...
Epoch: 14 	Training Loss: 4.103111 	Validation Loss: 4.093174
Validation Accuracy:  9% (76/835)
Epoch: 15 	Training Loss: 4.084281 	Validation Loss: 3.965945
Validation Accuracy:  9% (77/835)
Epoch: 16 	Training Loss: 4.049957 	Validation Loss: 3.966330
Validation Accuracy: 10% (86/835)
Epoch: 17 	Training Loss: 3.984346 	Validation Loss: 3.852653
Validation Accuracy: 10% (91/835)
Validation loss decreased (3.965266 --> 3.852653).  Saving model ...
Epoch: 18 	Training Loss: 3.993958 	Validation Loss: 3.897558
Validation Accuracy: 10% (85/835)
Epoch: 19 	Training Loss: 3.941215 	Validation Loss: 3.849659
Validation Accuracy: 10% (88/835)
Validation loss decreased (3.852653 --> 3.849659).  Saving model ...
Epoch: 20 	Training Loss: 3.878452 	Validation Loss: 3.849863
Validation Accuracy: 13% (110/835)
Epoch: 21 	Training Loss: 3.866530 	Validation Loss: 4.009155
Validation Accuracy: 10% (85/835)
Epoch: 22 	Training Loss: 3.895594 	Validation Loss: 3.773769
Validation Accuracy: 12% (104/835)
Validation loss decreased (3.849659 --> 3.773769).  Saving model ...
Epoch: 23 	Training Loss: 3.787392 	Validation Loss: 3.761111
Validation Accuracy: 11% (99/835)
Validation loss decreased (3.773769 --> 3.761111).  Saving model ...
Epoch: 24 	Training Loss: 3.761359 	Validation Loss: 3.783298
Validation Accuracy: 11% (100/835)
Epoch: 25 	Training Loss: 3.741848 	Validation Loss: 3.684466
Validation Accuracy: 14% (117/835)
Validation loss decreased (3.761111 --> 3.684466).  Saving model ...
Epoch: 26 	Training Loss: 3.702312 	Validation Loss: 3.884583
Validation Accuracy: 12% (103/835)
Epoch: 27 	Training Loss: 3.676271 	Validation Loss: 3.725848
Validation Accuracy: 14% (120/835)
Epoch: 28 	Training Loss: 3.612815 	Validation Loss: 3.839589
Validation Accuracy: 13% (112/835)
Epoch: 29 	Training Loss: 3.587207 	Validation Loss: 3.815385
Validation Accuracy: 13% (113/835)
Epoch: 30 	Training Loss: 3.578809 	Validation Loss: 3.697286
Validation Accuracy: 14% (119/835)
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images.  Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 10%.
###Code
def test(loaders, model, criterion, device):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        data, target = data.to(device), target.to(device)
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))

    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, device)
###Output
Test Loss: 3.653413


Test Accuracy: 14% (121/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images.  Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).   If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
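For instance, reusing the Step 3 loaders would be a one-line sketch (this notebook instead rebuilds them explicitly in the next cell):
###Code
# one option: simply reuse the scratch loaders from Step 3
loaders_transfer = loaders_scratch
numClasses = len(loaders_transfer['train'].dataset.classes)
print(numClasses)  # 133 dog breeds
###Output
_____no_output_____
###Markdown
The explicit loader definitions used for the rest of this notebook follow.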
###Code
## TODO: Specify data loaders
data_dir = '/home/en/Git/deep-learning-datasets/dogImages'
batch_size = 32
num_workers = 6
image_size = 224

# TODO: Define transforms for the training data and testing data
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(image_size),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])

test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(image_size),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])

# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
valid_data = datasets.ImageFolder(data_dir + '/valid', transform=test_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)

data_transfer = {'train':train_data,'valid':valid_data,'test':test_data}

trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)

loaders_transfer = {'train':trainloader, 'valid':validloader,'test':testloader}

numClasses = len(train_data.classes)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed.  Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code
import torch
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
# define VGG16 model
model_transfer = models.vgg16(pretrained=True)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # cuda
model_transfer.to(device)

# Freeze training for all feature-extraction layers
for param in model_transfer.features.parameters():
    param.requires_grad = False

# Newly created modules have requires_grad=True by default
num_features = model_transfer.classifier[6].in_features
features = list(model_transfer.classifier.children())[:-1] # Remove last layer
features.extend([nn.Linear(num_features, numClasses)]) # Add our layer with numClasses outputs
model_transfer.classifier = nn.Sequential(*features) # Replace the model classifier
print(model_transfer)
###Output
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace)
    (2): Dropout(p=0.5)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace)
    (5): Dropout(p=0.5)
    (6): Linear(in_features=4096, out_features=133, bias=True)
  )
)
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ The VGG16 network was chosen for the transfer learning task due to its inherent dog-classification abilities (as per part 1).The model was loaded and all of its feature-extraction parameters were held constant. The last layer was replaced with a new layer for dog breed classification. Since the VGG16 network already encodes many image features, it was a logical first step to simply modify the last output classification layer, re-train it for dog breeds, and see if the performance meets the requirements. In this case, the technique was applicable and produced results above the test criterion. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html).  Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
import torch.optim as optim

criterion_transfer = nn.CrossEntropyLoss()
# optimize only the parameters that still require gradients (the classifier);
# the frozen feature-extraction weights are excluded
optimizer_transfer = optim.SGD(filter(lambda p: p.requires_grad, model_transfer.parameters()), lr=0.05)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below.  [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_transfer.to(device)

# train the model
model_transfer = train(30, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, device, 'model_transfer.pt')
###Output
Epoch: 1 	Training Loss: 4.313979 	Validation Loss: 2.864349
Validation Accuracy: 33% (282/835)
Validation loss decreased (inf --> 2.864349).  Saving model ...
Epoch: 2 	Training Loss: 2.795596 	Validation Loss: 1.489127
Validation Accuracy: 56% (472/835)
Validation loss decreased (2.864349 --> 1.489127).  Saving model ...
Epoch: 3 	Training Loss: 2.212715 	Validation Loss: 1.442618
Validation Accuracy: 58% (490/835)
Validation loss decreased (1.489127 --> 1.442618).  Saving model ...
Epoch: 4 	Training Loss: 1.925824 	Validation Loss: 1.318540
Validation Accuracy: 61% (514/835)
Validation loss decreased (1.442618 --> 1.318540).  Saving model ...
Epoch: 5 	Training Loss: 1.752285 	Validation Loss: 1.324152
Validation Accuracy: 59% (495/835)
Epoch: 6 	Training Loss: 1.615041 	Validation Loss: 1.173187
Validation Accuracy: 64% (536/835)
Validation loss decreased (1.318540 --> 1.173187).  Saving model ...
Epoch: 7 	Training Loss: 1.534569 	Validation Loss: 0.857402
Validation Accuracy: 73% (611/835)
Validation loss decreased (1.173187 --> 0.857402).  Saving model ...
Epoch: 8 	Training Loss: 1.446539 	Validation Loss: 0.819263
Validation Accuracy: 73% (614/835)
Validation loss decreased (0.857402 --> 0.819263).  Saving model ...
Epoch: 9 	Training Loss: 1.441129 	Validation Loss: 0.892965
Validation Accuracy: 71% (600/835)
Epoch: 10 	Training Loss: 1.309792 	Validation Loss: 0.724557
Validation Accuracy: 77% (644/835)
Validation loss decreased (0.819263 --> 0.724557).  Saving model ...
Epoch: 11 	Training Loss: 1.262984 	Validation Loss: 1.042309
Validation Accuracy: 68% (571/835)
Epoch: 12 	Training Loss: 1.261453 	Validation Loss: 0.971771
Validation Accuracy: 70% (585/835)
Epoch: 13 	Training Loss: 1.217257 	Validation Loss: 1.022039
Validation Accuracy: 70% (586/835)
Epoch: 14 	Training Loss: 1.210520 	Validation Loss: 0.755887
Validation Accuracy: 76% (641/835)
Epoch: 15 	Training Loss: 1.149395 	Validation Loss: 0.699051
Validation Accuracy: 78% (659/835)
Validation loss decreased (0.724557 --> 0.699051).  Saving model ...
Epoch: 16 	Training Loss: 1.103256 	Validation Loss: 0.881275
Validation Accuracy: 73% (611/835)
Epoch: 17 	Training Loss: 1.118674 	Validation Loss: 0.722689
Validation Accuracy: 77% (650/835)
Epoch: 18 	Training Loss: 1.062260 	Validation Loss: 0.915520
Validation Accuracy: 73% (614/835)
Epoch: 19 	Training Loss: 1.049746 	Validation Loss: 0.926707
Validation Accuracy: 72% (608/835)
Epoch: 20 	Training Loss: 1.035079 	Validation Loss: 0.743201
Validation Accuracy: 78% (656/835)
Epoch: 21 	Training Loss: 1.021337 	Validation Loss: 0.838226
Validation Accuracy: 75% (634/835)
Epoch: 22 	Training Loss: 1.012789 	Validation Loss: 0.748785
Validation Accuracy: 79% (662/835)
Epoch: 23 	Training Loss: 0.950456 	Validation Loss: 0.630063
Validation Accuracy: 81% (677/835)
Validation loss decreased (0.699051 --> 0.630063).  Saving model ...
Epoch: 24 	Training Loss: 0.965854 	Validation Loss: 0.711072
Validation Accuracy: 78% (652/835)
Epoch: 25 	Training Loss: 0.980034 	Validation Loss: 0.812720
Validation Accuracy: 77% (645/835)
Epoch: 26 	Training Loss: 0.950037 	Validation Loss: 0.948506
Validation Accuracy: 73% (616/835)
Epoch: 27 	Training Loss: 0.935244 	Validation Loss: 0.933499
Validation Accuracy: 74% (621/835)
Epoch: 28 	Training Loss: 0.919783 	Validation Loss: 0.854294
Validation Accuracy: 76% (637/835)
Epoch: 29 	Training Loss: 0.884965 	Validation Loss: 0.808719
Validation Accuracy: 77% (646/835)
Epoch: 30 	Training Loss: 0.882553 	Validation Loss: 0.716503
Validation Accuracy: 78% (658/835)
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 60%.
###Code
# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
test(loaders_transfer, model_transfer, criterion_transfer, device)
###Output
Test Loss: 0.689320


Test Accuracy: 80% (670/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    # set both models to eval so dropout runs in inference mode:
    model_scratch.eval()
    model_transfer.eval()
    # load image
    I = Image.open(img_path)
    # resize to 224 x 224
    I = I.resize((224, 224), Image.ANTIALIAS)
    # convert to np array:
    im = np.array(I)/255.0
    # transpose to 3 x 224 x 224
    im = np.transpose(im,(2,0,1))
    # add dimension to simulate batch for vgg model:
    im = np.expand_dims(im,0)
    # convert im data to tensor:
    t = torch.tensor(im)
    t = t.to(device)
    # convert to float
    t = t.float()
    # run model with softmax output to normalize output weights
    ps = torch.softmax(model_scratch.forward(t),1)
    # get index of max weight:
    _, i = ps.max(1)
    # Try with transfer learning as well:
    ps = torch.softmax(model_transfer.forward(t),1)
    # get index of max weight:
    _, j = ps.max(1)
    return class_names[i], class_names[j] # predicted class from scratch & transfer models
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.  Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above.  You are __required__ to use your CNN from Step 4 to predict dog breed.  Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

# load models...
# load the models that got the best validation accuracy
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
model_scratch.to(device)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
model_transfer.to(device)

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    # step 1. check if human using human detector:
    if(face_detector(img_path)):
        print("I think you are human!")
    elif(dog_detector(img_path)):
        print("Looks like a dog to me!")
    else:
        print("I don't know what this is, but I don't think it's a dog or a human!")
    # load image locally:
    img = cv2.imread(img_path)
    # print image:
    # convert BGR image to RGB for plotting
    cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # display the image, along with bounding box
    plt.imshow(cv_rgb)
    plt.show()
    # run classification:
    pred_scratch, pred_transfer = predict_breed_transfer(img_path)
    print("Simple model thinks you are a " + pred_scratch)
    print("Transfer model thinks you are a " + pred_transfer)
    print("\n-------------\n")
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like?  If you have a dog, does it predict your dog's breed accurately?
If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer.  Feel free to use any images you like.  Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ?  Or worse :( ?  Provide at least three possible points of improvement for your algorithm. __Answer:__The human detector worked well as expected, but the dog-breed classifier trained with the scratch model did not work as well.The breed classification with the scratch model worked almost as expected - which was not too well. The test accuracy of 14% meant that only about 1 in 7 images from the classes trained would be correctly identified. Further, the simplicity of the model, combined with the large similarity between certain breeds, would make it difficult for the scratch model to perform very well.Therefore the following could be done to improve the scratch model:1. Increase the number of filters and convolutional layers - this would produce more filters to learn the finer detailed differences between similar breeds.2. Add batch normalization to keep weights controlled in between layers - this was one step that the VGG network included which the scratch network did not. This should make the weights more manageable as the gradient is back-propagated without it being squashed.3. Increase the dataset with more augmentation. The PyTorch image augmentation (according to the forum & documentation) creates one augmented image per transform during run-time training. Therefore, using 4 individual transforms and a compose would result in 4 new images, one for each transform. This approach can be replaced with a pre-computed augmented data set that further increases the number of images by creating not only individually transformed images but combinations of transforms. This should create a more diverse input dataset for training.
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
I think you are human!
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to  **File -> Download as -> HTML (.html)**.
Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' 
% len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. 
__Answer:__ A human face was detected in 98% of the first 100 images in `human_files` and in 17% of the first 100 images in `dog_files` (see the output below). ###Code human_files_short = human_files[:100] dog_files_short = dog_files[:100] from tqdm import tqdm #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. detected_human_count = 0 for file in human_files_short: if face_detector(file): detected_human_count += 1 print('Face detected rate for humans is: %d/%d'%(detected_human_count, 100 )) detected_human_count = 0 for file in dog_files_short: if face_detector(file): detected_human_count += 1 print('Face detected rate for dogs is: %d/%d'%(detected_human_count, 100 )) ###Output Face detected rate for humans is: 98/100 Face detected rate for dogs is: 17/100 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() device = 'cuda' if use_cuda else 'cpu' # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:07<00:00, 71837260.91it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
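###Markdown As a hedged aside (not part of the original notebook): the preprocessing commonly recommended in the torchvision documentation resizes the shorter side to 256, center-crops to 224x224, and normalizes with the ImageNet channel statistics. The implementation in the next cell instead stretch-resizes directly to 224x224, which also works; the canonical pipeline would look roughly like this: ###Code
import torchvision.transforms as transforms

# Standard ImageNet preprocessing for torchvision's pre-trained models:
# resize the shorter side, center-crop to the 224x224 input size, then
# normalize with the ImageNet channel means and standard deviations.
imagenet_preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
###Output _____no_output_____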
###Code from PIL import Image, ImageFile import torchvision.transforms as transforms ImageFile.LOAD_TRUNCATED_IMAGES = True def load_image(img_path, shape=224): image = Image.open(img_path).convert('RGB') transform = transforms.Compose([ transforms.Resize((shape,shape)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) image = transform(image) image = image[:3,:,:] return image load_image(dog_files[0]).size() def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image VGG16.eval() # make sure dropout layers are disabled for inference image = load_image(img_path).to(device) input = image.unsqueeze(0) output = VGG16(input) _, pred = torch.max(output,1) return pred.item() # predicted class index pred_index = VGG16_predict(human_files[0]) pred_index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. index = VGG16_predict(img_path) return index >= 151 and index <= 268 # true/false dog_detector(dog_files[0]) dog_detector(human_files[0]) print(dog_files[50:]) ###Output ['/data/dog_images/train/103.Mastiff/Mastiff_06868.jpg' '/data/dog_images/train/103.Mastiff/Mastiff_06821.jpg' '/data/dog_images/train/103.Mastiff/Mastiff_06869.jpg' ..., '/data/dog_images/valid/100.Lowchen/Lowchen_06682.jpg' '/data/dog_images/valid/100.Lowchen/Lowchen_06708.jpg' '/data/dog_images/valid/100.Lowchen/Lowchen_06684.jpg'] ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ A dog was detected in 1% of the images in `human_files_short` and in 100% of the images in `dog_files_short` (see the output below). ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short.
detected_count = 0 for file in human_files_short: if dog_detector(file): detected_count += 1 print('Dog detected rate for humans is: %d/%d'%(detected_count, 100 )) detected_count = 0 for file in dog_files_short: if dog_detector(file): detected_count += 1 print('Dog detected rate for dogs is: %d/%d'%(detected_count, 100 )) ###Output Dog detected rate for humans is: 1/100 Dog detected rate for dogs is: 100/100 ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
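###Markdown As a hedged aside (not part of the original notebook): beyond the horizontal flips and rotations used in the next cell, the torchvision transforms mentioned above also offer crop- and color-based augmentation. A minimal sketch of a richer training pipeline follows; the specific parameter values are assumptions, not tuned choices. ###Code
import torchvision.transforms as transforms

# A richer (hypothetical) training-time augmentation pipeline.
# Each transform is applied on the fly, so every epoch sees a
# slightly different version of each training image.
augmented_train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224, scale=(0.8, 1.0)),   # random crop, then resize to 224
    transforms.RandomHorizontalFlip(),                      # mirror left/right
    transforms.RandomRotation(10),                          # small rotations
    transforms.ColorJitter(brightness=0.2, contrast=0.2),   # lighting variation
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
###Output _____no_output_____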
###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes transform = transforms.Compose([ transforms.RandomHorizontalFlip(), # randomly flip and rotate transforms.RandomRotation(10), transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) batch_size = 20 num_workers = 0 data_dir = '/data/dog_images/' dataset_names = ['train','valid','test'] dog_datasets = {} dataset_loaders = {} for name in dataset_names: image_dir = os.path.join(data_dir, f'{name}/') data = datasets.ImageFolder(image_dir, transform=transform) loader = torch.utils.data.DataLoader(data, batch_size=batch_size, num_workers=num_workers, shuffle=True) dog_datasets[name] = data dataset_loaders[name] = loader print('data_loaders: ', dataset_loaders) print('size of train data: ', len(dataset_loaders['train'])) print('size of valid data: ', len(dataset_loaders['valid'])) print('size of test data: ', len(dataset_loaders['test'])) ###Output data_loaders: {'train': <torch.utils.data.dataloader.DataLoader object at 0x7fc88c054da0>, 'valid': <torch.utils.data.dataloader.DataLoader object at 0x7fc88c054eb8>, 'test': <torch.utils.data.dataloader.DataLoader object at 0x7fc88c054f98>} size of train data: 334 size of valid data: 42 size of test data: 42 ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:* I resize the images by stretching them to a fixed size. The input size is (3, 224, 224), because the torchvision.models [documentation](https://pytorch.org/docs/stable/torchvision/models.html) states that the input must be at least 224 pixels. We do not flatten the images; instead, each image is represented as a 2D matrix with 3 channels, so that the spatial information of the image features is preserved.* I augment the images with random horizontal flips and rotations, so that the model focuses on the image features themselves rather than on their orientation. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. First, count how many dog breed classes there are in total: ###Code maxTargets = [] for batch_idx, (data, target) in enumerate(dataset_loaders['train']): maxTargets.append(torch.max(target)) for batch_idx, (data, target) in enumerate(dataset_loaders['valid']): maxTargets.append(torch.max(target)) for batch_idx, (data, target) in enumerate(dataset_loaders['test']): maxTargets.append(torch.max(target)) import numpy as np # max target value np.max([a.item() for a in maxTargets]) import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 16, 3, padding=1) # 3x224x224 -> 16x224x224 self.pool1 = nn.MaxPool2d(2, 2) # 16x224x224 -> 16x112x112 self.conv2 = nn.Conv2d(16, 32, 3, padding=1) self.pool2 = nn.MaxPool2d(2, 2) # -> 32x56x56 self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.pool3 = nn.MaxPool2d(2, 2) # -> 64x28x28 self.fc1 = nn.Linear(64 * 28 * 28, 4096) self.fc2 = nn.Linear(4096, 512) self.fc3 = nn.Linear(512, 133) # one output per breed class self.dropout = nn.Dropout(0.2) def forward(self, x): ## Define forward behavior x = self.pool1(F.relu(self.conv1(x))) x = self.pool2(F.relu(self.conv2(x))) x = self.pool3(F.relu(self.conv3(x))) # flatten image input x = x.view(-1, 64 * 28 * 28) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = F.relu(self.fc2(x)) x = self.dropout(x) x = self.fc3(x) return x #-#-# You do NOT have to modify the code below this line.
#-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() model_scratch ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ Step 1: 1. Build 3 convolutional layers, ending with 64 feature maps. 2. I originally wanted every pooling layer to halve the spatial size, but the input to the fully connected layer turned out to be too large, so the last pooling layer divides the size by 4 instead. 3. To avoid overfitting, I added a dropout layer with a drop rate of 15%. 4. Since there are 133 breeds in the end, the final output size is 133. Step 2: I observed that the loss decreased very slowly during training, so I adjusted the SGD learning rate: 0.01 => 0.05. Result: the loss decreased slightly faster, but was still very slow. avg loss = 4.45, accuracy = 4%. Step 3: 1. pool2 stride: 2 => 4. 2. fc1 output size: 1024 => 4096. 3. fc2 output size: 256 => 512. 4. dropout.p: 0.15 => 0.05. Result: the loss decreased even more slowly, barely moving at all. The adjustment seemed to go in the wrong direction; the number of parameters should be reduced instead. Step 4: 1. fc1 output size: 4096 => 512. 2. fc2 output size: 512 => 256. 3. dropout.p: 0.05 => 0.15. Result: no change at all in how fast the loss decreased. Step 5: The earlier adjustments seemed misdirected, so I reverted them: 1. pool2 stride: 2. 2. fc1 output size: 4096. 3. fc2 output size: 512. 4. dropout.p: 0.15 => 0.05. Result: * The loss decreased steadily, slightly faster than before. * After 8 epochs, train_loss dropped to 3.8, but valid_loss increased, which indicates overfitting. * After 10 epochs, valid_loss dropped to 3.95 and accuracy rose to 9%. Step 6: Given the overfitting, I raised dropout.p to 0.2. Result: * After 5 more epochs, accuracy rose to 11%. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.05) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
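###Markdown A hedged aside prompted by the tuning log in Question 4 (slow loss decrease under plain SGD): a learning-rate scheduler is one standard way to start with a larger step size and decay it as training stabilizes. This sketch is not part of the original submission, and the step size and decay factor are assumptions. ###Code
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

# Hypothetical setup: start at lr=0.05 and halve it every 5 epochs.
model = nn.Linear(10, 2)  # stand-in for the scratch CNN
optimizer = optim.SGD(model.parameters(), lr=0.05)
scheduler = StepLR(optimizer, step_size=5, gamma=0.5)

for epoch in range(15):
    # ... run one training epoch with `optimizer` here ...
    scheduler.step()  # decay the learning rate on schedule
    print(epoch, optimizer.param_groups[0]['lr'])
###Output _____no_output_____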
###Code loaders_scratch = dataset_loaders valid_loss_min = np.Inf def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss global valid_loss_min for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # print(f'batch_index:{batch_idx}, len: {len(data)}, data.size:{data.size()}, target.size:{target.size()}') # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() optimizer.zero_grad() output = model(data) # print(f'output.size:{output.size()}, target.size:{target.size()}') loss = criterion(output, target) ## find the loss and update the model parameters accordingly loss.backward() optimizer.step() ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) train_loss += (loss.data-train_loss)/(batch_idx+1) if batch_idx % 20 == 19: print(f'Epoch: {epoch} \tBatch idx: {batch_idx} \t Curr Loss: {loss.data} \tAvg loss: {train_loss}') ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss += (loss.data-valid_loss)/(batch_idx+1) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model model_scratch.dropout = nn.Dropout(0.2) # train the model model_scratch = train(5, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100.
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 4.016365 Test Accuracy: 11% (97/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = loaders_scratch ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture vgg16 = models.vgg16(pretrained=True) for param in vgg16.features.parameters(): param.requires_grad = False n_classes = 133 n_inputs = vgg16.classifier[6].in_features last_layer = nn.Linear(n_inputs, n_classes) vgg16.classifier[6] = last_layer model_transfer = vgg16 if use_cuda: model_transfer = model_transfer.cuda() print(model_transfer) ###Output VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace) (5): Dropout(p=0.5) (6): Linear(in_features=4096, out_features=133, 
bias=True) ) ) ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ 1. VGG16 has a simple structure and achieved excellent results in the ImageNet competition, so I wanted to use VGG16 as the architecture for this project. 2. I chose this model because VGG16 was trained mainly on animals and everyday objects, which are very similar to the categories we need to classify here. 3. The training set for this project is not very large, only about 8,000 images, so it is not suitable for training from scratch; I therefore started by training only the last layer. 4. In the end, after training for 6 epochs, the results were already quite good. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code n_epochs = 10 valid_loss_min = np.Inf # train the model model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.505306 Test Accuracy: 85% (714/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code dog_datasets['train'].classes ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in dog_datasets['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed image = load_image(img_path).to(device) input = image.unsqueeze(0) output = model_transfer(input) _, pred = torch.max(output,1) index = pred.item() # predicted class index name = class_names[index] return name dog_files[50:] len(dog_files) print('breed of file %s is %s'%(dog_files[30], predict_breed_transfer(dog_files[30]))) print('breed of file %s is %s'%(dog_files[1200], predict_breed_transfer(dog_files[1200]))) print('breed of file %s is %s'%(dog_files[5000], predict_breed_transfer(dog_files[5000]))) ###Output breed of file /data/dog_images/train/103.Mastiff/Mastiff_06818.jpg is Mastiff breed of file /data/dog_images/train/104.Miniature_schnauzer/Miniature_schnauzer_06922.jpg is Petit basset griffon vendeen breed of file /data/dog_images/train/040.Bulldog/Bulldog_02823.jpg is Bulldog ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.
Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither if(dog_detector(img_path)): breed = predict_breed_transfer(img_path) return 'Dog is detected! The breed is: %s.'%(breed) elif(face_detector(img_path)): breed = predict_breed_transfer(img_path) return 'Human is detected! He is like: %s.'%(breed) else: return 'Nothing detected.' ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) 1. Apply some augmentation to the training set, such as cropping and rotation. 2. Switch to a more complex model, such as GoogLeNet, to increase accuracy. 3. During transfer learning, also update some of the later convolutional layers, so that the model adapts better to the dog-breed classification task. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:6], dog_files[:6])): print('Result of {} is: [{}]'.format(file, run_app(file))) ###Output Result of /data/lfw/Dan_Ackroyd/Dan_Ackroyd_0001.jpg is: [Human is detected! He is like: Welsh springer spaniel.] Result of /data/lfw/Alex_Corretja/Alex_Corretja_0001.jpg is: [Human is detected! He is like: Dachshund.] Result of /data/lfw/Daniele_Bergamin/Daniele_Bergamin_0001.jpg is: [Human is detected! He is like: Bearded collie.] Result of /data/lfw/Donald_Carty/Donald_Carty_0001.jpg is: [Human is detected! He is like: Bull terrier.] Result of /data/lfw/Barry_Switzer/Barry_Switzer_0001.jpg is: [Human is detected! He is like: Italian greyhound.] Result of /data/lfw/Jeong_Se-hyun/Jeong_Se-hyun_0003.jpg is: [Human is detected! He is like: Alaskan malamute.] Result of /data/dog_images/train/103.Mastiff/Mastiff_06833.jpg is: [Dog is detected! The breed is: Bullmastiff.] Result of /data/dog_images/train/103.Mastiff/Mastiff_06826.jpg is: [Dog is detected! The breed is: Mastiff.] Result of /data/dog_images/train/103.Mastiff/Mastiff_06871.jpg is: [Dog is detected! The breed is: Bullmastiff.] Result of /data/dog_images/train/103.Mastiff/Mastiff_06812.jpg is: [Dog is detected! The breed is: Mastiff.] Result of /data/dog_images/train/103.Mastiff/Mastiff_06831.jpg is: [Dog is detected! The breed is: Mastiff.] Result of /data/dog_images/train/103.Mastiff/Mastiff_06867.jpg is: [Dog is detected!
The breed is: Mastiff.] ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps.
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. 
The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ > human_perc: 96%> dog_perc: 18% ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] i=0 for img_path in human_files_short: if face_detector(img_path) == True: i+=1 human_perc = i/100 j=0 for img_path in dog_files_short: if face_detector(img_path) == True: j+=1 dog_perc = j/100 print(human_perc) print(dog_perc) ###Output 0.96 0.18 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. 
(IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' imsize = 224 vgg16 = models.vgg16(pretrained=True) # note: re-loading the weights on every call is slow; reusing the global VGG16 would be faster vgg16.eval() # disable dropout layers for inference transform = transforms.Compose([ transforms.Resize(imsize), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) img = Image.open(img_path).convert('RGB') # force 3 channels so grayscale/RGBA files do not break the transform input_img = transform(img) out = vgg16(input_img.unsqueeze(0)) # run the forward pass once and reuse the result return torch.argmax(out).data.tolist() ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): is_dog=False pred = VGG16_predict(img_path) if pred >= 151 and pred <= 268: is_dog = True return is_dog # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ > human_files_short: 0%> dog_files_short: 96% ###Code from tqdm.notebook import tqdm j=0 for img_path in tqdm(dog_files_short): # tqdm shows the progress bar seen in the output if dog_detector(img_path) == True: j+=1 dog_perc = j/100 print('dog_perc: ', dog_perc) j=0 for img_path in tqdm(human_files_short): if dog_detector(img_path) == True: j+=1 human_perc = j/100 print('human_perc: ', human_perc) ###Output 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [13:33<00:00, 8.13s/it] ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc).
Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. (A hedged ResNet-50 sketch for this optional task is included after the data-loader instructions below.) --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
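###Markdown The following is a hedged sketch of the optional task mentioned above (it is not part of the original submission): a dog detector built on ResNet-50 instead of VGG-16. It assumes the same ImageNet index range 151-268 for dog classes and standard ImageNet preprocessing. ###Code
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image

# Load a pre-trained ResNet-50 and switch to inference mode.
resnet50 = models.resnet50(pretrained=True)
resnet50.eval()

# Standard ImageNet preprocessing.
resnet_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def resnet50_dog_detector(img_path):
    # Returns True if the top-1 ImageNet class falls in the dog range 151-268.
    img = Image.open(img_path).convert('RGB')
    batch = resnet_transform(img).unsqueeze(0)
    with torch.no_grad():
        pred = torch.argmax(resnet50(batch)).item()
    return 151 <= pred <= 268
###Output _____no_output_____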
###Code from collections import OrderedDict import glob classes = OrderedDict() for breed in glob.glob('dogImages/train' + '*/*/*.*', recursive=True): classes[breed.split('\\')[2].split('.')[1]] = None classes = list(classes.keys()) def get_class_from_path(path): return torch.tensor(int(path.split(".")[0].split("\\")[-1])-1) def get_class_name_from_path(path): return path.split('\\')[2].split('.')[-1] import os import torch from torch.utils.data import Dataset, DataLoader from pathlib import Path import glob import torchvision.transforms as transforms from PIL import Image use_cuda = torch.cuda.is_available() def get_class_from_path(path): # (redefined here so this cell runs on its own) return torch.tensor(int(path.split(".")[0].split("\\")[-1])-1) class CustomDataset(Dataset): def __init__(self, root_dir, transform): self.root_dir = root_dir self.transform = transform # glob the image paths once up front instead of on every item lookup self.img_paths = list(glob.glob(self.root_dir + '*/*/*.*', recursive=True)) def __len__(self): return len(self.img_paths) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() img = Image.open(self.img_paths[idx]).convert('RGB') # force 3 channels so Normalize does not break on grayscale files target = get_class_from_path(self.img_paths[idx]) img_transf = self.transform(img) if use_cuda: img_transf = img_transf.cuda() return img_transf, target transform_train = transforms.Compose([ transforms.CenterCrop((224,224)), transforms.RandomHorizontalFlip(), transforms.RandomPerspective(), transforms.RandomRotation(30), transforms.ToTensor(), transforms.RandomErasing(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) transform_valid = transforms.Compose([ transforms.CenterCrop((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) train_data = CustomDataset('dogImages/train', transform_train) valid_data = CustomDataset('dogImages/valid', transform_valid) test_data = CustomDataset('dogImages/test', transform_valid) batch_size = 64 num_workers = 0 dataloader_train = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) dataloader_valid= DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) dataloader_test = DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:> 1) First I crop the image, then convert it to a tensor, and then normalize it > 2) I'm doing a center crop; the input tensor is (batch_size, 3, 224, 224), that is (batch_size, channels, height, width), with batch_size = 64 > 3) I'm using Random Horizontal Flip, Perspective, Rotation and Random Erasing (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
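###Markdown As a hedged sanity check (not part of the original notebook) for the template that follows: with 3x3 convolutions at padding 1 and stride 1 the spatial size is preserved, and each 2x2 max-pool halves it, so three conv/pool stages take 224 -> 112 -> 56 -> 28, and the flattened feature size is channels * 28 * 28. A quick sketch of that arithmetic: ###Code
def conv_out(size, kernel=3, padding=1, stride=1):
    # Standard convolution output-size formula.
    return (size + 2 * padding - kernel) // stride + 1

size = 224
for stage in range(3):
    size = conv_out(size)   # 3x3 conv, padding 1: size unchanged
    size = size // 2        # 2x2 max-pool: size halved
    print('after stage', stage + 1, ':', size)

print('flattened size with 128 channels:', 128 * size * size)  # 128 * 28 * 28 = 100352
###Output _____no_output_____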
###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1) # in: 3x224x224 out: 32x112x112 self.conv2 = nn.Conv2d(32, 64, 3, padding=1) # in: 32x112x112 out:64x56x56 self.conv3 = nn.Conv2d(64, 128, 3, padding=1) # in: 64x56x56 out: 128x28x28 self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.fc1 = nn.Linear(in_features=128 * 28 * 28, out_features=512) self.fc2 = nn.Linear(512, 133) self.dropout = nn.Dropout(p=0.5) def forward(self, x): ## Define forward behavior x = self.pool1(F.relu(self.conv1(x))) x = self.pool1(F.relu(self.conv2(x))) x = self.pool1(F.relu(self.conv3(x))) x = self.dropout(x) # flatten image input x = x.view(-1, 128 * 28 * 28) # add dropout layer x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ > I used three convolutional layers, each followed by max pooling, because it is an architecture I saw in previous lessons, and I wanted to see if that would be enough.> I also added dropout to help avoid overfitting the data (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim criterion_scratch = nn.CrossEntropyLoss() optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.05) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code # the following import is required for training to be robust to truncated images from tqdm.notebook import tqdm from PIL import ImageFile import numpy as np ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf print("CUDA:", use_cuda) for epoch in tqdm(range(1, n_epochs+1),desc='Epochs'): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(tqdm(loaders['train'], desc='Train')): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(tqdm(loaders['valid'], desc='Valid')): # wrap the loader in tqdm; enumerate itself has no desc argument # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## save the model if validation loss has decreased if valid_loss < valid_loss_min: valid_loss_min = valid_loss torch.save(model.state_dict(), save_path) # return trained model return model loaders_scratch = {'train': dataloader_train, 'valid': dataloader_valid, 'test': dataloader_test} # train the model #model_scratch = train(5,loaders_scratch,model_scratch,optimizer_scratch,criterion_scratch,use_cuda,'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 4.537986 Test Accuracy: 17% (148/836) ###Markdown Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
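###Markdown A hedged aside before the implementation (not part of the original submission): the transfer-learning setup below freezes all convolutional features and retrains only the classifier head. One optional refinement, also suggested in the reflections elsewhere in this document, is to additionally unfreeze the last convolutional block so it can adapt to the dog-breed data. The layer indices below refer to torchvision's VGG-16 feature stack, and the learning rates are assumptions. ###Code
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models

model = models.vgg16(pretrained=True)

# Freeze all convolutional features first.
for param in model.features.parameters():
    param.requires_grad = False

# Then unfreeze the last conv block (layers 24-30 in torchvision's VGG-16 feature stack).
for layer in list(model.features.children())[24:]:
    for param in layer.parameters():
        param.requires_grad = True

# Replace the final classifier layer for the 133 dog breeds.
model.classifier[6] = nn.Linear(model.classifier[6].in_features, 133)

# Two parameter groups: a small learning rate for the pre-trained conv block,
# a larger one for the classifier head.
optimizer = optim.SGD([
    {'params': filter(lambda p: p.requires_grad, model.features.parameters()), 'lr': 1e-4},
    {'params': model.classifier.parameters(), 'lr': 1e-3},
], momentum=0.9)
###Output _____no_output_____ ###Markdown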
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## Specify data loaders
loaders_transfer = {'train': dataloader_train, 'valid': dataloader_valid, 'test': dataloader_test}
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## Specify model architecture
model_transfer = models.vgg16(pretrained=True)

# Prevent the pretrained feature weights from being updated
for param in model_transfer.features.parameters():
    param.requires_grad = False

n_inputs = model_transfer.classifier[6].in_features
last_layer = nn.Linear(n_inputs, 133) # This has requires_grad=True by default
model_transfer.classifier[6] = last_layer

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ > I used the vgg16 architecture. First I set the requires_grad attribute of the vgg16 feature layers to False, so the pretrained weights won't be updated while training. Then I replaced the final layer in the "classifier" group, keeping that linear layer's input size and setting its number of outputs to the number of dog breeds (133). Since vgg16 was pretrained on ImageNet, which includes many dog images, its convolutional features should transfer well to this problem. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
# optimize the (unfrozen) classifier parameters of the transfer model, not model_scratch
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.1)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model
#model_transfer = train(5,loaders_transfer,model_transfer,optimizer_transfer,criterion_transfer,use_cuda,'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 1.119155


Test Accuracy: 70% (588/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. 
###Code
### Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
def process_one(img_path, transform_valid):
    img = Image.open(img_path)
    img_transf = transform_valid(img).unsqueeze(0)
    # move the tensor to the GPU only when one is available
    return img_transf.cuda() if use_cuda else img_transf

def predict_breed_transfer(img_path):
    pred = torch.argmax(model_transfer(process_one(img_path, transform_valid)))
    return classes[pred.detach().cpu().numpy().item()]
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code
def run_app(img_path):
    is_human = face_detector(img_path)
    is_dog = dog_detector(img_path)
    ## handle cases for a human face, dog, and neither
    if is_human or is_dog:
        return predict_breed_transfer(img_path)
    else:
        return 'Error, img has neither a dog nor a human!'
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ > The outputs are pretty good, but not perfect: the predicted dog breeds aren't always right, but the coat colors are about the same, so it is not that bad.> Possible improvements are: * Train for more epochs* Add more data augmentations* Try other model architectures, for example resnet

###Code
import glob
human_files = glob.glob('custom/human/*.*', recursive=True)
dog_files = glob.glob('custom/dog/*.*', recursive=True)

## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    print('File: {}\nPred: {}'.format(file, run_app(file)))
    img = cv2.imread(file)
    # convert BGR to RGB so matplotlib displays the colors correctly
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.show()
###Output
File: custom/human\albert.jpg
Pred: Chihuahua
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. 
Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). 
Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. 
- What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](https://pytorch.org/vision/stable/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. 
from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). 
Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)?  What size did you pick for the input tensor, and why?- Did you decide to augment the dataset?  If so, how (through translations, flips, rotations, etc)?  If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed.  Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
    
    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = None

### TODO: select optimizer
optimizer_scratch = None
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
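As a reference for the parameter-update TODO inside the training loop below, here is a hedged, self-contained sketch of the standard PyTorch update step and the running-average loss. The `toy_*` names and the random batch are illustrative assumptions only, not part of the project.

###Code
## A sketch of the standard training step; the tiny linear model and
## random data below are placeholders, not the required solution.
import torch
import torch.nn as nn
import torch.optim as optim

toy_model = nn.Linear(4, 2)
toy_criterion = nn.CrossEntropyLoss()
toy_optimizer = optim.SGD(toy_model.parameters(), lr=0.01)

data = torch.randn(8, 4)             # fake batch of 8 samples
target = torch.randint(0, 2, (8,))   # fake labels

toy_optimizer.zero_grad()            # clear accumulated gradients
output = toy_model(data)             # forward pass
loss = toy_criterion(output, target) # compute the loss
loss.backward()                      # backpropagate
toy_optimizer.step()                 # update the parameters

# running average of the loss, as the template's comment suggests (batch_idx = 0 here)
train_loss = 0.0
train_loss = train_loss + ((1 / (0 + 1)) * (loss.item() - train_loss))
###Output
_____no_output_____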
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed.  Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = None
optimizer_transfer = None
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model (uncomment and fill in the arguments)
#model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.  Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above.  You are __required__ to use your CNN from Step 4 to predict dog breed. 
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like?  If you have a dog, does it predict your dog's breed accurately?  If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer.  Feel free to use any images you like.  Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ?  Or worse :( ?  Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. 
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip).  Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip).  Unzip the folder and place it in the home directory, at location `/lfw`.  *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.

###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
_____no_output_____
###Markdown
Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. 
If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model.  The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn  how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to 
    predicted ImageNet class for image at specified path
    
    Args:
        img_path: path to an image
        
    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    
    return None # predicted class index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`.  Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    
    return None # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. 
- What percentage of the images in `human_files_short` have a detected dog?  - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
###Output
_____no_output_____
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc).  Please use the code cell below to test other pre-trained PyTorch models.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional) 
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images.  In this step, you will create a CNN that classifies dog breeds.  You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.  In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging.  To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black.  Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning.  Experiment with many different architectures, and trust your intuition.  And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).  You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.  If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! 
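As a reference point before the TODO cell below, here is one hedged sketch of loaders built with `ImageFolder`. The 224x224 input size, the augmentations, and `batch_size=32` are illustrative assumptions, not the required answer.

###Code
## A hedged sketch only: typical ImageFolder loaders with light augmentation.
## Assumes the dogImages/{train,valid,test} layout described above.
import torch
from torchvision import datasets, transforms

normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
train_tf = transforms.Compose([transforms.RandomResizedCrop(224),
                               transforms.RandomHorizontalFlip(),
                               transforms.ToTensor(), normalize])
eval_tf = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224),
                              transforms.ToTensor(), normalize])

sketch_loaders = {
    split: torch.utils.data.DataLoader(
        datasets.ImageFolder('dogImages/' + split,
                             transform=(train_tf if split == 'train' else eval_tf)),
        batch_size=32, shuffle=(split == 'train'))
    for split in ['train', 'valid', 'test']}
###Output
_____no_output_____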
###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)?  What size did you pick for the input tensor, and why?- Did you decide to augment the dataset?  If so, how (through translations, flips, rotations, etc)?  If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed.  Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
    
    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = None

### TODO: select optimizer
optimizer_scratch = None
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below.  [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
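The "save the model if validation loss has decreased" TODO inside the template below follows a standard pattern. Here is a hedged, self-contained sketch of it; the `demo_*` names, the stand-in linear model, and the 0.42 loss value are placeholders, not project values.

###Code
## A self-contained sketch of the checkpointing step; demo_* names are placeholders.
import numpy as np
import torch
import torch.nn as nn

demo_model = nn.Linear(4, 2)   # stand-in for the CNN being trained
demo_loss_min = np.Inf         # tracker, as initialized in the template
demo_valid_loss = 0.42         # pretend validation loss for this epoch

if demo_valid_loss < demo_loss_min:
    # validation loss improved, so persist the weights and update the tracker
    torch.save(demo_model.state_dict(), 'demo_checkpoint.pt')
    demo_loss_min = demo_valid_loss
###Output
_____no_output_____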
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
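For instance, reusing the Step 3 loaders can be a one-liner; the hedged sketch below assumes a `loaders_scratch` dict was built in the earlier TODO.

###Code
## A hedged sketch: reuse the Step 3 loaders for transfer learning
## (assumes loaders_scratch = {'train': ..., 'valid': ..., 'test': ...} exists).
loaders_transfer = dict(loaders_scratch)
###Output
_____no_output_____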
###Code
## TODO: Specify data loaders
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed.  Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = None
optimizer_transfer = None
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below.  [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model (uncomment and fill in the arguments)
#model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.  Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above.  You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm. 
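### One hedged sketch of the branching this TODO asks for (assuming the
### face_detector, dog_detector, and predict_breed_transfer defined above):
###     if dog_detector(img_path):
###         print('Dog detected! Predicted breed:', predict_breed_transfer(img_path))
###     elif face_detector(img_path):
###         print('Human detected! Resembling breed:', predict_breed_transfer(img_path))
###     else:
###         print('Error: neither a dog nor a human was detected.')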
### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. 
If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) print(faces) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 [[ 70 67 113 113]] ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ ( percentage of faces in human file is 98.0 ...percentage of faces in dog file is 17.0 ) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
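# Count detections over both 100-image samples; dividing by the sample size
# gives the detection rate. (A one-liner such as
# 100 * np.mean([face_detector(f) for f in human_files_short]) would also work.)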
count_hf, count_df = 0, 0
for i, j in zip(human_files_short, dog_files_short):
    count_hf = count_hf + 1 if face_detector(i) else count_hf
    count_df = count_df + 1 if face_detector(j) else count_df
human_file_percent = count_hf / len(human_files_short)
dog_file_percent = count_df / len(dog_files_short)
print(f'percentage of faces in human file is {human_file_percent*100}')
print(f'percentage of faces in dog file is {dog_file_percent*100} ')
###Output
percentage of faces in human file is 98.0
percentage of faces in dog file is 17.0
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.

Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth
100%|██████████| 553433881/553433881 [00:25<00:00, 21967290.46it/s]
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.

(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.
Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
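    # The ImageNet-pretrained VGG-16 expects 224x224 RGB inputs normalised with
    # mean [0.485, 0.456, 0.406] and std [0.229, 0.224, 0.225]; the steps below
    # follow that recipe, add a batch dimension, run a forward pass, and take
    # the argmax over the 1000 class scores.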
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path)
    transform = transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])])
    image = transform(img)
    image = image.unsqueeze(0)
    if use_cuda:
        image = image.cuda()
    output = VGG16(image)
    # move the output back to the CPU before converting it to a numpy array
    return np.argmax(output.detach().cpu().numpy()) # predicted class index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    img = Image.open(img_path)
    transform = transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])])
    image = transform(img)
    image = image.unsqueeze(0)
    if use_cuda:
        image = image.cuda()
    output = VGG16(image)
    output = output.cpu()
    index = np.argmax(output.detach().numpy())
    return 151 <= index <= 268
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__ About 1% of the images in `human_files_short` and 100% of the images in `dog_files_short` have a detected dog (see the output below).
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
human_files_short = human_files[:100]
dog_files_short = dog_files[:100]
#-#-# Do NOT modify the code above this line. #-#-#

## Test the performance of the dog_detector function
## on the images in human_files_short and dog_files_short.
count_hf, count_df = 0, 0
for i, j in zip(human_files_short, dog_files_short):
    count_hf = count_hf + 1 if dog_detector(i) else count_hf
    count_df = count_df + 1 if dog_detector(j) else count_df
human_file_percent = count_hf / len(human_files_short)
dog_file_percent = count_df / len(dog_files_short)
print(f'percentage of dog in human file is {human_file_percent*100}')
print(f'percentage of dog in dog file is {dog_file_percent*100} ')
###Output
percentage of dog in human file is 1.0
percentage of dog in dog file is 100.0
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
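# Hedged sketch for this optional cell: the same detector idea with ResNet-50
# in place of VGG-16. It assumes the ImageNet dog classes 151-268 and reuses
# the preprocessing above; it is not part of the original solution.
resnet50 = models.resnet50(pretrained=True)
if use_cuda:
    resnet50 = resnet50.cuda()
resnet50.eval()

def resnet50_dog_detector(img_path):
    img = Image.open(img_path)
    transform = transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])])
    image = transform(img).unsqueeze(0)
    if use_cuda:
        image = image.cuda()
    index = np.argmax(resnet50(image).detach().cpu().numpy())
    return 151 <= index <= 268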
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets, transforms
import torch
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# size of the batch
batch_size = 20
# No. of parallel cores
num_workers = 0

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
transform = {}
transform['train'] = transforms.Compose([transforms.RandomResizedCrop(224),
                                         transforms.RandomHorizontalFlip(),
                                         transforms.RandomRotation(30),
                                         transforms.ToTensor(),
                                         transforms.Normalize([0.485, 0.456, 0.406],
                                                              [0.229, 0.224, 0.225])])
transform['valid'] = transforms.Compose([transforms.Resize((224, 224)),
                                         transforms.ToTensor(),
                                         transforms.Normalize([0.485, 0.456, 0.406],
                                                              [0.229, 0.224, 0.225])])
transform['test'] = transforms.Compose([transforms.Resize((224, 224)),
                                        transforms.ToTensor(),
                                        transforms.Normalize([0.485, 0.456, 0.406],
                                                             [0.229, 0.224, 0.225])])

# datasets
train_data = datasets.ImageFolder('/data/dog_images/train', transform=transform['train'])
valid_data = datasets.ImageFolder('/data/dog_images/valid', transform=transform['valid'])
test_data = datasets.ImageFolder('/data/dog_images/test', transform=transform['test'])

# data loaders
loaders = {}
loaders['train'] = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
loaders['valid'] = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)
loaders['test'] = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**:
- Training images are randomly cropped (`RandomResizedCrop` with the default scale and aspect ratio) and resized to 224x224, since pretrained models expect this input size.
- The training set is augmented with random horizontal flips and random rotations of up to 30 degrees.
- Augmentation is applied only to the training set; validation and test images are simply resized and normalised (no augmentation).
- All images are normalised with the ImageNet mean and standard deviation, as the torch documentation requires for pre-trained models.

(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        # Linear layers
        self.fc1 = nn.Linear(128*7*7, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 133)
        # pooling layers
        self.pool = nn.MaxPool2d(2, 2)
        # dropout layers
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        ## Define forward behavior
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = F.relu(self.conv3(x))
        x = self.pool(x)
        x = x.view(-1, 128*7*7)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        x = self.fc3(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#
# instantiate the CNN
model_scratch = Net()
print(model_scratch)

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
Net(
  (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
  (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
  (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (fc1): Linear(in_features=6272, out_features=1024, bias=True)
  (fc2): Linear(in_features=1024, out_features=512, bias=True)
  (fc3): Linear(in_features=512, out_features=133, bias=True)
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (dropout): Dropout(p=0.2)
)
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__
- Three convolutional layers extract features from the images, increasing the depth from 3 to 128 channels.
- Three fully connected layers follow, because telling visually similar dog breeds apart is a complex task.
- The model is regularised with dropout (p = 0.2), and ReLU activations are used because they are effective and help avoid the vanishing-gradient problem.

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.05)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # refresh optimizer
            optimizer.zero_grad()
            # use the model and criterion passed in as arguments,
            # not the global model_scratch/criterion_scratch
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            ## record the average training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            output = model(data)
            val_loss = criterion(output, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (val_loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            torch.save(model.state_dict(), save_path)
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                valid_loss))
            valid_loss_min = valid_loss

    # return trained model
    return model

# train the model
model_scratch = train(100, loaders, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt', map_location=lambda storage, loc: storage))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders, model_scratch, criterion_scratch, use_cuda)
###Output
Test Loss: 3.669464

Test Accuracy: 12% (103/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders import os from torchvision import datasets,transforms import torch from PIL import Image, ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True # size of the batch batch_size = 20 # No. of parellel cores num_workers = 0 ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes transform = {} transform['train'] = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomRotation(30), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) transform['valid'] = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) transform['test'] = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) # datasets train_data = datasets.ImageFolder('/data/dog_images/train',transform=transform['train']) valid_data = datasets.ImageFolder('/data/dog_images/valid',transform=transform['valid']) test_data = datasets.ImageFolder('/data/dog_images/test',transform=transform['test']) # data loaders loaders = {} loaders['train'] = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers,shuffle=True) loaders['valid'] = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers,shuffle=False) loaders['test'] = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers,shuffle=False) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. 
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.vgg16(pretrained=True)
# replace the final classifier layer so the network outputs 133 breed classes
model_transfer.classifier[6] = nn.Linear(4096, 133)
print(model_transfer)

# freeze the convolutional feature extractor
for param in model_transfer.features.parameters():
    param.requires_grad = False

use_cuda = torch.cuda.is_available()
if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace)
    (2): Dropout(p=0.5)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace)
    (5): Dropout(p=0.5)
    (6): Linear(in_features=4096, out_features=133, bias=True)
  )
)
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.

__Answer:__
- Used the pretrained VGG16 network, whose 16 weight layers (13 convolutional plus 3 fully connected) are arranged so that the convolutional stack can extract even fine-grained details from an image, which suits the complexity of this task.
- Froze the convolutional features and replaced the final fully connected layer to accommodate our expected output of 133 classes rather than 1000.

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
from torch import optim

criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below.
[Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code def train(n_epochs,loaders,model,optimizer,criterion,use_cuda,save_path): min_val_loss = np.Inf for epoch in range(n_epochs): train_loss = 0 valid_loss = 0 model.train() for batch_idx,(data,target) in enumerate(loaders['train']): if use_cuda: data,target = data.cuda(), target.cuda() optimizer.zero_grad() output = model(data) loss = criterion(output,target) loss.backward() optimizer.step() train_loss = train_loss + (1/(batch_idx + 1))*(loss.data - train_loss) model.eval() for batch_idx,(data,target) in enumerate(loaders['valid']): if use_cuda: data,target = data.cuda(), target.cuda() output = model(data) val_loss = criterion(output,target) valid_loss = valid_loss + (1/(batch_idx + 1)) * (val_loss.data - valid_loss) print(f'At epoch: {epoch} ... training loss: {train_loss} ..... validation loss: {valid_loss} .....') if valid_loss <= min_val_loss: torch.save(model.state_dict(),save_path) print(f'validation loss decreased from {min_val_loss} to {valid_loss}. hence saving the model...') min_val_loss = valid_loss return model # train the model import numpy as np model_transfer = train(20, loaders, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') #load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) #load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt',map_location=lambda storage, loc:storage)) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.546462 Test Accuracy: 83% (702/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. from PIL import Image data_transfer = {} data_transfer['train'] = train_data # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed image = Image.open(img_path) transform = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) image = transform(image) image = image.unsqueeze(0) if use_cuda: image = image.cuda() output = np.argmax(model_transfer(image).detach().cpu().numpy()) #output = model_transfer(image).detach().cpu().numpy().shape return class_names[output] ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. 
Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    breed = predict_breed_transfer(img_path)
    img = Image.open(img_path)
    plt.imshow(img)
    if face_detector(img_path):
        print('Hello, Human')
        print(f'You resemble a {breed}')
    elif dog_detector(img_path):
        print(f'the predicted breed of the dog is {breed}')
    else:
        print('The image contains neither a dog nor a human. Please provide a different image.')
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ The output is better than expected. Three possible improvements:
- train the model for more epochs;
- collect more training images;
- reduce the prediction time of the model.
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
Hello, Human
You resemble a Beagle
Hello, Human
You resemble a Dachshund
Hello, Human
You resemble a Cocker spaniel
the predicted breed of the dog is Bullmastiff
the predicted breed of the dog is Mastiff
the predicted breed of the dog is Bullmastiff
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document.
Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. 
###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. 
You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](https://pytorch.org/vision/stable/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
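    ## One possible recipe (a sketch, mirroring the completed notebook earlier in
    ## this file): open the image with PIL; compose Resize to 224x224, ToTensor,
    ## and Normalize with the ImageNet mean/std; unsqueeze(0) to add a batch
    ## dimension; move the tensor to the GPU when use_cuda is True; run VGG16;
    ## and return the argmax of the 1000 output logits (moved back to the CPU).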
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. 
Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
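The template cell below leaves the parameter-update step and the checkpointing as TODOs. A minimal sketch of those missing pieces, assuming the standard backpropagation recipe and the running-average loss suggested in the template's own comments (mirroring the completed notebook earlier in this file):
###Code
# Hedged sketch of the pieces the template below leaves as TODOs.
# Inside the training loop:
#     optimizer.zero_grad()
#     output = model(data)
#     loss = criterion(output, target)
#     loss.backward()
#     optimizer.step()
#     train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
# After each epoch, checkpoint whenever the validation loss improves:
#     if valid_loss <= valid_loss_min:
#         torch.save(model.state_dict(), save_path)
#         valid_loss_min = valid_loss
###Output
_____no_output_____
###Markdown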
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model (uncomment the line below and fill in n_epochs) # model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images from your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 5 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app.
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
human_faces_detected = sum([1 if face_detector(h) else 0 for h in human_files_short]) dogs_detected = sum([1 if face_detector(d) else 0 for d in dog_files_short]) print("Accuracy for Human images (short): {:.2f}%".format(100*human_faces_detected/len(human_files_short))) print("Accuracy for Dog images (short): {:.2f}%".format(100*dogs_detected/len(dog_files_short))) ###Output Accuracy for Human images (short): 99.00% Accuracy for Dog images (short): 10.00% ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True from torch.autograd import Variable def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function.
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image im = Image.open(img_path) im = transforms.Resize((224,224))(im) im = Variable(transforms.ToTensor()(im).unsqueeze(0)) if use_cuda: im = im.cuda() output = VGG16(im) return torch.max(output, 1)[1].item() # index (0-999) of the top-scoring ImageNet class ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): return 151 <= VGG16_predict(img_path) <= 268 # the ImageNet dog categories ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. humans_detected_vgg16 = sum([1 if dog_detector(img_path) else 0 for img_path in human_files_short]) dogs_detected_vgg16 = sum([1 if dog_detector(img_path) else 0 for img_path in dog_files_short]) print("Percentage of dogs detected in human images: {:.2f}".format(100*humans_detected_vgg16/len(human_files_short))) print("Percentage of dogs detected in dog images: {:.2f}".format(100*dogs_detected_vgg16/len(dog_files_short))) ###Output Percentage of dogs detected in human images: 0.00 Percentage of dogs detected in dog images: 96.00 ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
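One detail worth noting before the implementation: torchvision models are conventionally fed inputs normalized with the per-channel mean and standard deviation of the ImageNet training set, and the loaders below reuse exactly those statistics. A minimal sketch of what that `Normalize` transform does (the random tensor is a placeholder for a real image): ###Code
import torch
import torchvision.transforms as transforms

# Per-channel ImageNet statistics, as reused in the loaders below.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

fake_image = torch.rand(3, 224, 224)   # placeholder for a real CHW image tensor
out = normalize(fake_image)            # per channel: (x - mean) / std
print(out.shape)                       # torch.Size([3, 224, 224])
###Output _____no_output_____ ###Markdown The loaders below apply this normalization after `ToTensor()` for the training, validation, and test splits alike.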
###Code import os from torchvision import datasets import torchvision.transforms as transforms import torch import numpy as np from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes use_cuda = torch.cuda.is_available() batch_size = 80 num_workers = 0 data_dir = 'dogImages/' train_dir = os.path.join(data_dir, 'train/') valid_dir = os.path.join(data_dir, 'valid/') test_dir = os.path.join(data_dir, 'test/') standard_normalization = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), standard_normalization]), 'val': transforms.Compose([transforms.Resize(size=(224,224)), transforms.ToTensor(), standard_normalization]), 'test': transforms.Compose([transforms.Resize(size=(224,224)), transforms.ToTensor(), standard_normalization]) } train_data = datasets.ImageFolder(train_dir, transform=data_transforms['train']) valid_data = datasets.ImageFolder(valid_dir, transform=data_transforms['val']) test_data = datasets.ImageFolder(test_dir, transform=data_transforms['test']) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=False) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False) loaders_scratch = { 'train': train_loader, 'valid': valid_loader, 'test': test_loader } ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: - For training data, we take a `RandomResizedCrop` of each image down to a 224x224 patch; 224x224 is the input size expected by the standard ImageNet architectures we compare against later. For validation and testing, we simply resize each image to 224x224 so that no content is cropped away.- We augment the training set with a random horizontal flip (50% probability), which produces plausible new views of each dog. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
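As a quick sanity check on the shapes in the network below (back-of-the-envelope arithmetic, not project code): starting from 224x224 inputs, the two stride-2 convolutions and the three 2x2 max-pool steps each halve the spatial resolution, five halvings in total, while the stride-1 third convolution preserves it. ###Code
size = 224
for _ in range(5):   # conv1 (stride 2), pool, conv2 (stride 2), pool, pool
    size //= 2
print(size, size * size * 128)   # 7 6272 -> matches fc1's in_features of 7*7*128
###Output _____no_output_____ ###Markdown This is where the `7*7*128` flattening in the forward pass below comes from.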
###Code from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True num_classes = 133 # total classes of dog breeds import torch.nn as nn import torch.nn.functional as F import numpy as np # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1) self.conv2 = nn.Conv2d(32, 64, 3, stride=2, padding=1) self.conv3 = nn.Conv2d(64, 128, 3, padding=1) # pool self.pool = nn.MaxPool2d(2, 2) # fully-connected self.fc1 = nn.Linear(7*7*128, 500) self.fc2 = nn.Linear(500, num_classes) # drop-out self.dropout = nn.Dropout(0.3) def forward(self, x): ## Define forward behavior x = F.relu(self.conv1(x)) x = self.pool(x) x = F.relu(self.conv2(x)) x = self.pool(x) x = F.relu(self.conv3(x)) x = self.pool(x) # flatten x = x.view(-1, 7*7*128) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() print(model_scratch) model_scratch = nn.DataParallel(model_scratch) # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output Net( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (fc1): Linear(in_features=6272, out_features=500, bias=True) (fc2): Linear(in_features=500, out_features=133, bias=True) (dropout): Dropout(p=0.3) ) ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ Taking inspiration from VGG, we created a CNN with three convolutional layers (each followed by max pooling) and two fully-connected layers. Three convolutional layers seemed to give a good trade-off between accuracy and the time required for training. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr = 0.05) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
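One note before the loop: the loss bookkeeping inside it is an incremental mean. After batch $k$ (1-indexed), the update $m_k = m_{k-1} + \frac{1}{k}(x_k - m_{k-1})$ leaves $m_k$ equal to the average of the first $k$ batch losses, so no separate running sum or batch count is needed. A tiny self-contained check (the loss values are made up): ###Code
losses = [2.0, 1.0, 0.0]   # made-up per-batch losses
running = 0.0
for k, x in enumerate(losses, start=1):
    running = running + (1 / k) * (x - running)   # same update used in train() and test()
print(running, sum(losses) / len(losses))         # 1.0 1.0
###Output _____no_output_____ ###Markdown The `train` function below applies this update to both the training and the validation loss.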
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path, last_validation_loss=None): """returns trained model""" # initialize tracker for minimum validation loss if last_validation_loss is not None: valid_loss_min = last_validation_loss else: valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # clear the gradients accumulated on the previous batch optimizer.zero_grad() output = model(data) # calculate loss loss = criterion(output, target) # backpropagate loss.backward() # update the parameters optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) if batch_idx % 100 == 0: print('Epoch %d, Batch %d loss: %.6f' % (epoch, batch_idx + 1, train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: torch.save(model, save_path) print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(50, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch = torch.load('model_scratch.pt') ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() # disable dropout for evaluation for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.741892 Test Accuracy: 17% (143/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = loaders_scratch ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.resnext101_32x8d(pretrained=True) # Freeze the pre-trained weights for param in model_transfer.parameters(): param.requires_grad = False # append a new final layer mapping the backbone's 1000-dim ImageNet output to the 133 breeds last_in_features = model_transfer.fc.out_features last_out_features = num_classes last_layer = nn.Linear(last_in_features, last_out_features) augmented_model_transfer = nn.Sequential(model_transfer, last_layer) print(augmented_model_transfer) if torch.cuda.device_count() > 1: print("Let's use", torch.cuda.device_count(), "GPUs!") # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...]
on 3 GPUs augmented_model_transfer = nn.DataParallel(augmented_model_transfer) if use_cuda: augmented_model_transfer = augmented_model_transfer.cuda() ###Output Sequential( (0): ResNet( (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) (layer1): Sequential( (0): Bottleneck( (conv1): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (layer2): Sequential( (0): Bottleneck( (conv1): Conv2d(256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, 
momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (3): Bottleneck( (conv1): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (layer3): Sequential( (0): Bottleneck( (conv1): Conv2d(512, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (3): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, 
momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (4): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (5): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (6): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (7): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (8): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (9): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (10): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (11): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (12): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (13): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (14): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (15): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (16): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): 
BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (17): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (18): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (19): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (20): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (21): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (22): Bottleneck( (conv1): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (layer4): Sequential( (0): Bottleneck( (conv1): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(2048, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(2048, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(2048, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(2048, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False) (bn2): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(2048, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (avgpool): AdaptiveAvgPool2d(output_size=(1, 1)) (fc): Linear(in_features=2048, out_features=1000, bias=True) ) (1): Linear(in_features=1000, out_features=133, bias=True) ) Let's use 8 GPUs! ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ We started from ResNeXt-101 (32x8d) pretrained on ImageNet, froze its weights, and appended a single fully-connected layer that maps its 1000-dimensional ImageNet output to the 133 dog-breed classes, so only the new layer has to be learned. This architecture suits the problem because 118 of ImageNet's 1000 categories (keys 151-268) are themselves dog breeds, so the pretrained features are already highly discriminative for dogs, while our 8,351-image dataset is far too small to train such a deep network from scratch. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() # hand the optimizer only the parameters that still require gradients, i.e. the new final layer optimizer_transfer = optim.SGD(filter(lambda p: p.requires_grad, augmented_model_transfer.parameters()), lr=0.001, momentum=0.9) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
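For reference, the freeze-then-filter pattern used in the optimizer cell above, in a minimal self-contained form (the tiny linear modules are placeholders for the pretrained backbone and the new classifier): ###Code
import torch.nn as nn
import torch.optim as optim

backbone = nn.Linear(8, 4)        # placeholder for the frozen, pretrained model
for p in backbone.parameters():
    p.requires_grad = False       # freeze the pretrained weights
head = nn.Linear(4, 2)            # placeholder for the newly added classifier
model = nn.Sequential(backbone, head)

# Only parameters that still require gradients are handed to the optimizer,
# so the frozen backbone is never updated.
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(trainable, lr=0.001, momentum=0.9)
###Output _____no_output_____ ###Markdown The training run below reuses the `train` function from Step 3.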
###Code # train the model n_epochs = 20 model_transfer = train(n_epochs, loaders_transfer, augmented_model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy model_transfer = torch.load('model_transfer.pt') ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.350534 Test Accuracy: 91% (764/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes] def load_input_image(img_path): image = Image.open(img_path).convert('RGB') prediction_transform = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), standard_normalization]) # discard the transparent, alpha channel (that's the :3) and add the batch dimension image = prediction_transform(image)[:3,:,:].unsqueeze(0) return image def predict_breed_transfer(model, class_names, img_path): # load the image and return the predicted breed im = load_input_image(img_path) if use_cuda: im = im.cuda() # .cuda() is not in-place, so reassign model.eval() prediction = model(im) class_id = torch.argmax(prediction) return class_names[class_id] for img_file in os.listdir('./images'): img_path = os.path.join('./images', img_file) prediction = predict_breed_transfer(model_transfer, class_names, img_path) print("image_file_name: {0}, \t prediction breed: {1}".format(img_file, prediction)) ###Output image_file_name: sample_cnn.png, prediction breed: Maltese image_file_name: Labrador_retriever_06449.jpg, prediction breed: Labrador retriever image_file_name: Labrador_retriever_06457.jpg, prediction breed: Labrador retriever image_file_name: Curly-coated_retriever_03896.jpg, prediction breed: Curly-coated retriever image_file_name: American_water_spaniel_00648.jpg, prediction breed: American water spaniel image_file_name: Brittany_02625.jpg, prediction breed: Brittany image_file_name: Labrador_retriever_06455.jpg, prediction breed: Chesapeake bay retriever image_file_name: sample_human_output.png, prediction breed: Akita image_file_name: sample_dog_output.png, prediction breed: Entlebucher mountain dog image_file_name: Welsh_springer_spaniel_08203.jpg, prediction breed: Welsh springer spaniel ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above.
You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither # detect dog image = Image.open(img_path) plt.imshow(image) plt.show() if dog_detector(img_path) is True: print("Dog detected.") dog_breed = predict_breed_transfer(model_transfer, class_names, img_path) print("Predicted breed: ", dog_breed) elif face_detector(img_path) > 0: print("Human detected.") resembling_breed = predict_breed_transfer(model_transfer, class_names, img_path) print("Resembles dog breed: ", resembling_breed) else: print("Error! Nothing detected!") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ 1. Hyperparameter tuning (experimenting with learning-rate, batch-size, weight-initalization, etc)2. More images of dogs3. More pre-processing of data4. Ensemble of models ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. 
Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home diretcory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. 
###Code !ls dogImages !ls /content/deep-learning-v2-pytorch/project-dog-classification/images google_colab = False local_dev = True import os !cd /content files = ["dogImages.zip","lfw.zip"] for file in files: if not os.path.isfile(file): !curl -O https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/{file} !unzip -q dogImages.zip !unzip -q lfw.zip if google_colab: !git clone https://github.com/udacity/deep-learning-v2-pytorch !pip3 install torchvision %reload_ext autoreload %autoreload project_path = "/content/deep-learning-v2-pytorch/project-dog-classification/" if local_dev: project_path = "/Users/brad/googleDrive/version-control/deep-learning-v2-pytorch/project-dog-classification/" import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier(project_path + 'haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. 
###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code def face_show(path): # load color (BGR) image img = cv2.imread(path) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face if len(faces) > 0: for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. num_human = 0 num_human_in_dog = 0 for human in tqdm(human_files_short): if face_detector(human): num_human += 1 else: pass # face_show(human) for dog in tqdm(dog_files_short): if face_detector(dog): num_human_in_dog += 1 # face_show(dog) print(f'\nNumber of human pictures without humans: {100-num_human}%\n \ Number of Humans in dog pictures: {num_human_in_dog}%') ###Output 100%|██████████| 100/100 [00:01<00:00, 55.64it/s] 100%|██████████| 100/100 [00:09<00:00, 11.10it/s] ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of anotherface detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. 
ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models import torch.nn.functional as F # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /Users/brad/.torch/models/vgg16-397923af.pth 20%|██ | 113434624/553433881 [10:10<39:29, 185664.54it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code print(VGG16) from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image VGG16.cuda() img = Image.open(img_path) img = valid_transforms(img).float() img = torch.autograd.Variable(img, requires_grad=False) img = img.unsqueeze(0) img = img.cuda() out = VGG16(img) ps = F.softmax(out, dim=1) top_p, top_k = ps.topk(1, dim=1) top_k = top_k.view(-1,) return top_k # predicted class index # return # for images, labels in trainloader: # images, labels = images.cuda(), labels.cuda() # print(images.shape) # out = VGG16(images) # ps = F.softmax(out) # top_p, top_k = ps.topk(1, dim=1) # return top_k # predicted class index import random spot = random.randint(0, 100) print(dog_files_short[spot]) VGG16_predict(dog_files_short[spot]) ###Output dogImages/valid/034.Boxer/Boxer_02409.jpg ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). 
###Code #referenced code from: https://discuss.pytorch.org/t/how-to-classify-single-image-using-loaded-net/1411 ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. p = VGG16_predict(img_path) if (p >=151) and (p<=268): return True else: return False ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. num_dog = 0 num_dog_in_human = 0 for human in tqdm(human_files_short): if dog_detector(human): num_dog_in_human += 1 else: pass # face_show(human) for dog in tqdm(dog_files_short): if dog_detector(dog): num_dog += 1 # face_show(dog) print(f'\nNumber of human pictures with dogs: {num_dog_in_human}%\n \ Number of dogs in dog pictures: {num_dog}%') ###Output 100%|██████████| 100/100 [00:03<00:00, 28.71it/s] 100%|██████████| 100/100 [00:05<00:00, 12.43it/s] ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. 
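As one hedged illustration of this optional task, the sketch below swaps in ResNet-50 and reuses the same ImageNet dog index range (151-268); the `use_cuda` flag defined earlier is assumed:
###Code
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image

# alternative detector built on a pretrained ResNet-50 (illustrative sketch)
resnet50 = models.resnet50(pretrained=True)
resnet50.eval()
if use_cuda:
    resnet50 = resnet50.cuda()

resnet_transform = transforms.Compose([transforms.Resize(256),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])

def resnet50_dog_detector(img_path):
    img = Image.open(img_path).convert('RGB')
    batch = resnet_transform(img).unsqueeze(0)
    if use_cuda:
        batch = batch.cuda()
    with torch.no_grad():
        idx = resnet50(batch).argmax(dim=1).item()
    return 151 <= idx <= 268   # ImageNet dog classes
###Output
_____no_output_____
###Markdown
The same loop over `human_files_short` and `dog_files_short` used above can then report this detector's percentages; the empty optional cell below is left for further experiments.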
Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:**How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?**My code resizes the training data with a random resize crop, which will output images with both translational and scale variance. For validation and test data the data is only scaled and center-cropped in order to best evaluate testing on naive datasets.My input tensor is 224x224, I picked this because my research suggested this to be the size of input preferred by the VGG16 network.**Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?**I chose to use rotation, random resized crop and random horizontal flip to augment the training data. I did not augment the validation and test datasets. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I knew beforehand I wanted to use 3 convolutional layers and have a final layer depth of 28x28x30, as this is my favorite network structure. I chose to reduce height and width through maxpooling, as that has been show to be effective in CNNs. At the end of the convolutional layers I included a 2 layer fully connected layer as a classifier to determine the final output of the network. My goal was to keep the network small as we do not have an extremely large dataset for each of the species of dog. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
###Code from google.colab import drive drive.mount('/content/gdrive', force_remount= False) !cp "/content/gdrive/My Drive/{csv_path.split("/")[-1]}" {csv_path} !cp "/content/gdrive/My Drive/{file_path.split("/")[-1]}" {project_path + file_path} !cp "/content/gdrive/My Drive/{file_path.split("/")[-1]}" {project_path + optim_path} from torchvision import datasets, transforms import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch import numpy as np import os import pandas as pd from tqdm import tqdm # from https://stackoverflow.com/questions/12984426/python-pil-ioerror-image-file-truncated-with-big-images # to deal with truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True use_cuda = True project_path = "/content/deep-learning-v2-pytorch/project-dog-classification/" version_number = "11_1" file_path = "model_scratch_" + version_number + ".pt" csv_path = project_path + file_path.split(".")[0] + ".csv" optim_path = project_path + file_path.split(".")[0] + ".opt" batch_size = 128 print(f'Loading Data...') train_dir = '/content/dogImages/train' valid_dir = '/content/dogImages/valid' test_dir = '/content/dogImages/test' #Define the transforms for the training, validation, and testing sets print(f'Defining Transforms') train_transforms = transforms.Compose([transforms.RandomRotation(20), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) valid_transforms = transforms.Compose([transforms.Resize((224,224)), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) test_transforms = transforms.Compose([transforms.Resize((224,224)), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) print(f'Creating Datasets ') #Load the datasets with ImageFolder train_data = datasets.ImageFolder(train_dir, transform=train_transforms) test_data = datasets.ImageFolder(test_dir, transform=test_transforms) valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms) trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True) testloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True) validloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True) # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 16, 3, padding = 1) self.conv1_bn = nn.BatchNorm2d(16) self.conv2 = nn.Conv2d(16, 32, 3, padding = 1) self.conv2_bn = nn.BatchNorm2d(32) self.conv3 = nn.Conv2d(32, 64, 3, padding = 1) self.conv3_bn = nn.BatchNorm2d(64) self.maxpoola = nn.MaxPool2d(2) self.maxpoolb = nn.MaxPool2d(4) self.fc1 = nn.Linear(7*7*64, 64) self.out = nn.Linear(64, 133) self.batch_norm = nn.BatchNorm1d(num_features=64) self.dropout = nn.Dropout(p=0.5) def forward(self, x): ## Define forward behavior x = F.relu(self.conv1(x)) x = self.maxpoola(x) x = self.conv1_bn(x) x = F.relu(self.conv2(x)) x = self.maxpoolb(x) x = self.conv2_bn(x) x = F.relu(self.conv3(x)) x = self.maxpoolb(x) x = self.conv3_bn(x) x = x.view(x.size(0), -1) x = self.dropout(x) x = self.fc1(x) x = F.relu(self.batch_norm(x)) x = self.dropout(x) x = self.out(x) x = torch.softmax(x, dim=1) return x def save_scratch(model, gpu, 
checkpoint_name): #saving the model # checkpoint = {'input_size': 2048, # 'output_size': 102, # 'hidden_layers': [each.out_features for each in classifier.hidden_layers], # 'state_dict': model.state_dict(), # 'gpu': gpu} torch.save(model.state_dict(), checkpoint_name) def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path, csv_path, optim_path): # Load in a pre-trained checkpoint # if False: print(f'{project_path + file_path}') if os.path.isfile(project_path + file_path): print(f'Pre-trained Checkpoint Found, Loading...') model_scratch.load_state_dict(torch.load(project_path + file_path)) print(f'finished loading pre-trained checkpoint') else: print(f'No Pre-trained checkpoint found, continuing without') # Load in a pre-trained optimizer # if False: print(f'{project_path + optim_path}') if os.path.isfile(project_path + optim_path): print(f'Pre-trained Optimizer Found, Loading...') optimizer_scratch.load_state_dict(torch.load(project_path + optim_path)) print(f'finished loading pre-trained checkpoint') else: print(f'No Pre-trained checkpoint found, continuing without') # Load in a prior training data print(f'{csv_path}') training_data = None if os.path.isfile(csv_path): print(f'Pre-trained Training Data Found, Loading...') training_data = pd.read_csv(csv_path, index_col=0) print(f'finished loading previous training data') else: print(f'No previous training data found, continuing without') training_data = pd.DataFrame(columns = ["epoch_number", "training_loss", "valid_loss", "valid_acc"]) #setup GDrive Saving #save somewhere where it's not going to disappear(Gdrive) from google.colab import drive drive.mount('/content/gdrive', force_remount= False) """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf past_epoch = 0 try: past_epoch = int(training_data.iloc[-1]['epoch_number']) except: pass start_epoch = max(past_epoch, 1) end_epoch = start_epoch + n_epochs for epoch in range(start_epoch+1, end_epoch+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 val_acc = [] ################### # train the model # ################### model.train() for batch_idx, (data, target) in tqdm(enumerate(loaders['train'])): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly optimizer.zero_grad() out = model(data) loss = criterion(out, target) loss.backward() optimizer.step() train_loss += loss.item() ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) train_loss = train_loss * 1.0 / len(loaders['train']) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in tqdm(enumerate(loaders['valid'])): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss out = model(data) loss = criterion(out, target) valid_loss += loss.item() top_p, top_k = out.topk(1, dim=1) correct = top_k == target.view(top_k.shape) accuracy = torch.mean(correct.type(torch.cuda.FloatTensor)) val_acc.append(accuracy.item()) if batch_idx == len(loaders['valid'])-2: print(f'top_k: {top_k}, target: {target} ') valid_loss = valid_loss * 1.0 / len(loaders['valid']) # print training/validation statistics print('\nEpoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f} \tValidation Accuracy: {:.6f}'.format( epoch, train_loss, valid_loss, np.mean(val_acc) )) 
###################### # save the model # ###################### ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: valid_loss_min = valid_loss print(f'\n\n SAVING NEW AND BETTER MODEL!') save_scratch(model, use_cuda, save_path) save_scratch(optimizer, use_cuda, optim_path) # print(training_data) training_data = training_data.append({"epoch_number" : epoch, "training_loss" : train_loss, "valid_loss" : valid_loss, "valid_acc" : np.mean(val_acc)}, ignore_index=True) # print(training_data) print("saving CSV of training data for posterity\n\n") training_data.to_csv(csv_path) !cp {csv_path} "/content/gdrive/My Drive/{csv_path.split("/")[-1]}" !cp {project_path + file_path} "/content/gdrive/My Drive/{file_path.split("/")[-1]}" # return trained model return model # instantiate the CNN model_scratch = Net() ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.00030) # optimizer_scratch = optim.SGD(params=model_scratch.parameters(), lr=0.1, momentum=0.8) # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() loaders_scratch = {"train" : trainloader, "valid" : validloader, "test" : testloader} # train the model model_scratch = train(200, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, project_path + file_path, csv_path, optim_path) # load the model that got the best validation accuracy # model_scratch.load_state_dict(torch.load(project_path + file_path)) model_scratch = Net() # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load(project_path + file_path)) ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.000030) # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() loaders_scratch = {"train" : trainloader, "valid" : validloader, "test" : testloader} epoch = 10 train_loss = 15 valid_loss = 15 val_acc = 0.56 csv_path = project_path + file_path.split(".")[0] + "_2.csv" print(f'{csv_path}') training_data = None if os.path.isfile(csv_path): print(f'Pre-trained Training Data Found, Loading...') training_data = pd.read_csv(csv_path) print(f'finished loading previous training data') else: print(f'No previous training data found, continuing without') training_data = pd.DataFrame(columns = ["epoch_number", "training_loss", "valid_loss", "valid_acc"]) print(training_data) training_data = training_data.append({"epoch_number" : epoch, "training_loss" : train_loss, "valid_loss" : valid_loss, "valid_acc" : val_acc}, ignore_index=True) print(training_data) ###Output /content/deep-learning-v2-pytorch/project-dog-classification/model_scratch_002_2.csv No previous training data found, continuing without Empty DataFrame Columns: [epoch_number, training_loss, valid_loss, valid_acc] Index: [] epoch_number training_loss valid_loss valid_acc 0 10.0 15.0 15.0 0.56 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. 
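    # switch the network to evaluation mode so dropout is disabled and
    # batch norm layers use their running statistics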
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code from torchvision import datasets, transforms import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch import numpy as np import os import pandas as pd from tqdm import tqdm # from https://stackoverflow.com/questions/12984426/python-pil-ioerror-image-file-truncated-with-big-images # to deal with truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True use_cuda = True project_path = "/content/deep-learning-v2-pytorch/project-dog-classification/" version_number = "01_1" file_path = "model_transfer_" + version_number + ".pt" csv_path = project_path + file_path.split(".")[0] + ".csv" optim_path = project_path + file_path.split(".")[0] + ".opt" ## TODO: Specify data loaders batch_size = 64 print(f'Loading Data...') train_dir = '/content/dogImages/train' valid_dir = '/content/dogImages/valid' test_dir = '/content/dogImages/test' #Define the transforms for the training, validation, and testing sets print(f'Defining Transforms') train_transforms = transforms.Compose([transforms.RandomRotation(20), transforms.RandomResizedCrop(299), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) valid_transforms = transforms.Compose([transforms.Resize((299,299)), transforms.CenterCrop(299), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) test_transforms = transforms.Compose([transforms.Resize((299,299)), transforms.CenterCrop(299), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) print(f'Creating Datasets ') #Load the datasets with ImageFolder train_data = datasets.ImageFolder(train_dir, transform=train_transforms) test_data = datasets.ImageFolder(test_dir, transform=test_transforms) valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms) trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, 
shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True)
###Output
Loading Data...
Defining Transforms
Creating Datasets
###Markdown
(IMPLEMENTATION) Model Architecture
Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.inception_v3(pretrained=True)
for param in model_transfer.parameters():
    param.requires_grad = False
model_transfer.fc = nn.Linear(2048, 133)
model_transfer.aux_logits=False
if use_cuda:
    model_transfer = model_transfer.cuda()
print(model_transfer)
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.
__Answer:__ I started from a pretrained Inception-v3 network, froze all of its convolutional parameters, and replaced the final fully connected layer with a new `nn.Linear(2048, 133)` that maps the 2048 extracted features to the 133 breed classes (the auxiliary classifier is disabled since it is not needed for fine-tuning). The ImageNet-pretrained features already encode shapes and textures that transfer well to dogs, and freezing the backbone keeps the number of trainable parameters small, which suits the relatively small breed dataset.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.Adam(model_transfer.parameters(), lr=0.000030)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code
# train the model
n_epochs = 100
loaders_transfer = {"train" : trainloader, "valid" : validloader, "test" : testloader}
model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, project_path + file_path, csv_path, optim_path)

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 2.309590
Test Accuracy: 72% (605/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model
Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in train_data.classes]

def predict_breed_transfer(img_path):
    # load and pre-process the image, then return the predicted breed
    img = Image.open(img_path).convert('RGB')
    img_tensor = test_transforms(img).unsqueeze(0)
    if use_cuda:
        img_tensor = img_tensor.cuda()
    model_transfer.eval()
    with torch.no_grad():
        output = model_transfer(img_tensor)
    return class_names[output.argmax(dim=1).item()]
###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.
Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    has_dog = False
    has_human = False
    if dog_detector(img_path):
        has_dog = True
        print("it's a dog!")
        print("predicted breed: ", predict_breed_transfer(img_path))
    if face_detector(img_path):
        has_human = True
        print("it's a person!")
        print("resembling dog breed: ", predict_breed_transfer(img_path))
    if (not has_dog and not has_human):
        print("there are neither dogs nor humans in this image")
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ Three possible points of improvement: (1) more aggressive data augmentation and a larger training set of dog images, (2) hyperparameter tuning (learning rate, batch size, number of unfrozen layers), and (3) replacing the Haar-cascade face detector with a deep-learning-based detector to reduce false positives on dog images.
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App
--- In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header.
Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. 
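###Markdown
As a quick sanity check on the dog dataset (a hedged sketch assuming the standard `dog_images` layout of one folder per breed under each split):
###Code
import os

# count the breed class folders in the training split; 133 is expected
n_breeds = len(os.listdir('/data/dog_images/train'))
print('There are %d dog breed classes.' % n_breeds)
###Output
_____no_output_____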
###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. 
__Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. human_count = 0 dog_count = 0 for i in range(100): human_count += 1 if face_detector(human_files_short[i]) else 0 dog_count += 1 if face_detector(dog_files_short[i]) else 0 print('Human face in human pictures percentage %d%% .' % human_count) print('Human face in dog pictures percentage %d%% .' % dog_count) ###Output Human face in human pictures percentage 98% . Human face in dog pictures percentage 17% . ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of anotherface detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
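    ## NOTE: torchvision's pretrained models expect 3x224x224 RGB inputs scaled
    ## to [0, 1] and normalized with the ImageNet mean/std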
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path).convert('RGB')
    transformations = transforms.Compose([transforms.Resize(size=244),  # resize the shorter side before cropping
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          # pretrained torchvision models expect ImageNet-normalized inputs
                                          transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                               std=[0.229, 0.224, 0.225])])
    img_tensor = transformations(img).unsqueeze_(0)
    if use_cuda:
        img_tensor = img_tensor.cuda()
    pred = VGG16(img_tensor)
    pred_index = torch.max(pred,1)[1].item()
    return pred_index # predicted class index

#VGG16_predict(dog_files_short[73])
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    index = VGG16_predict(img_path)
    return 151 <= index <= 268  # ImageNet dog classes, inclusive
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__ 0% of the human images and 93% of the dog images have a detected dog (see the output below).
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
dog_count_in_human = 0
dog_count_in_dog = 0
for i in range(100):
    dog_count_in_human += 1 if dog_detector(human_files_short[i]) else 0
    dog_count_in_dog += 1 if dog_detector(dog_files_short[i]) else 0

print('Detected dog in human pictures percentage %d%% .' % dog_count_in_human)
print('Detected dog in dog pictures percentage %d%% .' % dog_count_in_dog)
###Output
Detected dog in human pictures percentage 0% .
Detected dog in dog pictures percentage 93% .
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets, transforms ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes use_cuda = torch.cuda.is_available() batch_size = 16 num_workers = 0 transform = transforms.Compose([transforms.Resize(size=224), transforms.CenterCrop((224,224)), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) # define training, test and validation data directories data_dir = '/data/dog_images/' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), transform) for x in ['train', 'valid', 'test']} loaders_scratch = { x: torch.utils.data.DataLoader(image_datasets[x], shuffle=True, batch_size=batch_size, num_workers=num_workers) for x in ['train', 'valid', 'test']} ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:- I added a Resize to 224 followed by a 224 x 224 center crop, since most pretrained models expect 224x224 inputs.
Then I added random horizontal flips and small random rotations as augmentation; if the images all show dogs in similar flat poses, this adds variety so the network can learn to generalize better. Finally, I normalized every split per color channel. - After the transformations I created three datasets: training, validation, and test. I enabled shuffling so the model sees the samples in a different order each epoch (strictly, this is only needed for the training set). (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 16, 3, padding=1) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.pool = nn.MaxPool2d(2,2) self.fc1 = nn.Linear(64*28*28, 500) self.fc2 = nn.Linear(500, 133) self.dropout = nn.Dropout(0.25) self.batch_norm = nn.BatchNorm1d(num_features=500) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.dropout(x) x = self.pool(F.relu(self.conv2(x))) x = self.dropout(x) x = self.pool(F.relu(self.conv3(x))) x = self.dropout(x) x = x.view(-1, 64*28*28) x = self.dropout(F.relu(self.batch_norm(self.fc1(x)))) x = self.fc2(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() print(model_scratch) ###Output Net( (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (fc1): Linear(in_features=50176, out_features=500, bias=True) (fc2): Linear(in_features=500, out_features=133, bias=True) (dropout): Dropout(p=0.25) (batch_norm): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ - Since the input images are (224, 224, 3), the first conv layer takes depth 3 and expands it to 16 channels. I used 3x3 kernels with stride 1 and padding 1, so each convolution leaves the spatial size unchanged. I repeated this pattern, doubling the channel count each time.- I added a max-pool layer after every conv layer, so each stage halves the spatial size (MaxPool2d(2,2)): 224 -> 112 -> 56 -> 28. After the third stage I have a (28, 28, 64) tensor, which I flatten into a vector of 64x28x28 = 50,176 features (matching fc1's in_features) before the linear layers.- The first fc layer reduces this vector to 500 features. - The second layer maps it to 133 scores, since we have 133 breed types.As a result, we get a vector of scores over the 133 classes.
The class with the highest score is taken as the predicted breed.```Net( (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (fc1): Linear(in_features=50176, out_features=500, bias=True) (fc2): Linear(in_features=500, out_features=133, bias=True) (dropout): Dropout(p=0.25) (batch_norm): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True))``` (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr = 0.01) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: torch.save(model.state_dict(), save_path) print('Validation loss decreased ({:.6f} --> {:.6f}).
Saving model ...'.format( valid_loss_min, valid_loss)) valid_loss_min = valid_loss # return trained model return model #print(use_cuda) #model_scratch.load_state_dict(torch.load('model_scratch.pt')) # train the model model_scratch = train(10, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output True Epoch: 1 Training Loss: 3.513011 Validation Loss: 4.070810 Validation loss decreased (inf --> 4.070810). Saving model ... Epoch: 2 Training Loss: 3.399955 Validation Loss: 4.130342 Epoch: 3 Training Loss: 3.308012 Validation Loss: 4.058548 Validation loss decreased (4.070810 --> 4.058548). Saving model ... Epoch: 4 Training Loss: 3.183540 Validation Loss: 4.066258 Epoch: 5 Training Loss: 3.039488 Validation Loss: 3.929664 Validation loss decreased (4.058548 --> 3.929664). Saving model ... Epoch: 6 Training Loss: 2.935382 Validation Loss: 4.176475 Epoch: 7 Training Loss: 2.795917 Validation Loss: 3.874392 Validation loss decreased (3.929664 --> 3.874392). Saving model ... Epoch: 8 Training Loss: 2.669932 Validation Loss: 3.897323 Epoch: 9 Training Loss: 2.519802 Validation Loss: 3.837462 Validation loss decreased (3.874392 --> 3.837462). Saving model ... Epoch: 10 Training Loss: 2.384984 Validation Loss: 3.790399 Validation loss decreased (3.837462 --> 3.790399). Saving model ... ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.781400 Test Accuracy: 12% (104/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
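Reusing the scratch loaders works here because they already apply the ImageNet channel normalization that pretrained backbones expect. If you were to build fresh loaders instead, the key requirement is matching that preprocessing; a minimal sketch (the `imagenet_transform` name below is illustrative, not part of this notebook):

```python
import torchvision.transforms as transforms

# Sketch of the canonical preprocessing for ImageNet-pretrained backbones:
# 224x224 RGB crops normalized with the ImageNet channel statistics.
imagenet_transform = transforms.Compose([
    transforms.Resize(256),               # shrink the short side first
    transforms.CenterCrop(224),           # standard 224x224 input crop
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],   # ImageNet channel means
                         [0.229, 0.224, 0.225]),  # ImageNet channel stds
])
```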
###Code ## TODO: Specify data loaders loaders_transfer = loaders_scratch.copy() print(loaders_transfer) ###Output {'train': <torch.utils.data.dataloader.DataLoader object at 0x7fb3bd656780>, 'valid': <torch.utils.data.dataloader.DataLoader object at 0x7fb37802d278>, 'test': <torch.utils.data.dataloader.DataLoader object at 0x7fb37802d2e8>} ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.resnet50(pretrained=True) print(model_transfer) for param in model_transfer.parameters(): param.requires_grad = False model_transfer.fc = nn.Linear(2048, 133) if use_cuda: model_transfer = model_transfer.cuda() ###Output ResNet( (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) (layer1): Sequential( (0): Bottleneck( (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (layer2): Sequential( (0): Bottleneck( (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) 
(downsample): Sequential( (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (3): Bottleneck( (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (layer3): Sequential( (0): Bottleneck( (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (3): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (4): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (5): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (layer4): Sequential( (0): Bottleneck( (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) 
) ) (avgpool): AvgPool2d(kernel_size=7, stride=1, padding=0) (fc): Linear(in_features=2048, out_features=1000, bias=True) ) ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ First I created a pretrained ResNet-50 instance, since it is a deep, efficient architecture that performs very well on image recognition tasks. Then I replaced the last fc layer, because the original model has 1000 outputs while we have 133 dog breeds: I created a Linear layer with 2048 inputs and 133 outputs and swapped it into the instance. Since the ResNet-50 backbone is pretrained, I froze the gradients of its parameters, because I don't want to retrain the convolutional layers. Therefore only the new Linear layer is trained. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(model_transfer.fc.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model # model_transfer.load_state_dict(torch.load('model_transfer.pt')) model_transfer = train(10, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 1.503385 Validation Loss: 0.726292 Validation loss decreased (inf --> 0.726292). Saving model ... Epoch: 2 Training Loss: 0.748203 Validation Loss: 0.750727 Epoch: 3 Training Loss: 0.613123 Validation Loss: 0.538600 Validation loss decreased (0.726292 --> 0.538600). Saving model ... Epoch: 4 Training Loss: 0.519473 Validation Loss: 0.583013 Epoch: 5 Training Loss: 0.462339 Validation Loss: 0.616664 Epoch: 6 Training Loss: 0.442953 Validation Loss: 0.602258 Epoch: 7 Training Loss: 0.408729 Validation Loss: 0.602475 Epoch: 8 Training Loss: 0.408379 Validation Loss: 0.595022 Epoch: 9 Training Loss: 0.365885 Validation Loss: 0.639029 Epoch: 10 Training Loss: 0.348201 Validation Loss: 0.589587 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.593420 Test Accuracy: 82% (689/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e.
a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in image_datasets['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed img = Image.open(img_path).convert('RGB') transformations = transforms.Compose([transforms.Resize(size=224), transforms.CenterCrop((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) img_tensor = transformations(img)[:3,:,:].unsqueeze(0) if use_cuda: img_tensor = img_tensor.cuda() pred = model_transfer(img_tensor) _, preds_tensor = torch.max(pred, 1) pred_index = np.squeeze(preds_tensor.numpy()) if not use_cuda else np.squeeze(preds_tensor.cpu().numpy()) breed = class_names[pred_index] return breed ind = 873 print(dog_files[ind]) predict_breed_transfer(dog_files[ind]) ###Output /data/dog_images/train/129.Tibetan_mastiff/Tibetan_mastiff_08179.jpg ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither if(face_detector(img_path)): print('hello, hooman!') image = Image.open(img_path) plt.imshow(image) plt.show() breed = predict_breed_transfer(img_path) print('You look like a ...') print(breed) elif(dog_detector(img_path)): print('hello, doggo!') image = Image.open(img_path) plt.imshow(image) plt.show() breed = predict_breed_transfer(img_path) print('You look like a ...') print(breed) else: print('hello, not doggo or hooman creature!') image = Image.open(img_path) plt.imshow(image) plt.show() print('Maybe, you want to try with a cute doggo?') ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)- The model could be trained longer, since the loss is still decreasing on both the training and validation sets; I assume a longer run would yield a better model that also performs better on the test set.- More conv layers between the MaxPool layers could be used, since all the images are dogs and breed decisions depend on fine-grained shapes and edges.
Additional convolutional capacity would help the network identify and correlate these shapes, edges, and colors.- A more complex model and algorithm could be built to handle pictures that contain both a human and one or more dogs. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output hello, hooman! ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps.
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. 
The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. c_hum = 0 for human_pic in tqdm(human_files_short): c_hum += int(face_detector(human_pic)) print(f"{c_hum} / {len(human_files_short)} human faces detected in human pictures") c_dog = 0 for dog_pic in tqdm(dog_files_short): c_dog += int(face_detector(dog_pic)) print(f"{c_dog} / {len(dog_files_short)} human faces detected in dog pictures") ###Output 100%|██████████| 100/100 [00:04<00:00, 21.34it/s] 1%| | 1/100 [00:00<00:18, 5.47it/s] ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
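Because the model produces a raw score for each of the 1000 classes, it is often informative to inspect the top few predictions rather than only the argmax. A minimal sketch, assuming `img` is a preprocessed `(1, 3, 224, 224)` batch like the one built inside `VGG16_predict` below:

```python
import torch
import torch.nn.functional as F

# Sketch: turn the raw class scores into probabilities and list the
# five most likely ImageNet class indices for a preprocessed input `img`.
with torch.no_grad():                     # no gradients needed for inference
    logits = VGG16(img)                   # shape (1, 1000)
probs = F.softmax(logits, dim=1)          # normalize scores to probabilities
top_p, top_idx = torch.topk(probs, k=5)   # top-5 probabilities and indices
print(top_idx.squeeze().tolist())
print(top_p.squeeze().tolist())
```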
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]) img = Image.open(img_path).convert('RGB') img = transform(img) if use_cuda: img = img.cuda() img = img.unsqueeze(0) out = VGG16(img) return torch.argmax(out).item() # predicted class index VGG16_predict('dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg') ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): class_detected = VGG16_predict(img_path) return class_detected in range(151,269) # true/false dog_detector('dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg') ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short.
c_hum = 0 for human_pic in tqdm(human_files_short): c_hum += int(dog_detector(human_pic)) print(f"{c_hum} / {len(human_files_short)} dogs detected in human pictures") c_dog = 0 for dog_pic in tqdm(dog_files_short): c_dog += int(dog_detector(dog_pic)) print(f"{c_dog} / {len(dog_files_short)} dogs detected in dog pictures") ###Output 100%|██████████| 100/100 [00:03<00:00, 33.25it/s] 0%| | 0/100 [00:00<?, ?it/s] ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
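The normalization constants used in the next cell are the standard ImageNet channel statistics rather than values measured on the dog images themselves, which is a common and reasonable default. If you wanted dataset-specific statistics, they can be estimated with one pass over the training set; a rough sketch (assuming a hypothetical `plain_loader` over the training split with only `Resize`/`CenterCrop`/`ToTensor` applied):

```python
import torch

# Sketch: estimate per-channel mean/std over the training images.
# `plain_loader` is assumed to yield (batch, 3, H, W) float tensors in [0, 1].
n, mean_sum, sq_sum = 0, torch.zeros(3), torch.zeros(3)
for images, _ in plain_loader:
    b = images.size(0)
    flat = images.view(b, 3, -1)               # flatten H*W per channel
    mean_sum += flat.mean(dim=2).sum(dim=0)    # accumulate per-image channel means
    sq_sum += flat.pow(2).mean(dim=2).sum(dim=0)
    n += b
mean = mean_sum / n
std = (sq_sum / n - mean.pow(2)).sqrt()        # Var[X] = E[X^2] - E[X]^2
print(mean, std)
```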
###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes train_transform = transforms.Compose([transforms.Resize(224),transforms.CenterCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomRotation(15),transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) valid_transform = transforms.Compose([transforms.Resize(224),transforms.CenterCrop(224),transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) trainset = datasets.ImageFolder(root="dogImages/train/", transform=train_transform) validset = datasets.ImageFolder(root="dogImages/valid/", transform=valid_transform) testset = datasets.ImageFolder(root="dogImages/test/", transform=valid_transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True) validloader = torch.utils.data.DataLoader(validset, batch_size=32, shuffle=True) testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=True) print(len(trainset)) print(len(validset)) print(len(testset)) for x,y in validloader: x = x[3] x = x.numpy() plt.imshow(np.transpose(x, (1,2,0))) break ###Output Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: Each image is resized to 224 and then center-cropped to 224 x 224, the input size most pretrained models expect. The training set is additionally augmented with random horizontal flips and random rotations of up to 15 degrees so the network sees more pose variety, while the validation and test sets are only resized and cropped. All three splits are normalized with the standard ImageNet channel means and standard deviations. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self, drop_prob=0.3): super(Net, self).__init__() ## Define layers of a CNN #224*224 self.conv1 = nn.Conv2d(3, 16, 3, padding=1) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.pool = nn.MaxPool2d(2,2) self.fc1 = nn.Linear(64*28*28, 500) self.fc2 = nn.Linear(500, 133) self.dropout = nn.Dropout(p=drop_prob) self.batch_norm = nn.BatchNorm1d(num_features=500) def forward(self, x): ## Define forward behavior x = self.pool(F.relu(self.conv1(x))) #16*112*112 x = self.pool(F.relu(self.conv2(x))) #32*56*56 x = self.pool(F.relu(self.conv3(x))) #64*28*28 x = self.dropout(x) x = x.view(x.shape[0],64*28*28) x = F.relu(self.batch_norm(self.fc1(x))) x = self.fc2(x) x = self.dropout(x) out = torch.log_softmax(x, dim=1) return out #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() sum(p.numel() for p in model_scratch.parameters()) for x,y in trainloader: x = x.cuda() test = model_scratch(x) print(test.shape) print(torch.argmax(test, dim=1)[:10]) break ###Output torch.Size([32, 133]) tensor([ 2, 2, 132, 62, 43, 71, 63, 42, 91, 100], device='cuda:0') ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__ The network stacks three 3x3 convolutions (3 -> 16 -> 32 -> 64 channels), each followed by a 2x2 max-pool that halves the spatial size (224 -> 112 -> 56 -> 28). The resulting 64x28x28 volume is flattened and passed through a 500-unit fully connected layer with batch normalization and ReLU, then through a final 133-way layer; dropout regularizes the network, and log-softmax produces log-probabilities to pair with the NLL loss chosen below. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.NLLLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.01) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, trainloader, validloader, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(trainloader): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() out = model(data) loss = criterion(out, target) loss.backward() optimizer.step() train_loss += loss.item() * data.shape[0] train_loss = train_loss / len(trainset) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(validloader): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss out = model(data) loss = criterion(out, target) valid_loss += loss.item() * data.shape[0] valid_loss = valid_loss / len(validset) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: valid_loss_min = valid_loss print("Best model yet, saving model...") torch.save(model.state_dict(), save_path) # save the model being trained, not a specific global return model # train the model model_scratch = train(20, trainloader, validloader, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(testloader, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0.
model.eval() for batch_idx, (data, target) in enumerate(testloader): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss += loss.item() # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) test_loss = test_loss/len(testset) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(testloader, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 0.133955 Test Accuracy: 9% (79/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code trainloader, validloader, testloader ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = VGG16 for p in model_transfer.parameters(): p.requires_grad = False print(model_transfer) in_features = model_transfer.classifier[6].in_features model_transfer.classifier[6] = nn.Linear(in_features, 133) print(model_transfer) if use_cuda: model_transfer = model_transfer.cuda() sum([p.numel() for p in model_transfer.parameters() if p.requires_grad]) ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I reused the pretrained VGG-16 from Step 2, froze all of its parameters, and replaced the final classifier layer (`classifier[6]`) with a new `nn.Linear(in_features, 133)` so the output matches our 133 breeds. The frozen convolutional layers already encode general-purpose image features learned on ImageNet, which includes many dog classes, so only the small new layer needs to be trained on the dog dataset. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() # only the unfrozen classifier parameters need to be optimized optimizer_transfer = optim.Adam(filter(lambda p: p.requires_grad, model_transfer.parameters()), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
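One practical note on checkpointing: `torch.save(model.state_dict(), path)` records tensors on whatever device they lived on, so a checkpoint written on a GPU machine needs `map_location` when reloaded on a CPU-only machine. A small sketch:

```python
import torch

# Sketch: reload the best checkpoint saved during training, remapping
# GPU-resident tensors to the CPU when CUDA is unavailable.
state = torch.load('model_transfer.pt',
                   map_location=None if use_cuda else 'cpu')
model_transfer.load_state_dict(state)
model_transfer.eval()   # disable dropout and batch-norm updates for evaluation
```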
###Code # train the model model_transfer =train(20, trainloader, validloader, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(testloader, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.024633 Test Accuracy: 84% (710/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in trainset.classes] def predict_breed_transfer(img_path, model, transform): # load the image and return the predicted breed img = Image.open(img_path) img = transform(img) if use_cuda: img = img.cuda() img = img.unsqueeze(0) out = model(img) return class_names[torch.argmax(out).item()] predict_breed_transfer("dogImages/train/004.Akita/Akita_00220.jpg", model_transfer, valid_transform) ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path, model_dog, transform): ## handle cases for a human face, dog, and neither if dog_detector(img_path): print("Dog Detected!") print(f"Race predicted : {predict_breed_transfer(img_path, model_dog, transform)}") elif face_detector(img_path): print("Human Detected!") print(f"You look like a {predict_breed_transfer(img_path, model_dog, transform)}") else : print("No one is there") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? 
Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[100:103])): run_app(file, model_transfer, valid_transform) ###Output Human Detected! You look like a Australian shepherd Human Detected! You look like a Pharaoh hound Human Detected! You look like a Dachshund Dog Detected! Race predicted : Basenji Dog Detected! Race predicted : Basenji Dog Detected! Race predicted : Basenji ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. 
There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code !python3 -m pip freeze import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. 
Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) if len(faces) == 0 and 'lfw' in img_path: print(img_path) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell)There are 99 faces in human files and 18 faces in dog files. ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# print(f"There are {sum([face_detector(img) for img in human_files_short])} faces in human files") print(f"There are {sum([face_detector(img) for img in dog_files_short])} faces in dog files") ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output lfw/Claudia_Pechstein/Claudia_Pechstein_0005.jpg There are 99 faces in human files There are 18 faces in dog files ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. 
ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# torch must be imported before it is used to query CUDA availability
use_cuda = torch.cuda.is_available()

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    # cv2 loads images as BGR; convert to RGB, which the ImageNet
    # normalization statistics below assume
    image = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
    # resize to the 224x224 resolution the pre-trained weights expect
    image = cv2.resize(image, (224, 224))
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    transformed_image = normalize(image).unsqueeze(0)
    if use_cuda:
        transformed_image = transformed_image.cuda()
    VGG16.eval()
    with torch.no_grad():
        predictions_output = VGG16(transformed_image)
    return predictions_output.argmax().item()

VGG16_predict('dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg')
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
def dog_detector(img_path):
    ## TODO: Complete the function.
    return VGG16_predict(img_path) in range(151, 269)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ 94% of the dog images were classified as dogs, and 0% of the human images were classified as dogs.
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
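# Count detector hits on each 100-image sample: ideally most of dog_files_short
# and none of human_files_short should trigger dog_detector.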
humans_as_dogs = 0
dogs_as_dogs = 0
for image in human_files_short:
    if dog_detector(image):
        humans_as_dogs += 1
for image in dog_files_short:
    if dog_detector(image):
        dogs_as_dogs += 1
    else:
        print(image)
print(f"There are {humans_as_dogs / len(human_files_short) * 100}% human photos classified as dogs")
print(f"There are {dogs_as_dogs / len(dog_files_short) * 100}% dog photos classified as dogs")
###Output
dogImages/valid/043.Canaan_dog/Canaan_dog_03095.jpg
dogImages/valid/043.Canaan_dog/Canaan_dog_03070.jpg
dogImages/valid/024.Bichon_frise/Bichon_frise_01708.jpg
dogImages/valid/036.Briard/Briard_02532.jpg
dogImages/valid/054.Collie/Collie_03842.jpg
dogImages/valid/035.Boykin_spaniel/Boykin_spaniel_02451.jpg
There are 0.0% human photos classified as dogs
There are 94.0% dog photos classified as dogs
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.
Brittany | Welsh Springer Spaniel
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Curly-Coated Retriever | American Water Spaniel
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
Yellow Labrador | Chocolate Labrador | Black Labrador
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).
You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
import torch
import torchvision.transforms as transforms
from torchvision import datasets

traindir = "dogImages/train"
valdir = "dogImages/valid"
testdir = "dogImages/test"
batch_size = 64

train_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(traindir, transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])),
    batch_size=batch_size, shuffle=True,
    num_workers=0, pin_memory=True)

val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])),
    batch_size=batch_size, shuffle=True,
    num_workers=0, pin_memory=True)

test_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(testdir, transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])),
    batch_size=batch_size, shuffle=True,
    num_workers=0, pin_memory=True)

loaders_scratch = {'train': train_loader, 'valid': val_loader, 'test': test_loader}
### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:
- I resized the images to 224x224, since this input size is commonly used by standard architectures.
- I augmented the training set with random horizontal flips, because a flipped dog is still a dog of the same breed. I did not rotate or translate the images, because those transforms could distort the dog too strongly and make the breed harder to recognize. I then left the horizontal flip out of the validation and test loaders, because those images should be predicted as they are: the validation loader uses a center crop and the test loader a plain resize.
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
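Before filling in the template, it can help to sanity-check what the model will receive from the loaders above; a minimal check (assuming the `loaders_scratch` dict defined earlier):
###Code
# pull one batch from the training loader and inspect its shapes
images, labels = next(iter(loaders_scratch['train']))
print(images.shape)   # expected: torch.Size([64, 3, 224, 224])
print(labels.shape)   # expected: torch.Size([64])
print(labels[:10])    # integer class indices in [0, 132]
###Output
_____no_output_____
###Markdown
The template below is then completed with this 3x224x224 input in mind.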
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv_1 = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1)
        self.conv_1_bn = nn.BatchNorm2d(32)
        self.pooling = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv_2 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)
        self.conv_2_bn = nn.BatchNorm2d(64)
        self.conv_3 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv_3_bn = nn.BatchNorm2d(128)
        self.linear_1 = nn.Linear(128 * 7 * 7, 500)
        self.linear_2 = nn.Linear(500, 133)
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        ## Define forward behavior
        # input: 3x224x224
        x = F.relu(self.conv_1_bn(self.conv_1(x)))  # stride-2 conv -> 32x112x112
        x = self.pooling(x)                         # -> 32x56x56
        x = F.relu(self.conv_2_bn(self.conv_2(x)))  # stride-2 conv -> 64x28x28
        x = self.pooling(x)                         # -> 64x14x14
        x = F.relu(self.conv_3_bn(self.conv_3(x)))  # stride-1 conv -> 128x14x14
        x = self.pooling(x)                         # -> 128x7x7
        x = x.view(-1, 128 * 7 * 7)
        x = F.relu(self.linear_1(x))
        x = self.dropout(x)
        x = self.linear_2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I created three conv2d layers: the first producing 32 filters, the second 64, and the third 128. Each conv layer is followed by 2x2 max pooling, which halves the spatial size; the first two convolutions also use stride 2, halving it again. The network ends in two linear layers: the first takes all 128 x 7 x 7 activations and compresses them to 500 neurons, and the second maps those 500 to 133 outputs, one per class. I used pooling to keep training fast, and the 32/64/128 filter progression to check whether this classical approach works for this task. Previously I tried three conv layers with 16, 32, and again 16 filters, but that approach was too limited and couldn't reach 10% on the test set. I used dropout with probability 0.3 of dropping a neuron.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(params=model_scratch.parameters(), lr=.05)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
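The checkpointing inside the training function below saves the whole module object; PyTorch also supports saving just the parameters. A short sketch of the two options, using the file name from this notebook:
###Code
# Option 1: save/load only the learned parameters (smaller file, and loading
# does not require unpickling the Net class itself)
torch.save(model_scratch.state_dict(), 'model_scratch.pt')
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

# Option 2: save/load the entire module object (what the training function
# below does); loading requires the Net class definition to be available
torch.save(model_scratch, 'model_scratch.pt')
model_scratch = torch.load('model_scratch.pt')
###Output
_____no_output_____
###Markdown
Running this cell as-is would overwrite `model_scratch.pt` with untrained weights, so it is shown for reference only.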
###Code
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
from torch.autograd import Variable
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf
    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0
        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            data, target = Variable(data), Variable(target)
            optimizer.zero_grad()
            try:
                outputs = model(data)
            except:
                # networks such as Inception v3 also return auxiliary logits
                outputs, aux = model(data)
            loss = criterion(outputs, target)
            loss.backward()
            optimizer.step()
            # running average of the training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss; no backward pass or optimizer
            ## step here -- the model must not be trained on the validation set
            with torch.no_grad():
                try:
                    outputs = model(data)
                except:
                    outputs, aux = model(data)
                loss = criterion(outputs, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))
        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, valid_loss
            ))
        ## TODO: save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            valid_loss_min = valid_loss
            torch.save(model, save_path)
            print("saved")
    # return trained model
    return model

# train the model
model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
# model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
Epoch: 1 Training Loss: 4.819551 Validation Loss: 4.672046
saved
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
###Code
import numpy as np

def test(loaders, model, criterion, use_cuda):
    #model_scratch = torch.load('model_transfer.pt', map_location=torch.device('cpu'))
    #model_scratch.load_state_dict(torch.load('model_scratch.pt'))
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model try: output = model(data) except: output, aux = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 4.863764 Test Accuracy: 15% (129/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code import os import torchvision.transforms as transforms from torchvision import datasets import torch traindir = "dogImages/train" valdir = "dogImages/valid" testdir = "dogImages/test" batch_size = 64 train_loader = torch.utils.data.DataLoader( datasets.ImageFolder(traindir, transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ])), batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True) val_loader = torch.utils.data.DataLoader( datasets.ImageFolder(valdir, transforms.Compose([ transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ])), batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True) test_loader = torch.utils.data.DataLoader( datasets.ImageFolder(testdir, transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ])), batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True) loaders_transfer = {'train': train_loader, 'valid': val_loader, 'test': test_loader} ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.vgg16_bn(pretrained=True).cpu() model_transfer.classifier[-1] = nn.Linear(4096, 133) if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. 
Describe why you think the architecture is suitable for the current problem. __Answer:__ I tried many different CNN architectures available in the torchvision.models package. I tried Inception v3 as the first choice, but it was learning really slowly on my laptop and the drop in loss for both the train and validation sets was insignificant for the first 5 epochs. Then I tried resnet18, but it couldn't learn this concept. Then I used vgg16, as it was used in the previous exercise, but it was returning only NaN loss for each epoch. Also, it was very slow, so I tried to train it on an Amazon EC2 machine. Then I used vgg16_bn, i.e. VGG with batch normalization; with it the loss was dropping substantially, so I stuck with this architecture.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
import torch.optim as optim

criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(params=model_transfer.parameters(), lr=.001, momentum=.95, nesterov=True)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
from torch.autograd import Variable
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train_transfer(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf
    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0
        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            data, target = Variable(data), Variable(target)
            optimizer.zero_grad()
            try:
                outputs = model(data)
            except:
                outputs, aux = model(data)
            loss = criterion(outputs, target)
            loss.backward()
            optimizer.step()
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss; no backward pass or optimizer
            ## step here -- the validation set is only evaluated, never trained on
            with torch.no_grad():
                try:
                    outputs = model(data)
                except:
                    outputs, aux = model(data)
                loss = criterion(outputs, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))
        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, valid_loss
            ))
        ## save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            valid_loss_min = valid_loss
            # save to the save_path argument ('model_transfer.pt' below), not to
            # the scratch model's checkpoint
            torch.save(model, save_path)
            print("saved")
    # return trained model
    return model

def test_transfer(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU (disabled here: evaluation is run on the CPU)
        if False:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        try:
            output = model(data)
        except:
            output, aux = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)
    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# train the model
n_epochs = 30
# model_transfer = train_transfer(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

DEVICE = torch.device('cuda' if False else 'cpu') # 'cpu' in this case
# load the model that got the best validation accuracy (uncomment the line below)
model_transfer = torch.load('model_transfer.pt', map_location=DEVICE)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
test_transfer(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.723970

Test Accuracy: 83% (694/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model
Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
# list of class names by index, i.e.
a name can be accessed like class_names[0] def find_classes(dir_): classes = os.listdir(dir_) classes.sort() class_to_idx = {i: classes[i][4:] for i in range(len(classes))} return classes, class_to_idx class_names = find_classes("dogImages/train")[1] #class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] print(class_names) def predict_breed_transfer(img_path): # load the image and return the predicted breed image = cv2.imread(img_path) normalize = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) transformed_image = normalize(image) transformed_image = transformed_image.unsqueeze(0) model_transfer.eval() predictions_output = model_transfer(transformed_image) return class_names[predictions_output.data.numpy().argmax()] print(predict_breed_transfer('./dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg')) import glob dict_of_races = dict() for image in glob.glob("dogImages/*/*/*"): race_name = image.split("/")[-2][4:] if race_name not in dict_of_races.keys(): dict_of_races[race_name] = 1 else: dict_of_races[race_name] += 1 print({k: v for k, v in sorted(dict_of_races.items(), key=lambda item: item[1])} ) print(dict_of_races) ###Output {'Xoloitzcuintli': 33, 'Norwegian_buhund': 33, 'Plott': 35, 'Manchester_terrier': 36, 'Saint_bernard': 37, 'Wirehaired_pointing_griffon': 37, 'Yorkshire_terrier': 38, 'Smooth_fox_terrier': 38, 'Parson_russell_terrier': 38, 'Petit_basset_griffon_vendeen': 39, 'Neapolitan_mastiff': 39, 'Pointer': 40, 'Norwegian_lundehund': 41, 'Field_spaniel': 41, 'Portuguese_water_dog': 42, 'Finnish_spitz': 42, 'American_water_spaniel': 42, 'Lowchen': 42, 'Bluetick_coonhound': 44, 'Kerry_blue_terrier': 44, 'Otterhound': 44, 'Irish_red_and_white_setter': 46, 'Black_and_tan_coonhound': 46, 'Old_english_sheepdog': 49, 'English_toy_spaniel': 49, 'Pharaoh_hound': 49, 'Great_dane': 50, 'Black_russian_terrier': 51, 'Giant_schnauzer': 51, 'Silky_terrier': 51, 'German_wirehaired_pointer': 52, 'Entlebucher_mountain_dog': 53, 'Miniature_schnauzer': 53, 'Lhasa_apso': 53, 'Gordon_setter': 54, 'Labrador_retriever': 54, 'Glen_of_imaal_terrier': 55, 'Pomeranian': 55, 'Komondor': 55, 'Norwich_terrier': 55, 'Welsh_springer_spaniel': 55, 'Keeshond': 55, 'Norwegian_elkhound': 56, 'Bouvier_des_flandres': 56, 'Leonberger': 57, 'Greater_swiss_mountain_dog': 57, 'Ibizan_hound': 58, 'Australian_terrier': 58, 'Norfolk_terrier': 58, 'German_pinscher': 59, 'Cocker_spaniel': 59, 'Doberman_pinscher': 59, 'Belgian_tervuren': 59, 'Maltese': 60, 'Bedlington_terrier': 60, 'Tibetan_mastiff': 60, 'Pekingese': 60, 'German_shorthaired_pointer': 60, 'Clumber_spaniel': 61, 'Kuvasz': 61, 'Poodle': 62, 'Canaan_dog': 62, 'Icelandic_sheepdog': 62, 'Anatolian_shepherd_dog': 62, 'Brittany': 62, 'Newfoundland': 62, 'Chinese_shar-pei': 62, 'Lakeland_terrier': 62, 'American_foxhound': 63, 'Dandie_dinmont_terrier': 63, 'Chinese_crested': 63, 'Beauceron': 63, 'Curly-coated_retriever': 63, 'French_bulldog': 64, 'Irish_water_spaniel': 64, 'Airedale_terrier': 65, 'Border_terrier': 65, 'Irish_setter': 66, 'Irish_wolfhound': 66, 'Boykin_spaniel': 66, 'Pembroke_welsh_corgi': 66, 'Cardigan_welsh_corgi': 66, 'Bulldog': 66, 'English_setter': 66, 'English_springer_spaniel': 66, 'Nova_scotia_duck_tolling_retriever': 67, 'Chesapeake_bay_retriever': 67, 'Chihuahua': 68, 'Greyhound': 70, 'Borzoi': 70, 'Collie': 71, 'Japanese_chin': 71, 'Brussels_griffon': 71, 'Mastiff': 72, 'Afghan_hound': 73, 'Italian_greyhound': 73, 
'Great_pyrenees': 74, 'Beagle': 74, 'Dogue_de_bordeaux': 75, 'English_cocker_spaniel': 76, 'Havanese': 76, 'Bichon_frise': 77, 'Bearded_collie': 77, 'German_shepherd_dog': 78, 'Chow_chow': 78, 'Belgian_malinois': 78, 'Cairn_terrier': 79, 'Flat-coated_retriever': 79, 'Papillon': 79, 'Akita': 79, 'Golden_retriever': 80, 'Bloodhound': 80, 'Boxer': 80, 'Affenpinscher': 80, 'American_eskimo_dog': 80, 'Cane_corso': 80, 'Belgian_sheepdog': 80, 'Briard': 81, 'Bernese_mountain_dog': 81, 'Boston_terrier': 81, 'American_staffordshire_terrier': 82, 'Irish_terrier': 82, 'Dachshund': 82, 'Australian_cattle_dog': 83, 'Australian_shepherd': 83, 'Cavalier_king_charles_spaniel': 84, 'Bullmastiff': 86, 'Basenji': 86, 'Bull_terrier': 87, 'Dalmatian': 89, 'Basset_hound': 92, 'Border_collie': 93, 'Alaskan_malamute': 96} {'Pointer': 40, 'French_bulldog': 64, 'Poodle': 62, 'Nova_scotia_duck_tolling_retriever': 67, 'Canaan_dog': 62, 'Bichon_frise': 77, 'Briard': 81, 'Irish_setter': 66, 'Great_dane': 50, 'English_cocker_spaniel': 76, 'Glen_of_imaal_terrier': 55, 'Irish_wolfhound': 66, 'Collie': 71, 'Norwegian_elkhound': 56, 'Gordon_setter': 54, 'Boykin_spaniel': 66, 'American_foxhound': 63, 'Icelandic_sheepdog': 62, 'Dandie_dinmont_terrier': 63, 'Golden_retriever': 80, 'Portuguese_water_dog': 42, 'German_pinscher': 59, 'American_staffordshire_terrier': 82, 'Pomeranian': 55, 'Black_russian_terrier': 51, 'Havanese': 76, 'Irish_terrier': 82, 'Afghan_hound': 73, 'Bloodhound': 80, 'Yorkshire_terrier': 38, 'Chinese_crested': 63, 'Giant_schnauzer': 51, 'Xoloitzcuintli': 33, 'Maltese': 60, 'Bedlington_terrier': 60, 'Boxer': 80, 'Airedale_terrier': 65, 'Finnish_spitz': 42, 'Petit_basset_griffon_vendeen': 39, 'Bluetick_coonhound': 44, 'Entlebucher_mountain_dog': 53, 'Miniature_schnauzer': 53, 'Cairn_terrier': 79, 'Irish_water_spaniel': 64, 'Pembroke_welsh_corgi': 66, 'Bernese_mountain_dog': 81, 'Silky_terrier': 51, 'Anatolian_shepherd_dog': 62, 'Affenpinscher': 80, 'Japanese_chin': 71, 'Ibizan_hound': 58, 'Bearded_collie': 77, 'Brittany': 62, 'Australian_terrier': 58, 'Cardigan_welsh_corgi': 66, 'German_shepherd_dog': 78, 'Border_collie': 93, 'Beauceron': 63, 'Norfolk_terrier': 58, 'Komondor': 55, 'Australian_cattle_dog': 83, 'Lhasa_apso': 53, 'Kerry_blue_terrier': 44, 'Bulldog': 66, 'Saint_bernard': 37, 'English_setter': 66, 'Flat-coated_retriever': 79, 'Greyhound': 70, 'Bullmastiff': 86, 'Cocker_spaniel': 59, 'Norwich_terrier': 55, 'Boston_terrier': 81, 'Plott': 35, 'Doberman_pinscher': 59, 'Dogue_de_bordeaux': 75, 'Papillon': 79, 'Tibetan_mastiff': 60, 'Cavalier_king_charles_spaniel': 84, 'Dalmatian': 89, 'Norwegian_lundehund': 41, 'Leonberger': 57, 'Irish_red_and_white_setter': 46, 'Wirehaired_pointing_griffon': 37, 'Neapolitan_mastiff': 39, 'Mastiff': 72, 'Old_english_sheepdog': 49, 'Chow_chow': 78, 'Manchester_terrier': 36, 'Bull_terrier': 87, 'Belgian_malinois': 78, 'Pekingese': 60, 'English_toy_spaniel': 49, 'Labrador_retriever': 54, 'Black_and_tan_coonhound': 46, 'Great_pyrenees': 74, 'German_shorthaired_pointer': 60, 'American_water_spaniel': 42, 'Akita': 79, 'Borzoi': 70, 'Border_terrier': 65, 'Belgian_tervuren': 59, 'Basenji': 86, 'Bouvier_des_flandres': 56, 'Norwegian_buhund': 33, 'Welsh_springer_spaniel': 55, 'Beagle': 74, 'Smooth_fox_terrier': 38, 'Newfoundland': 62, 'Chesapeake_bay_retriever': 67, 'Basset_hound': 92, 'Curly-coated_retriever': 63, 'Alaskan_malamute': 96, 'German_wirehaired_pointer': 52, 'Chinese_shar-pei': 62, 'Lakeland_terrier': 62, 'Dachshund': 82, 'Otterhound': 44, 'Field_spaniel': 41, 
'Chihuahua': 68, 'Brussels_griffon': 71, 'American_eskimo_dog': 80, 'Greater_swiss_mountain_dog': 57, 'Pharaoh_hound': 49, 'English_springer_spaniel': 66, 'Cane_corso': 80, 'Belgian_sheepdog': 80, 'Parson_russell_terrier': 38, 'Australian_shepherd': 83, 'Keeshond': 55, 'Lowchen': 42, 'Clumber_spaniel': 61, 'Kuvasz': 61, 'Italian_greyhound': 73}
###Markdown
--- Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    print(img_path)
    if dog_detector(img_path):
        dog_breed = predict_breed_transfer(img_path)
        return f"There is a dog in the picture and the predicted breed is {dog_breed}"
    elif face_detector(img_path):
        predicted_breed_human = predict_breed_transfer(img_path)
        return f"There is a human face in the provided photo and this human looks like {predicted_breed_human}!"
    else:
        return "Error, no human face and no dog in the image"
## handle cases for a human face, dog, and neither
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement) The results for the provided images are very good, but for photos from the internet they are not as good. I thought the model would predict boxer dogs from the internet properly, but it failed on two images that contained a typical boxer. I could do many things in the next steps to make it work better:
1) Take many more images of some of the classes from the internet, so the dataset will be bigger and better suited to this task.
2) Train another network, or train the current one for longer than 30 epochs, so it can learn better.
3) Somehow clean up the photos' backgrounds, which can hurt prediction. Some dog breeds like the Alaskan Husky could be associated with a snowy background, and if I gave it a photo of a Husky in a forest it could get it wrong.
4) Use another type of neural network, or fine-tune the current approach; it's not perfect right now, but using another optimization algorithm or tuning hyperparameters like the learning rate or decay rate could help.
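One cheap refinement related to the points above: report the top-3 candidate breeds with confidences instead of a single argmax, which makes borderline cases (such as the internet boxer photos) visible. A sketch reusing the preprocessing from `predict_breed_transfer`; the name `predict_topk` is illustrative:
###Code
def predict_topk(img_path, k=3):
    # same preprocessing as predict_breed_transfer above
    image = cv2.imread(img_path)
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    batch = normalize(image).unsqueeze(0)
    model_transfer.eval()
    with torch.no_grad():
        probs = torch.softmax(model_transfer(batch), dim=1)
    top_p, top_i = probs.topk(k, dim=1)
    # pair each of the k best indices with its breed name and probability
    return [(class_names[i.item()], p.item()) for p, i in zip(top_p[0], top_i[0])]
###Output
_____no_output_____
###Markdown
The cell below runs the unmodified `run_app` on a handful of sample images.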
###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below print(run_app("images/American_water_spaniel_00648.jpg")) # print(run_app("dogImages/tests_internet/boxer_2.webp")) for file in np.hstack((human_files[:3], dog_files[:3])): print(run_app(file)) ###Output images/American_water_spaniel_00648.jpg There is a dog in the picture and the predicted breed is Curly-coated_retriever lfw/German_Khan/German_Khan_0001.jpg There is a human face in the provided photo and this human looks like Alaskan_malamute! lfw/Stefano_Gabbana/Stefano_Gabbana_0001.jpg There is a human face in the provided photo and this human looks like Italian_greyhound! lfw/Dragan_Covic/Dragan_Covic_0001.jpg There is a human face in the provided photo and this human looks like Alaskan_malamute! dogImages/valid/122.Pointer/Pointer_07831.jpg There is a dog in the picture and the predicted breed is Pointer dogImages/valid/122.Pointer/Pointer_07826.jpg There is a dog in the picture and the predicted breed is Pointer dogImages/valid/122.Pointer/Pointer_07834.jpg There is a dog in the picture and the predicted breed is Pointer ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. 
If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
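Before that demonstration, note that the detector's sensitivity can be adjusted through optional arguments to `detectMultiScale`. A small sketch (the parameter values here are illustrative, not tuned):
###Code
import cv2

# load the same pre-trained cascade used throughout this step
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
img = cv2.imread(human_files[0])
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# scaleFactor: image-pyramid step (closer to 1.0 = finer but slower search)
# minNeighbors: overlapping detections required before a face is accepted
# minSize: ignore candidate faces smaller than this many pixels
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.05,
                                      minNeighbors=6, minSize=(30, 30))
print('Number of faces detected:', len(faces))
###Output
_____no_output_____
###Markdown
The demonstration itself, using the default settings, follows.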
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. #Define a vectorized function which takes a nested sequence of objects or numpy arrays #as inputs and returns a single numpy array or a tuple of numpy arrays. 
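# Note that np.vectorize is only a convenience wrapper (essentially a Python
# loop over the inputs), not a performance optimization; it simply applies
# face_detector to every file path in the array.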
fv_face_detector = np.vectorize(face_detector)

# Run the face detector function on both subsets
total_of_humans_detected = fv_face_detector(human_files_short)
total_of_dog_humans_detected = fv_face_detector(dog_files_short)

# Calculate the percentages
human_percent = (sum(total_of_humans_detected)/len(total_of_humans_detected))*100
dog_humans_percent = (sum(total_of_dog_humans_detected)/len(total_of_dog_humans_detected))*100

print('{:.2f} % of the first 100 images in human_files have a detected human face'.format(human_percent))
print('{:.2f} % of the first 100 images in dog_files have a detected human face.'.format(dog_humans_percent))

###Output
99.00 % of the first 100 images in human_files have a detected human face
18.00 % of the first 100 images in dog_files have a detected human face.

###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /Users/brocycenteio/.cache/torch/checkpoints/vgg16-397923af.pth
100%|██████████| 528M/528M [10:17<00:00, 897kB/s]

###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.
Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''

    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image

    # Import image
    img = Image.open(img_path)

    # Apply the transformation: deterministic resize + center crop
    # (a random crop here would make predictions non-repeatable at inference time)
    data_transform = transforms.Compose([transforms.Resize(256),
                                         transforms.CenterCrop(224),
                                         transforms.ToTensor(),
                                         transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                              std=[0.229, 0.224, 0.225])])
    img = data_transform(img)
    img_tensor = img.unsqueeze_(0)

    # Move tensor to GPU if available
    if use_cuda:
        img_tensor = img_tensor.cuda()

    VGG16.eval()
    with torch.no_grad():
        output = VGG16(img_tensor)
        prediction = torch.argmax(output).item()
    VGG16.train()

    return prediction  # predicted class index

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    result_index = VGG16_predict(img_path)
    if result_index >= 151 and result_index <= 268:
        return True
    return False

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
total_of_dog_detected = 0.0
total_of_humans_dog_detected = 0.0

num_files = len(human_files_short)

for i in range(0, num_files):
    human_path = human_files_short[i]
    dog_path = dog_files_short[i]
    if dog_detector(human_path) == True:
        total_of_humans_dog_detected += 1
    if dog_detector(dog_path) == True:
        total_of_dog_detected += 1

humans_dog_percent = (total_of_humans_dog_detected / num_files)
dog_percent = (total_of_dog_detected / num_files)

print('{0:.0%} of the images in human_files_short have a detected dog.'.format(humans_dog_percent))
print('{0:.0%} of the images in dog_files_short have a detected dog'.format(dog_percent))

###Output
1% of the images in human_files_short have a detected dog.
93% of the images in dog_files_short have a detected dog

###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc).
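As an illustration of what swapping in a different backbone might look like, here is a sketch of a ResNet-50 variant of the dog detector. This is not part of the original solution: the name `dog_detector_resnet50` and the helper `_eval_tf` are made up for this sketch, and it assumes the same ImageNet index convention (dog classes 151-268) discussed above.

###Code
# illustrative sketch only -- a ResNet-50 based dog detector
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image

resnet50 = models.resnet50(pretrained=True)
if torch.cuda.is_available():
    resnet50 = resnet50.cuda()
resnet50.eval()

# deterministic ImageNet-style preprocessing
_eval_tf = transforms.Compose([transforms.Resize(256),
                               transforms.CenterCrop(224),
                               transforms.ToTensor(),
                               transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                    std=[0.229, 0.224, 0.225])])

def dog_detector_resnet50(img_path):
    batch = _eval_tf(Image.open(img_path).convert('RGB')).unsqueeze(0)
    if torch.cuda.is_available():
        batch = batch.cuda()
    with torch.no_grad():
        idx = resnet50(batch).argmax(dim=1).item()
    # ImageNet dog classes occupy indices 151-268 (inclusive)
    return 151 <= idx <= 268

###Output
_____no_output_____

###Markdown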
Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel (comparison images omitted here).
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed (Yellow, Chocolate, and Black Labrador images omitted here).
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! A short illustration of how such a pipeline composes appears next, before the implementation cell.
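As a sketch (illustrative only; the real loaders are defined in the implementation cell that follows), applying a training-style transform to a single image shows the tensor shape the network will receive:

###Code
# illustrative sketch: compose a training-style transform and inspect the result
import torchvision.transforms as transforms
from PIL import Image

demo_tf = transforms.Compose([
    transforms.RandomResizedCrop(224),   # random crop, resized to 224x224
    transforms.RandomHorizontalFlip(),   # mirror the image 50% of the time
    transforms.ToTensor(),               # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],   # ImageNet statistics
                         std=[0.229, 0.224, 0.225]),
])

# dog_files was loaded in Step 0
sample = demo_tf(Image.open(dog_files[0]).convert('RGB'))
print(sample.shape)  # expected: torch.Size([3, 224, 224])

###Output
_____no_output_____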
###Code
import os
from torchvision import datasets
import torchvision.transforms as transforms
import torch
import numpy as np
from PIL import ImageFile

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
ImageFile.LOAD_TRUNCATED_IMAGES = True

%matplotlib inline

# Check if CUDA is available
use_cuda = torch.cuda.is_available()
print('CUDA :', use_cuda)

std_normalization = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

dt_transforms = {
    'train' : transforms.Compose([transforms.Resize(256),
                                  transforms.RandomResizedCrop(224),
                                  transforms.RandomHorizontalFlip(),
                                  transforms.ToTensor(),
                                  std_normalization]),
    'valid' : transforms.Compose([transforms.Resize(256),
                                  transforms.CenterCrop(224),
                                  transforms.ToTensor(),
                                  transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                       std=[0.229, 0.224, 0.225])]),
    'test' : transforms.Compose([transforms.Resize(256),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                                 std_normalization])
}

data_train = datasets.ImageFolder(os.path.join('dogImages', 'train/'), transform=dt_transforms['train'])
data_valid = datasets.ImageFolder(os.path.join('dogImages', 'valid/'), transform=dt_transforms['valid'])
data_test = datasets.ImageFolder(os.path.join('dogImages', 'test/'), transform=dt_transforms['test'])

dataLoader_train = torch.utils.data.DataLoader(data_train, batch_size=20, num_workers=0, shuffle=True)
dataLoader_valid = torch.utils.data.DataLoader(data_valid, batch_size=20, num_workers=0, shuffle=False)
dataLoader_test = torch.utils.data.DataLoader(data_test, batch_size=20, num_workers=0, shuffle=False)

# bundle the loaders under the name the training and test cells expect
loaders_scratch = {'train': dataLoader_train, 'valid': dataLoader_valid, 'test': dataLoader_test}

###Output
CUDA : False

###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**:
- Yes, I resize the images to 224x224 using torchvision transforms: the originals are large, so training at full resolution would take too much time and memory, and 224x224 also matches the input size VGG-16 expects.
- Yes, I augment the training set (random resized crops and horizontal flips) to add randomness to the dataset; this helps prevent overfitting, so I can expect better performance when the model predicts on the test data.
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN

    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output
_____no_output_____

###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell (after a brief aside) to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
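One background detail worth noting before the implementation cell: `nn.CrossEntropyLoss` applies `LogSoftmax` internally, so the network's `forward` should return raw logits rather than probabilities. A minimal, self-contained illustration (not part of the graded template):

###Code
# sketch: CrossEntropyLoss consumes raw logits plus integer class labels
import torch
import torch.nn as nn

logits = torch.randn(4, 133)           # batch of 4, one raw score per breed
targets = torch.randint(0, 133, (4,))  # ground-truth class indices
print(nn.CrossEntropyLoss()(logits, targets))

###Output
_____no_output_____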
###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.001, momentum=0.9) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. 
Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. 
Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither

###Output
_____no_output_____

###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement)

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)

###Output
_____no_output_____

###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**.
Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. 
OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
import sys

human_face_in_humans = 0
human_face_in_dogs = 0

with tqdm(total=100, file=sys.stdout) as pbar:
    # iterate from index 0 so that all 100 images are scored
    for i in range(len(human_files_short)):
        if face_detector(human_files_short[i]):
            human_face_in_humans += 1
        if face_detector(dog_files_short[i]):
            human_face_in_dogs += 1
        # Manually update the progress bar, useful for streams such as reading files.
        pbar.update(1)

print('Percentage of human faces detected in human_files: {}%'.format(human_face_in_humans))
print('Percentage of human faces detected in dog_files: {}%'.format(human_face_in_dogs))

###Output
99%|█████████▉| 99/100 [00:32<00:00,  3.07it/s]
Percentage of human faces detected in human_files: 97%
Percentage of human faces detected in dog_files: 17%

###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth
100%|██████████| 553433881/553433881 [00:27<00:00, 19874634.67it/s]

###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.
Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
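    ## The preprocessing below follows the standard ImageNet recipe:
    ## resize/crop to a fixed spatial size, convert to a CHW float tensor,
    ## and normalize with the channel means/stds used when VGG-16 was trained.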
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image

    # convert data to a normalized torch.FloatTensor
    # (224x224 is the conventional VGG-16 input; the 244x244 resize used here
    # happens to still produce the 7x7 feature map the classifier expects)
    normalize = transforms.Compose([transforms.Resize(size=(244, 244)),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.485, 0.456, 0.406),
                                                         (0.229, 0.224, 0.225))
                                   ])

    # Load the image and apply the transform
    img = Image.open(img_path)
    #plt.imshow(np.asarray(img))
    tensor = normalize(img).unsqueeze_(0)

    if torch.cuda.is_available():
        output = VGG16(tensor.cuda())
    else:
        output = VGG16(tensor)

    output = output.data.cpu().numpy().argmax()

    # Our prediction will be the index of the class label with the largest value.
    return output  # predicted class index

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    output = VGG16_predict(img_path)
    if output in range(151, 269):
        return True
    else:
        return False

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
dog_face_in_humans = 0
dog_face_in_dogs = 0

with tqdm(total=100, file=sys.stdout) as pbar:
    # iterate from index 0 so that all 100 images are scored
    for i in range(len(human_files_short)):
        if dog_detector(human_files_short[i]):
            dog_face_in_humans += 1
        if dog_detector(dog_files_short[i]):
            dog_face_in_dogs += 1
        # Manually update the progress bar, useful for streams such as reading files.
        pbar.update(1)

print('Percentage of dog faces detected in human_files: {}%'.format(dog_face_in_humans))
print('Percentage of dog faces detected in dog_files: {}%'.format(dog_face_in_dogs))

###Output
99%|█████████▉| 99/100 [00:09<00:00, 10.55it/s]
Percentage of dog faces detected in human_files: 0%
Percentage of dog faces detected in dog_files: 99%

###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____

###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel (comparison images omitted here).
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed (Yellow, Chocolate, and Black Labrador images omitted here).
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! A brief aside on how `ImageFolder` assigns labels comes first, before the implementation cell.
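As a quick aside (illustrative, not part of the original solution): `datasets.ImageFolder` derives class labels from the sub-directory names, sorted alphabetically, which is why the breed folders carry numeric prefixes.

###Code
# sketch: ImageFolder maps each sub-directory to a class index, e.g.
# /data/dog_images/train/001.Affenpinscher/... -> index 0
from torchvision import datasets

ds = datasets.ImageFolder('/data/dog_images/train/')
print(len(ds.classes))                        # 133 breed folders expected
print(ds.classes[0])                          # '001.Affenpinscher'
print(ds.class_to_idx['001.Affenpinscher'])   # -> 0

###Output
_____no_output_____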
###Code
import os
import torch
from torchvision import datasets
import torchvision.transforms as transforms

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# how many samples per batch to load
batch_size = 32
num_workers = 0

data_dir = '/data/dog_images/'
train_dir = os.path.join(data_dir, 'train/')
valid_dir = os.path.join(data_dir, 'valid/')
test_dir = os.path.join(data_dir, 'test/')

# convert data to a normalized torch.FloatTensor
standard_normalization = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.229, 0.224, 0.225])

data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224),
                                                transforms.RandomHorizontalFlip(),
                                                transforms.ToTensor(),
                                                standard_normalization]),
                   'val': transforms.Compose([transforms.Resize(256),
                                              transforms.CenterCrop(224),
                                              transforms.ToTensor(),
                                              standard_normalization]),
                   'test': transforms.Compose([transforms.Resize(size=(224,224)),
                                               transforms.ToTensor(),
                                               standard_normalization])
                  }

# choose the training and test datasets
train_data = datasets.ImageFolder(train_dir, transform=data_transforms['train'])
valid_data = datasets.ImageFolder(valid_dir, transform=data_transforms['val'])
test_data = datasets.ImageFolder(test_dir, transform=data_transforms['test'])

# data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)

loaders_scratch = {'train' : train_loader, 'valid' : valid_loader, 'test' : test_loader}

# specify the image classes
#classes = [folder[4:] for folder in os.listdir("data/dog_images/train/")]
classes = [folder for folder in os.listdir("/data/dog_images/train/")]
num_classes = 133 # total classes of dog breeds

###Output
_____no_output_____

###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**:
- For models with fully-connected layers, every input must have the same size, because each layer's dimensions are fixed by those of the layer before it; any change in input size would raise an error.
- Image augmentation adds randomness to the training set, which helps prevent overfitting, so I can expect better performance when the model predicts on the test data. For the validation set I instead resize to 256 and center-crop to 224x224 without augmentation, since valid_data is only used for the validation check. For the test data I apply only a resize to 224x224.
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the implementation cell below (a brief shape-check sketch comes first).
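The implementation cell flattens the convolutional stack to 128 * 7 * 7 = 6272 features before the first fully-connected layer. The following sketch traces how that number falls out of the chosen strides and pools (the layer sizes mirror that cell; ReLU is omitted since it does not change shapes):

###Code
# sketch: probe the conv/pool stack with a dummy input to find fc1's input size
# 224 -> conv(s=2) 112 -> pool 56 -> conv(s=2) 28 -> pool 14
#     -> conv(s=1, p=1) 15 -> pool 7, with 128 channels => 128*7*7 = 6272
import torch
import torch.nn as nn

probe = torch.zeros(1, 3, 224, 224)
stack = nn.Sequential(
    nn.Conv2d(3, 32, 2, stride=2), nn.MaxPool2d(2, 2),
    nn.Conv2d(32, 64, 2, stride=2), nn.MaxPool2d(2, 2),
    nn.Conv2d(64, 128, 2, padding=1), nn.MaxPool2d(2, 2),
)
print(stack(probe).shape)  # expected: torch.Size([1, 128, 7, 7])

###Output
_____no_output_____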
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 32, 2, stride=2, padding=0)
        self.conv2 = nn.Conv2d(32, 64, 2, stride=2, padding=0)
        self.conv3 = nn.Conv2d(64, 128, 2, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(128 * 7 * 7, 500)
        self.fc2 = nn.Linear(500, 133)
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(-1, 128 * 7 * 7)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()
print(model_scratch)

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output
Net(
  (conv1): Conv2d(3, 32, kernel_size=(2, 2), stride=(2, 2))
  (conv2): Conv2d(32, 64, kernel_size=(2, 2), stride=(2, 2))
  (conv3): Conv2d(64, 128, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1))
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (fc1): Linear(in_features=6272, out_features=500, bias=True)
  (fc2): Linear(in_features=500, out_features=133, bias=True)
  (dropout): Dropout(p=0.3)
)

###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__ The final architecture stacks three convolutional layers that grow the channel count (32, 64, 128) while strides and max-pooling shrink the spatial size down to 7x7, followed by dropout and two fully-connected layers that map the 6272 flattened features to the 133 breed classes. These choices were made iteratively: I adjusted layers and hyper-parameters and kept the changes that improved validation accuracy.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr = 0.01)

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the implementation cell below (a short aside on train/eval modes comes first). [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
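The training loop toggles `model.train()` and `model.eval()` because layers like `Dropout` behave differently in the two modes -- stochastic during training, the identity during evaluation. A minimal illustration (not part of the original notebook):

###Code
# sketch: Dropout zeroes entries (and rescales survivors) only in train mode
import torch
import torch.nn as nn

drop = nn.Dropout(p=0.3)
x = torch.ones(5)

drop.train()
print(drop(x))  # some entries zeroed, survivors scaled by 1/(1-p)

drop.eval()
print(drop(x))  # unchanged: tensor of ones

###Output
_____no_output_____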
###Code torch.cuda.empty_cache() # load the model that got the best validation accuracy #model_scratch.load_state_dict(torch.load('model_scratch.pt')) import numpy as np def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path, last_validation_loss=None): """returns trained model""" # initialize tracker for minimum validation loss if last_validation_loss is not None: valid_loss_min = last_validation_loss else: valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # initialize weights to zero optimizer.zero_grad() output = model(data) # calculate loss loss = criterion(output, target) # back prop loss.backward() # grad optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) if batch_idx % 100 == 0: print('Epoch %d, Batch %d loss: %.6f' % (epoch, batch_idx + 1, train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: torch.save(model.state_dict(), save_path) print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(50, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.955199 Test Accuracy: 10% (91/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code # data loaders loaders_transfer = {'train' : train_loader, 'valid' : valid_loader, 'test' : test_loader} # specify the image classes #classes = [folder[4:] for folder in os.listdir("data/dog_images/train/")] classes = [folder for folder in os.listdir("/data/dog_images/train/")] ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code ## TODO: Specify model architecture import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture # define VGG16 model model_transfer = models.vgg16(pretrained=True) # print out the model structure print(model_transfer) # Freeze training for all "features" layers for param in model_transfer.features.parameters(): param.requires_grad = False # add last linear layer (n_inputs -> dog classes) # new layers automatically have requires_grad = True n_inputs = model_transfer.classifier[6].in_features last_layer = nn.Linear(n_inputs, len(classes)) model_transfer.classifier[6] = last_layer # check to see the last layer produces the expected number of outputs print(model_transfer.classifier[6].out_features) print(model_transfer.classifier) # check if CUDA is available use_cuda = torch.cuda.is_available() if use_cuda: model_transfer = model_transfer.cuda() ###Output VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, 
dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace)
    (2): Dropout(p=0.5)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace)
    (5): Dropout(p=0.5)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)
133
Sequential(
  (0): Linear(in_features=25088, out_features=4096, bias=True)
  (1): ReLU(inplace)
  (2): Dropout(p=0.5)
  (3): Linear(in_features=4096, out_features=4096, bias=True)
  (4): ReLU(inplace)
  (5): Dropout(p=0.5)
  (6): Linear(in_features=4096, out_features=133, bias=True)
)

###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.
__Answer:__ I selected the VGG16 model because it was trained on the ImageNet dataset, which contains many animals in its corpus, and also because VGG16 is widely used to extract features from images. To define a model for training I followed these steps:
1. Load a pre-trained VGG16 model.
2. "Freeze" all the parameters, so the net acts as a fixed feature extractor and the parameters do not change during training.
3. Remove the last layer.
4. Replace the last layer with a linear classifier of my own that fits the number of dog classes I have.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
import torch.optim as optim

criterion_transfer = torch.nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.001)

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model
model_transfer = train(10, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output
Test Loss: 0.699644


Test Accuracy: 78% (654/836)

###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model
Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
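### Note on the naming scheme used just below: the breed folders are named
### like '001.Affenpinscher', so stripping the first four characters and
### replacing underscores yields a human-readable breed name.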
# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in os.listdir("/data/dog_images/train/")]

def predict_breed_transfer(img_path, model):
    # load the image and return the predicted breed
    # convert data to a normalized torch.FloatTensor
    normalize = transforms.Compose([transforms.Resize(size=(244, 244)),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.485, 0.456, 0.406),
                                                         (0.229, 0.224, 0.225))
                                   ])

    # Load the image and apply the transform
    img = Image.open(img_path)
    tensor = normalize(img).unsqueeze_(0)

    if torch.cuda.is_available():
        model = model.cuda()
        output = model(tensor.cuda())
    else:
        output = model(tensor)

    output = output.data.cpu().numpy().argmax()

    # Our prediction will be the index of the class label with the largest value.
    return output  # predicted class index

###Output
_____no_output_____

###Markdown
--- Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path, model, class_names):
    ## handle cases for a human face, dog, and neither
    if face_detector(img_path):
        # human face has been detected
        print('Hello! You are human but you look like....')
    elif dog_detector(img_path):
        # dog is detected
        print('Hello! You are a dog!')
    else:
        # neither detected: report the error and skip the breed prediction
        print('ERROR: Neither a human nor a dog has been detected')
        plot_img(img_path)
        return
    plot_img(img_path)
    print(class_names[predict_breed_transfer(img_path, model)])

def plot_img(img_path):
    # load color (BGR) image
    img = cv2.imread(img_path)
    # convert BGR image to RGB for plotting
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(img)
    plt.show()

###Output
_____no_output_____

###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement)
- More dog image data would improve model training. Additional image augmentation (vertical flips, translations, etc.)
will improve performance on the test data.
- Hyper-parameter tuning: weight initialization, learning rates, dropout, batch sizes, and optimizer choice will be helpful to improve performance.
- Ensembles of models

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[100:103])):
    run_app(file, model_transfer, class_names)

###Output
Hello! You are human, but you look like....
###Markdown
Convolutional Neural Networks

Project: Write an Algorithm for a Dog Identification App

---

In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.

>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut.  Markdown cells can be edited by double-clicking the cell to enter edit mode.

The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.

---

Why We're Here

In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app.  At the end of this project, your code will accept any user-supplied image as input.  If a dog is detected in the image, it will provide an estimate of the dog's breed.  If a human is detected, it will provide an estimate of the dog breed that is most resembling.  The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).

![Sample Dog Output](images/sample_dog_output.png)

In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed.  There are many points of possible failure, and no perfect algorithm exists.
Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. 
In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face.  Each detected face is a 1D array with four entries that specifies the bounding box of the detected face.  The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box.  The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.

Write a Human Face Detector

We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise.  This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.

###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector

__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?

Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face.  You will see that our algorithm falls short of this goal, but still gives acceptable performance.  We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.

__Answer:__ (You can print out your results and/or write your percentages in this cell)

###Code
from tqdm import tqdm_notebook as tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
detected_faces_in_dog_files = [face_detector(dog_file) for dog_file in tqdm(dog_files_short)]
detected_faces_in_human_files = [face_detector(human_file) for human_file in tqdm(human_files_short)]

print(f"The face detector classified {sum(detected_faces_in_dog_files)} human faces in {len(dog_files_short)} dog images")
print(f"The face detector classified {sum(detected_faces_in_human_files)} human faces in {len(human_files_short)} human images")

###Output
The face detector classified 17 human faces in 100 dog images
The face detector classified 98 human faces in 100 human images
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :).  Please use the code cell below to design and test your own face detection algorithm.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs

In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 Model

The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks.  ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth
100%|██████████| 553433881/553433881 [00:05<00:00, 107016691.22it/s]
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.

(IMPLEMENTATION) Making Predictions with a Pre-trained Model

In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model.  The output should always be an integer between 0 and 999, inclusive.

Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    transformations = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    image = Image.open(img_path)
    image_transformed = transformations(image)
    if use_cuda:
        image_transformed = image_transformed.cuda()
    VGG16.eval()
    output = VGG16(torch.unsqueeze(image_transformed, 0))
    value, index = output.topk(1, dim=1)
    return index.item()  # predicted class index as a plain integer

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector

While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`.  Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).

Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    index = VGG16_predict(img_path)
    return 151 <= index <= 268

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector

__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.

def test_performance_dog_classification(dog_files_short, human_files_short, model):
    detected_dogs_in_dog_files = [model(dog_file) for dog_file in tqdm(dog_files_short)]
    detected_dogs_in_human_files = [model(human_file) for human_file in tqdm(human_files_short)]

    print(f"The dog detector classified {sum(detected_dogs_in_dog_files)} dogs in {len(dog_files_short)} dog images")
    print(f"The dog detector classified {sum(detected_dogs_in_human_files)} dogs in {len(human_files_short)} human images")

test_performance_dog_classification(dog_files_short, human_files_short, dog_detector)

###Output
_____no_output_____
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc).  Please use the code cell below to test other pre-trained PyTorch models.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

from PIL import Image
import torchvision.transforms as transforms
from functools import partial

def dog_detector_generic(img_path, model):
    '''
    Use the given pre-trained model to decide whether the image at the
    specified path is predicted to contain a dog (ImageNet classes 151-268)

    Args:
        img_path: path to an image
        model: a pre-trained torchvision classification model

    Returns:
        True if the predicted ImageNet class falls in the dog range
    '''
    ## Load and pre-process an image from the given img_path
    ## Check whether the *index* of the predicted class is a dog class
    transformations = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    image = Image.open(img_path)
    image_transformed = transformations(image)
    model.eval()
    output = model(torch.unsqueeze(image_transformed, 0))
    value, index = output.topk(1, dim=1)
    return 151 <= index.item() <= 268

ResNet50 = models.resnet50(pretrained=True)
resnet_dog_detector = partial(dog_detector_generic, model=ResNet50)

test_performance_dog_classification(dog_files_short, human_files_short, resnet_dog_detector)

###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)

Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images.  In this step, you will create a CNN that classifies dog breeds.  You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging.  To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany vs. Welsh Springer Spaniel (comparison images omitted)

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever vs. American Water Spaniel (comparison images omitted)

Likewise, recall that labradors come in yellow, chocolate, and black.  Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.

Yellow Labrador vs. Chocolate Labrador vs. Black Labrador (comparison images omitted)

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.

Remember that the practice is far ahead of the theory in deep learning.  Experiment with many different architectures, and trust your intuition.  And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively).  You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.  If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
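An added aside before the loaders (not part of the original notebook): normalization constants such as the `mean=[0.4, 0.4, 0.4]` / `std=[0.2, 0.2, 0.2]` used in the next code cell can be estimated from the training images themselves. The sketch below illustrates one way to do this; the helper name `estimate_channel_stats` and the dataset path are illustrative assumptions only.

###Code
# Added sketch (not part of the original notebook): estimate per-channel
# mean/std over a sample of training images, as one way to choose
# normalization constants. Assumes the same ImageFolder layout used below.
import torch
import torchvision
import torchvision.transforms as transforms

def estimate_channel_stats(image_folder, n_batches=50):
    dataset = torchvision.datasets.ImageFolder(
        image_folder,
        transform=transforms.Compose([transforms.Resize((224, 224)),
                                      transforms.ToTensor()]))
    loader = torch.utils.data.DataLoader(dataset, batch_size=20, shuffle=True)
    mean = torch.zeros(3)
    std = torch.zeros(3)
    n_seen = 0
    for i, (images, _) in enumerate(loader):
        if i >= n_batches:
            break
        # flatten the spatial dimensions: (batch, channel, H*W)
        flat = images.view(images.size(0), images.size(1), -1)
        mean += flat.mean(2).mean(0)
        std += flat.std(2).mean(0)
        n_seen += 1
    return mean / n_seen, std / n_seen

# mean, std = estimate_channel_stats("/data/dog_images/train")  # path assumed
# print(mean, std)

###Output
_____no_output_____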
###Code
import torch
import torchvision
import torchvision.transforms as transforms
import os
from PIL import ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True

batch_size = 20
data_dir = "/data/dog_images/"

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
train_transform = transforms.Compose([
    transforms.Resize((350, 350)),
    transforms.RandomCrop(224),
    transforms.RandomRotation(10),
    transforms.RandomHorizontalFlip(0.1),
    transforms.RandomVerticalFlip(0.3),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.4, 0.4, 0.4],
                         std=[0.2, 0.2, 0.2]),
])

valid_transform = transforms.Compose([
    transforms.Resize((350, 350)),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.4, 0.4, 0.4],
                         std=[0.2, 0.2, 0.2]),
])

test_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.4, 0.4, 0.4],
                         std=[0.2, 0.2, 0.2]),
])

train_dir = torchvision.datasets.ImageFolder(os.path.join(data_dir, "train"), transform=train_transform,)
test_dir = torchvision.datasets.ImageFolder(os.path.join(data_dir, "test"), transform=test_transform,)
valid_dir = torchvision.datasets.ImageFolder(os.path.join(data_dir, "valid"), transform=valid_transform,)

train_loader = torch.utils.data.DataLoader(train_dir, batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dir, batch_size)
valid_loader = torch.utils.data.DataLoader(valid_dir)

import matplotlib.pyplot as plt
import numpy as np

loader = iter(valid_loader)
image, label = next(loader)
image = image.numpy().squeeze()
image = image.transpose((1,2,0))
image = image * np.array((0.2, 0.2, 0.2)) + np.array((0.4, 0.4, 0.4))
image = image.clip(0, 1)
plt.imshow(image)

###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)?  What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset?  If so, how (through translations, flips, rotations, etc)?  If not, why not?

**Answer**:
- The pipeline first resizes each image to 350x350 pixels
- Then I randomly crop a 224x224 part out of this image to increase positional invariance
- I chose a size of 224x224 so that training is relatively quick, but it is still fun to investigate the images, i.e., they are not too small
- I apply random rotation, horizontal, and vertical flips to further increase positional invariance
- I transform the image to a tensor
- Finally, I normalize the data, such that all the values are close to 0 and backpropagation performs better. Here, I followed the normalization step found in the VGG paper
- I don't apply all these changes to the test and validation sets, except for normalization and resizing, since these are the images on which we want to measure our performance
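As an added aside before the model is defined (not part of the original notebook), the spatial sizes the network produces can be checked with the standard output-size formula, out = floor((in - kernel + 2*padding) / stride) + 1. The helper name `out_size` below is illustrative only.

###Code
# Added sketch (not part of the original notebook): verify the
# 224 -> 112 -> 56 -> 28 progression of three conv (3x3, padding 1)
# plus max-pool (2x2, stride 2) stages used in the architecture below.
def out_size(in_size, kernel, stride=1, padding=0):
    return (in_size - kernel + 2 * padding) // stride + 1

size = 224
for _ in range(3):
    size = out_size(size, kernel=3, stride=1, padding=1)  # conv keeps the size
    size = out_size(size, kernel=2, stride=2)             # pooling halves it
    print(size)  # prints 112, then 56, then 28

###Output
_____no_output_____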
###Markdown
(IMPLEMENTATION) Model Architecture

Create a CNN to classify dog breed.  Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        # self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.dropout = nn.Dropout(0.1)
        self.fc1 = nn.Linear(28 * 28 * 64, 2048)
        self.fc4 = nn.Linear(2048, 133)

    def forward(self, x):
        ## Define forward behavior
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.dropout(self.pool(F.relu(self.conv3(x))))
        x = self.dropout(F.relu(self.fc1(x.view(x.shape[0], -1))))
        x = F.log_softmax(self.fc4(x), dim=1)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
use_cuda = torch.cuda.is_available()
if use_cuda:
    model_scratch.cuda()

###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__

As highlighted in the lessons, I looked on the internet for models that might be suitable to solve this problem with CNNs. VGG is one such solution that is known to perform well on the ImageNet dataset. Thus, I copied the architecture of the smallest VGG model and simplified it to solve my problem. Before this I tried a similar architecture; however, it did not learn well. The main difference was that it used dropout at basically every point, though with very little probability (0.1). This model seems to work better.

Inspired by VGG, in each convolution I take a kernel of size 3x3 and padding of 1, which causes the output filters to stay at the same size as the input. Where pooling is applied, I use max pooling with a 2x2 kernel and a stride of 2. The stride causes the filter size to shrink to half its size.

In the first step I take my 224x224 (pixel) image with three channels and convolve it to create 16 filters. I apply the ReLU activation function to these filters and then max pooling (filter size 112x112). These smaller 16 filters are then convolved again. I again apply the ReLU activation function and pooling, yielding 56x56 sized filters.

The last convolution produces 64 filters, where each filter now has a size of 28x28; this convolutional step uses a dropout probability of 0.1 (matching the `nn.Dropout(0.1)` in the code). These 64x28x28 units are then connected to a fully connected layer with 2048 units. This layer also uses the ReLU activation function and dropout of 0.1.

The last layer connects the 2048 units of the layer before with 133 units, which equals the number of dog breeds (the classes in this classification task). I apply log softmax, which is the way to go in a multiclass classification task. Later, I apply the negative log likelihood loss to the output of the last layer.
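As a quick added sanity check (not part of the original notebook) of the shape arithmetic above: forwarding a dummy 224x224 RGB batch through a fresh CPU instance of `Net` should yield a 133-way output. The variable names `dummy` and `net_check` are illustrative only.

###Code
# Added sanity check (not part of the original notebook): the flatten feeding
# fc1 is 64 * 28 * 28, and the final output has one entry per dog breed.
dummy = torch.randn(1, 3, 224, 224)
net_check = Net()
with torch.no_grad():
    out = net_check(dummy)
print(out.shape)  # expected: torch.Size([1, 133])

###Output
_____no_output_____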
###Markdown
(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html).  Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.NLLLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.05)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below.  [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.

###Code
loaders_scratch = {"train": train_loader, "valid": valid_loader, "test": test_loader}

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            # train_loss += loss.item() * data.size(0)

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    return model

# train the model
model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images.  Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 10%.

###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))

    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (100. * correct / total, correct, total))
# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output
Test Loss: 3.925092

Test Accuracy: 13% (110/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)

You will now use transfer learning to create a CNN that can identify dog breed from images.  Your CNN must attain at least 60% accuracy on the test set.

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).

If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
import os
## TODO: Specify data loaders
batch_size = 20
data_dir = "/data/dog_images/"

transformations = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

train_transform = transforms.Compose([
    transforms.Resize((400, 400)),
    transforms.RandomCrop(224),
    transforms.RandomRotation(30),
    transforms.RandomHorizontalFlip(0.1),
    transforms.RandomVerticalFlip(0.3),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

valid_transform = transforms.Compose([
    transforms.Resize((400, 400)),
    transforms.RandomCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

test_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

train_dir = torchvision.datasets.ImageFolder(os.path.join(data_dir, "train"), transform=train_transform,)
test_dir = torchvision.datasets.ImageFolder(os.path.join(data_dir, "test"), transform=test_transform,)
valid_dir = torchvision.datasets.ImageFolder(os.path.join(data_dir, "valid"), transform=valid_transform,)

train_loader = torch.utils.data.DataLoader(train_dir, batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dir, batch_size, shuffle=False)
valid_loader = torch.utils.data.DataLoader(valid_dir, shuffle=False)

loaders_transfer = {"train": train_loader,
                    "valid": valid_loader,
                    "test": test_loader}

###Output
_____no_output_____
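###Markdown
Added check (not part of the original notebook): a quick probe of `loaders_transfer` to confirm the batches have the shape the pre-trained model expects, given the `batch_size` of 20 set above.

###Code
# Added sketch (not part of the original notebook): inspect one training batch.
images, labels = next(iter(loaders_transfer['train']))
print(images.shape)  # expected: torch.Size([20, 3, 224, 224])

###Output
_____no_output_____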
###Markdown
(IMPLEMENTATION) Model Architecture

Use transfer learning to create a CNN to classify dog breed.  Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.vgg19(pretrained=True)

use_cuda = torch.cuda.is_available()

# Freeze training for all "features" layers
for param in model_transfer.features.parameters():
    param.requires_grad = False

model_transfer.classifier[6] = nn.Linear(4096, 133)

if use_cuda:
    model_transfer = model_transfer.cuda()

print(model_transfer)

###Output
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace)
    (16): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (17): ReLU(inplace)
    (18): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (19): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace)
    (23): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (24): ReLU(inplace)
    (25): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (26): ReLU(inplace)
    (27): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace)
    (30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (31): ReLU(inplace)
    (32): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (33): ReLU(inplace)
    (34): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (35): ReLU(inplace)
    (36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace)
    (2): Dropout(p=0.5)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace)
    (5): Dropout(p=0.5)
    (6): Linear(in_features=4096, out_features=133, bias=True)
  )
)
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem.

__Answer:__

I decided to take the VGG19 model, since it is close to the model I wrote from scratch. I keep all the feature-extracting steps (those that contain convolutions), but replace the final linear layers that use the features to perform the classification. Thus, I freeze all the weights except for the last linear layers and train the weights of these classifier layers on the dog images.
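As an added check (not part of the original notebook), counting trainable versus frozen parameters confirms that only the classifier will be updated during training. The variable names `n_trainable` and `n_frozen` are illustrative only.

###Code
# Added check (not part of the original notebook): after freezing "features",
# only the classifier parameters should still require gradients.
n_trainable = sum(p.numel() for p in model_transfer.parameters() if p.requires_grad)
n_frozen = sum(p.numel() for p in model_transfer.parameters() if not p.requires_grad)
print("trainable parameters:", n_trainable)
print("frozen parameters:   ", n_frozen)

###Output
_____no_output_____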
###Markdown
(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html).  Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
import torch.optim as optim

criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.01)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below.  [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model
model_transfer = train(10, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output
Test Loss: 0.663559

Test Accuracy: 79% (661/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model

Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in train_dir.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    image = Image.open(img_path)
    image_transformed = transform(image)
    if use_cuda:
        image_transformed = image_transformed.cuda()
    model_transfer.eval()
    output = model_transfer(torch.unsqueeze(image_transformed, 0))
    value, index = output.topk(1, dim=1)
    return class_names[index.item()]

###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm

Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.  Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above.  You are __required__ to use your CNN from Step 4 to predict dog breed.

Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    img = cv2.imread(img_path)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(img_rgb)
    plt.show()
    if dog_detector(img_path):
        print("It is a dog. More specifically, a", predict_breed_transfer(img_path))
More specific a", predict_breed_transfer(img_path)) elif face_detector(img_path): print(f"It is a human that looks like a {predict_breed_transfer(img_path)}") else: print("Ups, could not be recognized") ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)The output is good. Humans and dogs are well seperated. Only the breed classifier does not seem to be working well enough. The Mastiff was classified as a Bullmastiff in two out of three times. It is good that the misclassification was within similar breeds. But, performance could be improved. However, an image of a cat is not classified by either algorithms which is good, since both animals look remotly similar from a very naive point of view.Therefore my three points for improvement are to - first, merge the human and dog classification into one model which seems more elegant to me. In edge cases, an image could be now classified by both algorithms as a dog and human respectivally. In a single model, this would be less likely - second, provide more training data to improve classification accuracy- third, due to the interclass similarty, I would use Kendall Tau correlation after classification with the help of a CNN to then select the right breed as shown in https://towardsdatascience.com/this-ai-is-hungry-b2a8655528be. The resulst show that a Weimaraner and a Great Dane were wrongly classified as a Greyhound ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. import shutil import requests urls = ['https://www.fressnapf.de/medias/katze-revieranspr-che-580-500.jpg?context=bWFzdGVyfHJvb3R8NTAzNTF8aW1hZ2UvanBlZ3xoZGIvaDNjLzk2NTgxNzIzNDIzMDIuanBnfGRiYTBkNGMwYWI0MWMzYjhmZGZiNmNiM2NlNGY4YzMwZGI4ODlhMTQ5NWU5YTBmNTQ2ZDFlZDU4NTQ4YmNlODk', 'https://highprofiles.info/wp-content/uploads/2016/04/Yorke-main-wpcf_900x600.jpg', 'https://media.pitchfork.com/photos/5c9ee882fa864d78eca91523/2:1/w_790/Robert-Smith-GettyImages-1139194660.jpg', 'https://mediafiles.mein-haustier.de/wp-content/uploads/2018/03/shutterstock_300817325-komprimiert-1270x608.jpg', 'https://www.zooplus.de/magazin/CACHE_IMAGES/768/content/uploads/2017/03/deutsche-dogge.jpg', 'https://dsw.scene7.com/is/image/DSWShoes/404995_001_ss_01?$pdp-image$'] for i, url in enumerate(urls): response = requests.get(url, stream=True) with open(f'internet_img_{i}.png', 'wb') as out_file: shutil.copyfileobj(response.raw, out_file) for i in range(len(urls)): run_app(f'internet_img_{i}.png') ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. 
You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. 
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob images_dir = './' #PC #images_dir = /data/ #Udacity Workspace # load filenames for human and dog images human_files = np.array(glob(images_dir + "lfw/*/*")) dog_files = np.array(glob(images_dir + "dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) print(human_files[1]) print(dog_files[1000]) ###Output There are 13233 total human images. There are 8351 total dog images. ./lfw/Kate_Burton/Kate_Burton_0001.jpg ./dog_images/valid/106.Newfoundland/Newfoundland_07031.jpg ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. 
###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 #print(face_detector("images/Brittany_02625.jpg")) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ ###Code from tqdm import tqdm import time human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. human_files_short_faces_detected_count = 0 dog_files_short_faces_detected_count = 0 start_humans = time.time() for file in human_files_short: if face_detector(file): human_files_short_faces_detected_count += 1 end_humans = time.time() start_dogs = time.time() for file in dog_files_short: if face_detector(file): dog_files_short_faces_detected_count += 1 end_dogs = time.time() print("human_files_short time: {:.2f} sec, dog_files_short time: {:.2f} sec".format((end_humans-start_humans), (end_dogs-start_dogs))) print("") print("What percentage of the first 100 images in human_files have a detected human face? {}%".format(human_files_short_faces_detected_count)) print("What percentage of the first 100 images in dog_files have a detected human face? {}%".format(dog_files_short_faces_detected_count)) ###Output human_files_short time: 1.52 sec, dog_files_short time: 6.03 sec What percentage of the first 100 images in human_files have a detected human face? 97% What percentage of the first 100 images in dog_files have a detected human face? 9% ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). 
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def preprocess_image(img_path): ''' Given a path to an image files, returns a tensor that can be used in one of the Pytorch torchvision models Args: img_path: path to an image Returns: A tensor that can be used in one of the Pytorch torchvision models ''' img = Image.open(img_path) transform = transforms.Compose([ transforms.Resize(224, interpolation=2), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) img_tensor = transform(img) img_tensor = img_tensor.view(1, 3, 224, 224) #add in a dimension for batch size. return img_tensor def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image #img = cv2.imread(img_path) img_tensor = preprocess_image(img_path) if next(VGG16.parameters()).is_cuda: img_tensor = img_tensor.cuda() output = VGG16(img_tensor) prediction_index = np.argmax(output[0].cpu().detach().numpy()) #print("{}: {}".format(prediction_index, output[0].detach().numpy()[prediction_index])) return prediction_index # predicted class index #run a quick test to make sure it works: print("{}: {}".format(human_files[7], VGG16_predict(human_files[7]))) ###Output ./lfw/Paul_Martin/Paul_Martin_0002.jpg: 834 ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. 
    class_index = VGG16_predict(img_path)
    is_dog = class_index >= 151 and class_index <= 268
    return is_dog # true/false

print(dog_detector(human_files[0]))
print(dog_detector(dog_files[0]))

###Output
False
True
###Markdown
(IMPLEMENTATION) Assess the Dog Detector

__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

human_files_short_dog_count = 0
dog_files_short_dog_count = 0

start_humans = time.time()
for file in human_files_short:
    if dog_detector(file):
        human_files_short_dog_count += 1
end_humans = time.time()

start_dogs = time.time()
for file in dog_files_short:
    if dog_detector(file):
        dog_files_short_dog_count += 1
end_dogs = time.time()

print("human processing time: {:.2f} sec, dog processing time: {:.2f} sec".format((end_humans-start_humans), (end_dogs-start_dogs)))
print("")
print("What percentage of the images in human_files_short have a detected dog? {}%".format(human_files_short_dog_count))
print("What percentage of the images in dog_files_short have a detected dog? {}%".format(dog_files_short_dog_count))

###Output
human processing time: 0.74 sec, dog processing time: 1.51 sec

What percentage of the images in human_files_short have a detected dog? 1%
What percentage of the images in dog_files_short have a detected dog? 99%
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc).  Please use the code cell below to test other pre-trained PyTorch models.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
import torch import torchvision.models as models # define AlexNet model alexnet_model = models.alexnet(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: alexnet_model = alexnet_model.cuda() def alexnet_predict(img_path): ''' Use pre-trained AlexNet model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to AlexNet model's prediction ''' img_tensor = preprocess_image(img_path) if next(alexnet_model.parameters()).is_cuda: img_tensor = img_tensor.cuda() output = alexnet_model(img_tensor) prediction_index = np.argmax(output[0].cpu().detach().numpy()) #print("{}: {}".format(prediction_index, output[0].detach().numpy()[prediction_index])) return prediction_index # predicted class index def dog_detector_alexnet(img_path): class_index = alexnet_predict(img_path) is_dog = class_index >= 151 and class_index <= 268 return is_dog # true/false human_files_short = human_files[:100] dog_files_short = dog_files[:100] mismatched_humans = 0 mismatched_dogs = 0 start_humans = time.time() for file in human_files_short: if dog_detector_alexnet(file): mismatched_humans += 1 end_humans = time.time() start_dogs = time.time() for file in dog_files_short: if not dog_detector_alexnet(file): mismatched_dogs += 1 end_dogs = time.time() print("alexnet_model: Mismatched human pics: {}/100, mismatched dog pics: {}/100".format(mismatched_humans, mismatched_dogs)) print("alexnet_model: human processing time: {:.2f} sec, dog processing time: {:.2f} sec".format((end_humans-start_humans), (end_dogs-start_dogs))) ###Output alexnet_model: Mismatched human pics: 0/100, mismatched dog pics: 3/100 alexnet_model: human processing time: 0.40 sec, dog processing time: 1.02 sec ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition.
And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes def get_loaders(): train_dir = images_dir + 'dog_images/train' valid_dir = images_dir + 'dog_images/valid' test_dir = images_dir + 'dog_images/test' train_transform = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) valid_transform = transforms.Compose([transforms.Resize(240, interpolation=2), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) test_transform = transforms.Compose([transforms.Resize(240, interpolation=2), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) train_dataset = datasets.ImageFolder(train_dir, transform=train_transform) valid_dataset = datasets.ImageFolder(valid_dir, transform=valid_transform) test_dataset = datasets.ImageFolder(test_dir, transform=test_transform) trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True) validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=64, shuffle=True) testloader = torch.utils.data.DataLoader(test_dataset, batch_size=64) loaders = {} loaders['train'] = trainloader loaders['valid'] = validloader loaders['test'] = testloader return loaders ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:Resizing Images: - The train dataset gets the most randomization, with randomized rotation, cropping, and horizontal flipping, to increase the amount of variation in the images it is trained on. - The validation and test datasets are both resized and cropped for consistency when reporting the loss and effectiveness of the model.All data sets are sized to 224 X 224 and normalized to conform to the requirements of the pre-trained models in the torchvision.models package. This allows me to re-use the transforms later when using the VGG16 pre-trained model and provides consistency between the scratch and transfer models for comparison of their accuracy. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() # convolutional layer 1 (in_channels, out_channels, kernel_size, stride=1, padding=0) self.conv1 = nn.Conv2d(3, 16, 3, stride=2, padding=1) # convolutional layer 2 self.conv2 = nn.Conv2d(16, 32, 3, stride=1, padding=1) # convolutional layer 3 self.conv3 = nn.Conv2d(32, 64, 3, stride=1, padding=1) # max pooling layer self.maxpool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(64 * 14 * 14, 512) #64 * 14 * 14 = 12544 #(224x224x3) images: 64 channels X 224/2/2/2/2 X 224/2/2/2/2 self.fc2 = nn.Linear(512, 512) self.fc3 = nn.Linear(512, 133) self.dropout = nn.Dropout(0.25) #with dropout 72% --> 74% (7404/10000) def forward(self, x): ## Define forward behavior #convolutional layers x = self.maxpool(F.relu(self.conv1(x))) x = self.maxpool(F.relu(self.conv2(x))) x = self.maxpool(F.relu(self.conv3(x))) #flatten x = x.view(-1, 64 * 14 * 14) # x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = F.relu(self.fc2(x)) x = self.fc3(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I used three convolutional layers, each with a kernel size of 3 X 3. To limit the size of the connections to the first fully connected layer, the image size is reduced by a factor of two four times: the first reduction comes from using a stride of 2 in the first convolution, and the last three come from max-pooling layers. As each convolutional layer is applied, I increased the number of channels (3 -> 16 -> 32 -> 64). The model ends in two fully connected layers and an output layer. Dropout is used in the two FC layers, the logic being that these layers form the "classifier", similar to the classifiers found in the pretrained models. Since we are only interested in the most likely class, I didn't use softmax. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
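First, a quick shape check -- a minimal sketch that reuses the `Net`, `torch`, and `F` already defined above -- confirms the 64 * 14 * 14 flattened size assumed by `fc1` for a 224x224 input: ###Code
# sanity-check the convolutional output shape of the scratch network
net_check = Net()
dummy = torch.randn(1, 3, 224, 224)  # one fake RGB image at the expected input size
with torch.no_grad():
    out = net_check.maxpool(F.relu(net_check.conv1(dummy)))  # 224 -> 112 (stride 2) -> 56 (pool)
    out = net_check.maxpool(F.relu(net_check.conv2(out)))    # 56 -> 28
    out = net_check.maxpool(F.relu(net_check.conv3(out)))    # 28 -> 14
print(out.shape)  # expected: torch.Size([1, 64, 14, 14])
###Output
_____no_output_____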
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf if use_cuda: print("using GPU for training.") model = model.cuda() for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss #print("data.shape: {}, output.shape: {}, target.shape: {}".format(data.shape, output.shape, target.shape)) loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() ## record the average training loss, using something like train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # update average validation loss valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(epoch, train_loss, valid_loss)) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: print(' Validation loss decreased ({:.6f} --> {:.6f}). Saving model.'.format(valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model loaders_scratch = get_loaders() model_scratch = train(30, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') ###Output using GPU for training. Epoch: 1 Training Loss: 4.071551 Validation Loss: 3.956203 Validation loss decreased (inf --> 3.956203). Saving model. Epoch: 2 Training Loss: 3.990791 Validation Loss: 3.942572 Validation loss decreased (3.956203 --> 3.942572). Saving model. Epoch: 3 Training Loss: 3.964867 Validation Loss: 3.803804 Validation loss decreased (3.942572 --> 3.803804). Saving model. Epoch: 4 Training Loss: 3.918895 Validation Loss: 3.813549 Epoch: 5 Training Loss: 3.875283 Validation Loss: 3.705367 Validation loss decreased (3.803804 --> 3.705367). Saving model. ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. 
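# note: the evaluation loop below could also be wrapped in torch.no_grad() to skip gradient tracking while testing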
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % (100. * correct / total, correct, total)) # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) # call test function loaders_scratch = get_loaders() test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.740381 Test Accuracy: 11% (95/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders import os from torchvision import datasets def get_loaders(): train_dir = images_dir + 'dog_images/train' valid_dir = images_dir + 'dog_images/valid' test_dir = images_dir + 'dog_images/test' train_transform = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) valid_transform = transforms.Compose([transforms.Resize(240, interpolation=2), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) test_transform = transforms.Compose([transforms.Resize(240, interpolation=2), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) train_dataset = datasets.ImageFolder(train_dir, transform=train_transform) valid_dataset = datasets.ImageFolder(valid_dir, transform=valid_transform) test_dataset = datasets.ImageFolder(test_dir, transform=test_transform) trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True) validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=64, shuffle=True) testloader = torch.utils.data.DataLoader(test_dataset, batch_size=64) loaders = {} loaders['train'] = trainloader loaders['valid'] = validloader loaders['test'] = testloader return loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. 
###Code import torchvision.models as models import torch.nn as nn from collections import OrderedDict ## TODO: Specify model architecture model_transfer = models.vgg16(pretrained=True) # Freeze parameters so we don't backprop through them for param in model_transfer.parameters(): param.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(25088, 512)), ('relu1', nn.ReLU()), ('drop1', nn.Dropout(0.25)), ('fc2', nn.Linear(512, 512)), ('relu2', nn.ReLU()), ('drop2', nn.Dropout(0.25)), ('fc3', nn.Linear(512, 133)), ('output', nn.LogSoftmax(dim=1)) ])) model_transfer.classifier = classifier if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I started from the pre-trained VGG16 model and froze all of its convolutional parameters so that only the classifier is trained. I then replaced VGG16's classifier with a new one: two fully connected layers of 512 units, each followed by ReLU and 25% dropout, and a final 133-unit output layer with LogSoftmax (paired with NLLLoss below). This architecture is suitable because VGG16's convolutional layers were trained on ImageNet, which already contains many dog categories, so the features they extract transfer well to distinguishing dog breeds; only the relatively small classifier needs to be learned from our dataset. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code #criterion_transfer = nn.CrossEntropyLoss() criterion_transfer = nn.NLLLoss() optimizer_transfer = optim.Adam(model_transfer.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model n_epochs = 40 loaders_transfer = get_loaders() model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output using GPU for training. Epoch: 1 Training Loss: 3.405878 Validation Loss: 1.403021 Validation loss decreased (inf --> 1.403021). Saving model. Epoch: 2 Training Loss: 2.104105 Validation Loss: 0.918029 Validation loss decreased (1.403021 --> 0.918029). Saving model. Epoch: 3 Training Loss: 1.879061 Validation Loss: 0.915157 Validation loss decreased (0.918029 --> 0.915157). Saving model. Epoch: 4 Training Loss: 1.717760 Validation Loss: 0.953156 Epoch: 5 Training Loss: 1.673907 Validation Loss: 0.749019 Validation loss decreased (0.915157 --> 0.749019). Saving model. Epoch: 6 Training Loss: 1.585493 Validation Loss: 0.709573 Validation loss decreased (0.749019 --> 0.709573). Saving model. Epoch: 7 Training Loss: 1.469464 Validation Loss: 0.701335 Validation loss decreased (0.709573 --> 0.701335). Saving model.
Epoch: 8 Training Loss: 1.491036 Validation Loss: 0.789658 Epoch: 9 Training Loss: 1.510851 Validation Loss: 0.785142 Epoch: 10 Training Loss: 1.462424 Validation Loss: 0.733289 Epoch: 11 Training Loss: 1.385584 Validation Loss: 0.607732 Validation loss decreased (0.701335 --> 0.607732). Saving model. Epoch: 12 Training Loss: 1.364234 Validation Loss: 0.665905 Epoch: 13 Training Loss: 1.416006 Validation Loss: 0.705561 Epoch: 14 Training Loss: 1.376420 Validation Loss: 0.643468 Epoch: 15 Training Loss: 1.339352 Validation Loss: 0.685768 Epoch: 16 Training Loss: 1.376158 Validation Loss: 0.707505 Epoch: 17 Training Loss: 1.361860 Validation Loss: 0.676373 Epoch: 18 Training Loss: 1.345691 Validation Loss: 0.543503 Validation loss decreased (0.607732 --> 0.543503). Saving model. Epoch: 19 Training Loss: 1.311551 Validation Loss: 0.639891 Epoch: 20 Training Loss: 1.311502 Validation Loss: 0.599640 Epoch: 21 Training Loss: 1.331311 Validation Loss: 0.626924 Epoch: 22 Training Loss: 1.292631 Validation Loss: 0.677545 Epoch: 23 Training Loss: 1.325862 Validation Loss: 0.580967 Epoch: 24 Training Loss: 1.311716 Validation Loss: 0.605024 Epoch: 25 Training Loss: 1.276306 Validation Loss: 0.714142 Epoch: 26 Training Loss: 1.367846 Validation Loss: 0.628056 Epoch: 27 Training Loss: 1.352399 Validation Loss: 0.613817 Epoch: 28 Training Loss: 1.293521 Validation Loss: 0.643420 Epoch: 29 Training Loss: 1.289478 Validation Loss: 0.640277 Epoch: 30 Training Loss: 1.285995 Validation Loss: 0.735112 Epoch: 31 Training Loss: 1.297823 Validation Loss: 0.634062 Epoch: 32 Training Loss: 1.335037 Validation Loss: 0.682760 Epoch: 33 Training Loss: 1.252052 Validation Loss: 0.586761 Epoch: 34 Training Loss: 1.270917 Validation Loss: 0.680406 Epoch: 35 Training Loss: 1.312570 Validation Loss: 0.714398 Epoch: 36 Training Loss: 1.314148 Validation Loss: 0.694690 Epoch: 37 Training Loss: 1.293907 Validation Loss: 0.686097 Epoch: 38 Training Loss: 1.244946 Validation Loss: 0.716214 Epoch: 39 Training Loss: 1.288779 Validation Loss: 0.639873 Epoch: 40 Training Loss: 1.247211 Validation Loss: 0.625923 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code # load the model that got the best validation accuracy (uncomment the line below) loaders_transfer = get_loaders() model_transfer.load_state_dict(torch.load('model_transfer.pt')) test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.785108 Test Accuracy: 79% (664/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0] #class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] train_dir = images_dir + 'dog_images/train' class_names = [] names = os.listdir(train_dir) names.sort() #print(names) for o in names: index = int(o[:3]) name = o[4:].replace("_", " ") class_names.append(name) def predict_breed_transfer(img_path): # load the image and return the predicted breed img_tensor = preprocess_image(img_path) if next(model_transfer.parameters()).is_cuda: img_tensor = img_tensor.cuda() output = model_transfer(img_tensor) prediction_index = np.argmax(output[0].cpu().detach().numpy()) #return class_names[prediction_index-1] return class_names[prediction_index] def predict_breed_scratch(img_path): # load the image and return the predicted breed img_tensor = preprocess_image(img_path) if next(model_scratch.parameters()).is_cuda: img_tensor = img_tensor.cuda() output = model_scratch(img_tensor) prediction_index = np.argmax(output[0].cpu().detach().numpy()) #return class_names[prediction_index-1] return class_names[prediction_index] # use the function to see how well it works... for i in range(100, 140): if not i % 10 == 0: continue img_path = dog_files[i] breed_transfer = predict_breed_transfer(img_path) breed_scratch = predict_breed_scratch(img_path) print("scratch: {}\tprediction: {}".format(img_path[22:], breed_scratch)) print("transfer: {}\tprediction: {}".format(img_path[22:], breed_transfer)) print("") ###Output scratch: Ibizan_hound/Ibizan_hound_05697.jpg prediction: Basenji transfer: Ibizan_hound/Ibizan_hound_05697.jpg prediction: Ibizan hound scratch: English_toy_spaniel/English_toy_spaniel_04509.jpg prediction: Gordon setter transfer: English_toy_spaniel/English_toy_spaniel_04509.jpg prediction: English toy spaniel scratch: Australian_cattle_dog/Australian_cattle_dog_00730.jpg prediction: Bullmastiff transfer: Australian_cattle_dog/Australian_cattle_dog_00730.jpg prediction: Australian cattle dog scratch: American_water_spaniel/American_water_spaniel_00631.jpg prediction: Affenpinscher transfer: American_water_spaniel/American_water_spaniel_00631.jpg prediction: Irish water spaniel ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. 
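# decision logic below: if both detectors fire or neither fires, report uncertainty; dog only -> predicted breed; human only -> resembling breed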
def run_app(img_path): is_dog = dog_detector(img_path) is_human = face_detector(img_path) is_unknown = (is_dog and is_human) or (not is_dog and not is_human) breed_transfer = predict_breed_transfer(img_path) breed_scratch = predict_breed_scratch(img_path) # load image img = cv2.imread(img_path) plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) ## handle cases for a human face, dog, and neither if is_unknown: print("hello, we aren't sure what you are.") plt.show() print("You kind of look like a...") print(" {}".format(breed_scratch)) if breed_transfer != breed_scratch: print("But more likely, you look like a...") print(" {}".format(breed_transfer)) elif is_dog: print("hello, dog.") plt.show() print("You might be a ...") print(" {}".format(breed_scratch)) if breed_transfer != breed_scratch: print("But more likely, you are a...") print(" {}".format(breed_transfer)) else: print("hello, human.") plt.show() print("You look like a ...") print(" {}".format(breed_scratch)) if breed_transfer != breed_scratch: print("or a...") print(" {}".format(breed_transfer)) print("\n\n") # use the function to see how well it works... # for i in range(10): # if not i % 2 == 0: # img_path = dog_files[i+10] # else: # img_path = human_files[i+10] # run_app(img_path) ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images from your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)Overall, my scratch model did not do as well as I had hoped. The transfer model did better than I thought it would given how low the accuracy of the scratch model was. It shows how important the model's architecture is and how hard it can be to come up with the most effective architecture. It also points out how effective it can be to leverage a pre-existing model against a similar problem.1) I could have experimented more with additional classifier layers or neurons in the transfer learning model. 2) I could have experimented with models other than VGG16 for the transfer model.3) The models seemed to converge after only a few epochs. Some additional gains might be had by lowering the learning rate and running a couple more epochs. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ### for fun, get an image from a URL and classify it... import requests def preprocess_image_url(img_url): ''' Given a url of an image file, returns a tensor that can be used in one of the Pytorch torchvision models Args: img_url: the url of an image Returns: A tensor that can be used in one of the Pytorch torchvision models ''' resp = requests.get(img_url, stream=True) # Set decode_content value to True, otherwise the downloaded image file's size will be zero. resp.raw.decode_content = True img = Image.open(resp.raw) # Remove the image url response object.
del resp transform = transforms.Compose([ transforms.Resize(224, interpolation=2), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) img_tensor = transform(img) img_tensor = img_tensor.view(1, 3, 224, 224) #add in a dimension for batch size. return img_tensor def predict_breed_transfer_url(img_url): # load the image and return the predicted breed img_tensor = preprocess_image_url(img_url) if next(model_transfer.parameters()).is_cuda: img_tensor = img_tensor.cuda() output = model_transfer(img_tensor) prediction_index = np.argmax(output[0].cpu().detach().numpy()) #return class_names[prediction_index-1] return class_names[prediction_index] #url = 'https://vetstreet.brightspotcdn.com/dims4/default/b910089/2147483647/thumbnail/645x380/quality/90/?url=https%3A%2F%2Fvetstreet-brightspot.s3.amazonaws.com%2F11%2F3adc50a41a11e087a80050568d634f%2Ffile%2FAmercian-Eskimo-2-062311.jpg' url = 'https://petstruggles.com/wp-content/uploads/2019/08/Best_Dog_Brush_for_Corgi_Pet_Struggles.jpg' #url = 'https://d17fnq9dkz9hgj.cloudfront.net/breed-uploads/2018/08/shiba-inu-detail.jpg?bust=1535566568&width=355' breed = predict_breed_transfer_url(url) print(url) print(breed) ###Output https://petstruggles.com/wp-content/uploads/2019/08/Best_Dog_Brush_for_Corgi_Pet_Struggles.jpg Pembroke welsh corgi ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. 
If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code # from tqdm import tqdm import numpy as np human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
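# note: success_rate below accumulates 0.01 per detected face, so the printed totals carry floating-point rounding error (e.g. 0.9900000000000007 rather than 0.99)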
def success_rate(array): acc = 0.00 for file in array: if face_detector(file) == True: acc += 0.01 print(acc) print("Success rate for human image") success_rate(human_files_short) print("Success rate for dog image") success_rate(dog_files_short) ###Output Success rate for human image 0.9900000000000007 Success rate for dog image 0.18000000000000002 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /Users/assuntawalderdorff/.cache/torch/hub/checkpoints/vgg16-397923af.pth ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
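## (one possible approach, shown earlier in this document: open the image with PIL, apply Resize/CenterCrop/ToTensor plus ImageNet normalization, add a batch dimension, and take the argmax of the model output)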
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. 
Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model (uncomment and complete the line below once the pieces above are defined) # model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither
    pass  # placeholder until the algorithm is implemented
###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images from your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app.
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset #print(dog_files) print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ 98% of the first 100 images in `human_files` have a detected human face, while 17% of the first 100 images in `dog_files` are falsely detected as containing a human face. ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. op = np.array([face_detector(human_files_short[i]) for i in range(100)]) print("Human face detect", np.mean(op)*100) op2 = np.array([face_detector(dog_files_short[i]) for i in range(100)]) print("Dog face detect", np.mean(op2)*100) 
###Output Human face detect 98.0 Dog face detect 17.0 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() print(VGG16.parameters) ###Output <bound method Module.parameters of VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace) (5): Dropout(p=0.5) (6): Linear(in_features=4096, out_features=1000, bias=True) ) )> ###Markdown Given an image, this pre-trained VGG-16 model returns a 
prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## Load and pre-process an image from the given img_path # (note: RandomResizedCrop is stochastic, so repeated calls can crop differently; # Resize followed by CenterCrop is the usual deterministic choice at test time) img_pil = Image.open(img_path) preprocess = transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor()]) img_tensor = preprocess(img_pil) img_tensor.unsqueeze_(0) # add the batch dimension if use_cuda: img_tensor = img_tensor.cuda() output = VGG16(img_tensor) ## Return the *index* of the predicted class for that image return int(torch.argmax(output)) # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): out = VGG16_predict(img_path) return 151 <= out <= 268 # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ 0% of the images in `human_files_short` have a detected dog, while 79% of the images in `dog_files_short` have a detected dog. ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. 
op = np.array([dog_detector(human_files_short[i]) for i in range(100)]) print("Dogs detected in human images", np.mean(op)*100) op2 = np.array([dog_detector(dog_files_short[i]) for i in range(100)]) print("Dogs detected in dog images", np.mean(op2)*100) ###Output Dogs detected in human images 0.0 Dogs detected in dog images 79.0 ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code #Resnet50 model import torch import torchvision.models as models res = models.resnet50(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: res = res.cuda() # freeze the pretrained weights and switch to evaluation mode for param in res.parameters(): param.requires_grad = False res.eval() def resnet_predict(img_path): img_pil = Image.open(img_path) preprocess = transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) img_tensor = preprocess(img_pil) img_tensor.unsqueeze_(0) if use_cuda: img_tensor = img_tensor.cuda() output = res(img_tensor) return output # raw logits; argmax below gives the class index #Resnet50 model import numpy as np def dog_detector_Resnet(img_path): out = (torch.argmax(resnet_predict(img_path))).data.cpu().numpy() return bool(out >= 151 and out <= 268) human_files_short = human_files[:100] dog_files_short = dog_files[:100] op2 = np.array([dog_detector_Resnet(dog_files_short[i]) for i in range(100)]) op4 = np.array([dog_detector_Resnet(human_files_short[i]) for i in range(100)]) print("Dogs detected in dog images", np.mean(op2)*100) print("Dogs detected in human images", np.mean(op4)*100) #Inception_v3 import torch import torchvision.models as models incept = models.inception_v3(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: incept = incept.cuda() incept.eval() def incept_predict(img_path): img_pil = Image.open(img_path) # Inception-v3 expects 299x299 inputs rather than 224x224 preprocess = transforms.Compose([transforms.RandomResizedCrop(299), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) img_tensor = preprocess(img_pil) img_tensor.unsqueeze_(0) if use_cuda: img_tensor = img_tensor.cuda() output = incept(img_tensor) return output # raw logits; argmax below gives the class index #Inception_v3 import numpy as np import torch def dog_detector_Inception(img_path): out = (torch.argmax(incept_predict(img_path))).data.cpu().numpy() return bool(out >= 151 and out <= 268) human_files_short = human_files[:100] dog_files_short = dog_files[:100] op2 = np.array([dog_detector_Inception(dog_files_short[i]) for i in range(100)]) op4 = np.array([dog_detector_Inception(human_files_short[i]) for i in range(100)]) print("Dogs detected in dog images", np.mean(op2)*100) print("Dogs detected in human images", np.mean(op4)*100) ###Output Dogs detected in dog images 96.0 Dogs detected in human images 0.0 ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds 
(from Scratch) Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! 
###Code import os import numpy as np from torchvision import datasets, transforms import matplotlib.pyplot as plt import torch from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True preprocess_train = transforms.Compose([transforms.RandomResizedCrop(227), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) preprocess_val_test = transforms.Compose([transforms.RandomResizedCrop(227), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) train_data = datasets.ImageFolder('dogImages/train', preprocess_train) valid_data = datasets.ImageFolder('dogImages/valid', preprocess_val_test) test_data = datasets.ImageFolder('dogImages/test', preprocess_val_test) ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes batch_size = 64 loaders = {} loaders['train'] = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=0, shuffle=True) loaders['valid'] = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=0, shuffle=True) loaders['test'] = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=0, shuffle=True) print(len(train_data)) print(len(valid_data)) print(len(test_data)) print(len(loaders['train'])) ###Output 6680 835 836 105 ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: The images are resized to (227, 227, 3) by random resized cropping, since the original images are larger than this. Yes, the training set is augmented with random resized crops, random horizontal flips, and random rotations of up to 10 degrees. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self, layers): super(Net, self).__init__() ## Define layers of a CNN self.layers = layers # no Softmax at the end: nn.CrossEntropyLoss applies log-softmax internally self.classif = nn.Sequential(nn.Linear(12800, 4096), nn.BatchNorm1d(4096), nn.ReLU(inplace=True), nn.Linear(4096, 512), nn.BatchNorm1d(512), nn.ReLU(inplace=True), nn.Linear(512, 133)) def forward(self, x): ## Define forward behavior x = self.layers(x) x = x.view(-1, 12800) # flatten the conv features for the classifier x = self.classif(x) return x # build a Sequential from a compact spec: ints are conv widths, # 'P' is a max-pooling stage, 'A' is an average-pooling stage def add_layer(layer_size, kernel_size=3, padding=1): layer = [] in_channel = 3 for i in layer_size: if i == 'A': layer += [nn.AvgPool2d(kernel_size=2, stride=2), nn.BatchNorm2d(in_channel)] elif i == 'P': layer += [nn.MaxPool2d(kernel_size=2, stride=2)] else: layer += [nn.Conv2d(in_channel, i, kernel_size=3, stride=1), nn.BatchNorm2d(i), nn.ReLU(inplace=True)] in_channel = i return nn.Sequential(*layer) layer_size = [16, 16, 'P', 32, 32, 'P', 64, 64, 'P', 128, 128, 'A'] #-#-# You do NOT have to modify the code below this line. 
#-#-# # instantiate the CNN model_scratch = Net(add_layer(layer_size)) print(model_scratch.parameters) # move tensors to GPU if CUDA is available use_cuda = torch.cuda.is_available() if use_cuda: model_scratch.cuda() ###Output <bound method Module.parameters of Net( (layers): Sequential( (0): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1)) (1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): ReLU(inplace) (3): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1)) (4): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (5): ReLU(inplace) (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (7): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1)) (8): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (9): ReLU(inplace) (10): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1)) (11): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (12): ReLU(inplace) (13): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (14): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1)) (15): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (16): ReLU(inplace) (17): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1)) (18): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (19): ReLU(inplace) (20): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (21): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1)) (22): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (23): ReLU(inplace) (24): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1)) (25): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (26): ReLU(inplace) (27): AvgPool2d(kernel_size=2, stride=2, padding=0) (28): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) (classif): Sequential( (0): Linear(in_features=12800, out_features=4096, bias=True) (1): BatchNorm1d(4096, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): ReLU(inplace) (3): Linear(in_features=4096, out_features=512, bias=True) (4): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (5): ReLU(inplace) (6): Linear(in_features=512, out_features=133, bias=True) ) )> ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ 1. The input images are cropped to (227, 227, 3). I then used a series of Conv2d, Conv2d, pooling blocks, similar to VGG-19 but with smaller filter counts, with batch normalization after every convolution layer so that training converges faster. As a rule of thumb, the early layers have large spatial dimensions and few filters, while the later layers have smaller spatial dimensions and more filters; here the filter count doubles at each stage, from 16 to 32 to 64 and finally 128. The early layers detect edges, the middle layers detect parts such as the eyes, nose, and ears of the dog, and fully connected layers then perform the classification: the hidden classification layers have 4096 and 512 units, and since there are 133 dog breeds the output layer has 133 units. 2. With my initial settings, neither the training loss nor the validation loss was decreasing after training for 15 epochs, which indicated high bias (underfitting). 
Thus I increased the batch size from 10 initially to 64, which reduced both the training and validation loss, and I decreased the learning rate to 0.0008 in order to achieve a higher validation accuracy. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import numpy as np from torch import optim from torch.optim import lr_scheduler criterion_scratch = nn.CrossEntropyLoss() optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.0008) # decay the learning rate by 10x every 4 epochs scheduler = lr_scheduler.StepLR(optimizer_scratch, step_size=4, gamma=0.1) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code def train(n_epochs, loaders, model, optimizer, criterion, scheduler, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### scheduler.step() model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the running average of the training loss optimizer.zero_grad() predict_out = model.forward(data) loss = criterion(predict_out, target) train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) loss.backward() optimizer.step() del data, target print("TRAIN_LOSS", loss) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss outp = model.forward(data) loss = criterion(outp, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) del data, target # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## save the model if validation loss has decreased if valid_loss <= valid_loss_min: torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(20, loaders, model_scratch, optimizer_scratch, criterion_scratch, scheduler, use_cuda, 'model_scratch_my_model.pt') ###Output TRAIN_LOSS tensor(4.1974, device='cuda:0') Epoch: 1 Training Loss: 3.724537 Validation Loss: 3.930086 TRAIN_LOSS tensor(3.3956, device='cuda:0') Epoch: 2 Training Loss: 3.599635 Validation Loss: 3.834121 TRAIN_LOSS tensor(4.1135, device='cuda:0') Epoch: 3 Training Loss: 3.502737 Validation Loss: 3.755331 TRAIN_LOSS tensor(3.5218, device='cuda:0') Epoch: 4 Training Loss: 3.464948 Validation Loss: 3.720370 TRAIN_LOSS tensor(3.3302, device='cuda:0') Epoch: 5 Training Loss: 
3.268178 Validation Loss: 3.396614 TRAIN_LOSS tensor(3.5840, device='cuda:0') Epoch: 6 Training Loss: 3.182151 Validation Loss: 3.494653 TRAIN_LOSS tensor(3.3258, device='cuda:0') Epoch: 7 Training Loss: 3.145743 Validation Loss: 3.390511 TRAIN_LOSS tensor(3.9551, device='cuda:0') Epoch: 8 Training Loss: 3.128365 Validation Loss: 3.293134 TRAIN_LOSS tensor(3.3246, device='cuda:0') Epoch: 9 Training Loss: 3.084185 Validation Loss: 3.346034 TRAIN_LOSS tensor(3.4474, device='cuda:0') Epoch: 10 Training Loss: 3.089423 Validation Loss: 3.359021 TRAIN_LOSS tensor(3.1687, device='cuda:0') Epoch: 11 Training Loss: 3.098815 Validation Loss: 3.375568 TRAIN_LOSS tensor(3.1929, device='cuda:0') Epoch: 12 Training Loss: 3.072397 Validation Loss: 3.446180 TRAIN_LOSS tensor(3.0729, device='cuda:0') Epoch: 13 Training Loss: 3.074861 Validation Loss: 3.392154 TRAIN_LOSS tensor(3.0344, device='cuda:0') Epoch: 14 Training Loss: 3.078466 Validation Loss: 3.260917 TRAIN_LOSS tensor(3.1837, device='cuda:0') Epoch: 15 Training Loss: 3.070722 Validation Loss: 3.222085 TRAIN_LOSS tensor(3.9385, device='cuda:0') Epoch: 16 Training Loss: 3.076960 Validation Loss: 3.323963 TRAIN_LOSS tensor(3.1710, device='cuda:0') Epoch: 17 Training Loss: 3.085339 Validation Loss: 3.312486 TRAIN_LOSS tensor(2.9686, device='cuda:0') Epoch: 18 Training Loss: 3.067366 Validation Loss: 3.155230 TRAIN_LOSS tensor(3.6349, device='cuda:0') Epoch: 19 Training Loss: 3.065664 Validation Loss: 3.170555 TRAIN_LOSS tensor(3.1369, device='cuda:0') Epoch: 20 Training Loss: 3.051291 Validation Loss: 3.268748 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) test(loaders, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.295089 Test Accuracy: 19% (163/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
###Code ## TODO: Specify data loaders import os import numpy as np from torchvision import datasets, transforms import matplotlib.pyplot as plt import torch from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True preprocess_train = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) preprocess_val_test = transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) train_data = datasets.ImageFolder('dogImages/train', preprocess_train) valid_data = datasets.ImageFolder('dogImages/valid', preprocess_val_test) test_data = datasets.ImageFolder('dogImages/test', preprocess_val_test) ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes batch_size = 512 loaders = {} loaders['train'] = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=0, shuffle=True) loaders['valid'] = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=0, shuffle=True) loaders['test'] = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=0, shuffle=True) print(len(train_data)) print(len(valid_data)) print(len(test_data)) print(len(loaders['train'])) ###Output 6680 835 836 14 ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torch import torchvision.models as models import torch.nn as nn import numpy as np # check if CUDA is available use_cuda = torch.cuda.is_available() ## TODO: Specify model architecture model_transfer = models.resnet50(pretrained=True) print(model_transfer.parameters) # freeze the pretrained backbone so only the new head is trained for param in model_transfer.parameters(): param.requires_grad = False # replace the 1000-way ImageNet head with a 133-way dog-breed head fc = nn.Sequential( nn.Linear(2048, 1024), nn.BatchNorm1d(1024), nn.ReLU(inplace=True), nn.Linear(1024, 133), ) model_transfer.fc = fc def weight_init(m): # initialize the new linear layers with a scaled normal distribution if type(m) == nn.Linear: n = m.in_features y = 1/np.sqrt(n) m.weight.data.normal_(0, y) m.bias.data.fill_(0) model_transfer.apply(weight_init) if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ For transfer learning I used the ResNet-50 architecture. ResNet-50's original output layer has 1000 units, but there are only 133 dog breeds to classify, so I froze the pretrained layers and replaced the final fully connected layer with a small head that maps the 2048 ResNet features to 1024 hidden units and then to a 133-unit output layer. This architecture is suitable because the ImageNet-pretrained features already encode the shapes and textures needed to tell animals apart, so only the new head needs to be trained on the dog dataset. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. 
###Code import numpy as np from torch import optim from torch.optim import lr_scheduler criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(model_transfer.fc.parameters(),lr=0.06) scheduler = lr_scheduler.StepLR(optimizer_transfer, step_size=5, gamma=0.1) #optimizer_transfer = optim.SGD(model_transfer.fc.parameters(), lr = 0.2, momentum=0.9) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model def train(n_epochs, loaders, model, optimizer, criterion,scheduler, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### scheduler.step() model.train() for batch_idx, (data, target) in enumerate(loaders['train']): #print(batch_idx) # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like optimizer.zero_grad() predict_out =model.forward(data) loss =criterion(predict_out,target) train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) #print(train_loss) loss.backward() optimizer.step() print(loss) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss outp= model.forward(data) loss=criterion(outp,target) #print(loss) valid_loss =valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) #print(batch_idx ,valid_loss) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) if valid_loss <= valid_loss_min: torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_transfer = train(18, loaders,model_transfer, optimizer_transfer, criterion_transfer,scheduler,use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy #model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output tensor(1.4720, device='cuda:0') Epoch: 1 Training Loss: 3.620361 Validation Loss: 1.776646 tensor(0.7258, device='cuda:0') Epoch: 2 Training Loss: 1.204702 Validation Loss: 1.548412 tensor(0.7072, device='cuda:0') Epoch: 3 Training Loss: 0.958104 Validation Loss: 1.335878 tensor(1.1823, device='cuda:0') Epoch: 4 Training Loss: 0.863628 Validation Loss: 1.152260 tensor(0.4266, device='cuda:0') Epoch: 5 Training Loss: 0.791878 Validation Loss: 1.150469 tensor(0.7827, device='cuda:0') Epoch: 6 Training Loss: 0.707096 Validation Loss: 0.949625 tensor(0.8512, device='cuda:0') Epoch: 7 Training Loss: 0.656516 Validation Loss: 0.968780 tensor(0.8908, device='cuda:0') Epoch: 8 Training Loss: 0.602280 Validation Loss: 0.901228 tensor(0.6047, device='cuda:0') Epoch: 9 Training Loss: 0.564280 Validation Loss: 0.895016 tensor(1.6077, device='cuda:0') Epoch: 10 Training Loss: 0.642895 Validation Loss: 0.917992 tensor(0.5920, 
device='cuda:0') Epoch: 11 Training Loss: 0.565706 Validation Loss: 0.935176 tensor(0.5935, device='cuda:0') Epoch: 12 Training Loss: 0.539019 Validation Loss: 0.997605 tensor(0.7407, device='cuda:0') Epoch: 13 Training Loss: 0.563939 Validation Loss: 0.956721 tensor(0.7797, device='cuda:0') Epoch: 14 Training Loss: 0.561679 Validation Loss: 0.887260 tensor(0.6553, device='cuda:0') Epoch: 15 Training Loss: 0.540775 Validation Loss: 0.837890 tensor(2.1922, device='cuda:0') Epoch: 16 Training Loss: 0.643589 Validation Loss: 0.875466 tensor(0.5793, device='cuda:0') Epoch: 17 Training Loss: 0.518998 Validation Loss: 0.964883 tensor(1.0518, device='cuda:0') Epoch: 18 Training Loss: 0.566236 Validation Loss: 0.801014 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code model_transfer.load_state_dict(torch.load('model_transfer.pt')) test(loaders, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.866201 Test Accuracy: 76% (640/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code #class_names = [item[4:].replace("_", " ") for item in loaders['train']] ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. model_transfer.load_state_dict(torch.load('model_transfer.pt',map_location=lambda storage, loc: storage)) # list of class names by index, i.e. a name can be accessed like class_names[0] from glob import glob from PIL import Image from torch.autograd import Variable def predict_breed_transfer(image_name): """load image, returns cuda tensor""" image = Image.open(image_name) image = preprocess_val_test(image).float() image = Variable(image, requires_grad=True) image = image.unsqueeze(0) # print(image.shape) model_transfer.eval() if use_cuda: image =image.cuda() output = model_transfer(image) _, preds_tensor = torch.max(output, 1) preds = np.squeeze(preds_tensor.numpy() if not torch.cuda.is_available() else np.squeeze(preds_tensor.cpu().numpy())) dic ={} for item in glob("dogImages/train/*",recursive=True): #print(tuple(item[16:].split('.'))) key,value=tuple(item[16:].split('.')) key =int(key) dic[key] =value return dic[int(preds)] print(predict_breed_transfer('images/Brittany_02625.jpg')) op= np.array([predict_breed_transfer(human_files_short[i]) for i in range(100)]) print(op) ###Output ['Brittany' 'Chesapeake_bay_retriever' 'American_staffordshire_terrier' 'Flat-coated_retriever' 'Doberman_pinscher' 'Basset_hound' 'Brittany' 'Chinese_crested' 'Borzoi' 'Australian_terrier' 'Brittany' 'Norwegian_lundehund' 'Irish_wolfhound' 'Norwegian_lundehund' 'Cardigan_welsh_corgi' 'Cardigan_welsh_corgi' 'Doberman_pinscher' 'Australian_terrier' 'Dogue_de_bordeaux' 'Bulldog' 'Clumber_spaniel' 'Entlebucher_mountain_dog' 'Chesapeake_bay_retriever' 'Chinese_crested' 'Doberman_pinscher' 'Wirehaired_pointing_griffon' 'Bullmastiff' 'Norwegian_lundehund' 'Norwegian_lundehund' 'Chesapeake_bay_retriever' 'Brittany' 'Doberman_pinscher' 'Brittany' 'Petit_basset_griffon_vendeen' 'Doberman_pinscher' 'Doberman_pinscher' 'Irish_red_and_white_setter' 'Australian_terrier' 'American_staffordshire_terrier' 'Norwegian_lundehund' 'Chesapeake_bay_retriever' 'Basenji' 'Neapolitan_mastiff' 
'Irish_wolfhound' 'Cardigan_welsh_corgi' 'Chesapeake_bay_retriever' 'Cardigan_welsh_corgi' 'Affenpinscher' 'Affenpinscher' 'Chesapeake_bay_retriever' 'Chesapeake_bay_retriever' 'Chesapeake_bay_retriever' 'Affenpinscher' 'Chesapeake_bay_retriever' 'Basset_hound' 'Affenpinscher' 'Entlebucher_mountain_dog' 'Affenpinscher' 'Entlebucher_mountain_dog' 'Black_russian_terrier' 'English_setter' 'Boykin_spaniel' 'American_staffordshire_terrier' 'Irish_terrier' 'Doberman_pinscher' 'Norwegian_lundehund' 'Doberman_pinscher' 'Doberman_pinscher' 'Curly-coated_retriever' 'Irish_water_spaniel' 'Doberman_pinscher' 'Doberman_pinscher' 'Chinese_crested' 'Doberman_pinscher' 'Curly-coated_retriever' 'American_foxhound' 'Chinese_crested' 'American_staffordshire_terrier' 'Norwegian_lundehund' 'Brittany' 'Doberman_pinscher' 'Doberman_pinscher' 'Chinese_crested' 'Brittany' 'Brittany' 'Chinese_crested' 'Keeshond' 'Australian_terrier' 'Irish_terrier' 'Bullmastiff' 'Australian_cattle_dog' 'American_staffordshire_terrier' 'Australian_terrier' 'Cardigan_welsh_corgi' 'Cardigan_welsh_corgi' 'Curly-coated_retriever' 'Bearded_collie' 'Australian_terrier' 'Chinese_crested' 'Australian_cattle_dog'] ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither if dog_detector_Resnet(img_path): print("Dog detected and the predicted breed is ", predict_breed_transfer(img_path)) elif face_detector(img_path): print("Human detected and the resembling dog breed is ", predict_breed_transfer(img_path)) else: print("Neither a human nor a dog was detected") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ The performance was better than I expected. Three possible improvements: (1) try several different learning rates and plot the resulting loss curves to see which one works best; (2) package the algorithm as an app to make it easier to use; (3) in the CNN developed from scratch, add dropout layers in order to prevent overfitting. ###Code ## TODO: Execute your algorithm from Step 5 on ## at least 6 images on your computer. 
## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output Human detected and the resembling dog breed is Chesapeake_bay_retriever Human detected and the resembling dog breed is Irish_wolfhound Human detected and the resembling dog breed is Irish_terrier Dog detected and the predicted breed is Bulldog Dog detected and the predicted breed is Manchester_terrier Neither a human nor a dog was detected ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode. The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook. --- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! 
The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. 
The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ 96% of `human_files_short` are detected as containing a face, and 18% of `dog_files_short` are incorrectly detected as containing a face. ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. human_face_count = sum([face_detector(i) for i in human_files_short]) print(f"{human_face_count} human faces found in first 100 human images") dog_face_count = sum([face_detector(i) for i in dog_files_short]) print(f"{dog_face_count} human faces found in first 100 dog images") ###Output 96 human faces found in first 100 human images 18 human faces found in first 100 dog images ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). 
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.

(IMPLEMENTATION) Making Predictions with a Pre-trained Model

In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.

Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## Load and pre-process an image from the given img_path
    img = Image.open(img_path).convert('RGB')
    in_transform = transforms.Compose([transforms.Resize(256),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                            std=(0.229, 0.224, 0.225))])
    img = in_transform(img)[:3,:,:].unsqueeze(0)
    if use_cuda:
        img = img.cuda()

    ## Return the *index* of the predicted class for that image
    # inference only: switch off dropout and skip gradient tracking
    VGG16.eval()
    with torch.no_grad():
        output = VGG16(img)
    _, predicted_class = torch.max(output, 1)
    return predicted_class.item()  # predicted class index as a plain int
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector

While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).

Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    return VGG16_predict(img_path) in range(151, 269)  # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector

__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__ 0% of human_files_short detected as containing a dog and 95% of dog_files_short detected as containing a dog.
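Before the 100-image evaluation in the next cell, a single-image spot check can confirm the whole pipeline end to end (a minimal sketch; `dog_files[0]` is simply the first dog image gathered in Step 0):
###Code
# sketch: predict the ImageNet index for one known dog image
idx = VGG16_predict(dog_files[0])
print(idx, 151 <= idx <= 268)  # the raw index, and whether it falls in the dog range
###Output
_____no_output_____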
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
# each set has 100 images, so a count of detections equals a percentage
human_dog_count = sum([dog_detector(i) for i in human_files_short])
print(f"{human_dog_count} percent of human images detected as containing dog")

dog_dog_count = sum([dog_detector(i) for i in dog_files_short])
print(f"{dog_dog_count} percent of dog images detected as containing dog")
###Output
0 percent of human images detected as containing dog
95 percent of dog images detected as containing dog
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)

Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel (side-by-side images omitted). It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels; images likewise omitted). Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. (Images of yellow, chocolate, and black Labradors omitted.)

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times (1/133 ≈ 0.75%), which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.
If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torch.utils.data import DataLoader from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes normalization_transform = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) train_dog_data = datasets.ImageFolder(root="dogImages/train", transform=transforms.Compose([transforms.Resize(256), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalization_transform])) test_dog_data = datasets.ImageFolder(root="dogImages/test", transform=transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalization_transform])) valid_dog_data = datasets.ImageFolder(root="dogImages/valid", transform=transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalization_transform])) train_dog_loader = DataLoader(train_dog_data, batch_size=20, shuffle=True) test_dog_loader = DataLoader(test_dog_data, batch_size=20, shuffle=True) valid_dog_loader = DataLoader(valid_dog_data, batch_size=20, shuffle=True) loaders_scratch = {'train': train_dog_loader, 'valid': valid_dog_loader, 'test': test_dog_loader} ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: The images are resized to 256 by 256 and then cropped to 224 by 224. The input tensor is 224 by 224 with 3 colour channels. This makes the input tensor the same size as would be used for pretrained models like VGG and so should be reasonable to make a comparison. The training dataset is augmented by performing a random crop which will lead to a variation in horizontal and vertical position of the dog. Also a random horizontal flip is performed on the training dataset to give variation on the orientation of the dog. The validation and test data sets have not been augmented as these are used to assess how well the model is performing. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 32, 3, padding=1) self.conv2 = nn.Conv2d(32, 64, 5) self.conv3 = nn.Conv2d(64, 128, 5, padding=1) self.conv4 = nn.Conv2d(128, 256, 7) self.conv5 = nn.Conv2d(256, 512, 7, padding=2) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(4 * 4 * 512, 512) self.fc2 = nn.Linear(512, 133) self.dropout = nn.Dropout(p=0.25) def forward(self, x): ## Define forward behavior x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = self.pool(F.relu(self.conv3(x))) x = self.pool(F.relu(self.conv4(x))) x = self.pool(F.relu(self.conv5(x))) x = x.view(-1, 4 * 4 * 512) x = self.dropout(x) x = self.dropout(F.relu(self.fc1(x))) x = self.fc2(x) return x #-#-# You do NOT have to modify the code below this line. 
#-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ The network is formed of 5 convolutional layers and 2 linear layers for classification. The convolutional layers start with a kernel size of 3 for basic feature detection and this increases in later layers to 5 and then 7 for more complicated feature detection. Padding has been applied to some convolutional layers to help achieve sizing which can be halved by maxpool. The first layer increases the number of channels to 32 and then later layers double this until 512. The images are reduced to a tensor of 4 by 4 by 512 channels by the end of the convolutional layers. The classifier layers decrease the input to 512 features and then 133 to match the number of classes. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) train_loss = train_loss + ((1/(batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1/(batch_idx + 1)) * (loss.data - valid_loss)) # train_loss = train_loss/len(loaders['train'].sampler) # valid_loss = valid_loss/len(loaders['valid'].sampler) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: 
            # record the new best validation loss so later epochs are compared
            # against it (this update was missing, so the checkpoint would
            # otherwise be overwritten on every epoch, not only on improvement)
            valid_loss_min = valid_loss
torch.save(model.state_dict(), save_path) # return trained model return model # train the model n_epochs = 50 model_scratch = train(n_epochs, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.888999 Validation Loss: 4.885742 Epoch: 2 Training Loss: 4.879220 Validation Loss: 4.862939 Epoch: 3 Training Loss: 4.833145 Validation Loss: 4.754681 Epoch: 4 Training Loss: 4.738865 Validation Loss: 4.693149 Epoch: 5 Training Loss: 4.677740 Validation Loss: 4.617553 Epoch: 6 Training Loss: 4.561268 Validation Loss: 4.449966 Epoch: 7 Training Loss: 4.474662 Validation Loss: 4.422789 Epoch: 8 Training Loss: 4.428460 Validation Loss: 4.454816 Epoch: 9 Training Loss: 4.375608 Validation Loss: 4.321389 Epoch: 10 Training Loss: 4.340864 Validation Loss: 4.327600 Epoch: 11 Training Loss: 4.302265 Validation Loss: 4.271610 Epoch: 12 Training Loss: 4.262475 Validation Loss: 4.215666 Epoch: 13 Training Loss: 4.203724 Validation Loss: 4.157260 Epoch: 14 Training Loss: 4.162832 Validation Loss: 4.157262 Epoch: 15 Training Loss: 4.109228 Validation Loss: 4.079955 Epoch: 16 Training Loss: 4.045836 Validation Loss: 4.027072 Epoch: 17 Training Loss: 3.988782 Validation Loss: 4.048889 Epoch: 18 Training Loss: 3.942562 Validation Loss: 3.914874 Epoch: 19 Training Loss: 3.881806 Validation Loss: 3.958043 Epoch: 20 Training Loss: 3.822155 Validation Loss: 4.017154 Epoch: 21 Training Loss: 3.761681 Validation Loss: 3.928769 Epoch: 22 Training Loss: 3.697942 Validation Loss: 3.878833 Epoch: 23 Training Loss: 3.649371 Validation Loss: 3.698668 Epoch: 24 Training Loss: 3.574092 Validation Loss: 3.708979 Epoch: 25 Training Loss: 3.495651 Validation Loss: 3.734290 Epoch: 26 Training Loss: 3.428197 Validation Loss: 3.626797 Epoch: 27 Training Loss: 3.356533 Validation Loss: 3.548769 Epoch: 28 Training Loss: 3.297219 Validation Loss: 3.666188 Epoch: 29 Training Loss: 3.217206 Validation Loss: 3.505220 Epoch: 30 Training Loss: 3.139154 Validation Loss: 3.554886 Epoch: 31 Training Loss: 3.094743 Validation Loss: 3.508626 Epoch: 32 Training Loss: 3.006395 Validation Loss: 3.572104 Epoch: 33 Training Loss: 2.924520 Validation Loss: 3.536873 Epoch: 34 Training Loss: 2.853004 Validation Loss: 3.476658 Epoch: 35 Training Loss: 2.779621 Validation Loss: 3.429744 Epoch: 36 Training Loss: 2.729995 Validation Loss: 3.437916 Epoch: 37 Training Loss: 2.643965 Validation Loss: 3.404952 Epoch: 38 Training Loss: 2.557295 Validation Loss: 3.475810 Epoch: 39 Training Loss: 2.430711 Validation Loss: 3.507876 Epoch: 40 Training Loss: 2.410506 Validation Loss: 3.346870 Epoch: 41 Training Loss: 2.311825 Validation Loss: 3.409185 Epoch: 42 Training Loss: 2.253547 Validation Loss: 3.427869 Epoch: 43 Training Loss: 2.189887 Validation Loss: 3.424271 Epoch: 44 Training Loss: 2.095409 Validation Loss: 3.323129 Epoch: 45 Training Loss: 2.005786 Validation Loss: 3.463596 Epoch: 46 Training Loss: 1.941447 Validation Loss: 3.417195 Epoch: 47 Training Loss: 1.923979 Validation Loss: 3.455123 Epoch: 48 Training Loss: 1.828490 Validation Loss: 3.533992 Epoch: 49 Training Loss: 1.762469 Validation Loss: 3.519154 Epoch: 50 Training Loss: 1.662245 Validation Loss: 3.512939 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. 
Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.457604 Test Accuracy: 22% (191/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders normalization_transform = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) train_dog_data = datasets.ImageFolder(root="dogImages/train", transform=transforms.Compose([transforms.Resize(256), transforms.RandomCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalization_transform])) test_dog_data = datasets.ImageFolder(root="dogImages/test", transform=transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalization_transform])) valid_dog_data = datasets.ImageFolder(root="dogImages/valid", transform=transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalization_transform])) data_transfer = {'train': train_dog_data, 'valid': valid_dog_data, 'test': test_dog_data} train_dog_loader = DataLoader(train_dog_data, batch_size=20, shuffle=True) test_dog_loader = DataLoader(test_dog_data, batch_size=20, shuffle=True) valid_dog_loader = DataLoader(valid_dog_data, batch_size=20, shuffle=True) loaders_transfer = {'train': train_dog_loader, 'valid': valid_dog_loader, 'test': test_dog_loader} ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. 
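One way to decide what to replace (a quick sketch, independent of the implementation cell that follows): instantiate the pretrained DenseNet-121 used below and inspect its classifier to find the number of input features the new layer must accept.
###Code
import torchvision.models as models

# peek at the pretrained classifier before swapping it out (sketch)
backbone = models.densenet121(pretrained=True)
print(backbone.classifier)              # Linear(in_features=1024, out_features=1000, bias=True)
print(backbone.classifier.in_features)  # 1024 input features for the replacement layer
###Output
_____no_output_____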
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.densenet121(pretrained=True)

# As we are only changing the classifier, prevent the feature-extraction
# parameters from being updated
for param in model_transfer.parameters():
    param.requires_grad = False

# Change the classifier to be suitable for dog breed identification
num_of_features = model_transfer.classifier.in_features
model_transfer.classifier = nn.Linear(num_of_features, 133)

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.

__Answer:__ I have selected densenet121. This is one of the pretrained vision models in PyTorch and builds upon ResNet to give better image recognition, so it should reach high accuracy. Inspecting the model reveals a single classifier layer at model_transfer.classifier. I have replaced that classifier layer with a single linear layer with the same number of input features as the original classifier, but with the number of output features reduced to 133 to match the number of classes.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code
# train the model
n_epochs = 10
model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
Epoch: 1 	Training Loss: 3.808450 	Validation Loss: 2.641543
Epoch: 2 	Training Loss: 2.287106 	Validation Loss: 1.647435
Epoch: 3 	Training Loss: 1.593291 	Validation Loss: 1.210166
Epoch: 4 	Training Loss: 1.250893 	Validation Loss: 0.973607
Epoch: 5 	Training Loss: 1.058738 	Validation Loss: 0.841996
Epoch: 6 	Training Loss: 0.929717 	Validation Loss: 0.747822
Epoch: 7 	Training Loss: 0.809608 	Validation Loss: 0.703295
Epoch: 8 	Training Loss: 0.753196 	Validation Loss: 0.639810
Epoch: 9 	Training Loss: 0.690395 	Validation Loss: 0.616178
Epoch: 10 	Training Loss: 0.648813 	Validation Loss: 0.572752
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.597594

Test Accuracy: 85% (711/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model

Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

in_transform = transforms.Compose([transforms.Resize(256),
                                   transforms.CenterCrop(224),
                                   transforms.ToTensor(),
                                   normalization_transform])

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    img = Image.open(img_path).convert('RGB')
    img = in_transform(img)[:3,:,:].unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    # inference only: disable dropout and gradient tracking
    model_transfer.eval()
    with torch.no_grad():
        output = model_transfer(img)
    _, prediction = torch.max(output, 1)
    return class_names[prediction]
###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm

Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if dog_detector(img_path):
        breed = predict_breed_transfer(img_path)
        print(f"Found dog with breed {breed}")
    elif face_detector(img_path):
        breed = predict_breed_transfer(img_path)
        print(f"Human resembling breed {breed}")
    else:
        print("Neither dog or human detected")
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm

In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!

Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ (Three possible points for improvement) This model performs quite well considering the results for a first attempt. The accuracy may be improved by training for longer, as the loss was still decreasing at the time I stopped it. Additional images or extra augmentation may help, given the wide variation expected in images and the differences between dogs of the same breed. Finally, improving the face and dog detection may help, as the current implementation misses some dogs and faces, and cannot detect whether an image contains both a dog and a human.
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output Human resembling breed Bull terrier Human resembling breed Bull terrier Neither dog or human detected Found dog with breed Affenpinscher Found dog with breed Affenpinscher Found dog with breed Affenpinscher ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. 
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. 
Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.

Write a Human Face Detector

We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector

__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?

Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.

__Answer:__ (You can print out your results and/or write your percentages in this cell)
1. human face detection accuracy is 98%
2. dog face detection accuracy is 17%
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
human_accuracy = 0
dog_accuracy = 0
for i in range(100):
    if face_detector(human_files_short[i]):
        human_accuracy += 1
    if face_detector(dog_files_short[i]):
        dog_accuracy += 1

print(f"human face detection accuracy is {human_accuracy}%")
print(f"dog face detection accuracy is {dog_accuracy}%")
###Output
human face detection accuracy is 98%
dog face detection accuracy is 17%
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs

In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.

Obtain Pre-trained VGG-16 Model

The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks.
ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() print(f"use_cuda is {use_cuda}\n") print(VGG16) ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:05<00:00, 101157838.90it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True import torchvision.transforms as transforms def open_and_process_img(img_path): im_size = 256 crop_size = 224 mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] normalize = transforms.Normalize(mean, std) preprocess = transforms.Compose([ transforms.Resize(im_size), transforms.CenterCrop(crop_size), transforms.ToTensor(), normalize ]) img = Image.open(img_path) # preprocess image and add batch as a dimension processed_img = preprocess(img).unsqueeze_(0) if use_cuda: processed_img = processed_img.cuda() return processed_img def predict_class_index(model, processed_img): # turn on evaluation mode model.eval() # turn off autograd with torch.no_grad(): output = model(processed_img) predicted_class_index = output.argmax().item() # turn off evaluation mode model.train() return predicted_class_index def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image processed_img = open_and_process_img(img_path) return predict_class_index(VGG16, processed_img) # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. 
Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).

Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    index = VGG16_predict(img_path)
    return 268 >= index >= 151  # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector

__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__
1. Dog accuracy in human_files_short is 0%
2. Dog accuracy in dog_files_short is 100%
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
dog_accuracy_in_human_files = 0
dog_accuracy_in_dog_files = 0
for i in range(100):
    if dog_detector(human_files_short[i]):
        dog_accuracy_in_human_files += 1
    if dog_detector(dog_files_short[i]):
        dog_accuracy_in_dog_files += 1

print(f"Dog accuracy in human_files_short is {dog_accuracy_in_human_files}%")
print(f"Dog accuracy in dog_files_short is {dog_accuracy_in_dog_files}%")
###Output
Dog accuracy in human_files_short is 0%
Dog accuracy in dog_files_short is 100%
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)

Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel (side-by-side images omitted). It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels; images likewise omitted). Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
(Images of yellow, chocolate, and black Labradors omitted.)

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
data_dir = '/data/dog_images'

# global keys for train, valid and test in the same sequence
train, valid, test = 'train', 'valid', 'test'
phases = [train, valid, test]

# number of subprocesses to use for data loading
num_workers = 0
# number of samples per batch to load
batch_size = 16

im_size = 256
crop_size = 224
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
normalize = transforms.Normalize(mean, std)

# Define your transforms for the training and validation sets
data_transforms = {
    train: transforms.Compose([
        transforms.RandomResizedCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ]),
    valid: transforms.Compose([
        transforms.Resize(im_size),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize
    ]),
    test: transforms.Compose([
        transforms.Resize(im_size),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize
    ])
}

# Load the datasets with ImageFolder
image_datasets = {phase: datasets.ImageFolder(os.path.join(data_dir, phase),
                                              transform=data_transforms[phase])
                  for phase in phases}

# Using the image datasets and the transforms, define the dataloaders
dataloaders = {phase: torch.utils.data.DataLoader(image_datasets[phase],
                                                  batch_size=batch_size,
                                                  num_workers=num_workers,
                                                  shuffle=True)
               for phase in phases}

dataset_sizes = {phase: len(image_datasets[phase]) for phase in phases}
num_dog_classes = len(image_datasets[train].classes)
print(f"total dog classes are {num_dog_classes}")
###Output
total dog classes are 133
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**:
1. I chose the standard resize value of 256 and crop value of 224 to resize the images: a random resized crop to 224 for the training set, and a resize to 256 followed by a center crop to 224 for the validation and test sets. These are the standard input dimensions for the VGG16 model, so I kept them.
2. I decided to augment the dataset. I used resizing, cropping, and a random horizontal flip on the training set.
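A quick way to eyeball the effect of the augmentation (a minimal sketch using the loaders and normalization constants defined above):
###Code
import numpy as np
import matplotlib.pyplot as plt

# sketch: show one augmented training image to sanity-check the transforms
images, labels = next(iter(dataloaders[train]))
img = images[0].numpy().transpose(1, 2, 0)                 # CHW -> HWC
img = np.clip(np.array(std) * img + np.array(mean), 0, 1)  # undo normalization
plt.imshow(img)
plt.title(image_datasets[train].classes[labels[0]])
plt.show()
###Output
_____no_output_____
###Markdown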
(IMPLEMENTATION) Model Architecture

Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # convolutional layer1 sees (224x224x3) image tensor
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1)
        # convolutional layer2 sees (56x56x32) tensor after maxpool
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)
        # convolutional layer3 sees (14x14x64) tensor after maxpool
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        # after conv3 and maxpool, we get (7x7x128) tensor
        # batchNormalization layer
        self.batchNorm = nn.BatchNorm2d(128)
        # max pooling layer
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # linear layer1 (7 * 7 * 128 -> 512)
        self.fc1 = nn.Linear(7 * 7 * 128, 512)
        # linear layer2 (512 -> num_dog_classes)
        self.fc2 = nn.Linear(512, num_dog_classes)
        # dropout layer (p=0.3)
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        ## Define forward behavior
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.batchNorm(x)
        # flatten image input
        x = x.view(-1, 7 * 7 * 128)
        # add dropout to layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout to layer
        x = self.dropout(x)
        # add final classification layer (512 -> num_dog_classes)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__ I added 3 convolution layers, each with a kernel size of 3 and padding of 1, and used a stride of 2 for the first two layers. I gradually doubled the number of output channels from 32 up to 128. Each convolution layer is followed by a ReLU activation function and a max pool with kernel size 2 and stride 2, which halves the spatial dimensions. After the convolution layers, I passed the output through two fully-connected layers with a ReLU activation function and dropout of 0.3. I had tried 5 convolution layers and three fully-connected layers with the Adam optimizer, but the training loss was not decreasing, so I went with the architecture above and switched to SGD.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.05)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
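The next cell implements the training loop. As an optional extension (a sketch with illustrative file and key names, not used in the run below), a checkpoint can bundle the optimizer state alongside the model parameters so training can be resumed later:
###Code
# sketch: checkpoint both model and optimizer state for resumable training
checkpoint = {
    'model_state': model_scratch.state_dict(),
    'optimizer_state': optimizer_scratch.state_dict(),
}
torch.save(checkpoint, 'checkpoint_scratch.pt')

# restoring later
ckpt = torch.load('checkpoint_scratch.pt')
model_scratch.load_state_dict(ckpt['model_state'])
optimizer_scratch.load_state_dict(ckpt['optimizer_state'])
###Output
_____no_output_____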
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update training loss train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model...'.format(valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model n_epochs = 40 # train the model loaders_scratch = dataloaders model_scratch = train(n_epochs, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.752794 Validation Loss: 4.609991 Validation loss decreased (inf --> 4.609991). Saving model... Epoch: 2 Training Loss: 4.531744 Validation Loss: 4.742953 Epoch: 3 Training Loss: 4.388729 Validation Loss: 4.286127 Validation loss decreased (4.609991 --> 4.286127). Saving model... Epoch: 4 Training Loss: 4.278649 Validation Loss: 4.178638 Validation loss decreased (4.286127 --> 4.178638). Saving model... Epoch: 5 Training Loss: 4.184365 Validation Loss: 4.140157 Validation loss decreased (4.178638 --> 4.140157). Saving model... Epoch: 6 Training Loss: 4.089499 Validation Loss: 4.218699 Epoch: 7 Training Loss: 3.994774 Validation Loss: 3.886493 Validation loss decreased (4.140157 --> 3.886493). Saving model... Epoch: 8 Training Loss: 3.938348 Validation Loss: 3.841513 Validation loss decreased (3.886493 --> 3.841513). Saving model... Epoch: 9 Training Loss: 3.889404 Validation Loss: 3.951707 Epoch: 10 Training Loss: 3.805171 Validation Loss: 3.691784 Validation loss decreased (3.841513 --> 3.691784). Saving model... Epoch: 11 Training Loss: 3.746225 Validation Loss: 3.669086 Validation loss decreased (3.691784 --> 3.669086). Saving model... 
Epoch: 12 Training Loss: 3.724028 Validation Loss: 3.653896 Validation loss decreased (3.669086 --> 3.653896). Saving model... Epoch: 13 Training Loss: 3.668931 Validation Loss: 3.809511 Epoch: 14 Training Loss: 3.599389 Validation Loss: 3.396244 Validation loss decreased (3.653896 --> 3.396244). Saving model... Epoch: 15 Training Loss: 3.565969 Validation Loss: 3.497134 Epoch: 16 Training Loss: 3.519962 Validation Loss: 3.582606 Epoch: 17 Training Loss: 3.486735 Validation Loss: 3.417332 Epoch: 18 Training Loss: 3.481862 Validation Loss: 3.472093 Epoch: 19 Training Loss: 3.398566 Validation Loss: 3.489837 Epoch: 20 Training Loss: 3.391873 Validation Loss: 3.513326 Epoch: 21 Training Loss: 3.320244 Validation Loss: 3.269986 Validation loss decreased (3.396244 --> 3.269986). Saving model... Epoch: 22 Training Loss: 3.293757 Validation Loss: 3.225341 Validation loss decreased (3.269986 --> 3.225341). Saving model... Epoch: 23 Training Loss: 3.259340 Validation Loss: 3.344708 Epoch: 24 Training Loss: 3.229743 Validation Loss: 3.287146 Epoch: 25 Training Loss: 3.208021 Validation Loss: 3.664380 Epoch: 26 Training Loss: 3.221724 Validation Loss: 3.318207 Epoch: 27 Training Loss: 3.175948 Validation Loss: 3.281355 Epoch: 28 Training Loss: 3.143841 Validation Loss: 3.358771 Epoch: 29 Training Loss: 3.130852 Validation Loss: 3.380400 Epoch: 30 Training Loss: 3.090849 Validation Loss: 3.289045 Epoch: 31 Training Loss: 3.072389 Validation Loss: 3.192833 Validation loss decreased (3.225341 --> 3.192833). Saving model... Epoch: 32 Training Loss: 3.047431 Validation Loss: 3.400687 Epoch: 33 Training Loss: 3.031165 Validation Loss: 3.024716 Validation loss decreased (3.192833 --> 3.024716). Saving model... Epoch: 34 Training Loss: 3.026075 Validation Loss: 3.096032 Epoch: 35 Training Loss: 3.007017 Validation Loss: 3.158739 Epoch: 36 Training Loss: 2.968704 Validation Loss: 3.546177 Epoch: 37 Training Loss: 2.972792 Validation Loss: 3.197417 Epoch: 38 Training Loss: 2.942709 Validation Loss: 3.084244 Epoch: 39 Training Loss: 2.902776 Validation Loss: 3.125854 Epoch: 40 Training Loss: 2.892846 Validation Loss: 3.233792 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class output = F.softmax(output, dim=1) pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output
Test Loss: 3.035008


Test Accuracy: 27% (226/836)

###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
import torchvision.models as models

# download the pretrained model
model_transfer = models.vgg16(pretrained=True)
model_transfer

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torch.nn as nn

## TODO: Specify model architecture

# Freeze parameters of the model to avoid backpropagation through them
for param in model_transfer.parameters():
    param.requires_grad = False

# Define dog breed classifier part of model_transfer
classifier = nn.Sequential(nn.Linear(7*7*512, 4096),
                           nn.ReLU(),
                           nn.Dropout(0.5),
                           nn.Linear(4096, 1024),
                           nn.ReLU(),
                           nn.Dropout(0.5),
                           nn.Linear(1024, 512),
                           nn.ReLU(),
                           nn.Dropout(0.5),
                           nn.Linear(512, num_dog_classes))

# Replace the original classifier with the dog breed classifier from above
model_transfer.classifier = classifier

if use_cuda:
    model_transfer = model_transfer.cuda()

###Output
_____no_output_____

###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ I used the vgg16 model, which is pretrained on many images of different categories, and hence it should give higher accuracy. I kept the convolutional layers of this model as they are because I believe they form a good feature extractor thanks to the training on many images. Thus I only changed the classifier, adding four fully-connected layers with ReLU activations and a dropout of 0.5 - the same value vgg16 uses. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.005)

###Output
_____no_output_____
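###Markdown
Since every pretrained weight was frozen before the classifier swap, only the new classifier layers should receive gradient updates. A quick, hedged sanity check - a sketch that only assumes `model_transfer` as defined above:

###Code
# count parameters that will (and will not) receive gradients;
# only the freshly added classifier layers should show up as trainable
trainable = sum(p.numel() for p in model_transfer.parameters() if p.requires_grad)
frozen = sum(p.numel() for p in model_transfer.parameters() if not p.requires_grad)
print('trainable parameters: {:,}'.format(trainable))
print('frozen parameters:    {:,}'.format(frozen))

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.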
###Code n_epochs = 25 loaders_transfer = dataloaders # train the model model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 4.837315 Validation Loss: 4.584373 Validation loss decreased (inf --> 4.584373). Saving model... Epoch: 2 Training Loss: 4.421878 Validation Loss: 3.211812 Validation loss decreased (4.584373 --> 3.211812). Saving model... Epoch: 3 Training Loss: 3.423073 Validation Loss: 1.869799 Validation loss decreased (3.211812 --> 1.869799). Saving model... Epoch: 4 Training Loss: 2.625210 Validation Loss: 1.287815 Validation loss decreased (1.869799 --> 1.287815). Saving model... Epoch: 5 Training Loss: 2.187091 Validation Loss: 1.042355 Validation loss decreased (1.287815 --> 1.042355). Saving model... Epoch: 6 Training Loss: 1.883960 Validation Loss: 0.874987 Validation loss decreased (1.042355 --> 0.874987). Saving model... Epoch: 7 Training Loss: 1.733245 Validation Loss: 0.784774 Validation loss decreased (0.874987 --> 0.784774). Saving model... Epoch: 8 Training Loss: 1.650134 Validation Loss: 0.732854 Validation loss decreased (0.784774 --> 0.732854). Saving model... Epoch: 9 Training Loss: 1.560623 Validation Loss: 0.673331 Validation loss decreased (0.732854 --> 0.673331). Saving model... Epoch: 10 Training Loss: 1.462870 Validation Loss: 0.616905 Validation loss decreased (0.673331 --> 0.616905). Saving model... Epoch: 11 Training Loss: 1.415885 Validation Loss: 0.586387 Validation loss decreased (0.616905 --> 0.586387). Saving model... Epoch: 12 Training Loss: 1.329112 Validation Loss: 0.551084 Validation loss decreased (0.586387 --> 0.551084). Saving model... Epoch: 13 Training Loss: 1.300486 Validation Loss: 0.555801 Epoch: 14 Training Loss: 1.289603 Validation Loss: 0.596751 Epoch: 15 Training Loss: 1.249213 Validation Loss: 0.527462 Validation loss decreased (0.551084 --> 0.527462). Saving model... Epoch: 16 Training Loss: 1.221848 Validation Loss: 0.533462 Epoch: 17 Training Loss: 1.190377 Validation Loss: 0.507375 Validation loss decreased (0.527462 --> 0.507375). Saving model... Epoch: 18 Training Loss: 1.174511 Validation Loss: 0.508307 Epoch: 19 Training Loss: 1.129867 Validation Loss: 0.525060 Epoch: 20 Training Loss: 1.115245 Validation Loss: 0.503840 Validation loss decreased (0.507375 --> 0.503840). Saving model... Epoch: 21 Training Loss: 1.087990 Validation Loss: 0.488787 Validation loss decreased (0.503840 --> 0.488787). Saving model... Epoch: 22 Training Loss: 1.095026 Validation Loss: 0.463143 Validation loss decreased (0.488787 --> 0.463143). Saving model... Epoch: 23 Training Loss: 1.057969 Validation Loss: 0.465628 Epoch: 24 Training Loss: 1.048773 Validation Loss: 0.456484 Validation loss decreased (0.463143 --> 0.456484). Saving model... Epoch: 25 Training Loss: 1.018723 Validation Loss: 0.460079 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. 
###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output
Test Loss: 0.496047


Test Accuracy: 84% (705/836)

###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in image_datasets['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    processed_img = open_and_process_img(img_path)
    class_index = predict_class_index(model_transfer, processed_img)
    return class_names[class_index] # return dog breed

###Output
_____no_output_____

###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    img = Image.open(img_path)
    ## handle cases for a human face, dog, and neither
    if face_detector(img_path):
        print("hello, human!")
        print(f"You look like a ... {predict_breed_transfer(img_path)}")
    elif dog_detector(img_path):
        print("hello, dog!")
        print(f"Your predicted breed is ... {predict_breed_transfer(img_path)}")
    else:
        print("Err! No human or dog detected in the picture")
    plt.imshow(img)
    plt.show()
    print("*****************************************\n")

###Output
_____no_output_____

###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) The output is better than I expected. - I could try increasing the learning rate and the number of epochs, along with other hyperparameters. - I could use a neural network to detect human faces. - I could try other pretrained models, like densenet121 and resnet.

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
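## a hedged usage sketch -- run_app works on any local file path
## (these paths are hypothetical; substitute images from your own machine):
# run_app('my_images/me.jpg')
# run_app('my_images/my_dog.jpg')
# run_app('my_images/cat.jpg')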
## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)

###Output
hello, human!
You look like a ... Basenji

###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road Ahead We break the notebook into separate steps.
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. 
The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. human_faces = np.sum([face_detector(img) for img in tqdm(human_files_short)]) dog_faces = np.sum([face_detector(img) for img in tqdm(dog_files_short)]) print(f'Faces detected in human files: {human_faces}%') print(f'Faces detected in dog files: {dog_faces}%') ###Output 100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 81.39it/s] 100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:06<00:00, 15.51it/s] ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). 
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output
_____no_output_____

###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image

    # load image
    img = Image.open(img_path)
    # Define img preprocessing
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    # Transform image & add new axis 0
    img_tensor = transform(img).unsqueeze(0)
    if use_cuda:
        img_tensor = img_tensor.cuda()
    out = VGG16(img_tensor)
    _, pred = torch.max(out, 1)

    return pred.item() # predicted class index

VGG16_predict(dog_files_short[42])

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    # a dog is predicted only when the class index falls inside the inclusive
    # ImageNet dog range 151-268 (both bounds must hold, so this is an "and"
    # of the two comparisons, not an "or", which would always be True)
    pred = VGG16_predict(img_path)
    return 151 <= pred <= 268

dog_detector(dog_files_short[42])

###Output
_____no_output_____
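###Markdown
Before running the detector over whole datasets, a tiny hedged sanity check of the inclusive range logic, using plain integers so no model call is needed:

###Code
# the boundary indices 151 and 268 must count as dogs; 150 and 269 must not
for idx, expected in [(150, False), (151, True), (268, True), (269, False)]:
    assert (151 <= idx <= 268) == expected
print('range check passed')

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.  - What percentage of the images in `human_files_short` have a detected dog?  - What percentage of the images in `dog_files_short` have a detected dog?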
__Answer:__ 0% of the images in `human_files_short` detected a dog, whereas 100% of the images in `dog_files_short` had a dog detected.

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
from tqdm import tqdm

dogs_detected_in_human_data = np.sum([dog_detector(img) for img in tqdm(human_files_short)])
dogs_detected_in_dog_data = np.sum([dog_detector(img) for img in tqdm(dog_files_short)])

print(f'Dogs detected in human files: {dogs_detected_in_human_data}%')
print(f'Dogs detected in dog files: {dogs_detected_in_dog_data}%')

###Output
0%|          | 0/100 [00:00<?, ?it/s]

###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.
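Before filling in the TODO cell further below, a minimal hedged sketch of what such loaders could look like (the 224x224 crop, ImageNet normalization, and batch size of 20 are illustrative assumptions, not requirements):

###Code
import torch
from torchvision import datasets, transforms

# resize, center-crop, and normalize with the usual ImageNet statistics
sketch_transform = transforms.Compose([transforms.Resize(256),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])

# one ImageFolder/DataLoader pair per split; only the training split is shuffled
sketch_loaders = {
    split: torch.utils.data.DataLoader(
        datasets.ImageFolder('dogImages/' + split, transform=sketch_transform),
        batch_size=20, shuffle=(split == 'train'))
    for split in ('train', 'valid', 'test')}

###Output
_____no_output_____

###Markdown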
If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture

if use_cuda:
    model_transfer = model_transfer.cuda()

###Output
_____no_output_____

###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = None
optimizer_transfer = None

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model (uncomment the line below once the pieces above are filled in)
# model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None

###Output
_____no_output_____

###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
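As a starting point, a minimal sketch of the dispatch logic described above might look like the following (this assumes the `face_detector`, `dog_detector`, and `predict_breed_transfer` functions from the earlier steps; the message strings are illustrative only). Checking for a dog first avoids mislabeling dog photos in which the face detector fires spuriously.

###Code
# one possible control flow for the three cases: dog, human, neither
def run_app_sketch(img_path):
    if dog_detector(img_path):
        print('Dog detected! Predicted breed: {}'.format(predict_breed_transfer(img_path)))
    elif face_detector(img_path):
        print('Human detected! Resembling dog breed: {}'.format(predict_breed_transfer(img_path)))
    else:
        print('Error: neither a dog nor a human was detected in {}'.format(img_path))

###Output
_____no_output_____

###Markdown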
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    pass

###Output
_____no_output_____

###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)

###Output
_____no_output_____

###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app.
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road Ahead We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.

###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("/data/lfw/*/*"))
dog_files = np.array(glob("/data/dog_images/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))

###Output
There are 13233 total human images.
There are 8351 total dog images.

###Markdown
Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()

###Output
Number of faces detected: 1

###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.

###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.  - What percentage of the first 100 images in `human_files` have a detected human face?  - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ The percentage of detected human faces in the human files is 98%. The percentage of (spuriously) detected human faces in the dog files is 17%.

###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line.
#-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short
human_face_in_human = [face_detector(img) for img in tqdm(human_files_short)]
human_face_in_dog = [face_detector(img) for img in tqdm(dog_files_short)]

print("percentage of detected human faces in human files is {}%".format(sum(human_face_in_human)))
print("percentage of detected human faces in dog files is {}%".format(sum(human_face_in_dog)))

###Output
100%|██████████| 100/100 [00:03<00:00, 27.27it/s]
100%|██████████| 100/100 [00:39<00:00,  2.55it/s]

###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output
_____no_output_____

###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path)
    # resize to the 224x224 input size expected by VGG-16, then normalize
    # with the ImageNet statistics
    data_transforms = transforms.Compose([transforms.Resize(size=(224, 224)),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])
    img_preprocess = data_transforms(img)
    img_preprocess.unsqueeze_(0)
    if use_cuda:
        img_preprocess = img_preprocess.cuda()
    net = VGG16(img_preprocess)
    return torch.max(net, 1)[1].item() # predicted class index

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    prediction = VGG16_predict(img_path)
    return (151 <= prediction <= 268) # true/false

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.  - What percentage of the images in `human_files_short` have a detected dog?  - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ The percentage of images with a detected dog in the human files is 1%. The percentage of images with a detected dog in the dog files is 100%.

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

dog_face_in_human_files = [dog_detector(img) for img in tqdm(human_files_short)]
dog_face_in_dog_files = [dog_detector(img) for img in tqdm(dog_files_short)]

print("Percentage of detected dog face in human files is {}%".format(np.sum(dog_face_in_human_files)))
print("Percentage of detected dog face in dog files is {}%".format(np.sum(dog_face_in_dog_files)))

###Output
100%|██████████| 100/100 [00:04<00:00, 24.52it/s]
100%|██████████| 100/100 [00:05<00:00, 18.64it/s]

###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds.
You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import transforms,datasets
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
batch_size = 20
num_workers = 2

data_transforms = transforms.Compose([transforms.Resize(224),
                                      transforms.RandomCrop(224),
                                      transforms.RandomHorizontalFlip(),
                                      transforms.RandomVerticalFlip(),
                                      #transforms.RandomAffine(20, translate=None, scale=None, shear=20, fillcolor=0),
                                      # transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])

test_transform = transforms.Compose(
    [transforms.Resize(224),
     transforms.RandomCrop(224),
     #transforms.Grayscale(3),
     transforms.ToTensor(),
     transforms.Normalize([0.485, 0.456, 0.406],
                          [0.229, 0.224, 0.225])]
)

data_dir = "/data/dog_images"
train_dir = os.path.join(data_dir, 'train/')
valid_dir = os.path.join(data_dir, 'valid/')
test_dir = os.path.join(data_dir, 'test/')

train_data = datasets.ImageFolder(train_dir, transform=data_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=data_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transform)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle = True, num_workers = num_workers)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle = True, num_workers = num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers = num_workers)

loaders_scratch = {
    'train': train_loader,
    'valid': valid_loader,
    'test': test_loader
}

###Output
_____no_output_____

###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)?  What size did you pick for the input tensor, and why?- Did you decide to augment the dataset?  If so, how (through translations, flips, rotations, etc)?  If not, why not? **Answer**: I decided to resize the images to 224, as it is the appropriate input size for the CNN architecture. I augmented the dataset with a random crop and random horizontal and vertical flips. I thought that changing the color saturation and applying affine transformations might also increase the test accuracy, but they did not, so I limited the data augmentation to the transforms above for the training and validation datasets; for the test dataset, I only applied the random crop. I set the number of workers to 2 to reduce the computing time.
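A quick, hedged sanity check on the pipeline above - pulling a single batch and confirming the tensor shapes (this only assumes the `train_loader` and `batch_size` defined in the cell above):

###Code
# one batch from the training loader: images should be batch_size x 3 x 224 x 224
images, labels = next(iter(train_loader))
print(images.shape)  # expected: torch.Size([20, 3, 224, 224])
print(labels.shape)  # expected: torch.Size([20])

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.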
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # five convolutional layers; 3x3 kernels without padding, so each conv trims 2 pixels
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.conv4 = nn.Conv2d(64, 128, 3)
        self.conv5 = nn.Conv2d(128, 256, 3)
        self.pool = nn.MaxPool2d(2, 2)
        # batch normalization applied after each conv/pool block
        self.conv_bn2 = nn.BatchNorm2d(16)
        self.conv_bn3 = nn.BatchNorm2d(32)
        self.conv_bn4 = nn.BatchNorm2d(64)
        self.conv_bn5 = nn.BatchNorm2d(128)
        self.conv_bn6 = nn.BatchNorm2d(256)
        self.fc1 = nn.Linear(5 * 5 * 256, 500)
        self.fc2 = nn.Linear(500, 133)
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        ## Define forward behavior
        # each block: convolution -> ReLU -> max pool -> batch norm
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = self.conv_bn2(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = self.conv_bn3(x)
        x = F.relu(self.conv3(x))
        x = self.pool(x)
        x = self.conv_bn4(x)
        x = F.relu(self.conv4(x))
        x = self.pool(x)
        x = self.conv_bn5(x)
        x = F.relu(self.conv5(x))
        x = self.pool(x)
        x = self.conv_bn6(x)
        # flatten image input
        x = x.view(-1, 5 * 5 * 256)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # add last hidden layer
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__ I use five convolutional layers, and the performance of every layer is enhanced with batch normalization. Dropout is used around the fully connected layers to prevent overfitting while training the model, and batch normalization serves the same purpose for the convolutional layers. The channel count starts small, from the 3-channel input up to 16 filters, and grows to 256. I then use two fully connected layers with a dropout probability of 0.3. ReLU is the activation function for all layers except the last fully connected layer. Each convolutional block is built in the same order: convolution, ReLU activation, max pooling, then batch normalization.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
from torch import optim

criterion_scratch = nn.CrossEntropyLoss()
learning_rate = 0.0001
# factor applied to the learning rate each time the scheduler fires (one tenth of a decade)
reduction = 1/(10**(1./10))
optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_scratch, factor=reduction, patience=1, min_lr=0.000000001, cooldown=1)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
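First, though, a quick sanity check (a minimal sketch, assuming the `Net` class defined above): passing a dummy 224x224 tensor through the convolutional stack confirms the 5 x 5 x 256 feature map that `fc1` expects.

###Code
# Sketch: five conv(3x3, no padding) + pool(2x2) blocks shrink a 224x224
# input as 224->222->111->109->54->52->26->24->12->10->5, so the flattened
# size is 5 * 5 * 256, matching fc1.
check_net = Net()
check_net.eval()  # use batch-norm running statistics for this shape check
x = torch.randn(1, 3, 224, 224)
blocks = [(check_net.conv1, check_net.conv_bn2),
          (check_net.conv2, check_net.conv_bn3),
          (check_net.conv3, check_net.conv_bn4),
          (check_net.conv4, check_net.conv_bn5),
          (check_net.conv5, check_net.conv_bn6)]
with torch.no_grad():
    for conv, bn in blocks:
        x = bn(check_net.pool(F.relu(conv(x))))
print(x.shape)  # expected: torch.Size([1, 256, 5, 5])

###Output
_____no_output_____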
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update training loss train_loss += loss.item()*data.size(0) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # update average validation loss valid_loss += loss.item()*data.size(0) # calculate average losses train_loss = train_loss/len(train_loader.dataset) valid_loss = valid_loss/len(valid_loader.dataset) #reduce learning rate scheduler.step(valid_loss) for param_group in optimizer.param_groups: print('learning rate: ' + str(param_group['lr'])) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(30, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output learning rate: 0.0001 Epoch: 1 Training Loss: 2.294186 Validation Loss: 3.089329 Validation loss decreased (inf --> 3.089329). Saving model ... learning rate: 0.0001 Epoch: 2 Training Loss: 2.276147 Validation Loss: 3.059518 Validation loss decreased (3.089329 --> 3.059518). Saving model ... learning rate: 0.0001 Epoch: 3 Training Loss: 2.185283 Validation Loss: 3.135017 learning rate: 0.0001 Epoch: 4 Training Loss: 2.129291 Validation Loss: 3.053269 Validation loss decreased (3.059518 --> 3.053269). Saving model ... learning rate: 0.0001 Epoch: 5 Training Loss: 2.078873 Validation Loss: 3.065223 learning rate: 0.0001 Epoch: 6 Training Loss: 2.017567 Validation Loss: 3.022861 Validation loss decreased (3.053269 --> 3.022861). Saving model ... learning rate: 0.0001 Epoch: 7 Training Loss: 1.989338 Validation Loss: 3.021452 Validation loss decreased (3.022861 --> 3.021452). Saving model ... 
learning rate: 0.0001
Epoch: 8 	Training Loss: 1.911439 	Validation Loss: 2.999404
Validation loss decreased (3.021452 --> 2.999404). Saving model ...
learning rate: 0.0001
Epoch: 9 	Training Loss: 1.887812 	Validation Loss: 3.049808
learning rate: 0.0001
Epoch: 10 	Training Loss: 1.809728 	Validation Loss: 2.991976
Validation loss decreased (2.999404 --> 2.991976). Saving model ...
learning rate: 0.0001
Epoch: 11 	Training Loss: 1.773462 	Validation Loss: 3.013681
learning rate: 0.0001
Epoch: 12 	Training Loss: 1.718708 	Validation Loss: 2.955194
Validation loss decreased (2.991976 --> 2.955194). Saving model ...
learning rate: 0.0001
Epoch: 13 	Training Loss: 1.676528 	Validation Loss: 2.943834
Validation loss decreased (2.955194 --> 2.943834). Saving model ...
learning rate: 0.0001
Epoch: 14 	Training Loss: 1.640356 	Validation Loss: 2.908609
Validation loss decreased (2.943834 --> 2.908609). Saving model ...
learning rate: 0.0001
Epoch: 15 	Training Loss: 1.592916 	Validation Loss: 2.911945
learning rate: 7.943282347242815e-05
Epoch: 16 	Training Loss: 1.538373 	Validation Loss: 2.913129
learning rate: 7.943282347242815e-05
Epoch: 17 	Training Loss: 1.500865 	Validation Loss: 2.912822
learning rate: 7.943282347242815e-05
Epoch: 18 	Training Loss: 1.429383 	Validation Loss: 2.981903
learning rate: 6.309573444801932e-05
Epoch: 19 	Training Loss: 1.399902 	Validation Loss: 2.970947
learning rate: 6.309573444801932e-05
Epoch: 20 	Training Loss: 1.356544 	Validation Loss: 2.925435
learning rate: 6.309573444801932e-05
Epoch: 21 	Training Loss: 1.342978 	Validation Loss: 2.975042
learning rate: 5.0118723362727224e-05
Epoch: 22 	Training Loss: 1.305417 	Validation Loss: 2.911434
learning rate: 5.0118723362727224e-05
Epoch: 23 	Training Loss: 1.272479 	Validation Loss: 2.940586
learning rate: 5.0118723362727224e-05
Epoch: 24 	Training Loss: 1.234848 	Validation Loss: 2.939387
learning rate: 3.981071705534972e-05
Epoch: 25 	Training Loss: 1.208341 	Validation Loss: 2.910188
learning rate: 3.981071705534972e-05
Epoch: 26 	Training Loss: 1.151844 	Validation Loss: 2.984124
learning rate: 3.981071705534972e-05
Epoch: 27 	Training Loss: 1.134362 	Validation Loss: 2.917829
learning rate: 3.162277660168379e-05
Epoch: 28 	Training Loss: 1.141601 	Validation Loss: 2.910304
learning rate: 3.162277660168379e-05
Epoch: 29 	Training Loss: 1.095747 	Validation Loss: 2.883897
Validation loss decreased (2.908609 --> 2.883897). Saving model ...
learning rate: 3.162277660168379e-05
Epoch: 30 	Training Loss: 1.082809 	Validation Loss: 2.900528
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code
def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
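    # switch to evaluation mode so that dropout is disabled and batch
    # normalization uses its running statistics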
    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output
Test Loss: 2.815560


Test Accuracy: 31% (262/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)

You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders
import os
from torchvision import transforms, datasets

data_dir = "/data/dog_images"
train_dir = os.path.join(data_dir, 'train/')
valid_dir = os.path.join(data_dir, 'valid/')
test_dir = os.path.join(data_dir, 'test/')

train_data = datasets.ImageFolder(train_dir, transform=data_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=data_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transform)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)

loaders_transfer = {
    'train': train_loader,
    'valid': valid_loader,
    'test': test_loader
}

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture

Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.densenet161(pretrained=True)

if use_cuda:
    model_transfer = model_transfer.cuda()
print(model_transfer)

# freeze the pretrained feature extractor; the classifier attached below
# is created afterwards and therefore remains trainable
for param in model_transfer.parameters():
    param.requires_grad = False

from collections import OrderedDict
fc = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(2208, 1104)),
    ('relu', nn.ReLU()),
    ('dropout', nn.Dropout(0.5)),
    ('fc2', nn.Linear(1104, 133)),
    # note: nn.CrossEntropyLoss (used below) applies log-softmax internally as well
    ('output', nn.LogSoftmax(dim=1))
]))

model_transfer.classifier = fc

###Output
/opt/conda/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/densenet.py:212: UserWarning: nn.init.kaiming_normal is now deprecated in favor of nn.init.kaiming_normal_.
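###Markdown
As a quick check (a minimal sketch, assuming the `model_transfer` built above), counting parameters confirms that only the newly attached classifier remains trainable after the DenseNet feature extractor is frozen:

###Code
# Sketch: the frozen feature extractor should dominate the parameter count,
# while only the new classifier head reports requires_grad=True.
n_frozen = sum(p.numel() for p in model_transfer.parameters() if not p.requires_grad)
n_trainable = sum(p.numel() for p in model_transfer.parameters() if p.requires_grad)
print('frozen parameters:    {}'.format(n_frozen))
print('trainable parameters: {}'.format(n_trainable))

###Output
_____no_output_____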
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.

__Answer:__ I used the exact same transfer-learning setup that I submitted for the flower classification project in the last PyTorch challenge. While experimenting on those flowers with 102 classes, I found that the classifier head should be kept to two fully connected layers and that the dropout probability should not exceed 0.5: dropout discards nodes at random, so the higher the probability, the higher the chance that important nodes are discarded. I wanted to test the same model on this type of data and see the results, so I also reused the same criterion, optimizer, and data transforms as my scratch model to see how the results compare.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
from torch import optim

criterion_transfer = nn.CrossEntropyLoss()
learning_rate = 0.0001
reduction = 1/(10**(1./10))
# optimize only the (unfrozen) classifier parameters
optimizer_transfer = optim.Adam(model_transfer.classifier.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer_transfer, factor=reduction, patience=1, min_lr=0.000000001, cooldown=1)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model
model_transfer = train(20, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output
learning rate: 0.0001
Epoch: 1 	Training Loss: 3.240320 	Validation Loss: 1.252229
Validation loss decreased (inf --> 1.252229). Saving model ...
learning rate: 0.0001
Epoch: 2 	Training Loss: 0.932457 	Validation Loss: 0.853267
Validation loss decreased (1.252229 --> 0.853267). Saving model ...
learning rate: 0.0001
Epoch: 3 	Training Loss: 0.540245 	Validation Loss: 0.691326
Validation loss decreased (0.853267 --> 0.691326). Saving model ...
learning rate: 0.0001
Epoch: 4 	Training Loss: 0.364930 	Validation Loss: 0.669626
Validation loss decreased (0.691326 --> 0.669626). Saving model ...
learning rate: 0.0001
Epoch: 5 	Training Loss: 0.262106 	Validation Loss: 0.632950
Validation loss decreased (0.669626 --> 0.632950). Saving model ...
learning rate: 0.0001
Epoch: 6 	Training Loss: 0.205965 	Validation Loss: 0.677519
learning rate: 0.0001
Epoch: 7 	Training Loss: 0.167186 	Validation Loss: 0.632634
Validation loss decreased (0.632950 --> 0.632634). Saving model ...
learning rate: 0.0001
Epoch: 8 	Training Loss: 0.159708 	Validation Loss: 0.659448
learning rate: 7.943282347242815e-05
Epoch: 9 	Training Loss: 0.122318 	Validation Loss: 0.814138
learning rate: 7.943282347242815e-05
Epoch: 10 	Training Loss: 0.086469 	Validation Loss: 0.563190
Validation loss decreased (0.632634 --> 0.563190). Saving model ...
learning rate: 7.943282347242815e-05
Epoch: 11 	Training Loss: 0.059396 	Validation Loss: 0.666236
learning rate: 6.309573444801932e-05
Epoch: 12 	Training Loss: 0.057601 	Validation Loss: 0.639387
learning rate: 6.309573444801932e-05
Epoch: 13 	Training Loss: 0.046728 	Validation Loss: 0.590049
learning rate: 6.309573444801932e-05
Epoch: 14 	Training Loss: 0.035784 	Validation Loss: 0.587298
learning rate: 5.0118723362727224e-05
Epoch: 15 	Training Loss: 0.045459 	Validation Loss: 0.700204
learning rate: 5.0118723362727224e-05
Epoch: 16 	Training Loss: 0.044054 	Validation Loss: 0.549058
Validation loss decreased (0.563190 --> 0.549058). Saving model ...
learning rate: 5.0118723362727224e-05
Epoch: 17 	Training Loss: 0.026989 	Validation Loss: 0.590972
learning rate: 3.981071705534972e-05
Epoch: 18 	Training Loss: 0.025702 	Validation Loss: 0.559347
learning rate: 3.981071705534972e-05
Epoch: 19 	Training Loss: 0.021097 	Validation Loss: 0.524101
Validation loss decreased (0.549058 --> 0.524101). Saving model ...
learning rate: 3.981071705534972e-05
Epoch: 20 	Training Loss: 0.021157 	Validation Loss: 0.571843
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output
Test Loss: 0.613859


Test Accuracy: 85% (716/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model

Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

data_transfer = {'train': train_data,
                 'valid': valid_data,
                 'test': test_data}

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    img = Image.open(img_path)
    data_transforms = transforms.Compose([transforms.Resize(size=(244,244)),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])
    img_preprocess = data_transforms(img)
    img_preprocess.unsqueeze_(0)
    image = img_preprocess.cuda() if use_cuda else img_preprocess
    # disable dropout for inference
    model_transfer.eval()
    output = model_transfer(image)
    _, prediction = torch.max(output.data, 1)
    # class indices from ImageFolder are 0-based, so they index class_names directly
    predict_breed = class_names[prediction]
    return predict_breed

###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm

Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
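## Outline of the logic below: if either a human face or a dog is detected,
## report the breed predicted by the transfer-learned CNN; otherwise report
## that neither was found in the image.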
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if face_detector(img_path) or dog_detector(img_path):
        print("Your predicted breed is")
        return predict_breed_transfer(img_path)
    else:
        return "no dogs or humans in the image"

###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm

In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!

Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ Yes, it is better than expected. Three improvements we could make are: 1) increase the number of training epochs: I set it to only 20, and even with 20 the model achieves 85% test accuracy, so I think it is already a good model; 2) try a different pretrained model, such as ResNet; 3) try a different optimizer and criterion; modifying the convolutional layers of the model might also increase the accuracy.

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
fig_index = 0
for file in np.hstack((human_files[5:8], dog_files[10:13])):
    predict_breed = run_app(file)
    fig_index += 1
    img = np.asarray(Image.open(file))
    fig = plt.figure(figsize=(16,4))
    ax = fig.add_subplot(1,2,1)
    ax.imshow(img)
    plt.axis('off')
    print('{}. '.format(predict_breed))

###Output
Your predicted breed is
American staffordshire terrier. 
Your predicted breed is
Doberman pinscher. 
Your predicted breed is
Doberman pinscher. 
Your predicted breed is
Manchester terrier. 
Your predicted breed is
Manchester terrier. 
Your predicted breed is
Manchester terrier. 
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App

---

In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header.
Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.

> **Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.

The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.

--- Why We're Here

In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).

![Sample Dog Output](images/sample_dog_output.png)

In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!

The Road Ahead

We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Import Datasets
* [Step 1](#step1): Detect Humans
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Write your Algorithm
* [Step 6](#step6): Test Your Algorithm

--- Step 0: Import Datasets

Make sure that you've downloaded the required human and dog datasets:
* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`.
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.

*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*

In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
#####################################################################################################################
###################################################### Imports ######################################################
#####################################################################################################################
import numpy as np
from glob import glob
import cv2
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import torchvision.models as models
from PIL import Image
import os
from torchvision import datasets
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))

######################################################################################################################
################################################ Parameter definition ################################################
######################################################################################################################
p_test_dog='dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'  # test image of a dog
p_data_dir = "dogImages"  # data directory
p_lr=0.001  # learning rate
p_epochs= 100  # epochs for training (initial value: 100)
p_stop_train=5  # training is stopped when this many epochs pass with no improvement

###Output
There are 13233 total human images.
There are 8351 total dog images.
###Markdown
Step 1: Detect Humans

In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.

###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()

###Output
Number of faces detected: 1
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter.

In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face.
Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.

Write a Human Face Detector

We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.

###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector

__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?

Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.

__Answer:__ 96.0% of the first 100 human images have a detected human face, while 18.0% of the first 100 dog images are (incorrectly) detected as containing a human face; see the printed results below.

###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
correct = 0
for file in human_files_short:
    if face_detector(file):
        correct += 1
perc_correct = correct/len(human_files_short)*100
print("Correct detected humans: {}%".format(perc_correct))

wrong = 0
for file in dog_files_short:
    if face_detector(file):
        wrong += 1
perc_wrong = wrong/len(dog_files_short)*100
print("Dogs detected as humans: {}%".format(perc_wrong))

###Output
Correct detected humans: 96.0%
Dogs detected as humans: 18.0%
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs

In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.

Obtain Pre-trained VGG-16 Model

The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks.
ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    device = 'cuda:0'
    print('Training on GPU')
else:
    device = 'cpu'
    print('Training on CPU')

VGG16 = VGG16.to(device)

###Output
Training on GPU
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.

(IMPLEMENTATION) Making Predictions with a Pre-trained Model

In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.

Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    transform = transforms.Compose([transforms.Resize(255),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])])
    img = Image.open(img_path).convert('RGB')
    img_tensor = transform(img)[:3,:,:].unsqueeze(0)

    VGG16.eval()
    # move input data to GPU, if available
    img_tensor = img_tensor.to(device)
    output = VGG16(img_tensor)
    # move output data to cpu
    output = output.to('cpu')
    # convert probabilities to class prediction
    _, preds_tensor = torch.max(output, 1)
    pred = np.squeeze(preds_tensor.numpy())

    return pred  # predicted class index

print("predicted class id : ", VGG16_predict(p_test_dog))

###Output
predicted class id :  252
###Markdown
(IMPLEMENTATION) Write a Dog Detector

While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).

Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
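    # ImageNet class indices 151-268 (inclusive) correspond to the dog
    # categories, from 'Chihuahua' to 'Mexican hairless' (see the dictionary
    # linked above)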
    predclassid = VGG16_predict(img_path)
    return (151 <= predclassid <= 268)  # True if the predicted class is a dog breed

dog_detector(p_test_dog)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector

__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__ None (0.0%) of the human images are detected as dogs, while 93.0% of the dog images are correctly detected as dogs; see the printed results below.

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
wrong = 0
for file in human_files_short:
    if dog_detector(file):
        wrong += 1
perc_wrong = wrong/len(human_files_short)*100
print("Humans detected as dogs: {}%".format(perc_wrong))

correct = 0
for file in dog_files_short:
    if dog_detector(file):
        correct += 1
perc_correct = correct/len(dog_files_short)*100
print("Dogs detected as dogs: {}%".format(perc_correct))

###Output
Humans detected as dogs: 0.0%
Dogs detected as dogs: 93.0%
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)

Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany | Welsh Springer Spaniel
- | -

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever | American Water Spaniel
- | -

Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.

Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code
import os
from torchvision import datasets
import torchvision.transforms as transforms

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
train_transform = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.RandomHorizontalFlip(),
                                      transforms.RandomVerticalFlip(),
                                      transforms.RandomRotation(10),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])

test_transform = transforms.Compose([transforms.Resize(255),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor(),
                                     transforms.Normalize([0.485, 0.456, 0.406],
                                                          [0.229, 0.224, 0.225])])

train_data = datasets.ImageFolder(p_data_dir + '/train', transform=train_transform)
valid_data = datasets.ImageFolder(p_data_dir + '/valid', transform=test_transform)
test_data = datasets.ImageFolder(p_data_dir + '/test', transform=test_transform)

trainloader = torch.utils.data.DataLoader(train_data, batch_size=16, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=8)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=8)

# create one dictionary for all data loaders
loaders_scratch = {}
loaders_scratch['train'] = trainloader
loaders_scratch['valid'] = validloader
loaders_scratch['test'] = testloader

###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**: I reuse the data transformations required by the VGG-16 network in the next step; therefore the final size of the resized and center-cropped picture has to be 224x224 pixels. I decided to augment the training dataset (with flips and a small rotation) to increase the variation of the training data. Since the validation and test data are not used to update the model (no backpropagation runs on them), I don't augment them.

(IMPLEMENTATION) Model Architecture

Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        #torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True)
        # Convolutional layers (padding=1 keeps the spatial size unchanged)
        self.conv1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
        # conv4 was defined during experimentation; it is not used in forward below
        self.conv4 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
        # Pooling layers
        self.pool1 = nn.MaxPool2d(2, 2)
        self.pool2 = nn.MaxPool2d(2, 2)
        # Fully connected layers
        self.fc1 = nn.Linear(28 * 28 * 64, 2000)
        self.fc3 = nn.Linear(2000, 133)
        # Dropout (20%)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        ### Define forward behavior
        ## Conv + Pooling
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool1(x)
        x = F.relu(self.conv3(x))
        #x = self.pool2(x)
        #x = F.relu(self.conv4(x))
        x = self.pool2(x)
        # flatten image input --> 28 * 28 * 64 = 50176
        x = x.view(-1, 28 * 28 * 64)
        ## Fully Connected Layer
        # fully connected layer with dropout
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.fc3(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__ The model consists of three convolutional layers, each followed by a max-pooling layer; since the padded convolutions preserve the spatial size, the three poolings take 224 down to 112, 56, and finally 28, giving the flattened 28 * 28 * 64 input to `fc1`. The last two layers are fully connected, with a dropout of 20% as a precaution against overfitting. I reused the architecture that was shown in one of the training videos; the task there was similar to this one and achieved good predictions. I then played around with the layer sizes and the learning rate to reach the requested prediction accuracy of at least 10%.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.005, momentum=0.9)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize values
    valid_loss_min = np.Inf
    epoch = 0
    no_improve = 0

    #for epoch in range(1, n_epochs+1):
    while epoch < n_epochs and no_improve < p_stop_train:
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for data, target in loaders['train']:
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            # -----------------------------
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update training loss (accumulated as a running sum over samples)
            train_loss += loss.item()*data.size(0)
            # -----------------------------

        ######################
        # validate the model #
        ######################
        model.eval()
        for data, target in loaders['valid']:
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            # -----------------------------
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # update validation loss (also a running sum)
            valid_loss += loss.item() * data.size(0)
            # -----------------------------

        # -----------------------------
        # calculate average losses (left disabled, so the values below are printed as sums)
        #train_loss = train_loss / len(loaders['train'].dataset)
        #valid_loss = valid_loss / len(loaders['valid'].dataset)
        # -----------------------------

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.2f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, valid_loss))

        ## TODO: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.2f} --> {:.2f}). Saving model ...'.format(valid_loss_min, valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss
            no_improve = 0
        else:
            no_improve += 1
        epoch += 1

    # return trained model
    return model

# train the model
model_scratch = train(p_epochs, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code
def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
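    # evaluation mode: disables dropout so the predictions are deterministic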
    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output
Test Loss: 4.132021


Test Accuracy: 10% (90/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)

You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders
loaders_transfer = loaders_scratch

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture

Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
# Load the pretrained model from pytorch
model_transfer = models.vgg16(pretrained=True)

# print out the model structure
print(model_transfer)

n_inputs = model_transfer.classifier[6].in_features
print('number of inputs for last layer: {} '.format(n_inputs))
n_outputs = model_transfer.classifier[6].out_features
print('initial number of outputs for last layer: {} '.format(n_outputs))

n_outputs = len(train_data.classes)  # 133
model_transfer.classifier[6] = nn.Linear(4096, n_outputs, bias=True)
print('redefined number of outputs for last layer: {} '.format(n_outputs))

# Freeze training for all "features" layers
for param in model_transfer.features.parameters():
    param.requires_grad = False

if use_cuda:
    model_transfer = model_transfer.cuda()

###Output
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace)
    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (6): ReLU(inplace)
    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace)
    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace)
    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU(inplace)
    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace)
    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (18): ReLU(inplace)
    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (20): ReLU(inplace)
    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (22): ReLU(inplace)
    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (25): ReLU(inplace)
    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (27): ReLU(inplace)
    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (29): ReLU(inplace)
    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace)
    (2): Dropout(p=0.5)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace)
    (5): Dropout(p=0.5)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)
number of inputs for last layer: 4096 
initial number of outputs for last layer: 1000 
redefined number of outputs for last layer: 133 
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.

__Answer:__ I chose the VGG-16 architecture because it is a network pretrained on ImageNet images, whose classes include over 100 dog breeds. Initially the architecture has 1000 outputs in its final layer, so that layer needs to be replaced to produce the 133 outputs required by the current task.
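As a quick check (a minimal sketch, assuming the reshaped `model_transfer` above), passing a dummy input through the network should now yield one score per breed:

###Code
# Sketch: after replacing the final layer, the network should emit 133
# scores, one per dog breed, for any 224x224 input image.
model_transfer.eval()  # deterministic forward pass for the check
dummy = torch.randn(1, 3, 224, 224).to(device)
with torch.no_grad():
    out = model_transfer(dummy)
print(out.shape)  # expected: torch.Size([1, 133])

###Output
_____no_output_____
###Markdown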
(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
# only the classifier parameters are optimized; the "features" layers stay frozen
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=p_lr, momentum=0.9)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model
model_transfer = train(p_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer_pretrained.pt')

# load the model that got the best validation accuracy
model_transfer.load_state_dict(torch.load('model_transfer_pretrained.pt'))

###Output
Epoch: 0 	Training Loss: 16051.07 	Validation Loss: 661.051850
Validation loss decreased (inf --> 661.05). Saving model ...
Epoch: 1 	Training Loss: 9508.42 	Validation Loss: 565.199376
Validation loss decreased (661.05 --> 565.20). Saving model ...
Epoch: 2 	Training Loss: 7691.27 	Validation Loss: 512.883180
Validation loss decreased (565.20 --> 512.88). Saving model ...
Epoch: 3 	Training Loss: 6815.04 	Validation Loss: 472.749627
Validation loss decreased (512.88 --> 472.75). Saving model ...
Epoch: 4 	Training Loss: 5915.15 	Validation Loss: 448.830218
Validation loss decreased (472.75 --> 448.83). Saving model ...
Epoch: 5 	Training Loss: 5339.24 	Validation Loss: 432.736480
Validation loss decreased (448.83 --> 432.74). Saving model ...
Epoch: 6 	Training Loss: 5085.72 	Validation Loss: 415.819026
Validation loss decreased (432.74 --> 415.82). Saving model ...
Epoch: 7 	Training Loss: 4396.48 	Validation Loss: 408.342283
Validation loss decreased (415.82 --> 408.34). Saving model ...
Epoch: 8 	Training Loss: 4267.36 	Validation Loss: 430.459420
Epoch: 9 	Training Loss: 3817.60 	Validation Loss: 389.276640
Validation loss decreased (408.34 --> 389.28). Saving model ...
Epoch: 10 	Training Loss: 3513.62 	Validation Loss: 392.234786
Epoch: 11 	Training Loss: 3299.39 	Validation Loss: 391.048179
Epoch: 12 	Training Loss: 2977.73 	Validation Loss: 415.760144
Epoch: 13 	Training Loss: 2707.91 	Validation Loss: 409.998693
Epoch: 14 	Training Loss: 2701.21 	Validation Loss: 414.104360
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output
Test Loss: 0.538066


Test Accuracy: 83% (701/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model

Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
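# folder names look like '001.Affenpinscher', so item[4:] below strips the
# numeric prefix, and underscores are replaced with spaces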
a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in train_data.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    transform = transforms.Compose([transforms.Resize(255),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])])
    img = Image.open(img_path).convert('RGB')
    img_tensor = transform(img)[:3,:,:].unsqueeze(0)
    # move model inputs to cuda, if GPU available
    if use_cuda:
        img_tensor = img_tensor.cuda()
    # get sample outputs
    output = model_transfer(img_tensor)
    # convert output probabilities to predicted class
    _, preds_tensor = torch.max(output, 1)
    pred = np.squeeze(preds_tensor.numpy()) if not use_cuda else np.squeeze(preds_tensor.cpu().numpy())
    print(pred)
    return class_names[pred]

def display_image(img_path):
    image = Image.open(img_path)
    plt.imshow(image)
    plt.show()
###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    # check for human faces:
    if (face_detector(img_path)):
        print("hello human!")
        predicted_breed = predict_breed_transfer(img_path)
        display_image(img_path)
        print("You look like a ...")
        print(predicted_breed)
    # check if image has dogs:
    elif dog_detector(img_path):
        print("hello dog!")
        predicted_breed = predict_breed_transfer(img_path)
        display_image(img_path)
        print("Your breed is most likely ...")
        print(predicted_breed)
    # otherwise
    else:
        print("No dog or human face was detected in the image.")
        display_image(img_path)
        print("Try another!")
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement) It was a lot of fun to work through this notebook. The accuracy is greater than 80%, much better than requested. Nevertheless, to improve this even more I'd:
1) increase the number of epochs,
2) decrease the learning rate to find a better minimum,
3) increase the augmentation, e.g. the rotation angle, to have a bigger "range" of training pictures (see the sketch below).
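For point 3, a stronger augmentation pipeline might look like the following sketch (the wider rotation and the `ColorJitter` settings are illustrative values, not the ones used for training above):
###Code
import torchvision.transforms as transforms

# hypothetical, more aggressive augmentation for the training set
stronger_train_transform = transforms.Compose([
    transforms.RandomRotation(30),  # wider angle range than before
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
###Output
_____no_output_____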
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
## suggested code, below
for file in np.hstack((human_files[10:13], dog_files[:3])):
    run_app(file)
###Output
hello human!
7
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.
--- Why We're Here
In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
![Sample Dog Output](images/sample_dog_output.png)
In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!
The Road Ahead
We break the notebook into separate steps.
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob import torch # check if CUDA is available use_cuda = torch.cuda.is_available() # load filenames for human and dog images human_files = np.array(glob("D:/AI/data/lfw/*/*")) dog_files = np.array(glob("D:/AI/data/dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8019 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. 
The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.
Write a Human Face Detector
We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector
__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?
Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.
__Answer:__ Running the test below on the first 100 images of each set: 96% of the human images have a detected face, while 12% of the dog images also (incorrectly) trigger a face detection.
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
humans = []
dogs = []
for i in range(len(human_files_short)):
    humans.append(face_detector(human_files_short[i]))
    dogs.append(face_detector(dog_files_short[i]))
print("humans: ", np.mean(humans))
print("dogs: ", np.mean(dogs))
###Output
humans:  0.96
dogs:  0.12
###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path)
    transform = transforms.Compose([transforms.Resize([224, 224]),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    img = transform(img).unsqueeze_(0)
    if use_cuda:
        img = img.cuda()
    output = VGG16(img)
    return torch.argmax(output).cpu().numpy()  # predicted class index
###Output
_____no_output_____
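###Markdown
Before building the dog detector, the function can be spot-checked on one image from each dataset (a quick sketch; the indices are arbitrary and the printed class indices will vary with the images used):
###Code
# illustrative spot-check of VGG16_predict
print(VGG16_predict(dog_files[0]))    # some ImageNet index between 0 and 999
print(VGG16_predict(human_files[0]))
###Output
_____no_output_____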
###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    output = VGG16_predict(img_path)
    return output >= 151 and output <= 268
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__ Judging from the output below, 0% of human_files_short detected a dog and 100% of dog_files_short detected a dog.
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
humans = []
dogs = []
for i in range(100):
    humans.append(dog_detector(human_files[i]))
    dogs.append(dog_detector(dog_files[i]))
print("humans=", np.mean(humans))
print("dogs=", np.mean(dogs))
###Output
humans= 0.0
dogs= 1.0
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.
Brittany | Welsh Springer Spaniel
- | -
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Curly-Coated Retriever | American Water Spaniel
- | -
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets
import torchvision.transforms as transforms

# Problem image (error when processing) removed from training set
# D:/tmp/dogImages/train\098.Leonberger\Leonberger_06571.jpg

num_workers = 0
batch_size = 20
data_folder = 'D:/AI/data/dogImages'
loaders_scratch = {}

train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
val_test_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

train_data = datasets.ImageFolder(root=data_folder + "/train", transform=train_transform)
classes = train_data.classes
# Verify that number of classes match
assert(len(train_data.classes) == len(classes))
loaders_scratch['train'] = torch.utils.data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)

test_data = datasets.ImageFolder(root=data_folder + "/test", transform=val_test_transform)
# Verify that number of classes match
assert(len(test_data.classes) == len(classes))
loaders_scratch['test'] = torch.utils.data.DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)

valid_data = datasets.ImageFolder(root=data_folder + "/valid", transform=val_test_transform)
loaders_scratch['valid'] = torch.utils.data.DataLoader(dataset=valid_data, batch_size=batch_size, shuffle=False)
# Verify that number of classes match
assert(len(valid_data.classes) == len(classes))
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
**Answer**:
* The training set uses random resize and crop to help prevent overfitting.
* The test and validation sets are resized to the same 224 size without randomness so the testing can be consistent.
* Augment training data with random resizes, rotations, and random horizontal flips to help prevent overfitting.
* Normalize to make training more stable and help prevent vanishing gradient problems.
* There should be 117 classification output values corresponding to the 117 breeds of dogs.
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1_1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv1_bn = nn.BatchNorm2d(64)
        self.conv2_1 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv2_bn = nn.BatchNorm2d(128)
        self.conv3_1 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv3_bn = nn.BatchNorm2d(256)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=3)
        self.fc1 = nn.Linear(256 * 8 * 8, 2048)
        self.output = nn.Linear(2048, len(classes))
        self.dropout = nn.Dropout(p=0.25)

    def forward(self, x):
        x = F.relu(self.conv1_1(x))
        x = self.conv1_bn(x)
        x = self.pool(x)
        x = F.relu(self.conv2_1(x))
        x = self.conv2_bn(x)
        x = self.pool(x)
        x = F.relu(self.conv3_1(x))
        x = self.conv3_bn(x)
        x = self.pool(x)
        # Flatten image output
        x = x.view(-1, 256 * 8 * 8)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        return self.output(x)
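# Note on the flatten size above (explanatory comment): with 224x224 inputs, each
# MaxPool2d(kernel_size=3, stride=3) shrinks the spatial dimensions 224 -> 74 -> 24 -> 8,
# so the tensor reaching fc1 has 256 channels * 8 * 8 values per image,
# matching the nn.Linear(256 * 8 * 8, 2048) definition above.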
#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()
# torch.cuda.empty_cache()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__ Started experimenting with a small network. At first I was getting GPU memory errors because the data had too many dimensions. Added a few more convolutional layers, and added MaxPool and BatchNormalization to reduce the dimensionality. Added dropout to help prevent overfitting.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.0003)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            train_loss += ((1 / (batch_idx + 1)) * (loss.item() - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            output = model(data)
            loss = criterion(output, target)
            valid_loss += ((1 / (batch_idx + 1)) * (loss.item() - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, train_loss, valid_loss))

        ## TODO: save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min, valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

# train the model
model_scratch = train(50, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
Test Loss: 3.749319
Test Accuracy: 13% (68/505)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
###Code
## Specify data loaders
import os
from torchvision import datasets
import torchvision.transforms as transforms

# Problem image (error when processing) removed from training set
# D:/tmp/dogImages/train\098.Leonberger\Leonberger_06571.jpg

num_workers = 0
batch_size = 20
data_folder = 'D:/AI/data/dogImages'
loaders_transfer = {}

train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
val_test_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

train_data = datasets.ImageFolder(root=data_folder + "/train", transform=train_transform)
classes = train_data.classes
# Verify that number of classes match
assert(len(train_data.classes) == len(classes))
loaders_transfer['train'] = torch.utils.data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)

test_data = datasets.ImageFolder(root=data_folder + "/test", transform=val_test_transform)
# Verify that number of classes match
assert(len(test_data.classes) == len(classes))
loaders_transfer['test'] = torch.utils.data.DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)

valid_data = datasets.ImageFolder(root=data_folder + "/valid", transform=val_test_transform)
loaders_transfer['valid'] = torch.utils.data.DataLoader(dataset=valid_data, batch_size=batch_size, shuffle=False)
# Verify that number of classes match
assert(len(valid_data.classes) == len(classes))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture
Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
# define VGG16 model
model_transfer = models.vgg16(pretrained=True)

for param in model_transfer.features.parameters():
    param.requires_grad = False

model_transfer.classifier[6] = nn.Linear(4096, len(classes))

# move model to GPU if CUDA is available
if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.
__Answer:__ Loaded the VGG16 model and froze the feature layers that are already trained. Replaced the last layer of the classifier with a new Linear output layer that corresponds to the dog classes we are classifying. The VGG16 features have already been trained to identify many different kinds of images, including dogs. By modifying the output layer of the classifier and training the new model with dog images, it will be fine-tuned to identify dog breeds.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below.
[Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model n_epochs = 50 model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 4.482508 Validation Loss: 3.424904 Validation loss decreased (inf --> 3.424904). Saving model ... Epoch: 2 Training Loss: 3.291067 Validation Loss: 1.882516 Validation loss decreased (3.424904 --> 1.882516). Saving model ... Epoch: 3 Training Loss: 2.430144 Validation Loss: 1.159215 Validation loss decreased (1.882516 --> 1.159215). Saving model ... Epoch: 4 Training Loss: 1.977458 Validation Loss: 0.884509 Validation loss decreased (1.159215 --> 0.884509). Saving model ... Epoch: 5 Training Loss: 1.738807 Validation Loss: 0.768604 Validation loss decreased (0.884509 --> 0.768604). Saving model ... Epoch: 6 Training Loss: 1.648849 Validation Loss: 0.720428 Validation loss decreased (0.768604 --> 0.720428). Saving model ... Epoch: 7 Training Loss: 1.554362 Validation Loss: 0.643236 Validation loss decreased (0.720428 --> 0.643236). Saving model ... Epoch: 8 Training Loss: 1.469904 Validation Loss: 0.620629 Validation loss decreased (0.643236 --> 0.620629). Saving model ... Epoch: 9 Training Loss: 1.439359 Validation Loss: 0.603273 Validation loss decreased (0.620629 --> 0.603273). Saving model ... Epoch: 10 Training Loss: 1.382572 Validation Loss: 0.582407 Validation loss decreased (0.603273 --> 0.582407). Saving model ... Epoch: 11 Training Loss: 1.345227 Validation Loss: 0.568540 Validation loss decreased (0.582407 --> 0.568540). Saving model ... Epoch: 12 Training Loss: 1.319658 Validation Loss: 0.560889 Validation loss decreased (0.568540 --> 0.560889). Saving model ... Epoch: 13 Training Loss: 1.310764 Validation Loss: 0.547600 Validation loss decreased (0.560889 --> 0.547600). Saving model ... Epoch: 14 Training Loss: 1.266870 Validation Loss: 0.543132 Validation loss decreased (0.547600 --> 0.543132). Saving model ... Epoch: 15 Training Loss: 1.252069 Validation Loss: 0.528827 Validation loss decreased (0.543132 --> 0.528827). Saving model ... Epoch: 16 Training Loss: 1.223463 Validation Loss: 0.518320 Validation loss decreased (0.528827 --> 0.518320). Saving model ... Epoch: 17 Training Loss: 1.216662 Validation Loss: 0.516169 Validation loss decreased (0.518320 --> 0.516169). Saving model ... Epoch: 18 Training Loss: 1.188964 Validation Loss: 0.522508 Epoch: 19 Training Loss: 1.168334 Validation Loss: 0.498648 Validation loss decreased (0.516169 --> 0.498648). Saving model ... Epoch: 20 Training Loss: 1.172391 Validation Loss: 0.508143 Epoch: 21 Training Loss: 1.150849 Validation Loss: 0.503743 Epoch: 22 Training Loss: 1.129095 Validation Loss: 0.494641 Validation loss decreased (0.498648 --> 0.494641). Saving model ... Epoch: 23 Training Loss: 1.151644 Validation Loss: 0.497570 Epoch: 24 Training Loss: 1.113620 Validation Loss: 0.503504 Epoch: 25 Training Loss: 1.103773 Validation Loss: 0.487106 Validation loss decreased (0.494641 --> 0.487106). Saving model ... Epoch: 26 Training Loss: 1.095958 Validation Loss: 0.491691 Epoch: 27 Training Loss: 1.100723 Validation Loss: 0.490359 Epoch: 28 Training Loss: 1.102028 Validation Loss: 0.475714 Validation loss decreased (0.487106 --> 0.475714). Saving model ... 
Epoch: 29 Training Loss: 1.086554 Validation Loss: 0.474312 Validation loss decreased (0.475714 --> 0.474312). Saving model ... Epoch: 30 Training Loss: 1.042248 Validation Loss: 0.463428 Validation loss decreased (0.474312 --> 0.463428). Saving model ... Epoch: 31 Training Loss: 1.070777 Validation Loss: 0.460097 Validation loss decreased (0.463428 --> 0.460097). Saving model ... Epoch: 32 Training Loss: 1.053443 Validation Loss: 0.464849 Epoch: 33 Training Loss: 1.038977 Validation Loss: 0.466620 Epoch: 34 Training Loss: 1.059384 Validation Loss: 0.463554 Epoch: 35 Training Loss: 1.032542 Validation Loss: 0.459485 Validation loss decreased (0.460097 --> 0.459485). Saving model ... Epoch: 36 Training Loss: 1.049958 Validation Loss: 0.461280 Epoch: 37 Training Loss: 1.028310 Validation Loss: 0.456590 Validation loss decreased (0.459485 --> 0.456590). Saving model ... Epoch: 38 Training Loss: 1.009734 Validation Loss: 0.459573 Epoch: 39 Training Loss: 1.006135 Validation Loss: 0.457234 Epoch: 40 Training Loss: 1.019139 Validation Loss: 0.454895 Validation loss decreased (0.456590 --> 0.454895). Saving model ... Epoch: 41 Training Loss: 1.007641 Validation Loss: 0.453813 Validation loss decreased (0.454895 --> 0.453813). Saving model ... Epoch: 42 Training Loss: 0.977341 Validation Loss: 0.445490 Validation loss decreased (0.453813 --> 0.445490). Saving model ... Epoch: 43 Training Loss: 0.985505 Validation Loss: 0.461917 Epoch: 44 Training Loss: 0.974407 Validation Loss: 0.456799 Epoch: 45 Training Loss: 0.986558 Validation Loss: 0.451747 Epoch: 46 Training Loss: 0.973268 Validation Loss: 0.446719 Epoch: 47 Training Loss: 0.988071 Validation Loss: 0.449478 Epoch: 48 Training Loss: 0.945157 Validation Loss: 0.450284 Epoch: 49 Training Loss: 0.975591 Validation Loss: 0.451726 Epoch: 50 Training Loss: 0.942519 Validation Loss: 0.436701 Validation loss decreased (0.445490 --> 0.436701). Saving model ... ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code model_transfer.load_state_dict(torch.load('model_transfer.pt')) test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.570899 Test Accuracy: 81% (412/505) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed img = Image.open(img_path) transform = transforms.Compose([transforms.Resize([224, 224]), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) img = transform(img).unsqueeze_(0) if use_cuda: img = img.cuda() output = model_transfer(img) cls = torch.argmax(output).cpu().numpy() return class_names[cls] ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. 
Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dog_breed = predict_breed_transfer(img_path)
    is_human = face_detector(img_path)
    is_dog = dog_detector(img_path)
    if (is_human):
        print("Hello, human!")
    elif (is_dog):
        print("Detected a dog")
    else:
        print("Error: not a human nor a dog")
    plt.imshow(img)
    plt.show()
    if (is_human):
        print("You look like a ", dog_breed)
    elif (is_dog):
        print(dog_breed)
    print("\n")
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement)
* It looks like the model determines the "look-alike" breed more from the clothes I am wearing than from my face. Perhaps preprocessing the images to crop them to the face would help.
* One of the dogs is actually a Black Labrador but was identified as a Flat-coated Retriever. I could not find any Labrador categories in the sample data, so adding more training data would likely help.
* The puppy is actually a mixture of Black Labrador, Husky and Great Pyrenees, but was identified as a Norwegian Lundehund. This presents even more complications because the training data does not contain many young dogs. I could possibly improve the algorithm to show mixed breeds by not just picking the breed with the largest score.
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
## suggested code, below
run_app('C:/Users/ivar/Pictures/ivar.jpg')
run_app('C:/Users/ivar/Pictures/ivar2.jpg')
run_app('C:/Users/ivar/Pictures/Harley.png')
run_app('C:/Users/ivar/Pictures/Mitsie.png')
run_app('C:/Users/ivar/Pictures/Sven.png')
###Output
Hello, human!
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested.
Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). 
Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 0 total human images. There are 0 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. 
###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. 
The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. 
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.
Brittany | Welsh Springer Spaniel
- | -
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Curly-Coated Retriever | American Water Spaniel
- | -
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**:
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN

    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html).
Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps.
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. 
Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ A human face is detected in 98% of the first 100 human images and in 17% of the first 100 dog images (see the printed output below). ###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]
#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
# each list holds exactly 100 images, so the count of detections equals the percentage
humans = [face_detector(img) for img in human_files_short]
per_humans = sum(humans)
dogs = [face_detector(img) for img in dog_files_short]
per_dogs = sum(dogs)
print("detection rate humans: "+str(per_humans)+" %, dogs: "+str(per_dogs)+" %.")
###Output
detection rate humans: 98 %, dogs: 17 %.
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() print(VGG16) ###Output VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace) (5): Dropout(p=0.5) (6): Linear(in_features=4096, out_features=1000, bias=True) ) ) ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image, ImageStat #from torch.autograd import Variable import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
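    ## Note: torchvision's pretrained models are conventionally fed images normalized
    ## with the ImageNet statistics (mean [0.485, 0.456, 0.406], std [0.229, 0.224, 0.225]);
    ## the simpler [0.5, 0.5, 0.5] normalization used below is an approximation that
    ## still works well enough for this detector.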
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    image = Image.open(img_path).convert('RGB')
    s = ImageStat.Stat(image)
    in_transform = transforms.Compose([
        transforms.Resize((224,224)), # input VGG16 size
        transforms.ToTensor(),
        #transforms.Normalize(s.mean, s.stddev)
        transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])])
    #image = in_transform(image)[:3,:,:].unsqueeze(0)
    image = in_transform(image) #.float()
    #image = Variable(image, requires_grad=True)
    image = image.unsqueeze(0)
    VGG16.eval()
    if use_cuda:
        image = image.cuda()
    output = VGG16(image)
    _, preds_tensor = torch.max(output, 1)
    return preds_tensor.cpu().numpy()[0]

VGG16_predict('/data/dog_images/train/103.Mastiff/Mastiff_06833.jpg')
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    p = VGG16_predict(img_path)
    #print(p)
    # dog categories occupy ImageNet indices 151-268 (inclusive)
    return p > 150 and p < 269 # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ A dog is detected in 0% of the human images and in 100% of the dog images (see the printed output below). ###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
humans = [dog_detector(img) for img in human_files_short]
per_humans = sum(humans)
dogs = [dog_detector(img) for img in dog_files_short]
per_dogs = sum(dogs)
print("detection rate humans: "+str(per_humans)+" %, dogs: "+str(per_dogs)+" %.")
###Output
detection rate humans: 0 %, dogs: 100 %.
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
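### Below: an optional comparison detector built on DenseNet-161;
### dog_detector2 applies the same ImageNet dog-index test (151-268) to its predictions.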
# define densenet161 model
densenet = models.densenet161(pretrained=True)

# move model to GPU if CUDA is available
if use_cuda:
    densenet = densenet.cuda()

#print(densenet)

def DenseNet_predict(img_path):
    image = Image.open(img_path).convert('RGB')
    in_transform = transforms.Compose([
        transforms.Resize((224,224)), # 224x224 input, the same size as for VGG16
        transforms.ToTensor(),
        transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])])
    #image = in_transform(image)[:3,:,:].unsqueeze(0)
    image = in_transform(image) #.float()
    #image = Variable(image, requires_grad=True)
    image = image.unsqueeze(0)
    densenet.eval()
    if use_cuda:
        image = image.cuda()
    output = densenet(image)
    _, preds_tensor = torch.max(output, 1)
    return preds_tensor.cpu().numpy()[0]

DenseNet_predict('/data/dog_images/train/103.Mastiff/Mastiff_06833.jpg')

def dog_detector2(img_path):
    ## TODO: Complete the function.
    p = DenseNet_predict(img_path)
    #print(p)
    return p > 150 and p < 269 # true/false

humans = [dog_detector2(img) for img in human_files_short]
per_humans = sum(humans)
dogs = [dog_detector2(img) for img in dog_files_short]
per_dogs = sum(dogs)
print("detection rate humans: "+str(per_humans)+" %, dogs: "+str(per_dogs)+" %.")
###Output
/opt/conda/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/densenet.py:212: UserWarning: nn.init.kaiming_normal is now deprecated in favor of nn.init.kaiming_normal_.
Downloading: "https://download.pytorch.org/models/densenet161-8d451a50.pth" to /root/.torch/models/densenet161-8d451a50.pth
100%|██████████| 115730790/115730790 [00:01<00:00, 89308618.77it/s]
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
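As a quick sanity check on that random-chance figure, a minimal sketch computing the accuracy of a uniform random guess over the 133 breed classes: ###Code
# accuracy of a uniform random guess among 133 breed classes
n_classes = 133
baseline = 1.0 / n_classes
print('random-guess accuracy: {:.2%}'.format(baseline))  # ~0.75%, i.e. less than 1%
###Output
_____no_output_____
###Markdown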
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code
import os
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
def createLoaders(sizeInput):
    transform = transforms.Compose([
        transforms.RandomVerticalFlip(0.3),
        transforms.RandomRotation(10),
        transforms.RandomResizedCrop(sizeInput),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    transformtest = transforms.Compose([
        transforms.Resize((sizeInput,sizeInput)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    # load the training and test datasets
    train_data = datasets.ImageFolder(root='/data/dog_images/train',transform=transform)
    test_data = datasets.ImageFolder(root='/data/dog_images/test', transform=transformtest)
    valid_data = datasets.ImageFolder(root='/data/dog_images/valid', transform=transformtest)
    # Create training and test dataloaders
    num_workers = 0
    # how many samples per batch to load
    batch_size = 64
    # prepare data loaders
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
    loaders = {}
    loaders['train'] = train_loader
    loaders['test'] = test_loader
    loaders['valid'] = valid_loader
    return loaders

loaders_scratch = createLoaders(224)
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: - Images are resized to 224x224, matching the VGG input size. The three color channels are kept, giving a 224x224x3 tensor, loaded in batches of 64.- Several augmentations are applied (random vertical flip, rotation, and resized crop). The position, orientation, and size of dogs in the input images vary widely, so the network must learn to handle all of these variations. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(128 * 28 * 28, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, 133)
        # dropout layer (p=0.25)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        ## Define forward behavior
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        # flatten image input
        x = x.view(-1, 128 * 28 * 28)
        # add dropout layer
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        x = self.fc3(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
print(model_scratch)
###Output
Net( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (fc1): Linear(in_features=100352, out_features=1024, bias=True) (fc2): Linear(in_features=1024, out_features=256, bias=True) (fc3): Linear(in_features=256, out_features=133, bias=True) (dropout): Dropout(p=0.25) )
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ The architecture is inspired by the 'cifar10_cnn_augmentation' exercise. An additional convolutional layer and a fully connected layer were added to capture the subtleties of the different dog breeds. Three 3x3 convolutions, each followed by 2x2 max pooling, reduce the 224x224 input to 128 feature maps of size 28x28, which are then classified by three fully connected layers with dropout. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() output = model(data) # calculate the batch loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update training loss #train_loss += loss.item()*data.size(0) train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) # calculate the batch loss loss = criterion(output, target) # update average validation loss #valid_loss += loss.item()*data.size(0) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # save model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(30, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.883352 Validation Loss: 4.858548 Validation loss decreased (inf --> 4.858548). Saving model ... Epoch: 2 Training Loss: 4.816058 Validation Loss: 4.774469 Validation loss decreased (4.858548 --> 4.774469). Saving model ... Epoch: 3 Training Loss: 4.735299 Validation Loss: 4.650283 Validation loss decreased (4.774469 --> 4.650283). Saving model ... Epoch: 4 Training Loss: 4.676048 Validation Loss: 4.533309 Validation loss decreased (4.650283 --> 4.533309). Saving model ... Epoch: 5 Training Loss: 4.592791 Validation Loss: 4.512589 Validation loss decreased (4.533309 --> 4.512589). Saving model ... Epoch: 6 Training Loss: 4.532186 Validation Loss: 4.514357 Epoch: 7 Training Loss: 4.493033 Validation Loss: 4.436483 Validation loss decreased (4.512589 --> 4.436483). Saving model ... Epoch: 8 Training Loss: 4.458148 Validation Loss: 4.446252 Epoch: 9 Training Loss: 4.400166 Validation Loss: 4.333307 Validation loss decreased (4.436483 --> 4.333307). Saving model ... Epoch: 10 Training Loss: 4.355060 Validation Loss: 4.328773 Validation loss decreased (4.333307 --> 4.328773). Saving model ... 
Epoch: 11 Training Loss: 4.313457 Validation Loss: 4.198817 Validation loss decreased (4.328773 --> 4.198817). Saving model ... Epoch: 12 Training Loss: 4.240575 Validation Loss: 4.285127 Epoch: 13 Training Loss: 4.191254 Validation Loss: 4.124137 Validation loss decreased (4.198817 --> 4.124137). Saving model ... Epoch: 14 Training Loss: 4.167406 Validation Loss: 4.054853 Validation loss decreased (4.124137 --> 4.054853). Saving model ... Epoch: 15 Training Loss: 4.124972 Validation Loss: 4.086310 Epoch: 16 Training Loss: 4.078556 Validation Loss: 4.110572 Epoch: 17 Training Loss: 4.029112 Validation Loss: 3.974355 Validation loss decreased (4.054853 --> 3.974355). Saving model ... Epoch: 18 Training Loss: 3.972854 Validation Loss: 3.882929 Validation loss decreased (3.974355 --> 3.882929). Saving model ... Epoch: 19 Training Loss: 3.932988 Validation Loss: 4.033678 Epoch: 20 Training Loss: 3.898726 Validation Loss: 3.936265 Epoch: 21 Training Loss: 3.869160 Validation Loss: 3.938062 Epoch: 22 Training Loss: 3.819121 Validation Loss: 4.008240 Epoch: 23 Training Loss: 3.820961 Validation Loss: 3.829686 Validation loss decreased (3.882929 --> 3.829686). Saving model ... Epoch: 24 Training Loss: 3.772782 Validation Loss: 3.833923 Epoch: 25 Training Loss: 3.780395 Validation Loss: 3.818947 Validation loss decreased (3.829686 --> 3.818947). Saving model ... Epoch: 26 Training Loss: 3.769321 Validation Loss: 3.820615 Epoch: 27 Training Loss: 3.739455 Validation Loss: 4.041795 Epoch: 28 Training Loss: 3.686003 Validation Loss: 3.822886 Epoch: 29 Training Loss: 3.671715 Validation Loss: 3.720973 Validation loss decreased (3.818947 --> 3.720973). Saving model ... Epoch: 30 Training Loss: 3.670347 Validation Loss: 3.811321 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function model_scratch.load_state_dict(torch.load('model_scratch.pt')) test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. 
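The cells that follow implement the standard recipe: freeze the pre-trained feature extractor and replace only the final classifier layer. As a condensed sketch of that pattern (the layer index and the 133-class head match the VGG-16 setup used below): ###Code
import torch.nn as nn
import torchvision.models as models

# transfer-learning pattern: freeze features, swap in a new classification head
backbone = models.vgg16(pretrained=True)
for param in backbone.features.parameters():
    param.requires_grad = False  # convolutional features stay fixed
# replace the last fully connected layer with a 133-way head for the dog breeds
backbone.classifier[6] = nn.Linear(backbone.classifier[6].in_features, 133)
###Output
_____no_output_____
###Markdown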
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code
## TODO: Specify data loaders
loaders_transfer = createLoaders(224)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.vgg16(pretrained=True)
for param in model_transfer.features.parameters():
    param.requires_grad = False
n_inputs = model_transfer.classifier[6].in_features
last_layer = nn.Linear(n_inputs, 133)
model_transfer.classifier[6] = last_layer

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ The solution is inspired by the transfer-learning exercise. The convolutional features of a pre-trained VGG-16 network are frozen, and the final classifier layer is replaced by a new fully connected layer sized for our 133 breed classes. Since VGG-16 was trained on ImageNet, which already contains many dog categories, its learned features transfer well to breed classification. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.Adam(model_transfer.classifier.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code
# train the model
model_transfer = train(30, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
Epoch: 1 Training Loss: 4.027144 Validation Loss: 2.062483 Validation loss decreased (inf --> 2.062483). Saving model ...
Epoch: 2 Training Loss: 3.254838 Validation Loss: 1.610792 Validation loss decreased (2.062483 --> 1.610792). Saving model ...
Epoch: 3 Training Loss: 3.117734 Validation Loss: 1.567174 Validation loss decreased (1.610792 --> 1.567174). Saving model ...
Epoch: 4 Training Loss: 2.950598 Validation Loss: 1.330655 Validation loss decreased (1.567174 --> 1.330655). Saving model ...
Epoch: 5 Training Loss: 2.902400 Validation Loss: 1.582676
Epoch: 6 Training Loss: 2.897001 Validation Loss: 1.486060
Epoch: 7 Training Loss: 2.830831 Validation Loss: 1.428683
Epoch: 8 Training Loss: 2.838330 Validation Loss: 1.437939
Epoch: 9 Training Loss: 2.768260 Validation Loss: 1.256951 Validation loss decreased (1.330655 --> 1.256951). Saving model ...
Epoch: 10 Training Loss: 2.703723 Validation Loss: 1.285626 Epoch: 11 Training Loss: 2.734850 Validation Loss: 1.241444 Validation loss decreased (1.256951 --> 1.241444). Saving model ... Epoch: 12 Training Loss: 2.728185 Validation Loss: 1.173002 Validation loss decreased (1.241444 --> 1.173002). Saving model ... Epoch: 13 Training Loss: 2.712315 Validation Loss: 1.241443 Epoch: 14 Training Loss: 2.699485 Validation Loss: 1.166523 Validation loss decreased (1.173002 --> 1.166523). Saving model ... Epoch: 15 Training Loss: 2.709078 Validation Loss: 1.459468 Epoch: 16 Training Loss: 2.684065 Validation Loss: 1.304165 Epoch: 17 Training Loss: 2.682151 Validation Loss: 1.296578 Epoch: 18 Training Loss: 2.691603 Validation Loss: 1.182636 Epoch: 19 Training Loss: 2.663491 Validation Loss: 1.164603 Validation loss decreased (1.166523 --> 1.164603). Saving model ... Epoch: 20 Training Loss: 2.695265 Validation Loss: 1.157973 Validation loss decreased (1.164603 --> 1.157973). Saving model ... Epoch: 21 Training Loss: 2.607203 Validation Loss: 1.088854 Validation loss decreased (1.157973 --> 1.088854). Saving model ... Epoch: 22 Training Loss: 2.664319 Validation Loss: 1.145207 Epoch: 23 Training Loss: 2.581572 Validation Loss: 1.098469 Epoch: 24 Training Loss: 2.616433 Validation Loss: 1.105451 Epoch: 25 Training Loss: 2.711608 Validation Loss: 1.158691 Epoch: 26 Training Loss: 2.580783 Validation Loss: 1.078824 Validation loss decreased (1.088854 --> 1.078824). Saving model ... Epoch: 27 Training Loss: 2.606945 Validation Loss: 1.127715 Epoch: 28 Training Loss: 2.593313 Validation Loss: 1.153590 Epoch: 29 Training Loss: 2.560135 Validation Loss: 1.332768 Epoch: 30 Training Loss: 2.596265 Validation Loss: 1.150685 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code model_transfer.load_state_dict(torch.load('model_transfer.pt')) test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 1.097917 Test Accuracy: 65% (551/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. model_transfer.load_state_dict(torch.load('model_transfer.pt')) # list of class names by index, i.e. 
a name can be accessed like class_names[0]
train_data = datasets.ImageFolder(root='/data/dog_images/train')
class_names = [item[4:].replace("_", " ") for item in train_data.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    image = Image.open(img_path).convert('RGB')
    in_transform = transforms.Compose([
                        transforms.Resize((224,224)), #input VGG16 size
                        transforms.ToTensor(),
                        # note: torchvision's pre-trained models are usually
                        # normalized with the ImageNet mean/std statistics
                        transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])])

    image = in_transform(image)
    image = image.unsqueeze(0)
    model_transfer.eval()
    if use_cuda:
        image = image.cuda()
    output = model_transfer(image)
    _, preds_tensor = torch.max(output, 1)
    return class_names[preds_tensor.cpu().numpy()[0]]
###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.  Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above.  You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def displayImg(img_path):
    img = cv2.imread(img_path)
    cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(cv_rgb)
    plt.show()

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if face_detector(img_path):
        print('Picture of human')
        displayImg(img_path)
        breed = predict_breed_transfer(img_path)
        print("Looking like :"+breed)
    elif dog_detector(img_path):
        print('Picture of dog')
        displayImg(img_path)
        breed = predict_breed_transfer(img_path)
        print("it's a "+breed)
    else:
        print('The picture shows neither a human nor a dog. Sorry!')
        displayImg(img_path)
###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like?  If you have a dog, does it predict your dog's breed accurately?  If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) The app mostly does a great job, but it can be improved. The examples below show that the cocker spaniel was not detected properly. I would suggest to:- test different pre-trained networks- run more epochs to get a better accuracy- train on larger images to capture the finer details of dog breeds ###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
human_files = [] human_files.append('yl.jpg') human_files.append('nd.jpg') dog_files = [] dog_files.append('collie.jpg') dog_files.append('cocker.jpg') dog_files.append('cat.jpeg') ## suggested code, below for file in np.hstack((human_files, dog_files)): run_app(file) ###Output Picture of human ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. 
Feel free to use the links below to navigate the notebook.* [Step 0](#step0): Import Datasets* [Step 1](#step1): Detect Humans* [Step 2](#step2): Detect Dogs* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](#step5): Write your Algorithm* [Step 6](#step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip).  Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip).  Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images.  OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades).  We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale.  The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face.  Each detected face is a 1D array with four entries that specifies the bounding box of the detected face.  The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box.
The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise.  This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face.  You will see that our algorithm falls short of this goal, but still gives acceptable performance.  We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ Percentage of the first 100 images in human_files have a detected human face = 99.0 %Percentage of the first 100 images in dog_files have a detected human face = 6.0 % ###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
num_human_faces_detected_in_human = 0
num_human_faces_detected_in_dogs = 0

for i in range(len(human_files_short)):
    if(face_detector(human_files_short[i])):
        num_human_faces_detected_in_human += 1
    if(face_detector(dog_files_short[i])):
        num_human_faces_detected_in_dogs +=1

print("Percentage of the first 100 images in human_files have a detected human face = "+str(num_human_faces_detected_in_human*100/len(human_files_short))+" %")
print("Percentage of the first 100 images in dog_files have a detected human face = "+str(num_human_faces_detected_in_dogs*100/len(human_files_short))+" %")
###Output Percentage of the first 100 images in human_files have a detected human face = 99.0 % Percentage of the first 100 images in dog_files have a detected human face = 6.0 % ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :).  Please use the code cell below to design and test your own face detection algorithm.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.OpenCV does a pretty good job of face detection with 99% accuracy, so we are going to use OpenCV for the face detection task. ###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) VGG16.eval() # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image from PIL import ImageFile import torchvision.transforms as transforms ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image image = Image.open(img_path) img_transforms = transforms.Compose([ #transforms.RandomResizedCrop(224), transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) img_transformed = img_transforms(image) if use_cuda: img_transformed = img_transformed.cuda() output = VGG16(img_transformed.view(1,3,224,224)) _, preds_tensor = torch.max(output, 1) return preds_tensor.item() # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. 
dog_indices = list(range(151,269))
    return VGG16_predict(img_path) in dog_indices # true/false
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ Percentage of the first 100 images in human_files have a detected dog = 1.0 %Percentage of the first 100 images in dog_files have a detected dog = 100.0 % ###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
num_dogs_detected_in_human = 0
num_dogs_detected_in_dogs = 0

for i in range(len(human_files_short)):
    if(dog_detector(human_files_short[i])):
        num_dogs_detected_in_human += 1
    if(dog_detector(dog_files_short[i])):
        num_dogs_detected_in_dogs +=1

print("Percentage of the first 100 images in human_files have a detected dog = "+str(num_dogs_detected_in_human*100/len(human_files_short))+" %")
print("Percentage of the first 100 images in dog_files have a detected dog = "+str(num_dogs_detected_in_dogs*100/len(human_files_short))+" %")
###Output Percentage of the first 100 images in human_files have a detected dog = 1.0 % Percentage of the first 100 images in dog_files have a detected dog = 100.0 % ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc).  Please use the code cell below to test other pre-trained PyTorch models.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

# create a function that takes in an image and a model and returns the predicted class index
def model_predict(img_path, model,input_size):
    '''
    Use pre-trained model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to model's prediction
    '''

    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    image = Image.open(img_path)
    img_transforms = transforms.Compose([
        transforms.Resize((input_size,input_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    img_transformed = img_transforms(image)
    if use_cuda:
        img_transformed = img_transformed.cuda()
    output = model(img_transformed.view(1,3,input_size,input_size))
    _, preds_tensor = torch.max(output, 1)

    return preds_tensor.item() # predicted class index

### returns "True" if a dog is detected in the image stored at img_path for model
def dog_detector_for_model(img_path,model,input_size=224):
    ## TODO: Complete the function.
dog_indices = list(range(151,269))
    return model_predict(img_path,model,input_size) in dog_indices # true/false

# define ResNet-50 model
resnet50 = models.resnet50(pretrained=True)
resnet50.eval()
# move model to GPU if CUDA is available
if use_cuda:
    resnet50 = resnet50.cuda()

num_dogs_detected_in_human = 0
num_dogs_detected_in_dogs = 0

for i in range(len(human_files_short)):
    if(dog_detector_for_model(human_files_short[i],resnet50)):
        num_dogs_detected_in_human += 1
    if(dog_detector_for_model(dog_files_short[i],resnet50)):
        num_dogs_detected_in_dogs +=1

print("Percentage of the first 100 images in human_files have a detected dog RESNET50 = "+str(num_dogs_detected_in_human*100/len(human_files_short))+" %")
print("Percentage of the first 100 images in dog_files have a detected dog RESNET50 = "+str(num_dogs_detected_in_dogs*100/len(human_files_short))+" %")

# define inception_v3 model
inception = models.inception_v3(pretrained=True)
inception.eval()
# move model to GPU if CUDA is available
if use_cuda:
    inception = inception.cuda()

num_dogs_detected_in_human = 0
num_dogs_detected_in_dogs = 0

for i in range(len(human_files_short)):
    if(dog_detector_for_model(human_files_short[i],inception,299)):
        num_dogs_detected_in_human += 1
    if(dog_detector_for_model(dog_files_short[i],inception,299)):
        num_dogs_detected_in_dogs +=1

print("Percentage of the first 100 images in human_files have a detected dog INCEPTION = "+str(num_dogs_detected_in_human*100/len(human_files_short))+" %")
print("Percentage of the first 100 images in dog_files have a detected dog INCEPTION = "+str(num_dogs_detected_in_dogs*100/len(human_files_short))+" %")

print(VGG16)
###Output VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace) (5): Dropout(p=0.5) (6):
Linear(in_features=4096, out_features=1000, bias=True) ) ) ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images.  In this step, you will create a CNN that classifies dog breeds.  You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.  In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging.  To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black.  Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning.  Experiment with many different architectures, and trust your intuition.  And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).  You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.  If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
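One way to verify the augmentation pipeline is to look at an augmented batch before training. The following cell is a minimal sketch, not part of the template; `show_batch` is a hypothetical helper, and it assumes the `loaders_scratch` dictionary and the ImageNet normalization statistics defined in the next cell. ###Code
### sketch (not part of the template): preview one augmented training batch
import numpy as np
import matplotlib.pyplot as plt

def show_batch(loader, n=8):
    images, labels = next(iter(loader))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    fig = plt.figure(figsize=(16, 4))
    for i in range(n):
        ax = fig.add_subplot(1, n, i + 1, xticks=[], yticks=[])
        # undo the normalization so colors look natural when plotted
        img = images[i].numpy().transpose(1, 2, 0) * std + mean
        ax.imshow(np.clip(img, 0, 1))

# show_batch(loaders_scratch['train'])  # run after the loaders are defined below
###Output _____no_output_____ ###Markdown Un-normalizing before plotting keeps the preview colors faithful to the input images, which makes it easier to spot over-aggressive rotations or crops.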
###Code
import os
import torchvision
from torchvision import datasets

batch_size = 32
num_workers = 5
input_image_size = (128,128)

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
train_transforms = transforms.Compose([transforms.Resize(input_image_size),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.RandomRotation(30),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                            std=[0.229, 0.224, 0.225]),
                                      ])

test_valid_transforms = transforms.Compose([transforms.Resize(input_image_size),
                                            transforms.ToTensor(),
                                            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                 std=[0.229, 0.224, 0.225])])

train_data = torchvision.datasets.ImageFolder('dogImages/train', transform=train_transforms)
valid_data = torchvision.datasets.ImageFolder('dogImages/valid', transform=test_valid_transforms)
test_data = torchvision.datasets.ImageFolder('dogImages/test', transform=test_valid_transforms)

train_data_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
valid_data_loader = torch.utils.data.DataLoader(valid_data, num_workers=num_workers)
test_data_loader = torch.utils.data.DataLoader(test_data, num_workers=num_workers)

# define loaders dictionary
loaders_scratch={'train':train_data_loader,
                 'valid': valid_data_loader,
                 'test': test_data_loader}
###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)?  What size did you pick for the input tensor, and why?- Did you decide to augment the dataset?  If so, how (through translations, flips, rotations, etc)?  If not, why not? **Answer**: - I used the PyTorch `Resize` transform to rescale (stretch) images to 128x128. I tried different architectures, and a model with inputs of that size performed reasonably well on the dog breed classification task.- Yes, augmentation definitely helped improve the model's performance. I augmented the data by using the random horizontal flip transform as well as random rotation. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed.  Use the template in the code cell below. ###Code
import torch.nn as nn
import torch.nn.functional as F

input_image_depth = 3

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # sequential
        self.conv1 = nn.Conv2d(input_image_depth,32,kernel_size=3,padding=1)
        self.conv11 = nn.Conv2d(32,64,kernel_size=3,padding=1)
        self.conv2 = nn.Conv2d(64,128,kernel_size=3,padding=1)
        self.conv21 = nn.Conv2d(128,256,kernel_size=3,padding=1)
        self.conv3 = nn.Conv2d(256,512,kernel_size=3,padding=1)
        self.mp = nn.MaxPool2d(2,2)
        # classifier
        self.fc1 = nn.Linear(16*16*512,1024)
        self.fc2 = nn.Linear(1024,512)
        self.fc3 = nn.Linear(512,133)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv11(x))
        x = self.mp(x)
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv21(x))
        x = self.mp(x)
        x = F.relu(self.conv3(x))
        x = self.mp(x)
        # classifier
        x = x.view(-1,16*16*512)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = F.relu(self.fc2(x))
        x = self.dropout(x)
        x = self.fc3(x)
        return x

#-#-# You do NOT have to modify the code below this line.
#-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

print(model_scratch)
###Output Net( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv11): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv2): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv21): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv3): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (mp): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (fc1): Linear(in_features=131072, out_features=1024, bias=True) (fc2): Linear(in_features=1024, out_features=512, bias=True) (fc3): Linear(in_features=512, out_features=133, bias=True) (dropout): Dropout(p=0.5) ) ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I tried a number of architectures with a sequence of convolutional and pooling layers, followed by a sequence of fully connected layers. The activation function for all layers except the last was ReLU; the last layer outputs raw class scores (logits), which are passed to the cross-entropy loss (softmax is applied inside the loss), since we are dealing with a multi-class classification task. In the convolutional sequence, the convolutions were used to increase the depth of the feature maps while maintaining the spatial dimensions, and the max-pooling layers were used to downsize the spatial dimensions while maintaining the same depth as the input. For regularization, I used dropout in the fully connected portion of the model to avoid overfitting.Below is the structure of the model, as well as the sizes of the layers:    (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))    (conv11): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))    (mp): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)    (conv2): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))    (conv21): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))    (mp): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)    (conv3): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))    (mp): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)    (fc1): Linear(in_features=131072, out_features=1024, bias=True)    (dropout): Dropout(p=0.5)    (fc2): Linear(in_features=1024, out_features=512, bias=True)    (dropout): Dropout(p=0.5)    (fc3): Linear(in_features=512, out_features=133, bias=True) (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html).  Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(), lr = 0.0001)
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below.  [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
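The loss bookkeeping in the `train` function below uses an incremental update: after batch k (1-indexed), the tracked loss equals the average of the first k batch losses. A tiny sketch with hypothetical values shows the identity: ###Code
### sketch: the incremental update used in `train` below is just a running mean
losses = [2.0, 1.0, 4.0]  # hypothetical per-batch losses
running = 0.0
for k, loss in enumerate(losses):
    running = running + ((1 / (k + 1)) * (loss - running))
print(running)                    # 2.333... 
print(sum(losses) / len(losses))  # same value: the update computes the batch mean
###Output _____no_output_____ ###Markdown This running mean keeps the reported epoch loss comparable across epochs without storing every individual batch loss.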
###Code
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            # reset gradients to zero
            optimizer.zero_grad()
            # get output from model
            output = model(data)
            # calculate loss with the criterion passed in,
            # so the same loop works for both the scratch and transfer models
            loss = criterion(output, target)
            # calculate back_prop
            loss.backward()
            # update weights and biases
            optimizer.step()
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            output = model(data)
            # calculate loss
            loss = criterion(output, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if(valid_loss < valid_loss_min):
            print('Saving model with validation loss = '+str(valid_loss.item()))
            valid_loss_min = valid_loss
            torch.save(model.state_dict(), save_path)
            # also save the full model object (reloaded later for the transfer model)
            torch.save(model, 'model_transfer_full.pt')

    # return trained model
    return model

# train the model
# model_scratch = train(50, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 10%. ###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
    # note: calling model.eval() here would disable dropout during testing
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))

    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100.
* correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output Test Loss: 3.799014 Test Accuracy: 16% (136/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images.  Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code
## TODO: Specify data loaders
batch_size = 32
num_workers = 5
input_image_size = (224,224)

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
train_transforms = transforms.Compose([transforms.Resize(input_image_size),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.RandomRotation(30),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                            std=[0.229, 0.224, 0.225]),
                                      ])

test_valid_transforms = transforms.Compose([transforms.Resize(input_image_size),
                                            transforms.ToTensor(),
                                            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                 std=[0.229, 0.224, 0.225])])

train_data = torchvision.datasets.ImageFolder('dogImages/train', transform=train_transforms)
valid_data = torchvision.datasets.ImageFolder('dogImages/valid', transform=test_valid_transforms)
test_data = torchvision.datasets.ImageFolder('dogImages/test', transform=test_valid_transforms)

train_data_loader_transfer = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
valid_data_loader_transfer = torch.utils.data.DataLoader(valid_data, num_workers=num_workers)
test_data_loader_transfer = torch.utils.data.DataLoader(test_data, num_workers=num_workers)

# define loaders dictionary
loaders_transfer={'train':train_data_loader_transfer,
                  'valid': valid_data_loader_transfer,
                  'test': test_data_loader_transfer}
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed.  Use the code cell below, and save your initialized model as the variable `model_transfer`.
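The architecture cell below freezes all pre-trained parameters and replaces the classifier head, then hands every parameter to the optimizer; that works because frozen parameters receive no gradients. One common refinement, shown here only as a sketch rather than the author's final code, is to pass the optimizer just the parameters that still require gradients, which avoids tracking optimizer state for weights that never change. It assumes the `model_transfer` defined in the next cell. ###Code
### sketch (not the author's final code): optimize trainable parameters only
# trainable_params = filter(lambda p: p.requires_grad, model_transfer.parameters())
# optimizer_transfer = optim.Adam(trainable_params, lr=0.001)
###Output _____no_output_____ ###Markdown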
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture to be VGG19
model_transfer = models.vgg19(pretrained=True)

# freeze all pre-trained parameters (the new final layer added below is trainable)
for parameters in model_transfer.parameters():
    parameters.requires_grad = False

# print model classifier to check classifier dimensions
print(model_transfer.classifier)

# replace the final fully connected layer of the classifier with a fully connected layer with an output of 133
# corresponding to the 133 dog breeds
model_transfer.classifier[6] = nn.Linear(4096, 133)

# print new model_transfer to check the classifier is modified
print(model_transfer.classifier)
###Output Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace) (5): Dropout(p=0.5) (6): Linear(in_features=4096, out_features=133, bias=True) ) ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ To make use of transfer learning, I chose the pre-trained VGG19 model. The model was trained to classify different types of objects, including dogs, and since our dataset is small and similar to the dataset that the model was trained on, it makes sense to freeze the convolutional layers' parameters, replace the final fully connected layer with a layer with the correct dimensions to classify our 133 dog breeds, and then retrain the fully connected portion of the model. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html).  Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code
### TODO: select loss function
criterion_transfer = nn.CrossEntropyLoss()

### TODO: select optimizer
# frozen parameters receive no gradients, so only the new classifier layer is updated
optimizer_transfer = optim.Adam(model_transfer.parameters(), lr = 0.001)
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below.  [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code
# move model to GPU if available
if use_cuda:
    model_transfer = model_transfer.cuda()

# train the model using same loader as scratch
# model_transfer = train(50, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
# model_transfer.load_state_dict(torch.load('model_transfer.pt'))
model_transfer = torch.load('model_transfer_full.pt')
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 60%. ###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output Test Loss: 0.639146 Test Accuracy: 80% (677/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
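Beyond the single arg-max breed returned by the function in the next cell, it can be informative to inspect the top-k most probable breeds, for example to see how confident the model is between similar-looking classes. The following is a sketch, not part of the project; `predict_topk` is a hypothetical helper and it assumes `model_transfer`, `class_names`, and an input preprocessed exactly as in the cell below. ###Code
### sketch (hypothetical helper): top-k breed predictions from the transfer model
import torch.nn.functional as F

def predict_topk(img_tensor, k=3):
    # img_tensor: a preprocessed 1x3x224x224 batch, already on the right device
    model_transfer.eval()
    with torch.no_grad():
        probs = F.softmax(model_transfer(img_tensor), dim=1)
    top_p, top_i = probs.topk(k, dim=1)
    return [(class_names[int(i)], float(p)) for p, i in zip(top_p[0], top_i[0])]
###Output _____no_output_____ ###Markdown A usage pattern would be to build `img_tensor` with the same transforms as `predict_breed_transfer` below and print the returned (breed, probability) pairs.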
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in train_data.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    image = Image.open(img_path)
    img_transforms = transforms.Compose([
        #transforms.RandomResizedCrop(224),
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    img_transformed = img_transforms(image)
    if use_cuda:
        img_transformed = img_transformed.cuda()
    output = model_transfer(img_transformed.view(1,3,224,224))
    _, preds_tensor = torch.max(output, 1)
    return class_names[preds_tensor.item()]
###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.  Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above.  You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if(dog_detector(img_path)):
        print("Not a human, most probably a dog of breed ...\n"+predict_breed_transfer(img_path))
    elif(face_detector(img_path)):
        print("Hello, human!, you like a ...\n"+predict_breed_transfer(img_path))
    else:
        print("Neither a face nor a dog is detected in the image")
###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like?  If you have a dog, does it predict your dog's breed accurately?  If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ The model actually did a pretty good job of classifying dog breeds, with an accuracy of 80%.Possible points of improvement to get better accuracy:1- Adding more training data2- Training the whole model rather than just the fully connected portion of the model3- Trying a bigger model like ResNet-152 ###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    print(file)
    run_app(file)
###Output lfw/Angelina_Jolie/Angelina_Jolie_0012.jpg Hello, human!, you like a ...
Afghan hound lfw/Angelina_Jolie/Angelina_Jolie_0010.jpg Hello, human!, you like a ... Japanese chin lfw/Angelina_Jolie/Angelina_Jolie_0007.jpg Hello, human!, you like a ... Dachshund dogImages/train/056.Dachshund/Dachshund_03969.jpg Not a human, most probably a dog of breed ... Dachshund dogImages/train/056.Dachshund/Dachshund_03927.jpg Not a human, most probably a dog of breed ... Irish setter dogImages/train/056.Dachshund/Dachshund_03939.jpg Not a human, most probably a dog of breed ... Dachshund ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! 
The Road AheadWe break the notebook into separate steps.  Feel free to use the links below to navigate the notebook.* [Step 0](#step0): Import Datasets* [Step 1](#step1): Detect Humans* [Step 2](#step2): Detect Dogs* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](#step5): Write your Algorithm* [Step 6](#step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip).  Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip).  Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images.  OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades).  We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale.  The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face.  Each detected face is a 1D array with four entries that specifies the bounding box of the detected face.  The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box.
The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. 
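Useful background when completing the `VGG16_predict` TODO below: torchvision's ImageNet-pretrained models expect 224x224 RGB inputs normalized with the ImageNet channel statistics. The following cell is a reference sketch only (the `preprocess` name is hypothetical, not part of the template), mirroring the preprocessing used in the filled-in solutions earlier in this document: ###Code
### reference sketch (not part of the template): standard preprocessing for
### torchvision's ImageNet-pretrained models
import torchvision.transforms as transforms

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])])
###Output _____no_output_____ ###Markdown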
(IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. 
In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. *(side-by-side photos: Brittany | Welsh Springer Spaniel)* It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). *(side-by-side photos: Curly-Coated Retriever | American Water Spaniel)* Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. *(side-by-side photos: Yellow Labrador | Chocolate Labrador | Black Labrador)* We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code

import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes

###Output

_____no_output_____

###Markdown

**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.

###Code

import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
    
    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output

_____no_output_____

###Markdown

__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output

_____no_output_____

###Markdown

--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code

## TODO: Specify data loaders

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code

import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture

if use_cuda:
    model_transfer = model_transfer.cuda()

###Output

_____no_output_____

###Markdown

__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code

criterion_transfer = None
optimizer_transfer = None

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code

# train the model (uncomment the line below once n_epochs and loaders_transfer are defined)
# model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code

test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code

### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e.
a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None

###Output

_____no_output_____

###Markdown

--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code

### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    pass

###Output

_____no_output_____

###Markdown

--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)

###Code

## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)

###Output

_____no_output_____

###Markdown

Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**.
Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/Users/Miriam/Documents/lfw/lfw/*/*")) dog_files = np.array(glob("/Users/Miriam/Documents/dogImages/dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' 
% len(dog_files))

print(human_files[0])

###Output

/Users/Miriam/Documents/lfw/lfw\Aaron_Eckhart\Aaron_Eckhart_0001.jpg

###Markdown

Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.

###Code

import cv2
import matplotlib.pyplot as plt
%matplotlib inline

print(cv2.__version__)

# extract a pre-trained face detector
face_cascade = cv2.CascadeClassifier('/Users/Miriam/Documents/GitHub/deep-learning-pytorch/project-dog-classification/haarcascades/haarcascade_frontalface_alt.xml')
print(face_cascade.empty())  # check if it finds the xml file (False means it loaded)

# load color (BGR) image
img = cv2.imread(human_files[0])
print(type(img))

# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# display the grayscale input to the detector
plt.imshow(gray, cmap='gray')
plt.title('Image')
plt.show()

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()

###Output

4.5.1
False
<class 'numpy.ndarray'>

###Markdown

Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.

###Code

# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face?
Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ The detector fires 105 times across the first 100 human images (a single image can contain more than one detection, so the raw count can exceed 100; nearly every human image has at least one detected face) and 24 times across the first 100 dog images, so at most 24% of the dog images produce a false face detection.

###Code

#from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

number_human_faces = 0
number_human_in_dog_faces = 0

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.

for h in human_files_short:
    img = cv2.imread(h)
    # convert BGR image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # find faces in image and count every detection
    faces = face_cascade.detectMultiScale(gray)
    number_human_faces += len(faces)

print(f'face detections across the first 100 images in human_files: {number_human_faces}')

for d in dog_files_short:
    img = cv2.imread(d)
    # convert BGR image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # find faces in image and count every detection
    faces = face_cascade.detectMultiScale(gray)
    number_human_in_dog_faces += len(faces)

print(f'face detections across the first 100 images in dog_files: {number_human_in_dog_faces}')

###Output

face detections across the first 100 images in human_files: 105
face detections across the first 100 images in dog_files: 24

###Markdown

We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code

### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output

_____no_output_____

###Markdown

--- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code

import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output

Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to C:\Users\Miriam/.cache\torch\hub\checkpoints\vgg16-397923af.pth
100.0%

###Markdown

Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
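One detail is worth pinning down before writing the predictor: the model's forward pass returns raw scores (logits), and exponentiating them does not yield a probability distribution; a softmax does. The top-1 class is unaffected either way, since both maps are monotonic. A minimal sketch of the conversion, assuming a `(1, 1000)` logits tensor from the model above:

###Code

## (sketch) convert raw VGG-16 logits into class probabilities
import torch.nn.functional as F

def class_probabilities(logits):
    # softmax makes the 1000 scores non-negative and sum to 1;
    # argmax (the predicted class index) is unchanged by it
    return F.softmax(logits, dim=1)

###Output

_____no_output_____

###Markdown
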
(IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code

def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array
    '''
    # Open the image and force three RGB channels (handles grayscale files)
    from PIL import Image
    img = Image.open(image).convert('RGB')
    # Resize so the shorter side is 256 pixels
    if img.size[0] > img.size[1]:
        img.thumbnail((10000, 256))
    else:
        img.thumbnail((256, 10000))
    # Crop the central 224x224 region
    left_margin = (img.width-224)/2
    bottom_margin = (img.height-224)/2
    right_margin = left_margin + 224
    top_margin = bottom_margin + 224
    img = img.crop((left_margin, bottom_margin, right_margin, top_margin))
    # Normalize with the ImageNet mean and standard deviation
    img = np.array(img)/255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    img = (img - mean)/std
    # Move color channels to first dimension as expected by PyTorch
    img = img.transpose((2, 0, 1))
    
    return img

from PIL import Image
import torchvision.transforms as transforms
import torch.nn.functional as F

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to 
    predicted ImageNet class for image at specified path
    
    Args:
        img_path: path to an image
        
    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    # Process image and build a batch of size 1
    img = process_image(img_path)
    img = torch.from_numpy(img).type(torch.FloatTensor)
    img.unsqueeze_(0)
    # Keep the input on the same device as the model
    if use_cuda:
        img = img.cuda()
    # Predict top label (eval mode disables dropout; no_grad skips gradients)
    VGG16.eval()
    with torch.no_grad():
        probs = F.softmax(VGG16(img), dim=1)
    top_prob, index = probs.topk(1)
    
    return int(index) # predicted class index

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code

### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    # dog categories occupy ImageNet indices 151-268 (inclusive)
    return 151 <= VGG16_predict(img_path) <= 268

print(int(dog_detector(human_files_short[1])))

###Output

0

###Markdown

(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__ 0 % of the images in `human_files_short` and 95 % of the images in `dog_files_short` have a detected dog.

###Code

### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.

dh, dd = 0, 0

#-#-# Do NOT modify the code above this line. #-#-#

for h in human_files_short:
    dh += int(dog_detector(h))
print(f'Percentage of the images in human_files_short with a detected dog: {dh} %')

for d in dog_files_short:
    dd += int(dog_detector(d))
print(f'Percentage of the images in dog_files_short with a detected dog: {dd} %')

###Output

Percentage of the images in human_files_short with a detected dog: 0 %
Percentage of the images in dog_files_short with a detected dog: 95 %

###Markdown

We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code

### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output

_____no_output_____

###Markdown

--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. *(side-by-side photos: Brittany | Welsh Springer Spaniel)* It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). *(side-by-side photos: Curly-Coated Retriever | American Water Spaniel)* Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. *(side-by-side photos: Yellow Labrador | Chocolate Labrador | Black Labrador)* We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).
You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code

import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes

data_transforms = {
    'train': transforms.Compose([
        transforms.ColorJitter(brightness=.2, contrast=.2, saturation=.2),
        transforms.RandomRotation(45),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], 
                             [0.229, 0.224, 0.225])
    ]),
    'valid': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], 
                             [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], 
                             [0.229, 0.224, 0.225])
    ])
}

dirs = {'train': "/Users/Miriam/Documents/dogImages/dogImages/train",
        'valid': "/Users/Miriam/Documents/dogImages/dogImages/valid", 
        'test': "/Users/Miriam/Documents/dogImages/dogImages/test"}

image_datasets = {x: datasets.ImageFolder(dirs[x], transform=data_transforms[x]) 
                  for x in ['train', 'valid', 'test']}

loaders_scratch = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=64, 
                                                  shuffle=True) 
                   for x in ['train', 'valid', 'test']}

dataset_sizes = {x: len(image_datasets[x]) 
                 for x in ['train', 'valid', 'test']}

class_names = image_datasets['train'].classes

###Output

_____no_output_____

###Markdown

**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:The images are resized and cropped to 224 x 224 pixels and normalized with the ImageNet mean and standard deviation, so that every input has the same dimensions and a distribution similar to the ImageNet data that pre-trained models expect. The training data is additionally augmented with random rotations, random resized crops, horizontal and vertical flips, and jitter in brightness, contrast, and saturation; the validation and test data only receive the deterministic resize, center crop, and normalization. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code

import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # convolutional layer (sees 224x224x3 image tensor)
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        # convolutional layer (sees 112x112x16 tensor after pooling)
        self.conv2 = nn.Conv2d(16, 16, 3, padding=1)
        # convolutional layer (sees 56x56x16 tensor after pooling)
        self.conv3 = nn.Conv2d(16, 32, 3, padding=1)
        # convolutional layer (sees 28x28x32 tensor after pooling)
        self.conv4 = nn.Conv2d(32, 64, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (64 * 14 * 14 -> 4096)
        self.fc1 = nn.Linear(64*14*14, 4096)
        # linear layer (4096 -> 4096)
        self.fc2 = nn.Linear(4096, 4096)
        # linear layer (4096 -> number of classes)
        self.fc3 = nn.Linear(4096, len(class_names))
        # dropout layer (p=0.5)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        ## Define forward behavior
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        # flatten image input (64 channels of 14x14 feature maps after four pooling layers)
        x = x.view(-1, 64*14*14)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # add 2nd hidden layer, with relu activation function
        x = F.relu(self.fc2(x))
        # add dropout layer
        x = self.dropout(x)
        # add final output layer (raw class scores for the 133 breeds)
        x = self.fc3(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()
print(model_scratch)

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output

Net(
  (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv2): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv3): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv4): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (fc1): Linear(in_features=12544, out_features=4096, bias=True)
  (fc2): Linear(in_features=4096, out_features=4096, bias=True)
  (fc3): Linear(in_features=4096, out_features=133, bias=True)
  (dropout): Dropout(p=0.5, inplace=False)
)

###Markdown

__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ The architecture follows the VGG pattern: stacked 3x3 convolutions, each followed by 2x2 max pooling that halves the spatial resolution (224 -> 112 -> 56 -> 28 -> 14) while the channel count grows from 16 to 64, and then two 4096-unit fully connected layers with dropout feeding a final 133-way output layer. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code

import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.0001)

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below.
[Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.

###Code

# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf 
    
    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0
        
        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            ## record the average training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            
        ######################    
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics 
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch, 
            train_loss,
            valid_loss
            ))
        
        ## save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss
            
    # return trained model
    return model


# train the model
model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, 
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

###Output

Epoch: 1 	Training Loss: 0.000000 	Validation Loss: 0.000000
Epoch: 2 	Training Loss: 0.000000 	Validation Loss: 0.000000

###Markdown

(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code

def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss 
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)
            
    print('Test Loss: {:.6f}\n'.format(test_loss))

    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output

_____no_output_____

###Markdown

--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
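The cells that follow use a pre-trained DenseNet-161; whatever backbone is chosen, the general recipe is the same: freeze the pre-trained feature extractor and train only a newly initialized head. A minimal sketch of that recipe, with ResNet-50 as a purely illustrative backbone and 133 output classes for the dog breeds:

###Code

## (sketch) the generic transfer-learning recipe, shown on an illustrative backbone
import torch.nn as nn
import torchvision.models as models

backbone = models.resnet50(pretrained=True)

# freeze every pre-trained weight so only the new head will be trained
for param in backbone.parameters():
    param.requires_grad = False

# replace the final layer with a fresh one for the 133 dog breeds;
# parameters of a newly constructed layer require gradients by default
backbone.fc = nn.Linear(backbone.fc.in_features, 133)

###Output

_____no_output_____

###Markdown
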
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code

## TODO: Specify data loaders

data_transforms = {
    'train': transforms.Compose([
        transforms.ColorJitter(brightness=.2, contrast=.2, saturation=.2),
        transforms.RandomRotation(45),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], 
                             [0.229, 0.224, 0.225])
    ]),
    'valid': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], 
                             [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], 
                             [0.229, 0.224, 0.225])
    ])
}

dirs = {'train': "/Users/Miriam/Documents/dogImages/dogImages/train",
        'valid': "/Users/Miriam/Documents/dogImages/dogImages/valid", 
        'test': "/Users/Miriam/Documents/dogImages/dogImages/test"}

image_datasets = {x: datasets.ImageFolder(dirs[x], transform=data_transforms[x]) 
                  for x in ['train', 'valid', 'test']}

# loaders for the transfer-learning model (used by the train/test cells below)
loaders_transfer = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=64, 
                                                   shuffle=True) 
                    for x in ['train', 'valid', 'test']}

dataset_sizes = {x: len(image_datasets[x]) 
                 for x in ['train', 'valid', 'test']}

class_names = image_datasets['train'].classes

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code

import torchvision.models as models
import torch.nn as nn
from collections import OrderedDict

## TODO: Specify model architecture
model_transfer = models.densenet161(pretrained=True)

# new classifier head: one hidden layer, then one output per breed class
classifier = nn.Sequential(OrderedDict([
                          ('fc1', nn.Linear(model_transfer.classifier.in_features, model_transfer.classifier.out_features)),
                          ('relu', nn.ReLU()),
                          ('fc2', nn.Linear(model_transfer.classifier.out_features, len(class_names))),
                          ('output', nn.LogSoftmax(dim=1))
                          ]))

# Freeze training for all "features" layers
for param in model_transfer.features.parameters():
    param.requires_grad = False

model_transfer.classifier = classifier

# move the whole model, including the new classifier, to the GPU if available
if use_cuda:
    model_transfer = model_transfer.cuda()

###Output

Downloading: "https://download.pytorch.org/models/densenet161-8d451a50.pth" to C:\Users\Miriam/.cache\torch\hub\checkpoints\densenet161-8d451a50.pth
100.0%

###Markdown

__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ A DenseNet-161 pre-trained on ImageNet is used as a fixed feature extractor: its convolutional `features` block is frozen, and its classifier is replaced by a small head (one hidden layer, a ReLU, and a 133-way LogSoftmax output). Because ImageNet already contains many dog categories, the pre-trained features are well suited to distinguishing breeds, and only the small head needs to be trained on this dataset. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code

criterion_transfer = nn.NLLLoss()  # pairs with the LogSoftmax output of the classifier
optimizer_transfer = optim.Adam(model_transfer.classifier.parameters(), lr=0.001)

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code

# train the model
n_epochs = 20  # number of training epochs (any reasonable value works)
model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code

test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code

### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in image_datasets['train'].classes]

def predict_breed_transfer(img_path):
    # load the image (re-using process_image from Step 2) and return the predicted breed
    img = torch.from_numpy(process_image(img_path)).type(torch.FloatTensor)
    img.unsqueeze_(0)
    if use_cuda:
        img = img.cuda()
    model_transfer.eval()
    with torch.no_grad():
        output = model_transfer(img)
    return class_names[int(output.argmax(dim=1))]

###Output

_____no_output_____

###Markdown

--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code

### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if dog_detector(img_path):
        print('Dog detected! Predicted breed: {}'.format(predict_breed_transfer(img_path)))
    elif face_detector(img_path):
        print('Human detected! Resembling dog breed: {}'.format(predict_breed_transfer(img_path)))
    else:
        print('Error: neither a dog nor a human face was detected in this image.')

###Output

_____no_output_____

###Markdown

--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ?
Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. 
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. 
Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. has_human_face_in_human = 0 for human_file in human_files_short: has_face = face_detector(human_file) if has_face == True: has_human_face_in_human += 1 print('How many faces in human files: %.2f %%' % (has_human_face_in_human/len(human_files_short)*100)) has_human_face_in_dog = 0 for dog_file in dog_files_short: has_face = face_detector(dog_file) if has_face == True: has_human_face_in_dog += 1 print('How many faces in dog files: %.2f %%' % (has_human_face_in_dog/len(dog_files_short)*100)) ###Output How many faces in human files: 98.00 % How many faces in dog files: 17.00 % ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. 
ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models from torchvision import transforms # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transf def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image img = Image.open(img_path) # Resize and normalize the image preprocess = transf.Compose([transf.Resize((224, 224)), transf.ToTensor(), transf.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) img_preprocessed = preprocess(img) # Convert into batch for the model img_preprocessed_batch = torch.unsqueeze(img_preprocessed, 0) if use_cuda: img_preprocessed_batch = img_preprocessed_batch.cuda() VGG16.eval() out = VGG16(img_preprocessed_batch) max_indice = torch.argmax(out) return max_indice # predicted class index # Get the index of the class and display it's name index = VGG16_predict('/data/dog_images/train/001.Affenpinscher/Affenpinscher_00001.jpg') with open("./imagenet_idx_to_labels.txt") as f: idx2label = eval(f.read()) print("The image is for class: %s" % (idx2label[int(index)])) ###Output The image is for class: affenpinscher, monkey pinscher, monkey dog ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. 
index_class = VGG16_predict(img_path) # Return true if 151 <= index <= 268 return index_class >= 151 and index_class <= 268 # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ## on the images in human_files_short and dog_files_short. has_human_face_in_human = 0 for human_file in human_files_short: has_face = dog_detector(human_file) if has_face == True: has_human_face_in_human += 1 print('How many dogs in human files: %.2f %%' % (has_human_face_in_human/len(human_files_short)*100)) has_human_face_in_dog = 0 for dog_file in dog_files_short: has_face = dog_detector(dog_file) if has_face == True: has_human_face_in_dog += 1 print('How many dogs in dog files: %.2f %%' % (has_human_face_in_dog/len(dog_files_short)*100)) ###Output How many dogs in human files: 0.00 % How many dogs in dog files: 100.00 % ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! 
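Before building the loaders, it can help to confirm the class-count and imbalance claims above by scanning the training directory (a minimal sketch, assuming the `/data/dog_images/train` layout used elsewhere in this notebook, with one sub-folder per breed): ###Code
import os

# illustrative check, not required by the project: images per breed folder
train_root = '/data/dog_images/train'
class_counts = {d: len(os.listdir(os.path.join(train_root, d)))
                for d in sorted(os.listdir(train_root))}
print('number of classes: %d' % len(class_counts))
print('images per class: min %d, max %d' % (min(class_counts.values()), max(class_counts.values())))
print('random-guess accuracy: %.2f%%' % (100.0 / len(class_counts)))
###Output _____no_output_____ ###Markdown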
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code
import os
import torch
import numpy as np
from torchvision import datasets, transforms

# Work around "OSError: image file is truncated" raised on a few images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
batch_size = 20
num_workers = 0

# Training transforms: resize, then light augmentation (random crop, flip, rotation)
data_transforms = transforms.Compose([transforms.Resize((224, 224)),
                                      transforms.RandomResizedCrop(224),
                                      transforms.RandomHorizontalFlip(),  # randomly flip and rotate
                                      transforms.RandomRotation(10),
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

# Deterministic transforms for validation/test; named eval_transforms so the
# assignment does not shadow the torchvision.transforms module imported above
eval_transforms = transforms.Compose([transforms.Resize((224, 224)),
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

# Get the data
train_data = datasets.ImageFolder('/data/dog_images/train', transform=data_transforms)
valid_data = datasets.ImageFolder('/data/dog_images/valid', transform=eval_transforms)
test_data = datasets.ImageFolder('/data/dog_images/test', transform=eval_transforms)

# Get the loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)

loaders_scratch = {
    'train': train_loader,
    'valid': valid_loader,
    'test': test_loader,
}

# check if CUDA is available
use_cuda = torch.cuda.is_available()
###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why? - Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: I resized to 224×224 (the standard ImageNet input size) and added some augmentation for the training data. About the size, this source gave me a better understanding: https://www.kaggle.com/c/siim-isic-melanoma-classification/discussion/160147 About the augmentation, I read about good and common practices and ended up doing a random horizontal flip and a small rotation (10°). (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # Originally inspired by AlexNet, simplified to two conv/pool blocks
        # to fit in GPU memory (see the answer below)
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=4, stride=4)
        # FC layers
        self.fc1 = nn.Linear(64 * 14 * 14, 4096)
        self.output = nn.Linear(4096, 133)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        ## Define forward behavior
        # Convolutional part
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = self.dropout(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = self.dropout(x)
        # Flatten, then fully connected part
        x = x.view(x.size(0), 64 * 14 * 14)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.output(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()
print(model_scratch)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
Net(
  (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (pool): MaxPool2d(kernel_size=4, stride=4, padding=0, dilation=1, ceil_mode=False)
  (fc1): Linear(in_features=12544, out_features=4096, bias=True)
  (output): Linear(in_features=4096, out_features=133, bias=True)
  (dropout): Dropout(p=0.2)
)
###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ At first I wanted to implement the AlexNet architecture: https://www.nvidia.cn/content/tesla/pdf/machine-learning/imagenet-classification-with-deep-convolutional-nn.pdf I needed to check a Python implementation, because the padding and stride of each layer are not entirely clear to me from the paper. I checked here: https://github.com/Lornatang/AlexNet-PyTorch/blob/master/alexnet_pytorch/model.py and it looks to me like there are some differences from the paper. For example, I don't understand the first conv layer's output size of 64; my reading of the paper is 96. In the end I built something much simpler, because I ran into out-of-memory issues with CUDA :) The accuracy is just 10%, and I could do better, but I also don't want to use the GPU and resources "just for fun", because it consumes a lot of energy (with an impact on the environment). So I tried to limit my experiments. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.001, momentum=0.9)
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
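The serialization pattern behind that link boils down to saving and restoring the model's `state_dict`; here is a minimal sketch of the round trip used inside the training loop below (illustrative only, assuming `model_scratch` is already defined): ###Code
# save only the learned parameters, not the whole module object
torch.save(model_scratch.state_dict(), 'model_scratch.pt')

# restoring requires an instance of the same architecture
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output _____no_output_____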
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() output = model.forward(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss with torch.no_grad(): output = model.forward(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss_min > valid_loss: print("Save a new model") state_dict = model.state_dict() torch.save(state_dict, save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(20, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.866702 Validation Loss: 4.830304 Save a new model Epoch: 2 Training Loss: 4.762878 Validation Loss: 4.694214 Save a new model Epoch: 3 Training Loss: 4.612894 Validation Loss: 4.548871 Save a new model Epoch: 4 Training Loss: 4.528327 Validation Loss: 4.490432 Save a new model Epoch: 5 Training Loss: 4.478998 Validation Loss: 4.419044 Save a new model Epoch: 6 Training Loss: 4.432183 Validation Loss: 4.366788 Save a new model Epoch: 7 Training Loss: 4.403284 Validation Loss: 4.337754 Save a new model Epoch: 8 Training Loss: 4.359342 Validation Loss: 4.304702 Save a new model Epoch: 9 Training Loss: 4.337648 Validation Loss: 4.257293 Save a new model Epoch: 10 Training Loss: 4.292906 Validation Loss: 4.240100 Save a new model Epoch: 11 Training Loss: 4.276239 Validation Loss: 4.229028 Save a new model Epoch: 12 Training Loss: 4.225767 Validation Loss: 4.207777 Save a new model Epoch: 13 Training Loss: 4.206091 Validation Loss: 4.190557 Save a new model Epoch: 14 Training Loss: 4.177770 Validation Loss: 4.202036 Epoch: 15 Training Loss: 4.132602 Validation Loss: 4.167837 Save a new model Epoch: 16 Training Loss: 4.115016 Validation Loss: 4.127340 Save a new model Epoch: 17 Training Loss: 4.055430 Validation Loss: 4.105294 Save a new model Epoch: 18 Training Loss: 4.020059 Validation Loss: 4.073741 Save a new model Epoch: 19 Training Loss: 4.004550 Validation Loss: 4.076248 Epoch: 20 Training Loss: 3.975156 Validation Loss: 4.062428 Save a new model ###Markdown (IMPLEMENTATION) Test the ModelTry out your model 
on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code
def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
Test Loss: 4.031839

Test Accuracy: 10% (89/836)
###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code
## TODO: Specify data loaders
loaders_transfer = loaders_scratch
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.vgg16(pretrained=True)
print(model_transfer)

# Freeze training for all "features" layers
for param in model_transfer.features.parameters():
    param.requires_grad = False

# Change the last layer of the FC
n_features = model_transfer.classifier[6].in_features
last_fc = nn.Linear(n_features, 133)
model_transfer.classifier[6] = last_fc

# check if CUDA is available
use_cuda = torch.cuda.is_available()
if use_cuda:
    model_transfer = model_transfer.cuda()
###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ * Get a CNN pre-trained on the ImageNet dataset; I chose VGG16. * Replace the last layer of the fully connected part, in order to set the output size to match our new classification problem. This architecture is suited for the current problem because the features learned by the CNN are useful for our dog classification.
There are many dog classes among the ImageNet categories, which is very useful for us. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code
import torch.optim as optim

criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.001, momentum=0.9)
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code
# train the model
model_transfer = train(10, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')
###Output
Epoch: 1 	Training Loss: 2.474720 	Validation Loss: 0.747571
Save a new model
Epoch: 2 	Training Loss: 1.461017 	Validation Loss: 0.630401
Save a new model
Epoch: 3 	Training Loss: 1.295029 	Validation Loss: 0.568868
Save a new model
Epoch: 4 	Training Loss: 1.224544 	Validation Loss: 0.548736
Save a new model
Epoch: 5 	Training Loss: 1.178391 	Validation Loss: 0.549557
Epoch: 6 	Training Loss: 1.128479 	Validation Loss: 0.538183
Save a new model
Epoch: 7 	Training Loss: 1.071180 	Validation Loss: 0.497095
Save a new model
Epoch: 8 	Training Loss: 1.074798 	Validation Loss: 0.553495
Epoch: 9 	Training Loss: 1.046458 	Validation Loss: 0.575322
Epoch: 10 	Training Loss: 1.020469 	Validation Loss: 0.483531
Save a new model
###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code
# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.603404

Test Accuracy: 83% (695/836)
###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

from PIL import Image
# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in train_data.classes]

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

def predict_breed_transfer(img_path):
    # Resize and normalize the image
    preprocess_ = transf.Compose([transf.Resize((224, 224)),
                                  transf.ToTensor(),
                                  transf.Normalize(
                                      mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])])
    img = Image.open(img_path)
    img_preprocessed_ = preprocess_(img)
    # Convert into a batch for the model
    img_preprocessed_batch_ = torch.unsqueeze(img_preprocessed_, 0)
    if use_cuda:
        img_preprocessed_batch_ = img_preprocessed_batch_.cuda()
    model_transfer.eval()
    out = model_transfer(img_preprocessed_batch_)
    max_indice = torch.argmax(out)
    return class_names[max_indice]
###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    message = "Hello ! "
    breed = ''
    # Check if human
    is_a_human = face_detector(img_path)
    # Check if dog
    is_a_dog = dog_detector(img_path)
    # If neither dog nor human, display an error message
    if not is_a_human and not is_a_dog:
        return "You are an alien I guess !"
    breed = predict_breed_transfer(img_path)
    if is_a_human and is_a_dog:
        message += 'Well.. you are both a dog and a human..'
    elif is_a_human:
        message += 'You are a human !'
    elif is_a_dog:
        message += 'You are a dog !'
    message += '\n You look like a %s' % breed
    return message
###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) Here are 3 improvements, based on 3 inaccuracies among my 6 images:* We see 2 dogs that are considered both human and dog. This is because I used the simple `face_detector`, and there is a 17% chance of detecting a face in a dog picture. The first improvement would be a better face detector.* There is one dog with a wrong breed prediction. This is not that surprising, because this dog has an unusual physical shape and doesn't really represent its breed. One way to improve this would be to train with more images: the pretrained model helped a lot, but for this kind of edge case we may need much more than the ~8,000 images used for fine-tuning.* The goat is mislabeled as a dog. There are maybe two things to do about that. The obvious one is to train the model on more animals and classes. The other option would be to use a minimum score on the softmax output, and if the score is too low, choose not to label the image and show a message saying "Sorry, we could not recognize this". (Instead of picking a cat, I couldn't resist adding my favorite animal, the goat.)
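The last improvement above — refusing to label low-confidence predictions — could look roughly like the sketch below. This is only an illustration, not part of the submitted solution, and the `min_prob` threshold of 0.5 is an arbitrary assumption that would need tuning on a validation set. ###Code
import torch
import torch.nn.functional as F
import torchvision.transforms as transf
from PIL import Image

def predict_breed_with_threshold(img_path, min_prob=0.5):
    # same preprocessing as predict_breed_transfer above
    preprocess_ = transf.Compose([transf.Resize((224, 224)),
                                  transf.ToTensor(),
                                  transf.Normalize(mean=[0.485, 0.456, 0.406],
                                                   std=[0.229, 0.224, 0.225])])
    batch = preprocess_(Image.open(img_path)).unsqueeze(0)
    if use_cuda:
        batch = batch.cuda()
    model_transfer.eval()
    with torch.no_grad():
        # softmax turns the raw scores into probabilities we can threshold
        probs = F.softmax(model_transfer(batch), dim=1)
    conf, idx = probs.max(dim=1)
    if conf.item() < min_prob:
        return None  # caller can then show "Sorry, we could not recognize"
    return class_names[idx.item()]
###Output _____no_output_____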
###Code
import numpy as np
from PIL import Image
from glob import glob
import matplotlib.pyplot as plt
%matplotlib inline

## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
fun_files = np.array(glob("./algo_testing/*"))

# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(20, 6))
for idx, file_path in enumerate(fun_files):
    ax = fig.add_subplot(2, 3, idx+1, xticks=[], yticks=[])
    file_image = Image.open(file_path)
    plt.imshow(file_image)
    message = run_app(file_path)
    ax.set_title(message)

# Display the results
fig.subplots_adjust(wspace=1.5)
plt.show()
###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements.
If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home diretcory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
h_detections, d_detections = 0, 0 for idx in tqdm(range(len(human_files_short))): if face_detector(human_files_short[idx]): h_detections +=1 for idx in tqdm(range(len(dog_files_short))): if face_detector(dog_files_short[idx]): d_detections +=1 print(f"Human detection rate in human dataset: {h_detections/len(human_files_short):.0%}") print(f"Human detection rate in dog dataset: {d_detections/len(dog_files_short):.0%}") ###Output 100%|██████████| 100/100 [00:01<00:00, 51.56it/s] 100%|██████████| 100/100 [00:20<00:00, 4.34it/s] ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of anotherface detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # check if CUDA is available use_cuda = torch.cuda.is_available() # define VGG16 model VGG16 = models.vgg16(pretrained=True) # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code norm_mean = [0.485, 0.456, 0.406] norm_std = [0.229, 0.224, 0.225] img_size = 224 from PIL import Image import torchvision.transforms as transforms def model_predict(img_path, model): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' model.eval() ## TODO: Complete the function. 
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    ## img = Image.open(img_path).convert('RGB')
    img = Image.open(img_path)
    in_transform = transforms.Compose([
                        transforms.Resize(256),
                        transforms.CenterCrop(img_size),
                        transforms.ToTensor(),
                        transforms.Normalize(norm_mean, norm_std)])
    img = in_transform(img).unsqueeze(0)
    # only move the batch to the GPU when one is available
    if use_cuda:
        img = img.cuda()
    output = model(img)
    return output.data.cpu().numpy().argmax() # predicted class index

model_predict('dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg', VGG16)
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path, model):
    ## TODO: Complete the function.
    dog_detected = False
    if 151 <= model_predict(img_path, model) <= 268:
        dog_detected = True
    return dog_detected # true/false
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
d1, d2 = 0, 0
for idx in tqdm(range(len(human_files_short))):
    if dog_detector(human_files_short[idx], VGG16):
        d1 += 1
for idx in tqdm(range(len(dog_files_short))):
    if dog_detector(dog_files_short[idx], VGG16):
        d2 += 1
print("=============VGG16============")
print(f"Dog detection rate in human dataset: {d1/len(human_files_short):.0%}")
print(f"Dog detection rate in dog dataset: {d2/len(dog_files_short):.0%}")
###Output
100%|██████████| 100/100 [00:02<00:00, 37.65it/s]
100%|██████████| 100/100 [00:03<00:00, 30.79it/s]
###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
# define Resnet50 model Resnet50 = models.resnet50(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: Resnet50 = Resnet50.cuda() ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. d1, d2 = 0, 0 for idx in tqdm(range(len(human_files_short))): if dog_detector(human_files_short[idx], Resnet50): d1 +=1 for idx in tqdm(range(len(dog_files_short))): if dog_detector(dog_files_short[idx], Resnet50): d2 +=1 print("======RESNET50========") print(f"Dog detection rate in human dataset: {d1/len(human_files_short):.0%}") print(f"Dog detection rate in dog dataset: {d2/len(dog_files_short):.0%}") ###Output 100%|██████████| 100/100 [00:01<00:00, 62.03it/s] 100%|██████████| 100/100 [00:02<00:00, 42.64it/s] ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! 
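For intuition before the actual loaders in the next cell: a transform pipeline is simply a callable that maps a PIL image to a tensor, as in this standalone sketch (illustrative only; it reuses the first entry of `dog_files` loaded in Step 0): ###Code
from PIL import Image
from torchvision import transforms as T

# compose resize + crop + tensor conversion, then apply it to one image
demo_tf = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
x = demo_tf(Image.open(dog_files[0]))
print(x.shape)  # torch.Size([3, 224, 224])
###Output _____no_output_____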
###Code import os train_dir = os.path.join('dogImages/train') valid_dir = os.path.join('dogImages/valid') test_dir = os.path.join('dogImages/test') import torch from torchvision import datasets from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes data_transform = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomRotation(20), transforms.ColorJitter(brightness=0.2, contrast=0.2), transforms.ToTensor(), transforms.Normalize(mean=norm_mean, std=norm_std)]) train_data = datasets.ImageFolder(train_dir, transform=data_transform) valid_data = datasets.ImageFolder(valid_dir, transform=data_transform) test_data = datasets.ImageFolder(test_dir, transform=data_transform) # define dataloader parameters batch_size = 32 num_workers=0 # prepare data loaders train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:- The input image is resized by cropping to a 224x224 picture. The size is picked to avoid requiring further modifications of the pretrained models, as ImageNet models are designed and trained for 224x224 size.- As a first step, before encountering serious overfitting, I limited the transformations to : random crop, flip, and colorjitter. I only used brightness and contrast alterations in colorjitter to avoid misleading the network (as the notebook indicates that color might be a significant feature), and incorporated the crop with the resize. If I do encounter issues due to limited transformations, I would consider rotations. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F class Flatten(nn.Module): def forward(self, x): return x.view(x.size()[0], -1) # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.model = nn.Sequential( nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2, 2), nn.Conv2d(16, 32, 5, padding=2), nn.ReLU(), nn.MaxPool2d(2, 2), nn.Conv2d(32, 64, 5, padding=2), nn.ReLU(), nn.MaxPool2d(2, 2), Flatten(), nn.Linear(64 * 28 * 28, 500), nn.Dropout(p=0.2), nn.Linear(500, 133), nn.LogSoftmax(dim=1) ) def forward(self, x): ## Define forward behavior x = self.model(x) return x #-#-# You so NOT have to modify the code below this line. 
#-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() from torchsummary import summary print(summary(model_scratch, (3, 224, 224))) ###Output ---------------------------------------------------------------- Layer (type) Output Shape Param # ================================================================ Conv2d-1 [-1, 16, 224, 224] 448 ReLU-2 [-1, 16, 224, 224] 0 MaxPool2d-3 [-1, 16, 112, 112] 0 Conv2d-4 [-1, 32, 112, 112] 12,832 ReLU-5 [-1, 32, 112, 112] 0 MaxPool2d-6 [-1, 32, 56, 56] 0 Conv2d-7 [-1, 64, 56, 56] 51,264 ReLU-8 [-1, 64, 56, 56] 0 MaxPool2d-9 [-1, 64, 28, 28] 0 Flatten-10 [-1, 50176] 0 Linear-11 [-1, 500] 25,088,500 Dropout-12 [-1, 500] 0 Linear-13 [-1, 133] 66,633 LogSoftmax-14 [-1, 133] 0 ================================================================ Total params: 25,219,677 Trainable params: 25,219,677 Non-trainable params: 0 ---------------------------------------------------------------- Input size (MB): 0.57 Forward/backward pass size (MB): 24.51 Params size (MB): 96.21 Estimated Total Size (MB): 121.29 ---------------------------------------------------------------- None ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ - Started with a very shallow network with conv layers with increasing kernel sizes (along with perfect padding), and max pooling layers. I used a minimal depth of 3 conv+maxpool layers + 2 FC. My goal here is to check how far I can get with a very simple model and estimate if I need more depth / width or change architectures / techniques. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=1e-4) loaders_scratch = dict(train=train_loader, valid=valid_loader, test=test_loader) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
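One bookkeeping detail in the loop below is worth spelling out: the update `avg = avg + ((1 / (i + 1)) * (x - avg))` is an incremental (running) mean over the batch losses. A quick standalone check (illustrative, not part of the project template): ###Code
import numpy as np

losses = [2.0, 1.0, 4.0, 3.0]
avg = 0.0
for i, x in enumerate(losses):
    # same update rule as the train/validation loss tracking below
    avg = avg + ((1 / (i + 1)) * (x - avg))
print(avg, np.mean(losses))  # both print 2.5
###Output _____no_output_____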
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly optimizer.zero_grad() output = model(data) # calculate the loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() ## record the average training loss, using something like train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) # calculate the loss loss = criterion(output, target) ## record the average training loss, using something like valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) # Save Model State on Checkpoint torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(50, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = dict(train=train_loader, valid=valid_loader, test=test_loader) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture # model_transfer = models.densenet161(pretrained=True) model_transfer = models.vgg16(pretrained=True) if use_cuda: model_transfer = model_transfer.cuda() # Freeze all "features" (convolutional) layers so only the new classifier head is trained for param in model_transfer.features.parameters(): param.requires_grad = False # VGG16 n_inputs = model_transfer.classifier[6].in_features # Others # n_inputs = model_transfer.classifier.in_features # add last classifier block (n_inputs -> 133 dog breed classes) # new layers automatically have requires_grad = True # last_layer = nn.Linear(n_inputs, len(classes)) last_layer = nn.Sequential(nn.Linear(n_inputs, 512), nn.ReLU(), nn.Dropout(0.5), nn.Linear(512, 133)) # VGG16 model_transfer.classifier[6] = last_layer # Others # model_transfer.classifier = last_layer # if GPU is available, move the model to GPU if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ - Deeper architectures such as densenet161 might perform very well on the data set, but starting with a simpler one allows us to iterate faster and find suitable techniques / hyperparameters more efficiently. I picked vgg16 since it's a rather shallow model. - As usual, we load the pretrained weights and replace only the classifier layers with custom FC layers that we will train. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() # pass only parameters that still require gradients, so frozen weights are not optimized optimizer_transfer = optim.Adam(filter(lambda p: p.requires_grad, model_transfer.parameters()), lr=1e-4) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
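Before launching the training run below, a quick optional sanity check can confirm that the freeze behaved as intended. This is an added illustrative sketch, not part of the original submission; it uses only the `model_transfer` defined above:
###Code
# Optional sanity check: with the feature extractor frozen, only the
# classifier head's parameters should still require gradients.
n_trainable = sum(p.numel() for p in model_transfer.parameters() if p.requires_grad)
n_total = sum(p.numel() for p in model_transfer.parameters())
print('{:,} trainable of {:,} total parameters'.format(n_trainable, n_total))
###Output
_____no_output_____
###Markdown
If the trainable count is a small fraction of the total, only the new classifier head will be updated during training.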
###Code # train the model model_transfer = train(20, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in train_data.classes] def predict_breed_transfer(img_path, model): # load the image, pre-process it, and return the predicted breed model.eval() img = Image.open(img_path).convert('RGB') # convert('RGB') guards against grayscale/RGBA inputs img = data_transform(img).unsqueeze(0) if use_cuda: img = img.cuda() output = model(img) return class_names[output.data.cpu().numpy().argmax()] # name of the predicted class ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither target = 'Human' img_split = img_path.split('/') if img_split[0] == 'dogImages': target = f"Dog - {img_split[2].split('.')[-1]}" if dog_detector(img_path, VGG16): print(f"Oh my, this is a {predict_breed_transfer(img_path, model_transfer)}! (actually {target})") elif face_detector(img_path): print(f"You look like a {predict_breed_transfer(img_path, model_transfer)} (actually {target})") else: print("Nothing detected!") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images from your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)Test accuracy is about 60% and the results below seem to confirm this. As for possible points of improvement:- consider rebalancing the training set, since the number of examples per breed may vary widely- consider a loss that penalizes wrong predictions more harshly (focal loss, for instance)- add more FC layers to the architecture (with dropout) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:10])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (...
but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/master/project-dog-classification/images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](#step0): Import Datasets* [Step 1](#step1): Detect Humans* [Step 2](#step2): Detect Dogs* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](#step5): Write your Algorithm* [Step 6](#step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code # download the dataset and other files ! curl -sS https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip > dogImages.zip && unzip -q dogImages.zip && rm dogImages.zip >/dev/null 2>&1 ! curl -sS https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip > lfw.zip && unzip -q lfw.zip && rm lfw.zip >/dev/null 2>&1 ! curl -s https://codeload.github.com/udacity/deep-learning-v2-pytorch/tar.gz/master | tar -xz --strip=2 deep-learning-v2-pytorch-master/project-dog-classification/haarcascades/ >/dev/null 2>&1 ! curl -s https://codeload.github.com/udacity/deep-learning-v2-pytorch/tar.gz/master | tar -xz --strip=2 deep-learning-v2-pytorch-master/project-dog-classification/images/ >/dev/null 2>&1 # install pytorch import sys try: import torch except: import os os.environ['TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD']='2000000000' # http://pytorch.org/ from os.path import exists from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag()) cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/' accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu' !{sys.executable} -m pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision >/dev/null # PIL version installed in colab (5.3) is not working properly # downgrade it to 4.2 !{sys.executable} -m pip uninstall -y -q pillow !{sys.executable} -m pip install -q pillow==4.2 import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.'
% len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.axis('off') plt.show(); ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. 
__Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`.
Thus, in order to check whether an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning.
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You so NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
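One hedged sketch of such loaders, illustrative rather than the required solution: it reuses the `dogImages/` layout from Step 0 and applies the ImageNet normalization statistics that pretrained torchvision models expect. The `data_transfer` dict matches the name the `predict_breed_transfer` template references later in this notebook; the crop size and batch size are arbitrary choices:
###Code
## A possible implementation sketch, assuming the dogImages/ layout from Step 0
import torch
from torchvision import datasets, transforms

normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))  # ImageNet statistics
train_tf = transforms.Compose([transforms.RandomResizedCrop(224),
                               transforms.RandomHorizontalFlip(),
                               transforms.ToTensor(), normalize])
eval_tf = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224),
                              transforms.ToTensor(), normalize])

data_transfer = {split: datasets.ImageFolder('dogImages/' + split,
                                             transform=(train_tf if split == 'train' else eval_tf))
                 for split in ('train', 'valid', 'test')}
loaders_transfer = {split: torch.utils.data.DataLoader(ds, batch_size=20, shuffle=(split == 'train'))
                    for split, ds in data_transfer.items()}
###Output
_____no_output_____
###Markdown
The TODO cell below remains for your own implementation.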
###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model (uncomment the line below and choose n_epochs) # model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/master/project-dog-classification/images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither pass ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images from your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements.
If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 0 total human images. There are 0 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm from functools import reduce human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. 
#-#-# def percent_human(files, detector): detected = list(map(detector, files)) return sum(map(lambda x: 1 if x else 0, detected)) / len(detected) print("percent of humans detected to be humans: {}".format(percent_human(human_files_short, face_detector))) print("percent of dogs detected to be humans: {}".format(percent_human(dog_files_short, face_detector))) ###Output percent of humans detected to be humans: 0.98 percent of dogs detected to be humans: 0.17 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code import time from os import listdir cascades = { cascade:file for [cascade, file] in map(lambda x : [x[24:-4], 'haarcascades/' + x], listdir('haarcascades')) } def detect_object_cascade(cascade_file, img_path): cascade = cv2.CascadeClassifier(cascade_file) # build the classifier from the requested file (the recorded run below accidentally hard-coded the frontalface cascade here, which is why every cascade reports identical numbers) img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) objects = cascade.detectMultiScale(gray) return len(objects) > 0 def percent_detected(img_files, detector): detected = list(map(detector, img_files)) return sum(map(lambda x: 1 if x else 0, detected)) / len(detected) def rate_detector(detection_method, detector): start = time.time() print("rating detection method {}".format(detection_method)) print("percent of human images detected: {}".format(percent_detected(human_files_short, detector))) print("percent of dogs images detected: {}".format(percent_detected(dog_files_short, detector))) print("took {} seconds".format(time.time() - start)) print("=========") def cascade_detector(cascade_file): return lambda x : detect_object_cascade(cascade_file, x) for cascade_name, cascade_file in cascades.items(): rate_detector(cascade_name, cascade_detector(cascade_file)) ###Output rating detection method alt2 percent of human images detected: 0.98 percent of dogs images detected: 0.17 took 92.338787317276 seconds ========= rating detection method alt_tree percent of human images detected: 0.98 percent of dogs images detected: 0.17 took 90.6826696395874 seconds ========= rating detection method default percent of human images detected: 0.98 percent of dogs images detected: 0.17 took 90.64785432815552 seconds ========= rating detection method alt percent of human images detected: 0.98 percent of dogs images detected: 0.17 took 90.48003697395325 seconds ========= ###Markdown ConclusionNone of them was any better than any other at detection, and the difference in run time was negligible and could have been due to any number of factors. --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() print(VGG16) ###Output VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace=True) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace=True) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace=True) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace=True) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace=True) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace=True) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace=True) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace=True) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace=True) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace=True) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace=True) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace=True) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace=True) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (avgpool): AdaptiveAvgPool2d(output_size=(7, 7)) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace=True) (2): Dropout(p=0.5, inplace=False) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace=True) (5): Dropout(p=0.5, inplace=False) (6): Linear(in_features=4096, out_features=1000, bias=True) ) ) ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). 
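As context for the implementation below, the canonical preprocessing for ImageNet-pretrained torchvision models resizes the shorter side, center-crops to 224x224, converts to a tensor, and normalizes each channel with the ImageNet statistics. A minimal hedged sketch of one common variant (the example path is the one quoted above and assumes the dog dataset is present):
###Code
from PIL import Image
import torchvision.transforms as transforms

# Common ImageNet preprocessing: resize, center-crop, to-tensor, per-channel normalize.
imagenet_preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])

img = Image.open('dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg').convert('RGB')
batch = imagenet_preprocess(img).unsqueeze(0)  # add the batch dimension -> [1, 3, 224, 224]
print(batch.shape)
###Output
_____no_output_____
###Markdown
The `classify` helper below uses a slight variant, resizing directly to 224x224 (which stretches rather than crops) before normalizing with the same statistics.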
###Code from PIL import Image import torchvision.transforms as transforms def classify(model, img_path): if use_cuda: model = model.cuda() for param in model.parameters(): param.requires_grad_(False) model.eval() original_image = Image.open(img_path).convert('RGB') in_transform = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) image = in_transform(original_image)[:3,:,:].unsqueeze(0) if use_cuda: image = image.cuda() output = model(image) index = torch.max(output, 1)[1][0].item() return index def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' return classify(VGG16, img_path) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check whether an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): return 151 <= VGG16_predict(img_path) <= 268 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code rate_detector('VGG16 based dog detection', dog_detector) ###Output rating detection method VGG16 based dog detection percent of human images detected: 0.0 percent of dogs images detected: 1.0 took 8.139695405960083 seconds ========= ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code import torch import torchvision.models as models use_cuda = torch.cuda.is_available() some_models = { 'resnet18' : models.resnet18(pretrained=True), 'alexnet' : models.alexnet(pretrained=True), 'squeezenet' : models.squeezenet1_0(pretrained=True), 'vgg16' : models.vgg16(pretrained=True), 'densenet' : models.densenet161(pretrained=True), 'inception' : models.inception_v3(pretrained=True), # Apparently some environments are missing a number of models: AttributeError: module 'torchvision.models' has no attribute 'googlenet' # 'googlenet' : models.googlenet(pretrained=True), # 'shufflenet' : models.
shufflenet_v2_x1_0(pretrained=True), # 'mobilenet' : models.mobilenet_v2(pretrained=True), # 'resnext50_32x4d' : models.resnext50_32x4d(pretrained=True), # 'wide_resnet50_2' : models.wide_resnet50_2(pretrained=True), # 'mnasnet' : models.mnasnet1_0(pretrained=True) } def generate_dog_detector(model): return lambda x: 151 <= classify(model, x) <= 268 for model_name, model in some_models.items(): rate_detector("{} based dog detection".format(model_name), generate_dog_detector(model)) ###Output rating detection method resnet18 based dog detection percent of human images detected: 0.01 percent of dogs images detected: 1.0 took 3.8545022010803223 seconds ========= rating detection method alexnet based dog detection percent of human images detected: 0.01 percent of dogs images detected: 0.99 took 2.891031265258789 seconds ========= rating detection method squeezenet based dog detection percent of human images detected: 0.03 percent of dogs images detected: 1.0 took 3.1622188091278076 seconds ========= rating detection method vgg16 based dog detection percent of human images detected: 0.0 percent of dogs images detected: 1.0 took 7.586473226547241 seconds ========= rating detection method densenet based dog detection percent of human images detected: 0.0 percent of dogs images detected: 1.0 took 14.627135992050171 seconds ========= rating detection method inception based dog detection ###Markdown Conclusionvgg16 and densenet had the same accuracy, but vgg16 was approximately twice as fast. Looks like inception wants a larger base image according to the torchvision [docs](https://pytorch.org/docs/master/torchvision/models.html#inception-v3).> Important: In contrast to the other models the inception_v3 expects tensors with a size of N x 3 x 299 x 299, so ensure your images are sized accordingly.I don't think it's worth further investigation at this time. vgg16 seems reasonable. --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning.
Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes base_dir = '/data/dog_images' train_transform = transforms.Compose([transforms.Resize(300), transforms.RandomResizedCrop(256), transforms.RandomRotation(30), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) test_transform = transforms.Compose([transforms.Resize(300), transforms.CenterCrop(256), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) train_data = datasets.ImageFolder(base_dir + '/train', transform=train_transform) validation_data = datasets.ImageFolder(base_dir + '/valid', transform=test_transform) test_data = datasets.ImageFolder(base_dir + '/test', transform=test_transform) train_loader = torch.utils.data.DataLoader(train_data, batch_size=10, shuffle=True) validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=10, shuffle=True) test_loader = torch.utils.data.DataLoader(test_data, batch_size=10, shuffle=True) loaders_scratch = { 'train': train_loader, 'valid': validation_loader, 'test': test_loader } ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:I initially picked a larger image size of 512 x 512, because it looks like we don't have that many files, and so I was favoring larger images over training speed in hopes of getting richer training. Also, a power-of-two size should halve cleanly through a good number of pooling layers. That did not work out, as I kept running out of memory, so I settled on 256 x 256. I'm resizing then center cropping for test data. Resizing first should mean that our crop is a significant portion of the image regardless of original size. Center cropping should allow us to throw away some background noise.For training, I decided to use RandomResizedCrop because it should do mostly the same thing while expanding the dataset through some randomness. I decided to augment the dataset for training in order to extend the training material, at least in part because it looks like we don't have that many files. I chose rotation of up to 30 degrees as I expect most images to be relatively vertical, but to have dog heads cocked occasionally.
I chose to have horizontal flips since dogs are mostly symmetric, and it should thus effectively double the input data.I went back and forth on this stuff a bit more as I expanded my research, but ended up mostly back where I started from. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() ## Define layers of a CNN conv_depths = [[3, 16], [16, 32], [32, 64], [64, 128], [128, 256]] self.conv_layers = nn.ModuleList( [nn.Conv2d(in_layers, out_layers, 3, padding = 1) for in_layers, out_layers in conv_depths]) self.batch_norm_layers = nn.ModuleList( [nn.BatchNorm2d(out_layers) for in_layers, out_layers in conv_depths]) self.pool = nn.MaxPool2d(2,2) input_xy = 256 num_pools = len(conv_depths) xy_redux = 2**num_pools output_xy = int(input_xy / xy_redux) conv_depth = conv_depths[-1][1] self.global_avg_pool = nn.AvgPool2d(output_xy) # without the global average pooling layer the next line is true # self.linear_input = conv_depth * output_xy * output_xy self.linear_input = conv_depth linear_dims = [[self.linear_input, 4096], [4096, 2048], [2048, 1024]] self.middle_linear_layers = nn.ModuleList( [nn.Linear(in_layers, out_layers) for in_layers, out_layers in linear_dims]) self.last_layer = nn.Linear(linear_dims[-1][1], 133) self.dropout = nn.Dropout(0.2) def forward(self, x): for i, conv_layer in enumerate(self.conv_layers): x = self.batch_norm_layers[i](self.pool(F.relu(conv_layer(x)))) x = self.global_avg_pool(x) x = x.view(-1, self.linear_input) x = self.dropout(x) for layer in self.middle_linear_layers: x = self.dropout(F.relu(layer(x))) return self.last_layer(x) #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() print(model_scratch) ###Output Net( (conv_layers): ModuleList( (0): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (4): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) ) (batch_norm_layers): ModuleList( (0): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (global_avg_pool): AvgPool2d(kernel_size=8, stride=8, padding=0) (middle_linear_layers): ModuleList( (0): Linear(in_features=256, out_features=4096, bias=True) (1): Linear(in_features=4096, out_features=2048, bias=True) (2): Linear(in_features=2048, out_features=1024, bias=True) ) (last_layer): Linear(in_features=1024, out_features=133, bias=True) (dropout): Dropout(p=0.2, inplace=False) ) ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__ First, I wanted to be able to change the sizes and number of my layers without having to recalculate everything, so I made it based on some configuration.Next, I was inspired by the vgg16. It had a bit of a larger problem space than just dog breed classification, but it seemed to do a pretty good job on dog breed classification. I decided to mimic the idea of multiple convolutional layers per max pooling layer and the 3 linear layers at the end. Since we have a much smaller data set, I figure we have a smaller risk of overfitting and so chose a smaller dropout.I found a number of hardware limitations. When I tried to make the convolutional layers too large, it took too long just to create the net. Then I ended up running out of RAM, and started scaling things back. I also seemed to get a truncated file issue so I followed https://github.com/keras-team/keras/issues/5475 .My scaled back solution wasn't working. I went through various articles. I found a few implementations of a dog breed classifier in Keras. This one looked useful https://raw.githubusercontent.com/poojasriravichandran/Dog_App/master/dog_app.ipynb . I found it was using this global pooling layer. That led me to a number of articles, https://keras.io/layers/pooling/ https://discuss.pytorch.org/t/global-average-pooling-in-pytorch/6721 https://alexisbcook.github.io/2017/global-average-pooling-layers-for-object-localization/ <-- by one of our instructors. But the global averaging didn't crack the problem.After a bunch more research and banging my head against the wall, I found https://medium.com/@uijaz59/dog-breed-classification-using-pytorch-207cf27c2031 . They were doing batch normalization. I read up on that https://towardsdatascience.com/batch-normalization-in-neural-networks-1ac91516821c and added it to my network. BAM! Magic bullet to get from 1% accuracy to 47%.Other things I tried: various batch sizes, various numbers and sizes of convolutional and dense layers, a couple different optimizers. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr = 0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code import time def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() start = time.time() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss += (loss.data - train_loss) / (batch_idx + 1) ###################### # validate the model # ###################### model.eval() correct = 0.0 total = 0.0 with torch.no_grad(): for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss += (loss.data - valid_loss) / (batch_idx + 1) pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) # print training/validation statistics print('Epoch: {} Time: {} Training Loss: {:.3f} Validation Loss: {:.3f} Val accuracy {:.3f} Correct {:.0f} Total {:.0f}'.format( epoch, time.time() - start, train_loss, valid_loss, correct / total, correct, total )) ## save the model if validation loss has decreased if valid_loss < valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) valid_loss_min = valid_loss torch.save(model.state_dict(), save_path) # return trained model return model # train the model from workspace_utils import active_session from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True with active_session(): model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0.
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 1.914108 Test Accuracy: 53% (449/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code base_dir = '/data/dog_images' train_transform = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomRotation(30), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) test_transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) train_data = datasets.ImageFolder(base_dir + '/train', transform=train_transform) validation_data = datasets.ImageFolder(base_dir + '/valid', transform=test_transform) test_data = datasets.ImageFolder(base_dir + '/test', transform=test_transform) train_loader = torch.utils.data.DataLoader(train_data, batch_size=10, shuffle=True) validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=10, shuffle=True) test_loader = torch.utils.data.DataLoader(test_data, batch_size=10, shuffle=True) loaders_transfer = { 'train': train_loader, 'valid': validation_loader, 'test': test_loader } ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn model_transfer = models.vgg16(pretrained=True) for param in model_transfer.parameters(): param.requires_grad = False model_transfer.classifier[6] = nn.Linear(model_transfer.classifier[6].in_features, 133) if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.
__Answer:__ From what we found earlier, it looks like VGG16 already did a pretty good job at classifying dog breeds. So, I began by following what we did in the transfer learning solution for flowers, but decided I only needed to retrain and replace the last layer. Basically, the network is already good at picking out dog breeds; only the output classes needed to change, so it shouldn't need a lot of manipulation. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code import torch.optim as optim criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(model_transfer.classifier[6].parameters(), lr = 0.0001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model from workspace_utils import active_session from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True with active_session(): model_transfer = train(100, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt', map_location=torch.device('cpu'))) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.605507 Test Accuracy: 83% (700/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e.
a name can be accessed like class_names[0] #class_names = [item[4:].replace("_", " ") for item in train_data.classes] class_names = ['Affenpinscher', 'Afghan hound', 'Airedale terrier', 'Akita', 'Alaskan malamute', 'American eskimo dog', 'American foxhound', 'American staffordshire terrier', 'American water spaniel', 'Anatolian shepherd dog', 'Australian cattle dog', 'Australian shepherd', 'Australian terrier', 'Basenji', 'Basset hound', 'Beagle', 'Bearded collie', 'Beauceron', 'Bedlington terrier', 'Belgian malinois', 'Belgian sheepdog', 'Belgian tervuren', 'Bernese mountain dog', 'Bichon frise', 'Black and tan coonhound', 'Black russian terrier', 'Bloodhound', 'Bluetick coonhound', 'Border collie', 'Border terrier', 'Borzoi', 'Boston terrier', 'Bouvier des flandres', 'Boxer', 'Boykin spaniel', 'Briard', 'Brittany', 'Brussels griffon', 'Bull terrier', 'Bulldog', 'Bullmastiff', 'Cairn terrier', 'Canaan dog', 'Cane corso', 'Cardigan welsh corgi', 'Cavalier king charles spaniel', 'Chesapeake bay retriever', 'Chihuahua', 'Chinese crested', 'Chinese shar-pei', 'Chow chow', 'Clumber spaniel', 'Cocker spaniel', 'Collie', 'Curly-coated retriever', 'Dachshund', 'Dalmatian', 'Dandie dinmont terrier', 'Doberman pinscher', 'Dogue de bordeaux', 'English cocker spaniel', 'English setter', 'English springer spaniel', 'English toy spaniel', 'Entlebucher mountain dog', 'Field spaniel', 'Finnish spitz', 'Flat-coated retriever', 'French bulldog', 'German pinscher', 'German shepherd dog', 'German shorthaired pointer', 'German wirehaired pointer', 'Giant schnauzer', 'Glen of imaal terrier', 'Golden retriever', 'Gordon setter', 'Great dane', 'Great pyrenees', 'Greater swiss mountain dog', 'Greyhound', 'Havanese', 'Ibizan hound', 'Icelandic sheepdog', 'Irish red and white setter', 'Irish setter', 'Irish terrier', 'Irish water spaniel', 'Irish wolfhound', 'Italian greyhound', 'Japanese chin', 'Keeshond', 'Kerry blue terrier', 'Komondor', 'Kuvasz', 'Labrador retriever', 'Lakeland terrier', 'Leonberger', 'Lhasa apso', 'Lowchen', 'Maltese', 'Manchester terrier', 'Mastiff', 'Miniature schnauzer', 'Neapolitan mastiff', 'Newfoundland', 'Norfolk terrier', 'Norwegian buhund', 'Norwegian elkhound', 'Norwegian lundehund', 'Norwich terrier', 'Nova scotia duck tolling retriever', 'Old english sheepdog', 'Otterhound', 'Papillon', 'Parson russell terrier', 'Pekingese', 'Pembroke welsh corgi', 'Petit basset griffon vendeen', 'Pharaoh hound', 'Plott', 'Pointer', 'Pomeranian', 'Poodle', 'Portuguese water dog', 'Saint bernard', 'Silky terrier', 'Smooth fox terrier', 'Tibetan mastiff', 'Welsh springer spaniel', 'Wirehaired pointing griffon', 'Xoloitzcuintli', 'Yorkshire terrier'] def predict_breed_transfer(img_path): return class_names[classify(model_transfer, img_path)] ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. 
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### Feel free to use as many code cells as needed. from enum import Enum class Detected_Entity(Enum): BAMBOOZLE = 0 HOOMAN = 1 DOGGO = 2 def detect_entity(img_path): if dog_detector(img_path): return Detected_Entity.DOGGO elif face_detector(img_path): return Detected_Entity.HOOMAN else: return Detected_Entity.BAMBOOZLE greetings = { Detected_Entity.BAMBOOZLE: 'Hello fren,', Detected_Entity.DOGGO: 'Hello doggo fren,', Detected_Entity.HOOMAN: 'Hello hooman fren,' } intro = { Detected_Entity.BAMBOOZLE: 'this do me a heckin bamboozle, is it a snek?', Detected_Entity.DOGGO: 'I think you is a ...', Detected_Entity.HOOMAN: 'I think you look like a ...' } def show_image(img_path): img = cv2.imread(img_path) cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(cv_rgb) plt.show() def run_app(img_path): entity = detect_entity(img_path) print(greetings[entity]) show_image(img_path) print(intro[entity]) if entity != Detected_Entity.BAMBOOZLE: print(predict_breed_transfer(img_path)) print('') print('') print('') ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__I think it did about what I expected.Points for improvement:1. More training data2. We could use batch normalization on the linear layers and a global pooling layer between the convolutional layers and the linear layers3. VGG16 seemed pretty good already at detecting dog breeds, and we lost some of that by retraining at all; we could just map the ImageNet breed indices to our indices instead of retraining4. We could experiment with momentum. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below from os import listdir from os.path import isfile, join folder = 'images/random' for file in [f for f in listdir(folder) if isfile(join(folder, f)) and f[:1] != '.']: run_app(folder + '/' + file) #for file in np.hstack((human_files[:3], dog_files[:3])): # run_app(file) ###Output Hello hooman fren, ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. 
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("data/lfw/*/*")) dog_files = np.array(glob("data/dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. 
- What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ Human faces detected 96.0%Human faces detected in dog images 18.0% ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. human_num = np.average([face_detector(img) for img in tqdm(human_files_short)])*100 dog_num = np.average([face_detector(img) for img in tqdm(dog_files_short)])*100 print("Human faces detected {}%".format(human_num)) print("Human faces detected in dog images {}%".format(dog_num)) ###Output 100%|██████████| 100/100 [00:02<00:00, 45.53it/s] 100%|██████████| 100/100 [00:07<00:00, 12.66it/s] ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image # Load image img = Image.open(img_path).convert('RGB') # Transform img_transform = transforms.Compose([transforms.Resize(290),transforms.CenterCrop(256),transforms.ToTensor(),transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))]) # Move to GPU only when CUDA is available img_tensor = img_transform(img).unsqueeze(0) if use_cuda: img_tensor = img_tensor.cuda() predict = VGG16(img_tensor) predict = predict.cpu() index = predict.data.numpy().argmax() return index # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): index = VGG16_predict(img_path) return 151 <= index <= 268 # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ Dogs detected in dog images 97.0%Dogs detected in human images 0.0% ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. human_num2 = np.average([dog_detector(img) for img in tqdm(human_files_short)])*100 dog_num2 = np.average([dog_detector(img) for img in tqdm(dog_files_short)])*100 print("Dog faces detected {}%".format(dog_num2)) print("Dog faces detected in human images {}%".format(human_num2)) ###Output 100%|██████████| 100/100 [00:01<00:00, 91.36it/s] 100%|██████████| 100/100 [00:01<00:00, 55.06it/s] ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed.
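### A minimal sketch for this optional task, assuming the same preprocessing as
### VGG16_predict above and that human_files_short / dog_files_short from
### Question 1 are still in scope: the 151-268 dog-index check, backed by a
### pre-trained ResNet-50 instead of VGG-16.
resnet50 = models.resnet50(pretrained=True)
if use_cuda:
    resnet50 = resnet50.cuda()
resnet50.eval()

def resnet50_predict(img_path):
    # load and preprocess the image the same way as for VGG-16
    img = Image.open(img_path).convert('RGB')
    img_transform = transforms.Compose([transforms.Resize(290),
                                        transforms.CenterCrop(256),
                                        transforms.ToTensor(),
                                        transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))])
    img_tensor = img_transform(img).unsqueeze(0)
    if use_cuda:
        img_tensor = img_tensor.cuda()
    with torch.no_grad():
        output = resnet50(img_tensor)
    # flat argmax over the (1, 1000) score tensor gives the predicted class index
    return output.cpu().numpy().argmax()

def resnet50_dog_detector(img_path):
    # ImageNet dog categories occupy indices 151-268 (inclusive)
    return 151 <= resnet50_predict(img_path) <= 268

human_pct = np.average([resnet50_dog_detector(img) for img in tqdm(human_files_short)])*100
dog_pct = np.average([resnet50_dog_detector(img) for img in tqdm(dog_files_short)])*100
print("Dogs detected in dog images {}%".format(dog_pct))
print("Dogs detected in human images {}%".format(human_pct))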
###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code import os from torchvision import datasets from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ##train transforms train_img_transform = transforms.Compose([transforms.RandomResizedCrop(224),transforms.RandomHorizontalFlip(p=0.5),transforms.ToTensor(),transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225))]) ##test and validation transforms valid_test_img_transform = transforms.Compose([transforms.Resize(256),transforms.CenterCrop(224),transforms.ToTensor(),transforms.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225))]) ##data location for dogs train_data = datasets.ImageFolder('data/dogImages/train',transform=train_img_transform) valid_data = datasets.ImageFolder('data/dogImages/valid',transform=valid_test_img_transform) test_data = datasets.ImageFolder('data/dogImages/test',transform=valid_test_img_transform) ##load data train = torch.utils.data.DataLoader(train_data,batch_size = 16,shuffle=True,num_workers=24) valid = torch.utils.data.DataLoader(valid_data,batch_size = 16,shuffle=True,num_workers=24) test = torch.utils.data.DataLoader(test_data,batch_size = 16,shuffle=True,num_workers=24) loaders_scratch = { 'train': train, 'valid': valid, 'test': test } ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:Train: RandomResizedCrop(224) takes a random 224x224 crop from the image; RandomHorizontalFlip(p=0.5) randomly flips images horizontally; ToTensor converts the image to a tensor; Normalize standardises the colour channels.Testing and validation: Resize(256) resizes the image so its shorter side is 256; CenterCrop(224) takes the central 224x224 crop; ToTensor and Normalize are applied as in training, with no random augmentation so that evaluation stays deterministic.Resizing and cropping - VGG16 uses 224x224 inputs, and resizing and cropping can also help with accuracy.Flipping - helps generalisation (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code import torch.nn as nn import torch.nn.functional as F num_breeds = 133 # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3,16,kernel_size=3,stride=1,padding=1) self.norm2d1 = nn.BatchNorm2d(16) self.conv2 = nn.Conv2d(16,32,kernel_size=3,stride=1,padding=1) self.norm2d2 = nn.BatchNorm2d(32) self.conv3 = nn.Conv2d(32,64,kernel_size=3,stride=1,padding=1) self.norm2d3 = nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(64,128,kernel_size=3,stride=1,padding=1) self.norm2d4 = nn.BatchNorm2d(128) self.conv5 = nn.Conv2d(128,256,kernel_size=3,stride=1,padding=1) self.norm2d5 = nn.BatchNorm2d(256) # pooling layer self.pool = nn.MaxPool2d(kernel_size=2,stride=2,padding=0) # linear layer self.fc1 = nn.Linear(256*7*7,256) self.fc2 = nn.Linear(256,num_breeds) # dropout layer self.dropout = nn.Dropout(0.5) def forward(self, x): ## Define forward behavior x = self.pool(F.relu(self.norm2d1(self.conv1(x)))) x = self.pool(F.relu(self.norm2d2(self.conv2(x)))) x = self.pool(F.relu(self.norm2d3(self.conv3(x)))) x = self.pool(F.relu(self.norm2d4(self.conv4(x)))) x = self.pool(F.relu(self.norm2d5(self.conv5(x)))) x = x.view(-1, 256 * 7 * 7) x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() model_scratch = nn.DataParallel(model_scratch) print(model_scratch) # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output DataParallel( (module): Net( (conv1): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (norm2d1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (norm2d2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (norm2d3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv4): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (norm2d4): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv5): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (norm2d5): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (fc1): Linear(in_features=12544, out_features=256, bias=True) (fc2): Linear(in_features=256, out_features=133, bias=True) (dropout): Dropout(p=0.5, inplace=False) ) ) ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
This is then passed through the layer with a filter size of 3, stride size of 1 and padding of 1 giving a convolutional layer size of 223x223x16. Pooled this is 112x112x16.2nd layer receives a 112x112x16 tensor and produces a 111x111x32 convolutional layer. Pooled this is 56x56x32.3rd layer receives a 56x56x32 tensor and produces a 55x55x64 convolutional layer. Pooled this is 28x28x64.4th layer receives a 28x28x64 tensor and produces a 27x27x128 convolutional layer. Pooled this is 14x14x128.5th layer receives a 14x14x128 tensor and produces a 7x7x7256 convolutional layer. Pooled this is 7x7x256.The data is normalised between layers to make all the input features the same scale. All layers are normalised to prevent the training process slowing down and avoid internal covariate shift.Pooling is used to reduce the size of the parameters between each layer. This helps control overfitting and computation.Drop out is used to remove nodes and prevent overfitting. It can help reduce co-dependency between nodes. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss last_valid_loss = None if last_valid_loss is not None: valid_loss_min = last_valid_loss else: valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: torch.save(model.state_dict(), save_path) print('Validation loss decreased') valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(200, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.903283 Validation Loss: 4.888394 Validation loss decreased Epoch: 2 Training Loss: 4.882251 Validation Loss: 4.863041 Validation loss decreased Epoch: 3 Training Loss: 4.852262 Validation Loss: 4.827380 Validation loss decreased Epoch: 4 Training Loss: 4.800028 Validation Loss: 4.699440 Validation loss decreased Epoch: 5 Training Loss: 4.758493 Validation Loss: 4.580768 Validation loss decreased Epoch: 6 Training Loss: 4.722195 Validation Loss: 4.556592 Validation loss decreased Epoch: 7 Training Loss: 4.709281 Validation Loss: 4.494385 Validation loss decreased Epoch: 8 Training Loss: 4.695731 Validation Loss: 4.504717 Epoch: 9 Training Loss: 4.693161 Validation Loss: 4.465738 Validation loss decreased Epoch: 10 Training Loss: 4.679605 Validation Loss: 4.484573 Epoch: 11 Training Loss: 4.662796 Validation Loss: 4.412214 Validation loss decreased Epoch: 12 Training Loss: 4.669291 Validation Loss: 4.445005 Epoch: 13 Training Loss: 4.654486 Validation Loss: 4.404297 Validation loss decreased Epoch: 14 Training Loss: 4.640372 Validation Loss: 4.423028 Epoch: 15 Training Loss: 4.618795 Validation Loss: 4.377374 Validation loss decreased Epoch: 16 Training Loss: 4.637717 Validation Loss: 4.413249 Epoch: 17 Training Loss: 4.604815 Validation Loss: 4.365817 Validation loss decreased Epoch: 18 Training Loss: 4.590322 Validation Loss: 4.319419 Validation loss decreased Epoch: 19 Training Loss: 4.593138 Validation Loss: 4.318778 Validation loss decreased Epoch: 20 Training Loss: 4.575171 Validation Loss: 4.289941 
Validation loss decreased Epoch: 21 Training Loss: 4.573715 Validation Loss: 4.253323 Validation loss decreased Epoch: 22 Training Loss: 4.575677 Validation Loss: 4.234813 Validation loss decreased Epoch: 23 Training Loss: 4.563436 Validation Loss: 4.362034 Epoch: 24 Training Loss: 4.545961 Validation Loss: 4.224214 Validation loss decreased Epoch: 25 Training Loss: 4.535799 Validation Loss: 4.340408 Epoch: 26 Training Loss: 4.522401 Validation Loss: 4.218665 Validation loss decreased Epoch: 27 Training Loss: 4.521233 Validation Loss: 4.251316 Epoch: 28 Training Loss: 4.503085 Validation Loss: 4.254240 Epoch: 29 Training Loss: 4.504185 Validation Loss: 4.099747 Validation loss decreased Epoch: 30 Training Loss: 4.475391 Validation Loss: 4.123901 Epoch: 31 Training Loss: 4.472204 Validation Loss: 4.158393 Epoch: 32 Training Loss: 4.449519 Validation Loss: 4.063731 Validation loss decreased Epoch: 33 Training Loss: 4.458735 Validation Loss: 4.122714 Epoch: 34 Training Loss: 4.449875 Validation Loss: 4.053971 Validation loss decreased Epoch: 35 Training Loss: 4.450149 Validation Loss: 4.110324 Epoch: 36 Training Loss: 4.439763 Validation Loss: 4.085478 Epoch: 37 Training Loss: 4.433944 Validation Loss: 4.067901 Epoch: 38 Training Loss: 4.403983 Validation Loss: 4.075586 Epoch: 39 Training Loss: 4.382439 Validation Loss: 4.016983 Validation loss decreased Epoch: 40 Training Loss: 4.397105 Validation Loss: 4.048471 Epoch: 41 Training Loss: 4.378785 Validation Loss: 4.008638 Validation loss decreased Epoch: 42 Training Loss: 4.368710 Validation Loss: 3.952538 Validation loss decreased Epoch: 43 Training Loss: 4.343922 Validation Loss: 3.927382 Validation loss decreased Epoch: 44 Training Loss: 4.344827 Validation Loss: 3.980352 Epoch: 45 Training Loss: 4.306264 Validation Loss: 3.950596 Epoch: 46 Training Loss: 4.329989 Validation Loss: 4.003421 Epoch: 47 Training Loss: 4.299798 Validation Loss: 3.930243 Epoch: 48 Training Loss: 4.306777 Validation Loss: 3.993624 Epoch: 49 Training Loss: 4.281907 Validation Loss: 3.957619 Epoch: 50 Training Loss: 4.261512 Validation Loss: 3.874834 Validation loss decreased Epoch: 51 Training Loss: 4.274705 Validation Loss: 3.835531 Validation loss decreased Epoch: 52 Training Loss: 4.234179 Validation Loss: 3.980312 Epoch: 53 Training Loss: 4.242634 Validation Loss: 3.837797 Epoch: 54 Training Loss: 4.240625 Validation Loss: 3.848797 Epoch: 55 Training Loss: 4.225395 Validation Loss: 3.757899 Validation loss decreased Epoch: 56 Training Loss: 4.207301 Validation Loss: 3.744814 Validation loss decreased Epoch: 57 Training Loss: 4.205212 Validation Loss: 3.781969 Epoch: 58 Training Loss: 4.196064 Validation Loss: 3.970837 Epoch: 59 Training Loss: 4.184660 Validation Loss: 3.800131 Epoch: 60 Training Loss: 4.180840 Validation Loss: 3.746898 Epoch: 61 Training Loss: 4.181384 Validation Loss: 3.637125 Validation loss decreased Epoch: 62 Training Loss: 4.164508 Validation Loss: 3.729608 Epoch: 63 Training Loss: 4.145414 Validation Loss: 3.731796 Epoch: 64 Training Loss: 4.140334 Validation Loss: 3.701779 Epoch: 65 Training Loss: 4.136867 Validation Loss: 3.722771 Epoch: 66 Training Loss: 4.120500 Validation Loss: 3.548561 Validation loss decreased Epoch: 67 Training Loss: 4.091952 Validation Loss: 3.656554 Epoch: 68 Training Loss: 4.105190 Validation Loss: 3.789697 Epoch: 69 Training Loss: 4.104043 Validation Loss: 3.772113 Epoch: 70 Training Loss: 4.096060 Validation Loss: 3.597370 Epoch: 71 Training Loss: 4.078194 Validation Loss: 3.455949 Validation loss decreased 
Epoch: 72 Training Loss: 4.060153 Validation Loss: 3.522030 Epoch: 73 Training Loss: 4.060252 Validation Loss: 3.785067 Epoch: 74 Training Loss: 4.044846 Validation Loss: 3.539994 Epoch: 75 Training Loss: 4.025530 Validation Loss: 3.751283 Epoch: 76 Training Loss: 4.028861 Validation Loss: 3.491355 Epoch: 77 Training Loss: 4.013661 Validation Loss: 3.534217 Epoch: 78 Training Loss: 4.014490 Validation Loss: 3.423345 Validation loss decreased Epoch: 79 Training Loss: 4.003842 Validation Loss: 3.462763 Epoch: 80 Training Loss: 3.959844 Validation Loss: 3.435017 Epoch: 81 Training Loss: 3.978312 Validation Loss: 3.470844 Epoch: 82 Training Loss: 3.978060 Validation Loss: 3.472360 Epoch: 83 Training Loss: 3.993455 Validation Loss: 3.374593 Validation loss decreased Epoch: 84 Training Loss: 3.961789 Validation Loss: 3.330303 Validation loss decreased Epoch: 85 Training Loss: 3.929086 Validation Loss: 3.379400 Epoch: 86 Training Loss: 3.941887 Validation Loss: 3.358326 Epoch: 87 Training Loss: 3.919664 Validation Loss: 3.523214 Epoch: 88 Training Loss: 3.913182 Validation Loss: 3.278949 Validation loss decreased Epoch: 89 Training Loss: 3.909550 Validation Loss: 3.295269 Epoch: 90 Training Loss: 3.921831 Validation Loss: 3.300904 Epoch: 91 Training Loss: 3.881533 Validation Loss: 3.413122 Epoch: 92 Training Loss: 3.866074 Validation Loss: 3.316072 Epoch: 93 Training Loss: 3.876955 Validation Loss: 3.280539 Epoch: 94 Training Loss: 3.873457 Validation Loss: 3.300109 Epoch: 95 Training Loss: 3.867235 Validation Loss: 3.233773 Validation loss decreased Epoch: 96 Training Loss: 3.836950 Validation Loss: 3.219671 Validation loss decreased Epoch: 97 Training Loss: 3.836718 Validation Loss: 3.310886 Epoch: 98 Training Loss: 3.827658 Validation Loss: 3.214798 Validation loss decreased Epoch: 99 Training Loss: 3.814292 Validation Loss: 3.220006 Epoch: 100 Training Loss: 3.795596 Validation Loss: 3.185079 Validation loss decreased Epoch: 101 Training Loss: 3.820279 Validation Loss: 3.130639 Validation loss decreased Epoch: 102 Training Loss: 3.797202 Validation Loss: 3.169786 Epoch: 103 Training Loss: 3.766062 Validation Loss: 3.148008 Epoch: 104 Training Loss: 3.771590 Validation Loss: 3.307010 Epoch: 105 Training Loss: 3.779384 Validation Loss: 3.242149 Epoch: 106 Training Loss: 3.760721 Validation Loss: 3.078890 Validation loss decreased Epoch: 107 Training Loss: 3.746799 Validation Loss: 3.181673 Epoch: 108 Training Loss: 3.717305 Validation Loss: 3.174562 Epoch: 109 Training Loss: 3.710575 Validation Loss: 3.247369 Epoch: 110 Training Loss: 3.710409 Validation Loss: 3.013749 Validation loss decreased Epoch: 111 Training Loss: 3.742277 Validation Loss: 3.281442 Epoch: 112 Training Loss: 3.699625 Validation Loss: 3.067908 Epoch: 113 Training Loss: 3.687034 Validation Loss: 3.028711 Epoch: 114 Training Loss: 3.688787 Validation Loss: 3.046111 Epoch: 115 Training Loss: 3.671370 Validation Loss: 2.945843 Validation loss decreased Epoch: 116 Training Loss: 3.668288 Validation Loss: 3.029988 Epoch: 117 Training Loss: 3.671968 Validation Loss: 2.942671 Validation loss decreased Epoch: 118 Training Loss: 3.637515 Validation Loss: 2.966767 Epoch: 119 Training Loss: 3.656196 Validation Loss: 2.980201 Epoch: 120 Training Loss: 3.618127 Validation Loss: 2.895288 Validation loss decreased Epoch: 121 Training Loss: 3.633639 Validation Loss: 3.080709 Epoch: 122 Training Loss: 3.619493 Validation Loss: 2.895058 Validation loss decreased Epoch: 123 Training Loss: 3.613932 Validation Loss: 2.858034 Validation loss 
decreased Epoch: 124 Training Loss: 3.582406 Validation Loss: 2.900588 Epoch: 125 Training Loss: 3.587784 Validation Loss: 2.891124 Epoch: 126 Training Loss: 3.595622 Validation Loss: 3.069089 Epoch: 127 Training Loss: 3.573969 Validation Loss: 2.923118 Epoch: 128 Training Loss: 3.550453 Validation Loss: 3.014174 Epoch: 129 Training Loss: 3.548266 Validation Loss: 2.794587 Validation loss decreased Epoch: 130 Training Loss: 3.551932 Validation Loss: 2.781321 Validation loss decreased Epoch: 131 Training Loss: 3.524760 Validation Loss: 2.860888 Epoch: 132 Training Loss: 3.519140 Validation Loss: 2.829141 Epoch: 133 Training Loss: 3.494403 Validation Loss: 2.713929 Validation loss decreased Epoch: 134 Training Loss: 3.495821 Validation Loss: 2.840982 Epoch: 135 Training Loss: 3.474487 Validation Loss: 2.766525 Epoch: 136 Training Loss: 3.489440 Validation Loss: 2.690353 Validation loss decreased Epoch: 137 Training Loss: 3.464033 Validation Loss: 2.920819 Epoch: 138 Training Loss: 3.496338 Validation Loss: 2.788830 Epoch: 139 Training Loss: 3.467623 Validation Loss: 2.953789 Epoch: 140 Training Loss: 3.463534 Validation Loss: 2.681939 Validation loss decreased Epoch: 141 Training Loss: 3.447732 Validation Loss: 3.431767 Epoch: 142 Training Loss: 3.452376 Validation Loss: 2.713044 Epoch: 143 Training Loss: 3.449492 Validation Loss: 2.611570 Validation loss decreased Epoch: 144 Training Loss: 3.417523 Validation Loss: 2.709403 Epoch: 145 Training Loss: 3.391949 Validation Loss: 2.640990 Epoch: 146 Training Loss: 3.407347 Validation Loss: 2.837085 Epoch: 147 Training Loss: 3.374486 Validation Loss: 2.657837 Epoch: 148 Training Loss: 3.379858 Validation Loss: 2.659526 Epoch: 149 Training Loss: 3.386528 Validation Loss: 2.762592 Epoch: 150 Training Loss: 3.375216 Validation Loss: 2.781975 Epoch: 151 Training Loss: 3.344366 Validation Loss: 2.556478 Validation loss decreased Epoch: 152 Training Loss: 3.345098 Validation Loss: 3.127317 Epoch: 153 Training Loss: 3.348919 Validation Loss: 2.562656 Epoch: 154 Training Loss: 3.337797 Validation Loss: 2.721217 Epoch: 155 Training Loss: 3.320682 Validation Loss: 2.529296 Validation loss decreased Epoch: 156 Training Loss: 3.296458 Validation Loss: 2.621260 Epoch: 157 Training Loss: 3.290199 Validation Loss: 2.688381 Epoch: 158 Training Loss: 3.298633 Validation Loss: 2.517078 Validation loss decreased Epoch: 159 Training Loss: 3.253061 Validation Loss: 2.532502 Epoch: 160 Training Loss: 3.251987 Validation Loss: 2.511626 Validation loss decreased Epoch: 161 Training Loss: 3.232432 Validation Loss: 2.540846 Epoch: 162 Training Loss: 3.224229 Validation Loss: 2.490450 Validation loss decreased Epoch: 163 Training Loss: 3.250954 Validation Loss: 2.484232 Validation loss decreased Epoch: 164 Training Loss: 3.226571 Validation Loss: 2.409071 Validation loss decreased Epoch: 165 Training Loss: 3.224269 Validation Loss: 2.515541 Epoch: 166 Training Loss: 3.191483 Validation Loss: 2.378853 Validation loss decreased Epoch: 167 Training Loss: 3.180362 Validation Loss: 2.504711 Epoch: 168 Training Loss: 3.183510 Validation Loss: 2.452769 Epoch: 169 Training Loss: 3.155405 Validation Loss: 2.398896 Epoch: 170 Training Loss: 3.177006 Validation Loss: 2.318317 Validation loss decreased Epoch: 171 Training Loss: 3.166754 Validation Loss: 2.511092 Epoch: 172 Training Loss: 3.143456 Validation Loss: 2.440742 Epoch: 173 Training Loss: 3.148980 Validation Loss: 2.412758 Epoch: 174 Training Loss: 3.099923 Validation Loss: 2.371680 Epoch: 175 Training Loss: 3.135813 
Validation Loss: 2.895849 Epoch: 176 Training Loss: 3.109516 Validation Loss: 2.280192 Validation loss decreased Epoch: 177 Training Loss: 3.105808 Validation Loss: 2.390676 Epoch: 178 Training Loss: 3.099929 Validation Loss: 2.390148 Epoch: 179 Training Loss: 3.089752 Validation Loss: 2.472869 Epoch: 180 Training Loss: 3.037262 Validation Loss: 2.277416 Validation loss decreased Epoch: 181 Training Loss: 3.069418 Validation Loss: 2.372555 Epoch: 182 Training Loss: 3.026059 Validation Loss: 2.421957 Epoch: 183 Training Loss: 3.019118 Validation Loss: 2.257601 Validation loss decreased Epoch: 184 Training Loss: 3.020041 Validation Loss: 2.464229 Epoch: 185 Training Loss: 3.037134 Validation Loss: 2.272667 Epoch: 186 Training Loss: 2.987671 Validation Loss: 2.234247 Validation loss decreased Epoch: 187 Training Loss: 2.980516 Validation Loss: 2.156774 Validation loss decreased Epoch: 188 Training Loss: 3.018265 Validation Loss: 2.302144 Epoch: 189 Training Loss: 2.979907 Validation Loss: 2.244605 Epoch: 190 Training Loss: 2.972162 Validation Loss: 2.270026 Epoch: 191 Training Loss: 2.945148 Validation Loss: 2.330346 Epoch: 192 Training Loss: 2.943057 Validation Loss: 2.335621 Epoch: 193 Training Loss: 2.941078 Validation Loss: 2.207439 Epoch: 194 Training Loss: 2.915108 Validation Loss: 2.133189 Validation loss decreased Epoch: 195 Training Loss: 2.939658 Validation Loss: 2.259618 Epoch: 196 Training Loss: 2.906115 Validation Loss: 2.108557 Validation loss decreased Epoch: 197 Training Loss: 2.896769 Validation Loss: 2.115292 Epoch: 198 Training Loss: 2.898250 Validation Loss: 2.083306 Validation loss decreased Epoch: 199 Training Loss: 2.860920 Validation Loss: 2.107281 Epoch: 200 Training Loss: 2.849667 Validation Loss: 2.169739 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 2.097551 Test Accuracy: 40% (338/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. 
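As an aside on the bookkeeping in the `test` function above: the update `test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))` is the incremental form of an arithmetic mean. With $k = \text{batch\_idx} + 1$, the recurrence $m_k = m_{k-1} + \frac{x_k - m_{k-1}}{k}$ expands to $m_k = \frac{1}{k}\sum_{i=1}^{k} x_i$, so `test_loss` ends up as the average per-batch loss over the whole test set.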
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders
loaders_transfer = loaders_scratch.copy()
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture

Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.resnet50(pretrained=True)

# freeze the pre-trained backbone
for param in model_transfer.parameters():
    param.requires_grad = False

# replace the final fully connected layer with a new 133-class classifier
model_transfer.fc = nn.Linear(2048, 133, bias=True)

# only the new classifier layer is trained
fc_parameters = model_transfer.fc.parameters()
for param in fc_parameters:
    param.requires_grad = True

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.

__Answer:__ I used ResNet-50 (a residual network with 50 layers) for this model because it is well suited to image classification. Residual (skip) connections let gradients flow past weight layers, which counters the degradation problem in which very deep plain networks become harder to train and lose accuracy as depth grows; in effect the network can skip weight layers that are not relevant for a given input. Because the backbone is already well trained on ImageNet, it should not need further training for generic visual features, so I froze its parameters by stopping gradient updates. I then replaced the final layer with a new fully connected classifier for the 133 dog breeds; this layer does need to learn, so gradient updates remain enabled for it. Everything runs on CUDA where available, since GPU training is far faster.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.fc.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
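The training logs below show `Validation loss decreased` whenever the `train` helper checkpoints the model. A minimal sketch of that save-on-improvement pattern (illustrative function and variable names, not necessarily the helper's exact internals) looks like this:

###Code
import torch

def save_if_improved(model, valid_loss, valid_loss_min, save_path):
    # keep only the best weights seen so far, judged by validation loss
    if valid_loss < valid_loss_min:
        print('Validation loss decreased')
        torch.save(model.state_dict(), save_path)  # persist parameters only
        return valid_loss  # the new best validation loss
    return valid_loss_min

# usage inside an epoch loop, with valid_loss_min initialised to float('inf'):
# valid_loss_min = save_if_improved(model, valid_loss, valid_loss_min, 'model_transfer.pt')
###Output
_____no_output_____
###Markdown
With the checkpointing in place, train the transfer model and reload the best weights: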
###Code # train the model model_transfer = train(50, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 4.786988 Validation Loss: 4.580501 Validation loss decreased Epoch: 2 Training Loss: 4.539979 Validation Loss: 4.287271 Validation loss decreased Epoch: 3 Training Loss: 4.307267 Validation Loss: 3.999209 Validation loss decreased Epoch: 4 Training Loss: 4.098400 Validation Loss: 3.764347 Validation loss decreased Epoch: 5 Training Loss: 3.904691 Validation Loss: 3.476658 Validation loss decreased Epoch: 6 Training Loss: 3.724012 Validation Loss: 3.239848 Validation loss decreased Epoch: 7 Training Loss: 3.541661 Validation Loss: 3.019851 Validation loss decreased Epoch: 8 Training Loss: 3.378075 Validation Loss: 2.840148 Validation loss decreased Epoch: 9 Training Loss: 3.220987 Validation Loss: 2.686744 Validation loss decreased Epoch: 10 Training Loss: 3.073808 Validation Loss: 2.494994 Validation loss decreased Epoch: 11 Training Loss: 2.960559 Validation Loss: 2.367091 Validation loss decreased Epoch: 12 Training Loss: 2.852550 Validation Loss: 2.223589 Validation loss decreased Epoch: 13 Training Loss: 2.721879 Validation Loss: 2.120086 Validation loss decreased Epoch: 14 Training Loss: 2.622634 Validation Loss: 1.986233 Validation loss decreased Epoch: 15 Training Loss: 2.536847 Validation Loss: 1.868346 Validation loss decreased Epoch: 16 Training Loss: 2.449677 Validation Loss: 1.794170 Validation loss decreased Epoch: 17 Training Loss: 2.365410 Validation Loss: 1.741188 Validation loss decreased Epoch: 18 Training Loss: 2.291399 Validation Loss: 1.617702 Validation loss decreased Epoch: 19 Training Loss: 2.227143 Validation Loss: 1.583718 Validation loss decreased Epoch: 20 Training Loss: 2.151782 Validation Loss: 1.478498 Validation loss decreased Epoch: 21 Training Loss: 2.094808 Validation Loss: 1.400378 Validation loss decreased Epoch: 22 Training Loss: 2.056217 Validation Loss: 1.440227 Epoch: 23 Training Loss: 1.992340 Validation Loss: 1.331928 Validation loss decreased Epoch: 24 Training Loss: 1.954324 Validation Loss: 1.272097 Validation loss decreased Epoch: 25 Training Loss: 1.904187 Validation Loss: 1.260360 Validation loss decreased Epoch: 26 Training Loss: 1.892011 Validation Loss: 1.190600 Validation loss decreased Epoch: 27 Training Loss: 1.811584 Validation Loss: 1.180988 Validation loss decreased Epoch: 28 Training Loss: 1.781440 Validation Loss: 1.128915 Validation loss decreased Epoch: 29 Training Loss: 1.778552 Validation Loss: 1.135624 Epoch: 30 Training Loss: 1.753682 Validation Loss: 1.076376 Validation loss decreased Epoch: 31 Training Loss: 1.705632 Validation Loss: 1.037510 Validation loss decreased Epoch: 32 Training Loss: 1.666305 Validation Loss: 1.032672 Validation loss decreased Epoch: 33 Training Loss: 1.646952 Validation Loss: 1.002062 Validation loss decreased Epoch: 34 Training Loss: 1.618981 Validation Loss: 0.986919 Validation loss decreased Epoch: 35 Training Loss: 1.592249 Validation Loss: 0.936898 Validation loss decreased Epoch: 36 Training Loss: 1.543365 Validation Loss: 0.955394 Epoch: 37 Training Loss: 1.541680 Validation Loss: 0.919345 Validation loss decreased Epoch: 38 Training Loss: 1.560386 Validation Loss: 0.914148 Validation loss decreased Epoch: 39 Training Loss: 1.519469 Validation Loss: 0.893452 
Validation loss decreased Epoch: 40 Training Loss: 1.482966 Validation Loss: 0.868448 Validation loss decreased Epoch: 41 Training Loss: 1.461357 Validation Loss: 0.856630 Validation loss decreased Epoch: 42 Training Loss: 1.453572 Validation Loss: 0.817398 Validation loss decreased Epoch: 43 Training Loss: 1.433886 Validation Loss: 0.830203 Epoch: 44 Training Loss: 1.434138 Validation Loss: 0.830116 Epoch: 45 Training Loss: 1.404533 Validation Loss: 0.794161 Validation loss decreased Epoch: 46 Training Loss: 1.400703 Validation Loss: 0.773033 Validation loss decreased Epoch: 47 Training Loss: 1.360830 Validation Loss: 0.769219 Validation loss decreased Epoch: 48 Training Loss: 1.381080 Validation Loss: 0.742612 Validation loss decreased Epoch: 49 Training Loss: 1.365292 Validation Loss: 0.751922 Epoch: 50 Training Loss: 1.354821 Validation Loss: 0.748409
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.750902 Test Accuracy: 84% (710/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model

Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
from PIL import Image
import torchvision.transforms as transforms
import os

### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
data_transfer = loaders_transfer.copy()

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].dataset.classes]

def predict_breed_transfer(img_path):
    global model_transfer
    # load the image; convert('RGB') drops any transparency/alpha channel
    img = Image.open(img_path).convert('RGB')
    transform = transforms.Compose([transforms.Resize(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    img = transform(img)
    img = img.unsqueeze(0)  # add a batch dimension
    if use_cuda:
        model_transfer = model_transfer.cuda()
        img = img.cuda()
    model_transfer.eval()
    idx = torch.argmax(model_transfer(img))
    return class_names[idx]

for img_file in os.listdir('data/dogImages/test/001.Affenpinscher'):
    img_path = os.path.join('data/dogImages/test/001.Affenpinscher', img_file)
    prediction = predict_breed_transfer(img_path)
    print("image_file_name: {0}, \t prediction breed: {1}".format(img_path, prediction))
###Output
image_file_name: data/dogImages/test/001.Affenpinscher/Affenpinscher_00003.jpg, prediction breed: Affenpinscher
image_file_name: data/dogImages/test/001.Affenpinscher/Affenpinscher_00023.jpg, prediction breed: Affenpinscher
image_file_name: data/dogImages/test/001.Affenpinscher/Affenpinscher_00036.jpg, prediction breed: Affenpinscher
image_file_name: data/dogImages/test/001.Affenpinscher/Affenpinscher_00047.jpg, prediction breed: Affenpinscher
image_file_name: data/dogImages/test/001.Affenpinscher/Affenpinscher_00048.jpg, prediction breed: Affenpinscher
image_file_name: data/dogImages/test/001.Affenpinscher/Affenpinscher_00058.jpg, prediction breed: Affenpinscher
image_file_name: data/dogImages/test/001.Affenpinscher/Affenpinscher_00071.jpg, prediction breed: Affenpinscher
image_file_name: data/dogImages/test/001.Affenpinscher/Affenpinscher_00078.jpg, prediction breed: Affenpinscher
###Markdown
--- Step 5: Write your Algorithm

Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
def run_app(img_path):
    prediction = predict_breed_transfer(img_path)
    # show the image regardless of what was detected
    img = Image.open(img_path)
    plt.imshow(img)
    plt.show()
    if dog_detector(img_path):
        print("image_file_name: {0}, \t prediction breed: {1}".format(img_path, prediction))
    elif face_detector(img_path):
        print("image_file_name: {0}, \t human that looks like a {1}".format(img_path, prediction))
    else:
        print("image_file_name: {0}, \t neither human nor dog detected".format(img_path))
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm

In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!

Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ (Three possible points for improvement) The model appears to struggle with images that differ from the training set, such as an image of two dogs, or a puppy rather than an adult dog; training the model on a wider set of data could help with this. Alternatively, allowing more of the ResNet architecture to be fine-tuned, rather than only the final layer, could help with classifying the slight variances in the test images. Further image augmentation would also be useful for improving the model's accuracy, and is something I would consider if I were to develop this further.

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
import os
from PIL import Image

for img_file in os.listdir('test'):
    img_path = os.path.join('test', img_file)
    run_app(img_path)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks

Project: Write an Algorithm for a Dog Identification App

---

In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide.
Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.

> **Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.

The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.

--- Why We're Here

In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).

![Sample Dog Output](images/sample_dog_output.png)

In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!

The Road Ahead

We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Import Datasets
* [Step 1](#step1): Detect Humans
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Write your Algorithm
* [Step 6](#step6): Test Your Algorithm

--- Step 0: Import Datasets

Make sure that you've downloaded the required human and dog datasets:
* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`.
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector __Question 1:__ Use the code cell below to test the performance of the `face_detector` function. 
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?

Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.

__Answer:__ A human face is detected in 98% of the first 100 images in `human_files` and in 18% of the first 100 images in `dog_files` (see the output below).

###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

# TODO: Test the performance of the face_detector algorithm
# on the images in human_files_short and dog_files_short.
human_files_human_face_count = 0
dog_files_human_face_count = 0

for human_img in human_files_short:
    if face_detector(human_img):
        human_files_human_face_count += 1

for dog_img in dog_files_short:
    if face_detector(dog_img):
        dog_files_human_face_count += 1

print(f"Percentage of the first 100 images in human_files "
      f"{human_files_human_face_count/len(human_files_short) * 100}% \n"
      f"Percentage of the first 100 images in dog_files "
      f"{dog_files_human_face_count/len(dog_files_short) * 100}%")
###Output
Percentage of the first 100 images in human_files 98.0% Percentage of the first 100 images in dog_files 18.0%
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
face_cascade_default = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')

# returns "True" if face is detected in image stored at img_path
def face_detector_default(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade_default.detectMultiScale(gray)
    return len(faces) > 0

human_files_short_human_face_count_default = 0
dog_files_short_human_face_count_default = 0

for human_img in human_files_short:
    if face_detector_default(human_img):
        human_files_short_human_face_count_default += 1

for dog_img in dog_files_short:
    if face_detector_default(dog_img):
        dog_files_short_human_face_count_default += 1

print(f"Percentage of detected human faces in human_files_short with haarcascade_frontalface_default "
      f"{human_files_short_human_face_count_default/len(human_files_short) * 100}% \n"
      f"Percentage of detected human faces in dog_files_short with haarcascade_frontalface_default "
      f"{dog_files_short_human_face_count_default/len(dog_files_short) * 100}%")
###Output
Percentage of detected human faces in human_files_short with haarcascade_frontalface_default 99.0% Percentage of detected human faces in dog_files_short with haarcascade_frontalface_default 59.0%
###Markdown
--- Step 2: Detect Dogs

In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](https://pytorch.org/vision/stable/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' # TODO: Complete the function. # Load and pre-process an image from the given img_path # Return the *index* of the predicted class for that image img_transforms = transforms.Compose([transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) img = Image.open(img_path) img = img_transforms(img).float() img = img.requires_grad_(True) img = img.unsqueeze(0) if use_cuda: img = img.cuda() output = VGG16(img) if use_cuda: output = output.cpu() return np.argmax(output.detach().numpy()) # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. 
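    # ImageNet class indices 151-268 are a contiguous run of dog breeds,
    # from 'Chihuahua' (151) to 'Mexican hairless' (268), so a predicted
    # index inside that closed interval means the model saw a dog.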
    pred_index = VGG16_predict(img_path)
    return 151 <= pred_index <= 268  # True exactly when a dog class is predicted
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector

__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__ A dog is detected in 2% of the images in `human_files_short` and in 93% of the images in `dog_files_short` (see the output below).

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
human_files_short_dog_face_count = 0
dog_files_short_dog_face_count = 0

for human_img in human_files_short:
    if dog_detector(human_img):
        human_files_short_dog_face_count += 1

for dog_img in dog_files_short:
    if dog_detector(dog_img):
        dog_files_short_dog_face_count += 1

print(f"Percentage of detected dog faces in human_files_short "
      f"{human_files_short_dog_face_count/len(human_files_short) * 100}% \n"
      f"Percentage of detected dog faces in dog_files_short "
      f"{dog_files_short_dog_face_count/len(dog_files_short) * 100}%")
###Output
Percentage of detected dog faces in human_files_short 2.0% Percentage of detected dog faces in dog_files_short 93.0%
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
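## Below: a second detector built on ShuffleNet V2, applying the same
## 151-268 dog-index rule to its ImageNet predictions.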
ShuffleNet = models.shufflenet_v2_x1_0(pretrained=True)  # pre-trained ShuffleNet V2

if use_cuda:
    ShuffleNet = ShuffleNet.cuda()

def shufflenet_predict(img_path):
    '''
    Use pre-trained ShuffleNet model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to ShuffleNet model's prediction
    '''
    img_transforms = transforms.Compose([transforms.Resize(255),
                                         transforms.CenterCrop(224),
                                         transforms.ToTensor(),
                                         transforms.Normalize([0.485, 0.456, 0.406],
                                                              [0.229, 0.224, 0.225])])
    img = Image.open(img_path)
    img = img_transforms(img).float()
    img = img.unsqueeze(0)
    if use_cuda:
        img = img.cuda()  # keep the input on the same device as the model
    output = ShuffleNet(img)
    return np.argmax(output.detach().cpu().numpy())

def dog_detector_shuffle(img_path):
    pred_index = shufflenet_predict(img_path)
    return 151 <= pred_index <= 268

human_files_short_dog_face_count_shuffle = 0
dog_files_short_dog_face_count_shuffle = 0

for human_img in human_files_short:
    if dog_detector_shuffle(human_img):
        human_files_short_dog_face_count_shuffle += 1

for dog_img in dog_files_short:
    if dog_detector_shuffle(dog_img):
        dog_files_short_dog_face_count_shuffle += 1

print(f"Percentage of detected dog faces in human_files_short with shufflenet "
      f"{human_files_short_dog_face_count_shuffle/len(human_files_short) * 100}% \n"
      f"Percentage of detected dog faces in dog_files_short with shufflenet "
      f"{dog_files_short_dog_face_count_shuffle/len(dog_files_short) * 100}%")
###Output
Percentage of detected dog faces in human_files_short with shufflenet 1.0% Percentage of detected dog faces in dog_files_short with shufflenet 93.0%
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)

Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. (Comparison images of a Brittany and a Welsh Springer Spaniel omitted.)

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). (Comparison images of a Curly-Coated Retriever and an American Water Spaniel omitted.)

Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. (Images of yellow, chocolate, and black Labradors omitted.)

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times ($1/133 \approx 0.0075$), which corresponds to an accuracy of less than 1%.

Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

use_cuda = torch.cuda.is_available()

train_transforms = transforms.Compose([transforms.RandomRotation(60),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.RandomVerticalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])

image_path = 'dogImages'
if not os.path.isdir(image_path):
    image_path = '/' + image_path
train_path = os.path.join(image_path, 'train')
val_path = os.path.join(image_path, 'valid')
test_path = os.path.join(image_path, 'test')

train_dataset = datasets.ImageFolder(train_path, train_transforms)
# note: the validation set reuses the augmenting train transforms here
val_dataset = datasets.ImageFolder(val_path, train_transforms)
test_dataset = datasets.ImageFolder(test_path, test_transforms)

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=True)

loaders_scratch = {'train': train_loader, 'valid': val_loader, 'test': test_loader}
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**: Training images are randomly rotated (by up to 60 degrees), randomly resized and cropped to 224x224, and randomly flipped horizontally and vertically before being converted to tensors and normalized with the ImageNet statistics; test images are resized to 255 pixels and center-cropped to 224x224. The 224x224 input size is the standard size for ImageNet-style architectures. The augmentation (rotations, random crops, flips) is intended to counter the high intra-class variation in the dataset.

(IMPLEMENTATION) Model Architecture

Create a CNN to classify dog breed. Use the template in the code cell below.
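The `calc_w_conv_out` helper in the next cell implements the standard output-size formula for a square convolution with input width $W$, filter size $F$, padding $P$ and stride $S$, followed by pooling with stride $p$:

$$W_{out} = \frac{1}{p}\left(\frac{W - F + 2P}{S} + 1\right)$$

For example, the first layer has $W = 224$, $F = 3$, $P = 1$, $S = 1$ and no pooling, so $W_{out} = (224 - 3 + 2)/1 + 1 = 224$; the second layer adds a max pool of stride 7, giving $224/7 = 32$. These are the first two values in the printed output below.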
###Code
import torch.nn as nn
import torch.nn.functional as F

def calc_w_conv_out(conv, pool_stride=1):
    return (((conv["W"] - conv["F"] + (2*conv["P"])) / conv["S"]) + 1) / pool_stride

conv1_w_in = 224
conv1 = {"W": conv1_w_in, "D": 3, "K": 16, "F": 3, "P": 1, "S": 1}
conv1_w_out = calc_w_conv_out(conv1)
conv2 = {"W": conv1_w_out, "D": conv1["K"], "K": 24, "F": 3, "P": 1, "S": 1}
conv2_w_out = calc_w_conv_out(conv2, 7)
conv3 = {"W": conv2_w_out, "D": conv2["K"], "K": 32, "F": 3, "P": 1, "S": 1}
conv3_w_out = calc_w_conv_out(conv3)
conv4 = {"W": conv3_w_out, "D": conv3["K"], "K": 48, "F": 3, "P": 1, "S": 1}
conv4_w_out = calc_w_conv_out(conv4, 4)
conv5 = {"W": conv4_w_out, "D": conv4["K"], "K": 56, "F": 3, "P": 1, "S": 1}
conv5_w_out = calc_w_conv_out(conv5)
conv6 = {"W": conv5_w_out, "D": conv5["K"], "K": 64, "F": 3, "P": 1, "S": 1}
conv6_w_out = calc_w_conv_out(conv6, 4)
conv7 = {"W": conv6_w_out, "D": conv6["K"], "K": 176, "F": 3, "P": 1, "S": 1}
conv7_w_out = calc_w_conv_out(conv7)
conv8 = {"W": conv7_w_out, "D": conv7["K"], "K": 192, "F": 3, "P": 1, "S": 1}
conv8_w_out = calc_w_conv_out(conv8, 2)
conv9 = {"W": conv8_w_out, "D": conv8["K"], "K": 208, "F": 3, "P": 1, "S": 1}
conv9_w_out = calc_w_conv_out(conv9)
conv10 = {"W": conv9_w_out, "D": conv9["K"], "K": 224, "F": 3, "P": 1, "S": 1}
conv10_w_out = calc_w_conv_out(conv10, 2)

conv_features_out = conv6_w_out**2 * conv6["K"]
#print(conv1_w_out, conv2_w_out, conv3_w_out, conv4_w_out, conv5_w_out,
#      conv6_w_out, conv7_w_out, conv8_w_out, conv9_w_out, conv10_w_out, conv_features_out)
print(conv1_w_out, conv2_w_out, conv3_w_out, conv4_w_out, conv_features_out)

def make_nn_conv(conv):
    return nn.Conv2d(conv["D"], conv["K"], conv["F"], padding=conv["P"], stride=conv["S"])

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        ## Layer 1
        self.conv1 = make_nn_conv(conv1)
        self.conv2 = make_nn_conv(conv2)
        ## Layer 2
        self.conv3 = make_nn_conv(conv3)
        self.conv4 = make_nn_conv(conv4)
        ## Layer 3
        self.conv5 = make_nn_conv(conv5)
        self.conv6 = make_nn_conv(conv6)
        ## Layer 4
        #self.conv7 = make_nn_conv(conv7)
        #self.conv8 = make_nn_conv(conv8)
        ## Layer 5
        #self.conv9 = make_nn_conv(conv9)
        #self.conv10 = make_nn_conv(conv10)
        ## Layer 6
        self.fc1 = nn.Linear(int(conv_features_out), 133)
        ## Layer 7
        #self.fc2 = nn.Linear(4096, 256)
        ## Layer 8
        #self.fc3 = nn.Linear(256, 133)

    def forward(self, x):
        ## Define forward behavior
        batch_size = x.size()[0]
        # pass training=self.training so dropout is active only in train mode
        # layer 1
        x = F.dropout(F.relu(self.conv1(x)), 0.2, training=self.training)
        x = F.dropout(F.max_pool2d(F.relu(self.conv2(x)), 7, 7), 0.2, training=self.training)
        # layer 2
        x = F.dropout(F.relu(self.conv3(x)), 0.2, training=self.training)
        x = F.dropout(F.max_pool2d(F.relu(self.conv4(x)), 4, 4), 0.2, training=self.training)
        # layer 3
        x = F.dropout(F.relu(self.conv5(x)), 0.2, training=self.training)
        x = F.dropout(F.max_pool2d(F.relu(self.conv6(x)), 4, 4), 0.2, training=self.training)
        # layer 4
        #x = F.dropout(F.relu(self.conv7(x)), 0.2)
        #x = F.dropout(F.max_pool2d(F.relu(self.conv8(x)), 2, 2), 0.2)
        # layer 5
        #x = F.dropout(F.relu(self.conv9(x)), 0.2)
        #x = F.dropout(F.max_pool2d(F.relu(self.conv10(x)), 2, 2), 0.2)
        x = x.view(batch_size, -1)  # flatten to (batch, features)
        #x = F.dropout(F.relu(self.fc1(x)), 0.2)
        #x = F.dropout(F.relu(self.fc2(x)), 0.2)
        #x = F.log_softmax(self.fc3(x))
        x = self.fc1(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#
# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

model_scratch
###Output
224.0 32.0 32.0 8.0 256.0
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01, momentum=0.9)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.

###Code
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # find the loss and update the model parameters accordingly
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            # record the average training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # update the average validation loss
            output = model(data)
            loss = criterion(output, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        # save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            print('Validation loss decreased')
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

# train the model
model_scratch = train(1, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
Epoch: 1 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 2 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 3 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 4 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 5 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 6 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 7 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 8 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 9 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 10 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 11 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 12 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 13 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 14 Training Loss: 0.000000 Validation Loss: 0.000000
Epoch: 15 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 16 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 17 Training Loss: 0.000000 Validation Loss: 0.000000 Epoch: 18 Training Loss: 0.000000 Validation Loss: 0.000000
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
_____no_output_____
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)

You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders
loaders_transfer = {'train': train_loader, 'valid': val_loader, 'test': test_loader}
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture

Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.vgg19(pretrained=True)

for param in model_transfer.features.parameters():
    param.requires_grad_(False)

in_features = model_transfer.classifier[6].in_features
output_fc_layer = nn.Linear(in_features, 133)
model_transfer.classifier[6] = output_fc_layer

if use_cuda:
    model_transfer = model_transfer.cuda()

model_transfer
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.

__Answer:__

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html).
Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.001, momentum=0.9)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model
# model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))

# start from the previously saved checkpoint, then continue training
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
model_transfer = train(50, loaders_transfer, model_transfer, optimizer_transfer,
                       criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model

Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    model_transfer.eval()
    img = Image.open(img_path)
    img = test_transforms(img).float()
    img = img.unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    output = model_transfer(img)
    if use_cuda:
        output = output.cpu()
    return class_names[np.argmax(output.detach().numpy())]
###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm

Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
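## Sketch of the intended flow, following the Step 5 description above:
## 1) a dog is detected -> report the predicted breed
## 2) a human face is detected -> report the resembling breed
## 3) neither -> report an error-style message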
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    found_dog = dog_detector(img_path)
    found_human = face_detector(img_path)
    img = cv2.imread(img_path)
    cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(cv_rgb)
    if not found_dog and not found_human:
        print("Hmmm, hold on a minute...")
        plt.show()
        print("I don't know what I'm looking at...! Can you try again?\n\n")
    else:
        pred_breed = predict_breed_transfer(img_path)
        print(f"Hello, {'dog' if found_dog else 'human'}!")
        plt.show()
        print(f"You {'are' if found_dog else 'look like'} a...")
        print(f"{pred_breed}\n")
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm

In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!

Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ (Three possible points for improvement)

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks

Project: Write an Algorithm for a Dog Identification App

---

In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.

> **Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.

The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements.
If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell)Percentage of the first 100 images in human_files have a detected human face - 98%Percentage of the first 100 images in dog_files have a detected human face - 17% ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
human_detected_in_human = 0 human_detected_in_dog = 0 for img_path in tqdm(human_files_short): if face_detector(img_path): human_detected_in_human += 1 print ("\nAnswer 1: ", human_detected_in_human) for img_path in tqdm(dog_files_short): if face_detector(img_path): human_detected_in_dog += 1 print ("\nAnswer 2: ", human_detected_in_dog) ###Output 100%|██████████| 100/100 [00:02<00:00, 34.97it/s] 0%| | 0/100 [00:00<?, ?it/s] ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:05<00:00, 93178951.54it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) loader = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]) def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function.
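    ## Note: the preprocessing pipeline defined above mirrors VGG-16's ImageNet setup:
    ## Resize(256) -> CenterCrop(224) -> ToTensor -> Normalize with the ImageNet mean/std.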
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image global loader image = Image.open(img_path) image = loader(image).float() # torch.autograd.Variable is deprecated in modern PyTorch; a plain tensor works the same way image = torch.autograd.Variable(image, requires_grad=True) image = image.unsqueeze(0) if use_cuda: image = image.cuda() classes = VGG16(image) val, idx = torch.max(classes, 1) return idx[0] # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. index = VGG16_predict(img_path) return 151 <= index <= 268 # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ Percentage of the images in `human_files_short` with a detected dog = 0%. Percentage of the images in `dog_files_short` with a detected dog = 100%. ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. dog_detected_in_human = 0 dog_detected_in_dog = 0 for img_path in tqdm(dog_files_short): if dog_detector(img_path): dog_detected_in_dog += 1 print ("\nAnswer 2: ", dog_detected_in_dog) for img_path in tqdm(human_files_short): if dog_detector(img_path): dog_detected_in_human += 1 print ("\nAnswer 1: ", dog_detected_in_human) ###Output 100%|██████████| 100/100 [01:21<00:00, 1.41it/s] 0%| | 0/100 [00:00<?, ?it/s] ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets import torch import torchvision.transforms as transforms # needed for the transforms used below normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) loader = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]) ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes def data_loader(path): global loader data = datasets.ImageFolder(path, transform=loader) return torch.utils.data.DataLoader(data, batch_size=20, shuffle=True, num_workers=0) loaders_scratch = {} loaders_scratch['train'] = data_loader("/data/dog_images/train") loaders_scratch['valid'] = data_loader("/data/dog_images/valid") loaders_scratch['test'] = data_loader("/data/dog_images/test") dataiter = iter(loaders_scratch['test']) images, labels = dataiter.next() images = images.numpy() # convert images to numpy for display print (max(labels)) ###Output tensor(126) ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:- The loader is the same as the one used before: Resize(256) scales the shorter side to 256 pixels, CenterCrop(224) then crops a 224x224 input tensor (the input size VGG-style networks expect), followed by ToTensor and Normalize. The normalization values are based on the example provided in the PyTorch documentation.- No augmentation was applied; this run is just a baseline for testing (a sketch of a possible augmentation pipeline is shown below).
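For reference, a minimal sketch of what a training-time augmentation pipeline could look like (illustrative only — the loaders above do not apply any augmentation, and the specific transform choices here are assumptions, not what this notebook trained with): ###Code
import torchvision.transforms as transforms

# Illustrative training-time augmentation (NOT used by the loaders above).
# Validation/test data would keep the deterministic Resize/CenterCrop pipeline.
augment_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),   # random scale and crop, output 224x224
    transforms.RandomHorizontalFlip(),   # dogs remain plausible when mirrored
    transforms.RandomRotation(10),       # small rotations add pose variety
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])])
###Output
_____no_output_____
###Markdown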
(IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 64, 3, padding=1) self.conv2 = nn.Conv2d(64, 128, 3, padding=1) self.conv3 = nn.Conv2d(128, 256, 3, padding=1) self.conv4 = nn.Conv2d(256, 512, 3, padding=1) # after four 2x2 max-pools, 224 -> 112 -> 56 -> 28 -> 14, so the flattened size is 14*14*512 self.fc = nn.Linear(14*14*512, 1024) self.fc2 = nn.Linear(1024, 256) self.fc3 = nn.Linear(256, 133) self.pool = nn.MaxPool2d(2, 2) self.dropout = nn.Dropout(0.5) def forward(self, x): ## Define forward behavior # print (x.shape) x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = self.pool(F.relu(self.conv3(x))) x = self.pool(F.relu(self.conv4(x))) # x = self.dropout(x) # print (x.shape) x = x.view(x.shape[0], -1) # print (x.shape) # x = self.dropout(x) x = F.relu(self.fc(x)) x = self.dropout(x) x = F.relu(self.fc2(x)) x = self.dropout(x) x = self.fc3(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ Roughly based on VGG-16: a few convolutional layers of increasing depth (doubling at each step), each followed by max-pooling to halve the width and height, then flattening into fully connected layers with dropout. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.001, momentum=0.9) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
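For reference, the serialization pattern the link above describes, shown as a minimal sketch of the standard PyTorch idiom (the helper names are illustrative, not part of the project template): ###Code
import torch

def save_checkpoint(model, path):
    # persist only the learnable parameters -- the recommended PyTorch idiom
    torch.save(model.state_dict(), path)

def load_checkpoint(model, path):
    # restore parameters into a model with a matching architecture
    model.load_state_dict(torch.load(path))
    return model
###Output
_____no_output_____
###Markdown
The training loop below applies this same pattern, saving a checkpoint whenever the validation loss improves.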
###Code from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True from tqdm import tqdm def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(tqdm(loaders['train'])): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # use the function arguments (model, optimizer, criterion) rather than the globals so this helper stays reusable optimizer.zero_grad() output = model(data) # print (output) # print (target) # 1/0 loss = criterion(output, target) loss.backward() optimizer.step() # train_loss += loss.item()*data.size(0) # incremental mean over batches: new_avg = old_avg + (x - old_avg) / (n + 1) train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) print("train_loss:", train_loss) # prints every batch; verbose, but handy for debugging ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(tqdm(loaders['valid'])): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) # calculate the batch loss loss = criterion(output, target) # update average validation loss valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # train_loss = train_loss/len(loaders['train'].dataset) # valid_loss = valid_loss/len(loaders['valid'].dataset) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(30, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0.
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 4.059096 Test Accuracy: 8% (73/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders import os from torchvision import datasets import torch import torchvision.transforms as transforms normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) loader = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]) ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes def data_loader(path): global loader data = datasets.ImageFolder(path, transform=loader) return torch.utils.data.DataLoader(data, batch_size=20, shuffle=True, num_workers=0) loaders_transfer = {} loaders_transfer['train'] = data_loader("/data/dog_images/train") loaders_transfer['valid'] = data_loader("/data/dog_images/valid") loaders_transfer['test'] = data_loader("/data/dog_images/test") ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn # VGG16.classifier ## TODO: Specify model architecture model_transfer = models.vgg16(pretrained=True) # freeze all pre-trained weights; the replacement classifier head created below # has requires_grad=True by default, so only it will be trained for param in model_transfer.parameters(): param.requires_grad = False model_transfer.classifier[6] = nn.Sequential( nn.Linear(4096, 256), nn.ReLU(), nn.Dropout(0.4), nn.Linear(256, 133) ) if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I froze the early layers (all but the last classifier layer) because they capture broad features such as edges, which are applicable to any image classification problem.
The last classifier layer is replaced by two fully connected layers (4096 -> 256 -> 133), so the final output dimension matches the 133 dog breed classes. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_transfer = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_transfer = optim.SGD(model_transfer.classifier[6].parameters(), lr=0.001, momentum=0.9) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True from tqdm import tqdm import numpy as np def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(tqdm(loaders['train'])): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # use the function arguments (model, optimizer, criterion) rather than the globals optimizer.zero_grad() output = model(data) # print (output) # print (target) # 1/0 loss = criterion(output, target) loss.backward() optimizer.step() # train_loss += loss.item()*data.size(0) train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) print("train_loss:", train_loss) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(tqdm(loaders['valid'])): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) # calculate the batch loss loss = criterion(output, target) # update average validation loss valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # train_loss = train_loss/len(loaders['train'].dataset) # valid_loss = valid_loss/len(loaders['valid'].dataset) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}).
Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_transfer = train(10, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy model_transfer.load_state_dict(torch.load('model_transfer.pt')) # load the model that got the best validation accuracy model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.457433 Test Accuracy: 85% (717/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code from PIL import Image import torchvision.transforms as transforms ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. data_transfer = loaders_transfer.copy() # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].dataset.classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) loader = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]) image = Image.open(img_path) image = loader(image).float() # torch.autograd.Variable is deprecated in modern PyTorch; a plain tensor works the same way image = torch.autograd.Variable(image, requires_grad=True) image = image.unsqueeze(0) model_transfer.eval() if use_cuda: image = image.cuda() classes = model_transfer(image) model_transfer.train() # restore training mode after inference val, idx = torch.max(classes, 1) return class_names[idx[0]] ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.
Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither if face_detector(img_path): # face_detector already returns a boolean breed = predict_breed_transfer(img_path) plt.imshow(Image.open(img_path)) plt.show() print('Human. Dog breed is ' + breed) elif dog_detector(img_path): breed = predict_breed_transfer(img_path) plt.imshow(Image.open(img_path)) plt.show() print('Dog. Breed is ' + breed) else: plt.imshow(Image.open(img_path)) plt.show() print('None.') ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)Output for dog breed prediction is quite accurate, but the resembling-breed predictions for humans are less convincing. Some ideas for improvement: 1. More data with more classes. Since humans need not closely resemble any of the 133 dog breeds, having more breeds might provide a better resemblance. 2. Adding human faces to the training data with a known resembling dog breed. This could train the network to handle more human-like images. 3. More complex transfer learning for dog breed prediction. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. 
###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 0 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. 
You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. accuracy_on_human_files = np.mean([face_detector(img) for img in human_files_short ]) * 100 print(f"Accuracy on 100 humans: {accuracy_on_human_files:.2f} %") not_detected_humans = [img for img in human_files_short if not face_detector(img)] img = cv2.imread(not_detected_humans[0]) cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. 
from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). 
Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
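As a reminder of the pattern the TODOs in the template below describe, here is a minimal sketch of a single optimization step (illustrative; the helper name `training_step` is an assumption, not part of the template): ###Code
def training_step(model, data, target, optimizer, criterion):
    """One gradient-descent step: the pattern the TODOs below ask for."""
    optimizer.zero_grad()             # clear gradients accumulated from the previous batch
    output = model(data)              # forward pass
    loss = criterion(output, target)  # compute the batch loss
    loss.backward()                   # backpropagate
    optimizer.step()                  # update the model parameters
    return loss
###Output
_____no_output_____
###Markdown
The running training loss can then be tracked with the incremental-mean update suggested in the template's comments.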
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model # model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # (uncomment the line above and set n_epochs to train) # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. 
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): # Read the image img = cv2.imread(img_path) # Transform to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Detect face faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell)* The percent of human faces detected in the humans files were 98.0%.* The percent of human faces detected in the dog files were 17.0% ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. 
#-#-#

# Create arrays to store results
human=np.zeros((100,1))
dog=np.zeros((100,1))

# iterate over the list
for x in range(len(human_files_short)):
    human[x]=face_detector(human_files_short[x])
    dog[x]=face_detector(dog_files_short[x])

# Results
print('The percent of human faces detected in the humans files were {}%'.format(sum(human)[0]))
print('The percent of human faces detected in the dog files were {}%'.format(sum(dog)[0]))

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.

###Output
The percent of human faces detected in the humans files were 98.0%
The percent of human faces detected in the dog files were 17.0%

###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### For this section, I am going to use a "Multi-Task Cascaded Convolutional Neural Network"
### See https://arxiv.org/abs/1604.02878 for details
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

# I cannot run this on this platform, but it is an alternative that uses deep learning
'''
from mtcnn.mtcnn import MTCNN

def face_detector_dl(img_path):
    # Read the image (OpenCV loads it as BGR)
    img = cv2.imread(img_path)
    # MTCNN expects an RGB color image, so convert from BGR
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Detect faces
    faces = mtcnn_detector.detect_faces(img_rgb)
    return len(faces) > 0

# Now the same code as before --------------------

# Create a face detector (named so it does not shadow the face_detector function above)
mtcnn_detector = MTCNN()

# Create arrays to store results
human=np.zeros((100,1))
dog=np.zeros((100,1))

# iterate over the list
for x in range(len(human_files_short)):
    human[x]=face_detector_dl(human_files_short[x])
    dog[x]=face_detector_dl(dog_files_short[x])

# Results
print('The percent of human faces detected in the humans files were {}%'.format(sum(human)[0]))
print('The percent of human faces detected in the dog files were {}%'.format(sum(dog)[0]))
'''

###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.

Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth
100%|██████████| 553433881/553433881 [00:06<00:00, 87652277.78it/s]

###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
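To make a predicted index easier to interpret by eye, one option is to save the index-to-label dictionary from the class-dictionary gist linked above into a local file and parse it. A minimal sketch, assuming a hypothetical local copy named `imagenet1000_clsidx_to_labels.txt` containing that dictionary as a Python literal:

###Code
import ast

# Hypothetical local copy of the ImageNet index -> label dictionary,
# saved by hand from the class-dictionary gist referenced above.
with open('imagenet1000_clsidx_to_labels.txt') as f:
    idx_to_label = ast.literal_eval(f.read())

# Indices 151-268 are the dog categories; e.g. index 151 is 'Chihuahua'.
print(idx_to_label[151])

###Output
_____no_output_____
###Markdown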
(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

def DNN_predict(img_path,model):
    '''
    Use a pre-trained model to obtain the index corresponding to the
    predicted ImageNet class for the image at the specified path

    Note: I modified this function a little bit to also take the model as an input
    (this is more efficient and generalizable)

    Args:
        img_path: path to an image
        model: pre-trained model to predict the class of the image

    Returns:
        Index corresponding to the model's prediction
    '''
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image

    # Load Image (convert to RGB so grayscale files do not break the pipeline)
    image = Image.open(img_path).convert('RGB')
    # Create a function to Transform to tensor and normalize
    transformation = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(244),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])
    ])
    # Transformation
    image_tensor = transformation(image).float()
    image_tensor = image_tensor.unsqueeze_(0)
    # Prediction (eval mode and no_grad make inference deterministic and cheaper)
    model.eval()
    with torch.no_grad():
        prediction = model(image_tensor)
    # Index of the most probable class
    classe = prediction.data.numpy().argmax()

    return classe # predicted class index

# Test the function with
img_path=dog_files[2]
# Create a vgg16 model
vgg16 = models.vgg16(pretrained=True)
DNN_predict(img_path,vgg16)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path,model):
    # Use the given network to predict the category
    prediction = DNN_predict(img_path,model)
    return (prediction>=151) & (prediction<=268) # true/false

# Test the function with
img_path=dog_files[2]
dog_detector(img_path,vgg16)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__
* The percentage of dogs detected in the human files was 1.0% (see the output below).
* The percentage of dogs detected in the dog files was 100.0%.

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.

# Create arrays to store results
human=np.zeros((100,1))
dog=np.zeros((100,1))

# iterate over the list
for x in range(len(human_files_short)):
    human[x]=dog_detector(human_files_short[x],vgg16)
    dog[x]=dog_detector(dog_files_short[x],vgg16)

# Results
print('The percent of dogs detected in the humans files were {}%'.format(sum(human)[0]))
print('The percent of dogs detected in the dog files were {}%'.format(sum(dog)[0]))

###Output
The percent of dogs detected in the humans files were 1.0%
The percent of dogs detected in the dog files were 100.0%

###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

## I am going to try another pretrained DNN: AlexNet.
# Note that thanks to the change I included in the prediction function above,
# it is now very easy to try different networks.
alexnet = models.alexnet(pretrained=True)

# Create arrays to store results
human=np.zeros((100,1))
dog=np.zeros((100,1))

# iterate over the list
for x in range(len(human_files_short)):
    human[x]=dog_detector(human_files_short[x],alexnet)
    dog[x]=dog_detector(dog_files_short[x],alexnet)

# Results
print('The percent of dogs detected with Alexnet in the humans files were {}%'.format(sum(human)[0]))
print('The percent of dogs detected with Alexnet in the dog files were {}%'.format(sum(dog)[0]))

###Output
Downloading: "https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth" to /root/.torch/models/alexnet-owt-4df8aa71.pth
100%|██████████| 244418560/244418560 [00:02<00:00, 95000857.08it/s]

###Markdown
Well, it isn't bad, but it is better to use "vgg16" as suggested! --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany | Welsh Springer Spaniel
- | -

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever | American Water Spaniel
- | -

Likewise, recall that labradors come in yellow, chocolate, and black.
Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.

Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes

# In all cases the transformations consist of: resize to 255, crop to
# 244 pixels, convert to a tensor object and normalize.
# For the training data I also included a random rotation to provide
# variability to the training and more generalization power to the NN.
train_transforms = transforms.Compose([transforms.Resize(255),
                                       transforms.CenterCrop(244),
                                       transforms.RandomRotation(30),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])

# Validation and testing transformation
val_test_transforms = transforms.Compose([transforms.Resize(255),
                                          transforms.CenterCrop(244),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])

# Transform the data
train_data = datasets.ImageFolder('/data/dog_images' + '/train', transform=train_transforms)
valid_data = datasets.ImageFolder('/data/dog_images' + '/valid', transform=val_test_transforms)
test_data = datasets.ImageFolder('/data/dog_images' + '/test', transform=val_test_transforms)

# Create the data loaders
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=64)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64)

# Create a dictionary to store all the loaders
loaders_scratch={'train': trainloader,
                 'valid': validloader,
                 'test': testloader}

###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**:
* First, I resized all images so that the shorter side is 255 pixels, using bilinear interpolation. This interpolation creates a continuous transition between color values.
In my opinion, the size selection (255) is reasonable because our dataset is composed of common images that people take daily, which typically have this kind of size. In other cases, for example in microscopy, it is usual to generate images of 512x512, but this is not our case. Next, the images were cropped to 244; this was done because people normally focus their attention on the center of an image, so the information in the center of the images should be more relevant to the classification algorithm.
* The input tensor will have a size of [64,3,244,244]. Note that our images have 3 channels with a size of 244 x 244; also, I selected a batch size of 64 (64 training examples to be used in each iteration). The batch size is set mostly empirically, but some studies have shown that very large batch sizes can degrade the model, while very small ones can create underfitting. In my opinion, a batch size of 64 is a good starting point for this case, where you have a lot of training images.
* I decided to augment the dataset by generating random rotations of the images between 0 and 30 degrees. This kind of augmentation pushes the NN to learn more intrinsic patterns and to deal with noisy scenarios, which in the end increases its prediction capabilities.

(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # Input 3 channels and output 64 channels (the convolution operation is going to
        # map the 3 channels of a 244x244 image to 64 channels of 123x123)
        self.conv_0 = nn.Conv2d(3,64,kernel_size=(6,6),stride=(2,2),padding=(3,3),bias=True)
        # Normalize the values
        self.normalization_0 = nn.BatchNorm2d(64,eps=0.000001,momentum=0.1)
        # Pool operation (this is going to take the 64 maps of 123x123 and transform them
        # into 64 maps of 62x62)
        self.pool_0 = nn.MaxPool2d(kernel_size=3,stride=2,padding=1)
        # Convolve the 64 channels to 128. This is going to generate 128 maps of
        # 31x31
        self.conv_1 = nn.Conv2d(64,128,kernel_size=(4,4),stride=(2,2),padding=1)
        # Normalize
        self.normalization_1 = nn.BatchNorm2d(128,eps=0.000001,momentum=0.1,affine=True)
        # Pool operation. We are going to obtain 128 maps of 8x8
        self.pool_1 = nn.MaxPool2d(kernel_size=5,stride=4,padding=1)
        # Now we can flatten the elements into a vector of 128x8x8=8192 units for the
        # fully connected neural network
        self.classifier_input = nn.Linear(in_features=8192,out_features=1000,bias=True)
        self.classifier_hidden1 = nn.Linear(in_features=1000,out_features=500,bias=True)
        self.classifier_hidden2 = nn.Linear(in_features=500,out_features=200,bias=True)
        self.output = nn.Linear(in_features=200,out_features=133,bias=True)

    def forward(self, x):
        ## Define forward behavior
        x = self.conv_0(x)
        x = F.relu(self.normalization_0(x))
        x = self.pool_0(x)
        x = self.conv_1(x)
        x = F.relu(self.normalization_1(x))
        x = self.pool_1(x)
        # Reshape data to feed it into the classifier's input layer
        x = x.view(-1, 128*8*8)
        x = F.relu(self.classifier_input(x))
        x = F.relu(self.classifier_hidden1(x))
        x = F.relu(self.classifier_hidden2(x))
        x = F.log_softmax(self.output(x),dim=1)

        return x

#-#-# You do NOT have to modify the code below this line.
#-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
model_scratch

###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__ The architecture of this model is composed of two main pieces:
1. **Feature Extraction:**
 * The images fed to the neural network have a size of $3 \times 244 \times 244$, so the first logical step is to convolve the images to generate a feature space. For this, I went from the 3 original channels to 64 using a kernel size of $6\times 6$ with stride 2 and padding 3. This convolution layer generated a layer with 64 feature maps of size $123\times 123$ that was normalized using Batch Normalization.
 * Next, I included a Pooling Layer (size 3x3 with stride 2 and padding 1) with the objective of reducing the parametric space and providing the layer with more generalization power. This strategy generates a layer with 64 channels and feature maps with dimension $62\times 62$.
 * The next step is a convolution that generates 128 channels from 64 with a kernel size of 4, stride 2 and padding 1. Note that with this operation we are reducing the dimensionality of the feature maps to $31\times 31$. As before, I include a normalization operation after the convolution.
 * Finally, the feature extraction stage finishes with a pooling operation. For this, I used Max Pool with kernel size 5, stride 4, and padding 1. This is extreme and I know it, but at this point I don't have a lot of time or computational resources to spend on a model that is not going to obtain good accuracy anyway, because it is too simple for the complexity of this problem.
2. **Classifier:** This is a fully connected NN with the following structure.
 * One linear layer with an input of 8192 units, 1000 output neurons and a bias unit. Activation function: Rectified Linear Unit (ReLU).
 * A second linear layer with an input of 1000 units, 500 output neurons and a bias unit. Activation function: Rectified Linear Unit (ReLU).
 * Another hidden layer with an input of 500 units, 200 output neurons and a bias unit. Activation function: Rectified Linear Unit (ReLU).
 * A final output layer with an input of 200 neurons and an output of 133 (the number of classes). In this case, I used a Log Softmax function to compute the logarithm of the probability.

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.NLLLoss()

### TODO: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(),lr=0.001)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
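One bookkeeping detail worth spelling out before training: the loss tracking below relies on the streaming-mean identity

$$\bar{\ell}_k = \bar{\ell}_{k-1} + \frac{1}{k}\left(\ell_k - \bar{\ell}_{k-1}\right),$$

where $\ell_k$ is the loss on batch $k$ and $\bar{\ell}_k$ is the running mean over the first $k$ batches. Updating `train_loss` (and `valid_loss`) this way means that, after the last batch of an epoch, the variable holds the mean per-batch loss for that epoch without storing the individual batch losses.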
###Code
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            optimizer.zero_grad()
            log_ps = model(data)
            loss = criterion(log_ps, target)
            loss.backward()
            optimizer.step()
            ## record the average training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        # Turn off gradients for validation, saves memory and computations
        with torch.no_grad():
            model.eval()
            for batch_idx, (data, target) in enumerate(loaders['valid']):
                # move to GPU
                if use_cuda:
                    data, target = data.cuda(), target.cuda()
                # Prediction (log probabilities)
                log_prediction = model(data)
                # Loss on the validation batch (the criterion expects the log
                # probabilities, not the predicted class indices)
                this_loss = criterion(log_prediction, target).item()
                ## update the average validation loss
                valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (this_loss - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            valid_loss_min = valid_loss
            torch.save(model.state_dict(), save_path)

    # return trained model
    return model

# train the model (the model is already trained and saved, I commented this line to avoid losing time)
#model_scratch = train(15, loaders_scratch, model_scratch, optimizer_scratch,
#                      criterion_scratch, use_cuda, 'model_scratch.pt')

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code
def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        ps_output=torch.exp(output)
        pred = ps_output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))

    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100.
* correct / total, correct, total))

# I included the next piece of code to avoid errors
if torch.cuda.is_available():
    map_location=lambda storage, loc: storage.cuda()
else:
    map_location='cpu'

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt',map_location=map_location))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output
Test Loss: 3.793828


Test Accuracy: 15% (129/836)

###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders

# In all cases the transformations consist of: resize to 255, crop to
# 244 pixels, convert to a tensor object and normalize.
# For the training data I also included a random rotation to provide
# variability to the training and more generalization power to the NN.
train_transforms = transforms.Compose([transforms.Resize(255),
                                       transforms.CenterCrop(244),
                                       transforms.RandomRotation(30),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])

# Validation and testing transformation
val_test_transforms = transforms.Compose([transforms.Resize(255),
                                          transforms.CenterCrop(244),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])

# Transform the data
train_data = datasets.ImageFolder('/data/dog_images' + '/train', transform=train_transforms)
valid_data = datasets.ImageFolder('/data/dog_images' + '/valid', transform=val_test_transforms)
test_data = datasets.ImageFolder('/data/dog_images' + '/test', transform=val_test_transforms)

# Create the data loaders
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=64)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64)

# Create a dictionary to store all the loaders
loaders_transfer={'train': trainloader,
                  'valid': validloader,
                  'test': testloader}

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture
Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
from collections import OrderedDict
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.densenet121(pretrained=True)

# The model is composed of two sections: 1. Convolutional Stage, 2. Classifier (fully connected NN)
# I am going to keep the architecture and parameters of the first section and create a new
# classifier in accordance with our project.
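# (For reference: densenet121's feature extractor ends in a global-pooled vector of
# 1024 units, which is why the replacement classifier below uses in_features=1024.)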
# Freeze parameters so we don't backprop through them
for param in model_transfer.parameters():
    param.requires_grad = False

# If you print the model (print(model_transfer)), you can see that the output of the
# convolutional section contains 1024 nodes, so this is the input of our fully
# connected NN.
model_transfer.classifier = nn.Sequential(OrderedDict([
                          ('fc1', nn.Linear(1024, 500)),
                          ('relu', nn.ReLU()),
                          ('fc2', nn.Linear(500, 133)),
                          ('output', nn.LogSoftmax(dim=1))]))

if use_cuda:
    model_transfer = model_transfer.cuda()
model_transfer

###Output
/opt/conda/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/densenet.py:212: UserWarning: nn.init.kaiming_normal is now deprecated in favor of nn.init.kaiming_normal_.

###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.

__Answer:__
* I used a [Dense Convolutional Network (DenseNet)](https://pytorch.org/hub/pytorch_vision_densenet/) for the transfer learning. These networks are composed of two stages: the first one handles feature extraction, while the second is the classifier, a fully connected neural network. In general, DenseNets have several advantages (see the [original paper](https://arxiv.org/pdf/1608.06993.pdf) for details) that make them desirable for a lot of applications; for example, they reduce the number of parameters (less memory and fewer computational resources), alleviate the vanishing-gradient problem and strengthen feature propagation.
* In general, the network is composed of $121$ layers. The input layer expects scaled images (values between [0, 1]) with shape $3\times 224\times 224$ and a normalization given by $mean =[0.485,0.456,0.406]$ and $sd =[0.229, 0.224, 0.225]$.
* The original classifier in DenseNet is composed of a single fully connected layer with an input of 1024 neurons and an output of 1000 units. I changed this architecture so that it complies with the specifics of our problem:
 * One linear layer with an input of 1024 units, 500 output neurons and a bias unit. Activation function: Rectified Linear Unit (ReLU).
 * A final output layer with an input of 500 neurons and an output of 133 (the number of classes). In this case, I used a Log Softmax function to compute the logarithm of the probability.

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.NLLLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(),lr=0.03)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
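Before launching the training run in the following cell, it can be worth sanity-checking that the freezing above worked as intended, i.e. that only the new classifier's parameters will be updated. A minimal check (assuming the `model_transfer` defined above):

###Code
# List the parameters that still require gradients; with the feature extractor
# frozen, only the classifier's weights and biases should appear here.
trainable = [name for name, p in model_transfer.named_parameters() if p.requires_grad]
print(trainable)

###Output
_____no_output_____
###Markdown
If anything other than the classifier layers shows up in this list, the optimizer would update those weights as well.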
###Code # train the model #model_transfer = train(25, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 4.553665 Validation Loss: 4.135879 Epoch: 2 Training Loss: 3.418205 Validation Loss: 2.815883 Epoch: 3 Training Loss: 2.170602 Validation Loss: 1.822265 Epoch: 4 Training Loss: 1.469322 Validation Loss: 1.360983 Epoch: 5 Training Loss: 1.121332 Validation Loss: 1.061042 Epoch: 6 Training Loss: 0.918619 Validation Loss: 0.911601 Epoch: 7 Training Loss: 0.814958 Validation Loss: 0.874710 Epoch: 8 Training Loss: 0.729003 Validation Loss: 0.746070 Epoch: 9 Training Loss: 0.657601 Validation Loss: 0.726075 Epoch: 10 Training Loss: 0.618595 Validation Loss: 0.697037 Epoch: 11 Training Loss: 0.573179 Validation Loss: 0.676995 Epoch: 12 Training Loss: 0.548976 Validation Loss: 0.624813 Epoch: 13 Training Loss: 0.528498 Validation Loss: 0.632919 Epoch: 14 Training Loss: 0.491777 Validation Loss: 0.575175 Epoch: 15 Training Loss: 0.472002 Validation Loss: 0.548469 Epoch: 16 Training Loss: 0.442853 Validation Loss: 0.568504 Epoch: 17 Training Loss: 0.434660 Validation Loss: 0.531804 Epoch: 18 Training Loss: 0.430914 Validation Loss: 0.521330 Epoch: 19 Training Loss: 0.404939 Validation Loss: 0.539403 Epoch: 20 Training Loss: 0.395123 Validation Loss: 0.525363 Epoch: 21 Training Loss: 0.380786 Validation Loss: 0.487043 Epoch: 22 Training Loss: 0.370200 Validation Loss: 0.469350 Epoch: 23 Training Loss: 0.366010 Validation Loss: 0.455254 Epoch: 24 Training Loss: 0.348563 Validation Loss: 0.478358 Epoch: 25 Training Loss: 0.344071 Validation Loss: 0.483245 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code # I included the next piece of code to avoid errors if torch.cuda.is_available(): map_location=lambda storage, loc: storage.cuda() else: map_location='cpu' # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt',map_location=map_location)) test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.532774 Test Accuracy: 84% (707/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. > With the objective to not repeat the above code every single time that I work on this notebook, I am going to include a cell to load all the necessary packages, create the model and configure CUDA. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes] # # Create a function to Transform to tensor and normalize transformation = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(244), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) def predict_breed_transfer(img_path,transform_func,model,class_names,use_cuda): # load the image and return the predicted breed # Load Image image = Image.open(img_path) # Transformation image_tensor = transform_func(image).float() image_tensor = image_tensor.unsqueeze_(0) #image_tensor = Variable(image_tensor) # move to GPU if use_cuda: image_tensor = image_tensor.cuda() # Prediction (log probabilities) log_output = model(image_tensor) # convert output probabilities to predicted class ps_output=torch.exp(log_output) pred = ps_output.data.max(1, keepdim=True)[1] return class_names[pred.item()] import random # Test the function with 10 random paths for img_path in random.sample(list(dog_files),10): print('Path: {},\t Prediction: {}'.format(img_path[27:], predict_breed_transfer(img_path,transformation,model_transfer,class_names,use_cuda))) ###Output Path: English_toy_spaniel/English_toy_spaniel_04503.jpg, Prediction: English toy spaniel Path: Lhasa_apso/Lhasa_apso_06627.jpg, Prediction: Havanese Path: Irish_setter/Irish_setter_05831.jpg, Prediction: Irish setter Path: Xoloitzcuintli/Xoloitzcuintli_08283.jpg, Prediction: Xoloitzcuintli Path: Great_pyrenees/Great_pyrenees_05417.jpg, Prediction: Great pyrenees Path: Bulldog/Bulldog_02866.jpg, Prediction: Bulldog Path: Cocker_spaniel/Cocker_spaniel_03756.jpg, Prediction: Cocker spaniel Path: Border_collie/Border_collie_02016.jpg, Prediction: Border collie Path: erman_shorthaired_pointer/German_shorthaired_pointer_04974.jpg, Prediction: German shorthaired pointer Path: nglish_toy_spaniel/English_toy_spaniel_04542.jpg, Prediction: English toy spaniel ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): # Human face detector human=face_detector(img_path) # Dog Detector dog=dog_detector(img_path,vgg16) # Output if human: name='Human' prediction=predict_breed_transfer(img_path,transformation,model_transfer,class_names,use_cuda) elif dog: name= 'Dog' prediction=predict_breed_transfer(img_path,transformation,model_transfer,class_names,use_cuda) else: name='You are not a Human or Dog!!!' prediction='ERROR!! 
No Classification available for this Image'

    return name, prediction

###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__
* I had a lot of fun with this project!!! The output is great and in my opinion this project shows the huge power of Deep Learning. Of course, it can be improved; here are some ideas:
 1. Try other pre-trained CNNs.
 2. Try other architectures for the classification block in the NN.
 3. Tune the learning rate and the number of epochs.
 4. If possible, include more images in the training phase.

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
from PIL import Image, ImageDraw, ImageFont
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

## suggested code, below
for file in np.hstack((random.sample(list(human_files),3),
                       random.sample(list(dog_files),3))):
    kind, breed=run_app(file)
    # load Image
    image = Image.open(file)
    # display the image, along with the greeting and predicted breed
    plt.imshow(image)
    plt.text(0.25, 0.9, 'Hello '+ kind+'!!!', fontsize=14, transform=plt.gcf().transFigure)
    plt.text(1, 0.7, 'You look like a ....\n' + breed+' :)', color='r',fontsize=14, transform=plt.gcf().transFigure)
    plt.show()

###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**.
Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. 
In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
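# Approach: count detections over each 100-image subset and report the hit rates
# as percentages (each subset has exactly 100 files, so the counts double as percents).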
correct_human_count = 0 wrong_human_count = 0 for human_file in human_files_short: if face_detector(human_file): correct_human_count += 1 for dog_file in dog_files_short: if face_detector(dog_file): wrong_human_count +=1 print('{}% of human images are detected in the first 100 human files'.format(correct_human_count)) print('{}% of human images are detected in the first 100 dog files'.format(wrong_human_count)) ###Output 96% of human images are detected in the first 100 human files 18% of human images are detected in the first 100 dog files ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
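    # (Note: the Resize/ToTensor/Normalize pipeline below follows torchvision's
    # pretrained-model convention: 224x224 RGB input, normalized with the
    # standard ImageNet mean/std.)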
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image # open the image img = Image.open(img_path).convert('RGB') # preprocessing using transform pipeline min_img_size = 224 # VGG16 takes image of 224x224 transform_pipeline = transforms.Compose([transforms.Resize((min_img_size,min_img_size)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) img = transform_pipeline(img) img = img.unsqueeze(0) # the pretrained model takes input dim [batch_size, channel, H, W] so we need insert size one at index0 location # put the image to GPU if use_cuda: img = img.cuda() VGG16.eval() pred = VGG16(img) if use_cuda: pred = pred.cpu() # put prediction back to cpu pred = pred.data.numpy().argmax() # get the index for the most possible class return pred # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. idx = VGG16_predict(img_path) return idx in range(151,269) # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. correct_dog_count = 0 wrong_dog_count = 0 for human_file in human_files_short: if dog_detector(human_file): wrong_dog_count += 1 for dog_file in dog_files_short: if dog_detector(dog_file): correct_dog_count +=1 print('{}% of dog images are detected in the first 100 human files'.format(wrong_dog_count)) print('{}% of dog images are detected in the first 100 dog files'.format(correct_dog_count)) ###Output 0% of dog images are detected in the first 100 human files 93% of dog images are detected in the first 100 dog files ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. 
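# --- Added illustrative sketch (an assumption, not the original author's solution) ---
# One possible alternative: repeat the dog-detector experiment with a
# pre-trained ResNet-50, reusing the ImageNet preprocessing convention from
# VGG16_predict above and the same dog class index range 151-268.
resnet50 = models.resnet50(pretrained=True)
if use_cuda:
    resnet50 = resnet50.cuda()
resnet50.eval()

resnet_transform = transforms.Compose([transforms.Resize((224, 224)),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                            std=[0.229, 0.224, 0.225])])

def resnet50_dog_detector(img_path):
    # preprocess one image and take the most likely ImageNet class index
    img = resnet_transform(Image.open(img_path).convert('RGB')).unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    idx = resnet50(img).data.cpu().numpy().argmax()
    return idx in range(151, 269)

# report detection rates on the two short file lists (100 images each,
# so the count equals the percentage)
print('{}% of human images detected as dogs'.format(
    sum(resnet50_dog_detector(f) for f in human_files_short)))
print('{}% of dog images detected as dogs'.format(
    sum(resnet50_dog_detector(f) for f in dog_files_short)))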
###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! 
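Before settling on a pipeline, it can help to apply a candidate transform to a single training image and inspect the resulting tensor (a small illustrative sketch added here, not part of the project template; it assumes the `dog_files` array from Step 0): ###Code
from PIL import Image
import torchvision.transforms as transforms

# a minimal candidate pipeline: resize to 224x224 and convert to a tensor
preview_transform = transforms.Compose([transforms.Resize((224, 224)),
                                        transforms.ToTensor()])
sample = Image.open(dog_files[0]).convert('RGB')
print(preview_transform(sample).shape)  # expected: torch.Size([3, 224, 224])
###Output _____no_output_____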
###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes # specify the transforms for train/valid/test dataset data_transform = {'train':transforms.Compose([transforms.Resize((224,224)), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), 'valid':transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), 'test':transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])} # use Pytorch build-in function ImageFolder to create the customerized dataset and its labels train_dir = 'dogImages/train' valid_dir = 'dogImages/valid' test_dir = 'dogImages/test' image_datasets = {'train':datasets.ImageFolder(root=train_dir, transform=data_transform['train']), 'valid':datasets.ImageFolder(root=valid_dir, transform=data_transform['valid']), 'test':datasets.ImageFolder(root=test_dir, transform=data_transform['test'])} batch_size=4 # create the DataLoader for the train/test/valid image_loader = {'train':torch.utils.data.DataLoader(image_datasets['train'], batch_size=batch_size, shuffle=True), 'valid':torch.utils.data.DataLoader(image_datasets['valid'], batch_size=batch_size, shuffle=False), 'test':torch.utils.data.DataLoader(image_datasets['test'], batch_size=batch_size, shuffle=False)} ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: I choose to resize the images to 224x224, which is the standard size for many of the ImageNet pretrained models. I applied the random horizontal flip and the random rotation for the training data set, so that we can add some more variations to our training data and take different situations into consideration (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. 
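As a sanity check on the classifier's input size in the network below (an added sketch, not part of the template): each 3x3 convolution without padding trims two pixels, and each 2x2 max-pool with `ceil_mode=True` halves the size, rounding up, so a 224x224 input reaches the classifier as a 6x6 feature map: ###Code
import math

# trace the spatial size through five (conv 3x3, no padding) +
# (max-pool 2x2, ceil_mode=True) stages, starting from a 224x224 input
size = 224
for _ in range(5):
    size = math.ceil((size - 2) / 2)
print(size)  # 6, matching the 6*6*256 input of fc1 below
###Output _____no_output_____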
###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN # local features self.conv1 = nn.Conv2d(3, 16, 3) self.conv2 = nn.Conv2d(16, 32, 3) self.conv3 = nn.Conv2d(32, 64, 3) self.conv4 = nn.Conv2d(64, 128, 3) self.conv5 = nn.Conv2d(128, 256, 3) self.bn1 = nn.BatchNorm2d(16) self.bn2 = nn.BatchNorm2d(32) self.bn3 = nn.BatchNorm2d(64) self.bn4 = nn.BatchNorm2d(128) self.bn5 = nn.BatchNorm2d(256) self.dropout1 = nn.Dropout(p=0.1) self.dropout2 = nn.Dropout(p=0.2) self.dropout3 = nn.Dropout(p=0.3) self.dropout4 = nn.Dropout(p=0.4) self.dropout5 = nn.Dropout(p=0.5) self.pool = nn.MaxPool2d(2, 2, ceil_mode=True) self.fc1 = nn.Linear(6*6*256, 133) def forward(self, x): ## Define forward behavior x = self.pool(self.bn1(self.conv1(x))) x = F.relu(x) x = self.dropout1(x) x = self.pool(self.bn2(self.conv2(x))) x = F.relu(x) x = self.dropout2(x) x = self.pool(self.bn3(self.conv3(x))) x = F.relu(x) x = self.dropout3(x) x = self.pool(self.bn4(self.conv4(x))) x = F.relu(x) x = self.dropout4(x) x = self.pool(self.bn5(self.conv5(x))) x = F.relu(x) x = self.dropout5(x) x = x.view(-1, 6*6*256) return self.fc1(x) #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (1) Adjust the learning rate: The initial learning rate I used was too large so the train loss and valid loss stop decreasing after first few epochs (2) Decrease the number of the fully connected layers: Initially I can only reach single digit accuracy with 3 FC layers. After keeping only the last FC layer, I was able to reach accuracy $>10\%$ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr = 0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
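Before launching a long training run, a quick forward pass on random data can confirm that the model and loss wire together (an illustrative check added here, not part of the project template): ###Code
# feed a random batch through the untrained network and the loss function
dummy_images = torch.randn(2, 3, 224, 224)
dummy_labels = torch.randint(0, 133, (2,))
if use_cuda:
    dummy_images, dummy_labels = dummy_images.cuda(), dummy_labels.cuda()
out = model_scratch(dummy_images)
print(out.shape)                                    # torch.Size([2, 133])
print(criterion_scratch(out, dummy_labels).item())  # roughly ln(133), about 4.9, for an untrained net
###Output _____no_output_____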
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() # forward pass output = model.forward(data) # calculate loss loss = criterion(output,target) # backward pass loss.backward() # update weight optimizer.step() # calculate the average training loss train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # forward pass output = model.forward(data) # calculate loss loss = criterion(output,target) # calculate average validation loss valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min : valid_loss_min = valid_loss torch.save(model.state_dict(),save_path) # return trained model return model # train the model model_scratch = train(100, image_loader, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 6.535180 Validation Loss: 4.677166 Epoch: 2 Training Loss: 4.579511 Validation Loss: 4.458007 Epoch: 3 Training Loss: 4.379362 Validation Loss: 4.347892 Epoch: 4 Training Loss: 4.202756 Validation Loss: 4.217092 Epoch: 5 Training Loss: 4.013939 Validation Loss: 4.188818 Epoch: 6 Training Loss: 3.845044 Validation Loss: 4.127485 Epoch: 7 Training Loss: 3.667336 Validation Loss: 3.967301 Epoch: 8 Training Loss: 3.552296 Validation Loss: 4.052395 Epoch: 9 Training Loss: 3.394723 Validation Loss: 3.969833 Epoch: 10 Training Loss: 3.267278 Validation Loss: 3.906016 Epoch: 11 Training Loss: 3.151727 Validation Loss: 3.833358 Epoch: 12 Training Loss: 3.038300 Validation Loss: 3.861567 Epoch: 13 Training Loss: 2.925566 Validation Loss: 3.798557 Epoch: 14 Training Loss: 2.802108 Validation Loss: 3.870739 Epoch: 15 Training Loss: 2.710743 Validation Loss: 3.780680 Epoch: 16 Training Loss: 2.612591 Validation Loss: 3.775549 Epoch: 17 Training Loss: 2.561399 Validation Loss: 4.025845 Epoch: 18 Training Loss: 2.457158 Validation Loss: 3.767143 Epoch: 19 Training Loss: 2.382396 Validation Loss: 3.837120 Epoch: 20 Training Loss: 2.345568 Validation Loss: 3.825437 Epoch: 21 Training Loss: 2.230854 Validation Loss: 3.861032 Epoch: 22 Training Loss: 2.150217 Validation Loss: 3.881824 Epoch: 
23 Training Loss: 2.068412 Validation Loss: 3.912156 Epoch: 24 Training Loss: 2.047470 Validation Loss: 3.953211 Epoch: 25 Training Loss: 1.995846 Validation Loss: 3.917765 Epoch: 26 Training Loss: 1.944202 Validation Loss: 4.006778 Epoch: 27 Training Loss: 1.881262 Validation Loss: 3.911252 Epoch: 28 Training Loss: 1.807838 Validation Loss: 4.015585 Epoch: 29 Training Loss: 1.794873 Validation Loss: 3.787535 Epoch: 30 Training Loss: 1.762848 Validation Loss: 4.055448 Epoch: 31 Training Loss: 1.732810 Validation Loss: 3.991674 Epoch: 32 Training Loss: 1.646995 Validation Loss: 3.891724 Epoch: 33 Training Loss: 1.651290 Validation Loss: 3.962487 Epoch: 34 Training Loss: 1.627263 Validation Loss: 4.087136 Epoch: 35 Training Loss: 1.576605 Validation Loss: 3.918119 Epoch: 36 Training Loss: 1.546039 Validation Loss: 3.903914 Epoch: 37 Training Loss: 1.466188 Validation Loss: 4.236403 Epoch: 38 Training Loss: 1.466267 Validation Loss: 3.997410 Epoch: 39 Training Loss: 1.423082 Validation Loss: 4.043931 Epoch: 40 Training Loss: 1.384504 Validation Loss: 3.977584 Epoch: 41 Training Loss: 1.373653 Validation Loss: 3.995446 Epoch: 42 Training Loss: 1.372558 Validation Loss: 4.393724 Epoch: 43 Training Loss: 1.347031 Validation Loss: 4.138809 Epoch: 44 Training Loss: 1.311542 Validation Loss: 4.335519 Epoch: 45 Training Loss: 1.315819 Validation Loss: 4.115313 Epoch: 46 Training Loss: 1.263770 Validation Loss: 4.011296 Epoch: 47 Training Loss: 1.266159 Validation Loss: 4.132951 Epoch: 48 Training Loss: 1.242131 Validation Loss: 4.237012 Epoch: 49 Training Loss: 1.235539 Validation Loss: 4.296589 Epoch: 50 Training Loss: 1.170381 Validation Loss: 4.082350 Epoch: 51 Training Loss: 1.213809 Validation Loss: 4.007591 Epoch: 52 Training Loss: 1.175583 Validation Loss: 4.155737 Epoch: 53 Training Loss: 1.111725 Validation Loss: 4.285254 Epoch: 54 Training Loss: 1.129173 Validation Loss: 4.037244 Epoch: 55 Training Loss: 1.107103 Validation Loss: 4.353430 Epoch: 56 Training Loss: 1.047641 Validation Loss: 4.206831 Epoch: 57 Training Loss: 1.064121 Validation Loss: 4.290906 Epoch: 58 Training Loss: 1.041711 Validation Loss: 4.350050 Epoch: 59 Training Loss: 1.020256 Validation Loss: 4.426517 Epoch: 60 Training Loss: 1.032308 Validation Loss: 4.434050 Epoch: 61 Training Loss: 1.020000 Validation Loss: 4.386990 Epoch: 62 Training Loss: 1.021717 Validation Loss: 4.337529 Epoch: 63 Training Loss: 0.972615 Validation Loss: 4.747813 Epoch: 64 Training Loss: 0.984052 Validation Loss: 4.538648 Epoch: 65 Training Loss: 0.972819 Validation Loss: 4.518819 Epoch: 66 Training Loss: 0.923880 Validation Loss: 4.587272 Epoch: 67 Training Loss: 0.972014 Validation Loss: 4.301955 Epoch: 68 Training Loss: 0.908055 Validation Loss: 4.415642 Epoch: 69 Training Loss: 0.868910 Validation Loss: 4.788167 Epoch: 70 Training Loss: 0.906851 Validation Loss: 4.737288 Epoch: 71 Training Loss: 0.897360 Validation Loss: 4.458452 Epoch: 72 Training Loss: 0.887370 Validation Loss: 4.612539 Epoch: 73 Training Loss: 0.854165 Validation Loss: 4.476861 Epoch: 74 Training Loss: 0.878642 Validation Loss: 4.524493 Epoch: 75 Training Loss: 0.858775 Validation Loss: 4.475894 Epoch: 76 Training Loss: 0.818167 Validation Loss: 4.833607 Epoch: 77 Training Loss: 0.794436 Validation Loss: 4.494308 Epoch: 78 Training Loss: 0.834804 Validation Loss: 4.636078 Epoch: 79 Training Loss: 0.791427 Validation Loss: 4.312651 Epoch: 80 Training Loss: 0.810831 Validation Loss: 4.730837 Epoch: 81 Training Loss: 0.787947 Validation Loss: 4.590734 Epoch: 82 Training 
Loss: 0.803169 Validation Loss: 4.760179 Epoch: 83 Training Loss: 0.789365 Validation Loss: 4.746623 Epoch: 84 Training Loss: 0.782478 Validation Loss: 4.638498 Epoch: 85 Training Loss: 0.785182 Validation Loss: 4.802215 Epoch: 86 Training Loss: 0.755081 Validation Loss: 4.570581 Epoch: 87 Training Loss: 0.735783 Validation Loss: 4.634118 Epoch: 88 Training Loss: 0.751213 Validation Loss: 4.757229 Epoch: 89 Training Loss: 0.732998 Validation Loss: 4.692438 Epoch: 90 Training Loss: 0.723866 Validation Loss: 4.297959 Epoch: 91 Training Loss: 0.719183 Validation Loss: 4.904610 Epoch: 92 Training Loss: 0.731325 Validation Loss: 4.469810 Epoch: 93 Training Loss: 0.742909 Validation Loss: 4.754427 Epoch: 94 Training Loss: 0.738365 Validation Loss: 4.599453 Epoch: 95 Training Loss: 0.715221 Validation Loss: 4.990715 Epoch: 96 Training Loss: 0.731477 Validation Loss: 4.534232 Epoch: 97 Training Loss: 0.664168 Validation Loss: 4.767763 Epoch: 98 Training Loss: 0.703913 Validation Loss: 4.850485 Epoch: 99 Training Loss: 0.667099 Validation Loss: 4.823792 Epoch: 100 Training Loss: 0.676445 Validation Loss: 4.876890 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(image_loader, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.847057 Test Accuracy: 13% (115/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
###Code ## TODO: Specify data loaders # create the DataLoader for the train/test/valid # specify the transforms for train/valid/test dataset data_transform = {'train':transforms.Compose([transforms.Resize((224,224)), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]), 'valid':transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]), 'test':transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])} # use Pytorch build-in function ImageFolder to create the customerized dataset and its labels train_dir = 'dogImages/train' valid_dir = 'dogImages/valid' test_dir = 'dogImages/test' image_datasets = {'train':datasets.ImageFolder(root=train_dir, transform=data_transform['train']), 'valid':datasets.ImageFolder(root=valid_dir, transform=data_transform['valid']), 'test':datasets.ImageFolder(root=test_dir, transform=data_transform['test'])} batch_size=4 # create the DataLoader for the train/test/valid image_loader = {'train':torch.utils.data.DataLoader(image_datasets['train'], batch_size=batch_size, shuffle=True), 'valid':torch.utils.data.DataLoader(image_datasets['valid'], batch_size=batch_size, shuffle=False), 'test':torch.utils.data.DataLoader(image_datasets['test'], batch_size=batch_size, shuffle=False)} ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.resnet50(pretrained=True) # freeze the model weights for param in model_transfer.parameters(): param.requires_grad = False model_transfer.fc = nn.Linear(model_transfer.fc.in_features, 133) if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I choose to use ResNet50 as my backbone model because it is usually trained faster than VGG. It also deals with the vanishing-gradient problem. For this project, I simply replaced the last fully connected layer (with 1000 classes) in resnet with 133 classes (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(model_transfer.parameters(), lr = 0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. 
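Since the backbone weights were frozen above, it is worth confirming that only the new classification head will receive gradient updates (an illustrative check, not part of the template): ###Code
# list the parameters that still require gradients
print([name for name, p in model_transfer.named_parameters() if p.requires_grad])
# expected: ['fc.weight', 'fc.bias']
###Output _____no_output_____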
###Code n_epochs = 10 # train the model model_transfer = train(n_epochs, image_loader, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 3.036485 Validation Loss: 1.238674 Epoch: 2 Training Loss: 1.862926 Validation Loss: 1.062563 Epoch: 3 Training Loss: 1.689053 Validation Loss: 1.217455 Epoch: 4 Training Loss: 1.597970 Validation Loss: 1.104364 Epoch: 5 Training Loss: 1.561124 Validation Loss: 1.106137 Epoch: 6 Training Loss: 1.461867 Validation Loss: 1.136581 Epoch: 7 Training Loss: 1.499067 Validation Loss: 1.320460 Epoch: 8 Training Loss: 1.424684 Validation Loss: 1.311012 Epoch: 9 Training Loss: 1.456773 Validation Loss: 1.266302 Epoch: 10 Training Loss: 1.403454 Validation Loss: 1.332026 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(image_loader, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 1.063519 Test Accuracy: 72% (604/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in image_datasets['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed # open the image img = Image.open(img_path).convert('RGB') # preprocessing using transform pipeline min_img_size = 224 # ResNet50 takes image of 224x224 transform_pipeline = transforms.Compose([transforms.Resize((min_img_size,min_img_size)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) img = transform_pipeline(img) img = img.unsqueeze(0) # the pretrained model takes input dim [batch_size, channel, H, W] so we need insert size one at index0 location # put the image to GPU if use_cuda: img = img.cuda() model_transfer.eval() pred_idx = model_transfer(img) if use_cuda: pred_idx = pred_idx.cpu() # put prediction back to cpu pred_idx = pred_idx.data.numpy().argmax() # get the index for the most possible class return class_names[pred_idx] ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. 
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    # display the input image first
    img = Image.open(img_path).convert('RGB')
    plt.figure()
    plt.imshow(img)
    plt.show()
    # a detected dog takes priority; otherwise look for a human face
    if dog_detector(img_path):
        print(predict_breed_transfer(img_path))
    elif face_detector(img_path):
        print('You look like a ...')
        print(predict_breed_transfer(img_path))
    else:
        print('No dog/human detected')
###Output _____no_output_____ ###Markdown --- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ The output is better than I expected for predicting the dog breed. The model can tell the difference between the golden retriever and the labrador retriever (with some candies on its nose!). It does misclassify the husky as an Alaskan malamute, but sometimes even humans cannot tell the difference between those two. To improve the performance: (1) Add more training images, or more varied image augmentation methods, to further improve the diversity of the training dataset. (2) Try adding more fully connected layers to see if that helps the model identify the subtle differences better, such as the difference between the husky and the Alaskan malamute. (3) The validation loss stops decreasing after the 2nd epoch while the training loss keeps decreasing even after 10 epochs, so we can try adding a dropout layer before the last FC layer to see if that further decreases the validation loss. ###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
test_files = np.array(glob("test/*"))

## suggested code, below
for file in test_files:
    run_app(file)
###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output.
You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' 
% len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ 96% human pictures are recognized as human face. 
18% dog pictures are recognized as human face. ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. human = 0 dog = 0 for human_file in tqdm(human_files_short): human += 1 * face_detector(human_file) for dog_files in tqdm(dog_files_short): dog += 1 * face_detector(dog_files) print('{}% human pictures are recognized as human face.'.format(human)) print('{}% dog pictures are recognized as human face.'.format(dog)) ###Output 100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 64.77it/s] 100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:06<00:00, 15.91it/s] ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to C:\Users\lzb/.cache\torch\checkpoints\vgg16-397923af.pth 100%|████████████████████████████████████████████████████████████████████████████████| 528M/528M [35:04<00:00, 263kB/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. 
from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) img = Image.open(img_path) img = transform(img) img = torch.unsqueeze(img, 0) if torch.cuda.is_available(): img = img.cuda() prediction = VGG16(img) if torch.cuda.is_available(): prediction = prediction.cpu() index = prediction.data.numpy().argmax() return index # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. label = VGG16_predict(img_path) logic = label >= 151 and label <= 268 return logic# true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ 0% and 95% ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. human = 0 dog = 0 for human_file in tqdm(human_files_short): human += 1 * dog_detector(human_file) for dog_file in tqdm(dog_files_short): dog += 1 * dog_detector(dog_file) print('{}% human pictures are recognized as dog.'.format(human)) print('{}% dog pictures are recognized as dog.'.format(dog)) ###Output 100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:02<00:00, 36.62it/s] 100%|████████████████████████████████████████████████████████████████████████████████| 100/100 [00:01<00:00, 62.58it/s] ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. 
### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! 
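A quick integrity check on the folder layout before building the loaders (an added sketch, not part of the project template): ###Code
import os
from glob import glob

# count the images in each split of the dog dataset
for split in ['train', 'valid', 'test']:
    n_images = len(glob(os.path.join('./dogImages', split, '*', '*')))
    print('{}: {} images'.format(split, n_images))
###Output _____no_output_____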
###Code
import os
import torchvision
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
Dir = './dogImages'
train_path = os.path.join(Dir, 'train')
val_path = os.path.join(Dir, 'valid')
test_path = os.path.join(Dir, 'test')
batch = 32

# training transforms include augmentation (random flips and rotations)
train_transformer = transforms.Compose([transforms.Resize(224),
                                        transforms.CenterCrop(224),
                                        transforms.RandomHorizontalFlip(),
                                        transforms.RandomRotation(30),
                                        transforms.ToTensor(),
                                        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                             std=[0.229, 0.224, 0.225])
                                        ])
train_dataset = datasets.ImageFolder(root=train_path, transform=train_transformer)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch, shuffle=True)

# validation and test transforms use resizing and normalization only
valid_transformer = transforms.Compose([transforms.Resize(224),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor(),
                                        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                             std=[0.229, 0.224, 0.225])
                                        ])
val_dataset = datasets.ImageFolder(root=val_path, transform=valid_transformer)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=batch)

test_transformer = transforms.Compose([transforms.Resize(224),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                            std=[0.229, 0.224, 0.225])
                                       ])
test_dataset = datasets.ImageFolder(root=test_path, transform=test_transformer)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch)

loaders_scratch = {'train': train_loader, 'valid': val_loader, 'test': test_loader}
###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: I resize each picture and then center-crop it to 224 * 224, the standard input size for the VGG model. I augment the training set with the methods I know: random horizontal flips and random rotations of up to 30 degrees. (IMPLEMENTATION) Model Architecture Create a CNN to classify dog breed. Use the template in the code cell below. ###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        # three conv blocks, each followed by batch norm; padding=1 keeps
        # the spatial size, so only the max-pools halve it (224 -> 28)
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.batchnorm1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.batchnorm2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.batchnorm3 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(2, 2)
        # two fully connected layers with dropout to limit overfitting
        self.fc1 = nn.Linear(28 * 28 * 64, 512)
        self.fc2 = nn.Linear(512, 133)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        x = self.maxpool(F.relu(self.conv1(x)))
        x = self.batchnorm1(x)
        x = self.maxpool(F.relu(self.conv2(x)))
        x = self.batchnorm2(x)
        x = self.maxpool(F.relu(self.conv3(x)))
        x = self.batchnorm3(x)
        # flatten the 28x28x64 feature map before the classifier
        x = x.view(-1, 28 * 28 * 64)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ It's natural to come up with three convolutional layers followed by two fully connected layers.
Some tricks to prevent overfitting (dropout and batch normalization) are also involved. It's just the most naive model. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(params=model_scratch.parameters(), lr=0.001)
###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # clear accumulated gradients, run a forward/backward pass,
            # and update the model parameters
            optimizer.zero_grad()
            outputs = model(data)
            loss = criterion(outputs, target)
            loss.backward()
            optimizer.step()
            ## record the average training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            outputs = model(data)
            loss = criterion(outputs, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            print('Validation loss descreased from {} to {}. save the model'.format(valid_loss_min, valid_loss))
            torch.save(model.state_dict(), save_path)  # save the model
            valid_loss_min = valid_loss  # update the minimum validation loss

    # return trained model
    return model

# train the model
model_scratch = train(30, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output Epoch: 1 Training Loss: 4.900559 Validation Loss: 4.771286 Validation loss descreased from inf to 4.771285533905029. save the model Epoch: 2 Training Loss: 4.744391 Validation Loss: 4.646235 Validation loss descreased from 4.771285533905029 to 4.64623498916626. save the model Epoch: 3 Training Loss: 4.619739 Validation Loss: 4.556045 Validation loss descreased from 4.64623498916626 to 4.556045055389404.
save the model Epoch: 4 Training Loss: 4.511214 Validation Loss: 4.469339 Validation loss descreased from 4.556045055389404 to 4.469339370727539. save the model Epoch: 5 Training Loss: 4.424766 Validation Loss: 4.404804 Validation loss descreased from 4.469339370727539 to 4.404804229736328. save the model Epoch: 6 Training Loss: 4.341077 Validation Loss: 4.347012 Validation loss descreased from 4.404804229736328 to 4.347012042999268. save the model Epoch: 7 Training Loss: 4.283187 Validation Loss: 4.294446 Validation loss descreased from 4.347012042999268 to 4.2944464683532715. save the model Epoch: 8 Training Loss: 4.211464 Validation Loss: 4.250584 Validation loss descreased from 4.2944464683532715 to 4.250584125518799. save the model Epoch: 9 Training Loss: 4.155056 Validation Loss: 4.224713 Validation loss descreased from 4.250584125518799 to 4.224713325500488. save the model Epoch: 10 Training Loss: 4.095991 Validation Loss: 4.196362 Validation loss descreased from 4.224713325500488 to 4.196362495422363. save the model Epoch: 11 Training Loss: 4.041405 Validation Loss: 4.165818 Validation loss descreased from 4.196362495422363 to 4.165818214416504. save the model Epoch: 12 Training Loss: 3.987286 Validation Loss: 4.140038 Validation loss descreased from 4.165818214416504 to 4.140038013458252. save the model Epoch: 13 Training Loss: 3.945042 Validation Loss: 4.117871 Validation loss descreased from 4.140038013458252 to 4.117871284484863. save the model Epoch: 14 Training Loss: 3.893033 Validation Loss: 4.093637 Validation loss descreased from 4.117871284484863 to 4.093637466430664. save the model Epoch: 15 Training Loss: 3.854425 Validation Loss: 4.069692 Validation loss descreased from 4.093637466430664 to 4.069692134857178. save the model Epoch: 16 Training Loss: 3.820509 Validation Loss: 4.050468 Validation loss descreased from 4.069692134857178 to 4.0504679679870605. save the model Epoch: 17 Training Loss: 3.763558 Validation Loss: 4.041994 Validation loss descreased from 4.0504679679870605 to 4.041994094848633. save the model Epoch: 18 Training Loss: 3.700162 Validation Loss: 4.014832 Validation loss descreased from 4.041994094848633 to 4.014832019805908. save the model Epoch: 19 Training Loss: 3.688504 Validation Loss: 4.002074 Validation loss descreased from 4.014832019805908 to 4.002074241638184. save the model Epoch: 20 Training Loss: 3.670305 Validation Loss: 3.979365 Validation loss descreased from 4.002074241638184 to 3.9793646335601807. save the model Epoch: 21 Training Loss: 3.601598 Validation Loss: 3.960568 Validation loss descreased from 3.9793646335601807 to 3.9605677127838135. save the model Epoch: 22 Training Loss: 3.563579 Validation Loss: 3.952921 Validation loss descreased from 3.9605677127838135 to 3.95292067527771. save the model Epoch: 23 Training Loss: 3.538419 Validation Loss: 3.939261 Validation loss descreased from 3.95292067527771 to 3.939260959625244. save the model Epoch: 24 Training Loss: 3.506235 Validation Loss: 3.925990 Validation loss descreased from 3.939260959625244 to 3.925990343093872. save the model Epoch: 25 Training Loss: 3.460555 Validation Loss: 3.917150 Validation loss descreased from 3.925990343093872 to 3.917149543762207. save the model Epoch: 26 Training Loss: 3.430554 Validation Loss: 3.918805 Epoch: 27 Training Loss: 3.392354 Validation Loss: 3.912269 Validation loss descreased from 3.917149543762207 to 3.91226863861084. 
save the model
Epoch: 28 Training Loss: 3.367307 Validation Loss: 3.889531
Validation loss descreased from 3.91226863861084 to 3.889531135559082. save the model
Epoch: 29 Training Loss: 3.346143 Validation Loss: 3.873925
Validation loss descreased from 3.889531135559082 to 3.873924970626831. save the model
Epoch: 30 Training Loss: 3.314870 Validation Loss: 3.879460

###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code
def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output
Test Loss: 3.824166

Test Accuracy: 11% (99/836)

###Markdown
---
Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders
loaders_transfer = {'train': train_loader, 'valid': val_loader, 'test': test_loader}

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Model Architecture
Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.resnet152(pretrained=True)

# freeze all pre-trained feature-extraction layers
for parameter in model_transfer.parameters():
    parameter.requires_grad = False

# replace the final fully connected layer with a 133-class head
num_in = model_transfer.fc.in_features
model_transfer.fc = nn.Linear(in_features=num_in, out_features=133)

if use_cuda:
    model_transfer = model_transfer.cuda()

###Output
Downloading: "https://download.pytorch.org/models/resnet152-b121ed2d.pth" to C:\Users\lzb/.cache\torch\checkpoints\resnet152-b121ed2d.pth
100%|████████████████████████████████████████████████████████████████████████████████| 230M/230M [12:08<00:00, 332kB/s]
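###Markdown
The same freeze-and-replace recipe works with other torchvision backbones. Below is a minimal, illustrative sketch (not the graded solution; `resnet50` is just an assumed alternative, and `models`/`nn` come from the imports in the cell above). Only the freshly created `fc` head receives gradient updates, because everything else is frozen.

###Code
# Sketch only: same transfer-learning recipe with a smaller backbone.
def build_transfer_model(num_classes=133):
    backbone = models.resnet50(pretrained=True)  # download ImageNet weights
    for p in backbone.parameters():
        p.requires_grad = False                  # freeze the feature extractor
    # the new head has requires_grad=True by default, so only it will train
    backbone.fc = nn.Linear(backbone.fc.in_features, num_classes)
    return backbone

###Output
_____no_output_____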
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.

__Answer:__ I think any large pre-trained CNN would work well for this dog project, so all I need to do is download one, freeze its feature layers, replace the final fully connected layer with a 133-class head, and put it to work.

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
# optimize only the parameters that are not frozen (the new fc layer)
optimizer_transfer = optim.SGD(params=filter(lambda p: p.requires_grad, model_transfer.parameters()), lr=0.001)

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model
model_transfer = train(30, loaders_transfer, model_transfer, optimizer_transfer,
                       criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output
Epoch: 1 Training Loss: 4.895763 Validation Loss: 4.841892
Validation loss descreased from inf to 4.841891765594482. save the model
Epoch: 2 Training Loss: 4.777186 Validation Loss: 4.741901
Validation loss descreased from 4.841891765594482 to 4.74190092086792. save the model
Epoch: 3 Training Loss: 4.671309 Validation Loss: 4.649199
Validation loss descreased from 4.74190092086792 to 4.649199485778809. save the model
Epoch: 4 Training Loss: 4.571595 Validation Loss: 4.560447
Validation loss descreased from 4.649199485778809 to 4.560446739196777. save the model
Epoch: 5 Training Loss: 4.476359 Validation Loss: 4.474774
Validation loss descreased from 4.560446739196777 to 4.474774360656738. save the model
Epoch: 6 Training Loss: 4.386364 Validation Loss: 4.389713
Validation loss descreased from 4.474774360656738 to 4.389712810516357. save the model
Epoch: 7 Training Loss: 4.296069 Validation Loss: 4.308086
Validation loss descreased from 4.389712810516357 to 4.308085918426514. save the model
Epoch: 8 Training Loss: 4.209229 Validation Loss: 4.219153
Validation loss descreased from 4.308085918426514 to 4.21915340423584. save the model
Epoch: 9 Training Loss: 4.120545 Validation Loss: 4.142175
Validation loss descreased from 4.21915340423584 to 4.14217472076416. save the model
Epoch: 10 Training Loss: 4.036023 Validation Loss: 4.054057
Validation loss descreased from 4.14217472076416 to 4.054056644439697. save the model
Epoch: 11 Training Loss: 3.953520 Validation Loss: 3.967148
Validation loss descreased from 4.054056644439697 to 3.9671483039855957. save the model
Epoch: 12 Training Loss: 3.874769 Validation Loss: 3.892146
Validation loss descreased from 3.9671483039855957 to 3.892145872116089. save the model
Epoch: 13 Training Loss: 3.793030 Validation Loss: 3.816694
Validation loss descreased from 3.892145872116089 to 3.816694498062134. save the model
Epoch: 14 Training Loss: 3.714922 Validation Loss: 3.733935
Validation loss descreased from 3.816694498062134 to 3.7339348793029785. save the model
Epoch: 15 Training Loss: 3.639887 Validation Loss: 3.656284
Validation loss descreased from 3.7339348793029785 to 3.6562838554382324. save the model
Epoch: 16 Training Loss: 3.555946 Validation Loss: 3.578949
Validation loss descreased from 3.6562838554382324 to 3.578949213027954.
save the model Epoch: 17 Training Loss: 3.484847 Validation Loss: 3.510738 Validation loss descreased from 3.578949213027954 to 3.5107381343841553. save the model Epoch: 18 Training Loss: 3.414080 Validation Loss: 3.437927 Validation loss descreased from 3.5107381343841553 to 3.437927007675171. save the model Epoch: 19 Training Loss: 3.342188 Validation Loss: 3.369991 Validation loss descreased from 3.437927007675171 to 3.3699910640716553. save the model Epoch: 20 Training Loss: 3.279409 Validation Loss: 3.296014 Validation loss descreased from 3.3699910640716553 to 3.296013832092285. save the model Epoch: 21 Training Loss: 3.207107 Validation Loss: 3.240832 Validation loss descreased from 3.296013832092285 to 3.2408316135406494. save the model Epoch: 22 Training Loss: 3.142741 Validation Loss: 3.172839 Validation loss descreased from 3.2408316135406494 to 3.172839403152466. save the model Epoch: 23 Training Loss: 3.087517 Validation Loss: 3.138430 Validation loss descreased from 3.172839403152466 to 3.138429641723633. save the model Epoch: 24 Training Loss: 3.016525 Validation Loss: 3.030626 Validation loss descreased from 3.138429641723633 to 3.030626058578491. save the model Epoch: 25 Training Loss: 2.960712 Validation Loss: 2.981696 Validation loss descreased from 3.030626058578491 to 2.9816958904266357. save the model Epoch: 26 Training Loss: 2.899035 Validation Loss: 2.952440 Validation loss descreased from 2.9816958904266357 to 2.952439785003662. save the model Epoch: 27 Training Loss: 2.845441 Validation Loss: 2.871940 Validation loss descreased from 2.952439785003662 to 2.8719403743743896. save the model Epoch: 28 Training Loss: 2.787327 Validation Loss: 2.823863 Validation loss descreased from 2.8719403743743896 to 2.8238627910614014. save the model Epoch: 29 Training Loss: 2.733238 Validation Loss: 2.756764 Validation loss descreased from 2.8238627910614014 to 2.7567636966705322. save the model Epoch: 30 Training Loss: 2.681208 Validation Loss: 2.709455 Validation loss descreased from 2.7567636966705322 to 2.7094554901123047. save the model ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 2.731863 Test Accuracy: 67% (566/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in train_dataset.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    img_file = Image.open(img_path)
    transformer = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
        ])
    img_transformed = transformer(img_file)
    # move the tensor to the GPU only when one is available
    if use_cuda:
        img_transformed = img_transformed.cuda()
    img_batch = torch.unsqueeze(img_transformed, 0)  # add a batch dimension
    model_transfer.eval()
    output = model_transfer(img_batch)
    _, index = torch.max(output, 1)
    return class_names[int(index)]

###Output
_____no_output_____

###Markdown
---
Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if face_detector(img_path):
        print('Hello, human!')
        plt.imshow(plt.imread(img_path))  # display the image
        plt.show()
        print('You look like a...')
        print(predict_breed_transfer(img_path))
    elif dog_detector(img_path):
        print('Hello, dog!')
        plt.imshow(plt.imread(img_path))  # display the image
        plt.show()
        print('Your predicted breed is...')
        print(predict_breed_transfer(img_path))
    else:
        print('Error, the image is neither a human nor a dog!')

###Output
_____no_output_____

###Markdown
---
Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ (Three possible points of improvement)
- feed more data into training, e.g. via data augmentation (see the sketch after the test run below)
- train for more epochs
- choose the latest and strongest pre-trained model for transfer learning

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
    print('-------')

###Output
Hello, human!
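###Markdown
As a sketch of the first improvement above: training-time data augmentation effectively feeds the network more varied data without collecting new images. The snippet below is illustrative only (the transforms are standard torchvision, but the parameter values are assumptions, not tuned settings); it would replace the plain resize/crop transform used to build the training loader.

###Code
# Sketch only: an augmented transform for the training split.
# Random crops, flips, and small rotations create new views of each image,
# which usually reduces overfitting on a small dataset like this one.
augmented_train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),   # random scale and crop to 224x224
    transforms.RandomHorizontalFlip(),   # mirror images half the time
    transforms.RandomRotation(10),       # rotate by up to +/-10 degrees
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

###Output
_____no_output_____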
###Markdown
Setup Google Colab
https://colab.research.google.com/drive/1j7wXb-4bg4jU0uYeAoDGhguN9osRuage

###Code
run_on_colab = True

###Output
_____no_output_____

###Markdown
Download & Setup Datasets

Download Datasets
> wget
> `-nc`: to prevent redownload

###Code
!wget -nc "https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip"
!wget -nc "https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip"

###Output
--2021-11-28 13:23:56-- https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip
Resolving s3-us-west-1.amazonaws.com (s3-us-west-1.amazonaws.com)... 52.219.112.104
Connecting to s3-us-west-1.amazonaws.com (s3-us-west-1.amazonaws.com)|52.219.112.104|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1132023110 (1.1G) [application/zip]
Saving to: ‘dogImages.zip’

dogImages.zip 100%[===================>] 1.05G 20.0MB/s in 57s

2021-11-28 13:24:54 (19.0 MB/s) - ‘dogImages.zip’ saved [1132023110/1132023110]

###Markdown
Unzip Datasets
> unzip
> `-n`: to prevent overwriting

###Code
!unzip -n "lfw.zip"
!unzip -n "dogImages.zip"

###Output
Streaming output truncated to the last 5000 lines.
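###Markdown
After unzipping, a quick sanity check can confirm the expected layout (`dogImages/train`, `dogImages/valid`, and `dogImages/test`, each holding one folder per breed). A minimal sketch using only the standard library:

###Code
# Sketch only: count breed folders and images in each split.
import os
from glob import glob

for split in ['train', 'valid', 'test']:
    breeds = glob(os.path.join('dogImages', split, '*', ''))       # subfolders
    images = glob(os.path.join('dogImages', split, '*', '*.jpg'))  # image files
    print('%s: %d breed folders, %d images' % (split, len(breeds), len(images)))

###Output
_____no_output_____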
dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04324.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04325.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04326.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04327.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04328.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04329.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04331.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04332.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04334.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04335.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04336.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04337.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04339.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04340.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04342.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04343.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04344.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04345.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04346.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04348.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04349.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04350.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04351.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04352.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04353.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04355.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04356.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04357.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04358.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04360.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04361.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04362.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04363.jpg inflating: dogImages/train/061.English_cocker_spaniel/English_cocker_spaniel_04365.jpg creating: dogImages/train/062.English_setter/ inflating: dogImages/train/062.English_setter/English_setter_04366.jpg inflating: dogImages/train/062.English_setter/English_setter_04367.jpg inflating: dogImages/train/062.English_setter/English_setter_04368.jpg inflating: dogImages/train/062.English_setter/English_setter_04370.jpg inflating: dogImages/train/062.English_setter/English_setter_04371.jpg inflating: dogImages/train/062.English_setter/English_setter_04373.jpg inflating: dogImages/train/062.English_setter/English_setter_04374.jpg inflating: 
dogImages/train/062.English_setter/English_setter_04375.jpg inflating: dogImages/train/062.English_setter/English_setter_04376.jpg inflating: dogImages/train/062.English_setter/English_setter_04377.jpg inflating: dogImages/train/062.English_setter/English_setter_04378.jpg inflating: dogImages/train/062.English_setter/English_setter_04380.jpg inflating: dogImages/train/062.English_setter/English_setter_04381.jpg inflating: dogImages/train/062.English_setter/English_setter_04382.jpg inflating: dogImages/train/062.English_setter/English_setter_04383.jpg inflating: dogImages/train/062.English_setter/English_setter_04384.jpg inflating: dogImages/train/062.English_setter/English_setter_04387.jpg inflating: dogImages/train/062.English_setter/English_setter_04389.jpg inflating: dogImages/train/062.English_setter/English_setter_04390.jpg inflating: dogImages/train/062.English_setter/English_setter_04392.jpg inflating: dogImages/train/062.English_setter/English_setter_04393.jpg inflating: dogImages/train/062.English_setter/English_setter_04394.jpg inflating: dogImages/train/062.English_setter/English_setter_04395.jpg inflating: dogImages/train/062.English_setter/English_setter_04396.jpg inflating: dogImages/train/062.English_setter/English_setter_04397.jpg inflating: dogImages/train/062.English_setter/English_setter_04400.jpg inflating: dogImages/train/062.English_setter/English_setter_04401.jpg inflating: dogImages/train/062.English_setter/English_setter_04402.jpg inflating: dogImages/train/062.English_setter/English_setter_04403.jpg inflating: dogImages/train/062.English_setter/English_setter_04404.jpg inflating: dogImages/train/062.English_setter/English_setter_04405.jpg inflating: dogImages/train/062.English_setter/English_setter_04406.jpg inflating: dogImages/train/062.English_setter/English_setter_04407.jpg inflating: dogImages/train/062.English_setter/English_setter_04408.jpg inflating: dogImages/train/062.English_setter/English_setter_04409.jpg inflating: dogImages/train/062.English_setter/English_setter_04410.jpg inflating: dogImages/train/062.English_setter/English_setter_04411.jpg inflating: dogImages/train/062.English_setter/English_setter_04413.jpg inflating: dogImages/train/062.English_setter/English_setter_04415.jpg inflating: dogImages/train/062.English_setter/English_setter_04416.jpg inflating: dogImages/train/062.English_setter/English_setter_04417.jpg inflating: dogImages/train/062.English_setter/English_setter_04418.jpg inflating: dogImages/train/062.English_setter/English_setter_04419.jpg inflating: dogImages/train/062.English_setter/English_setter_04420.jpg inflating: dogImages/train/062.English_setter/English_setter_04421.jpg inflating: dogImages/train/062.English_setter/English_setter_04422.jpg inflating: dogImages/train/062.English_setter/English_setter_04423.jpg inflating: dogImages/train/062.English_setter/English_setter_04425.jpg inflating: dogImages/train/062.English_setter/English_setter_04426.jpg inflating: dogImages/train/062.English_setter/English_setter_04427.jpg inflating: dogImages/train/062.English_setter/English_setter_04428.jpg inflating: dogImages/train/062.English_setter/English_setter_04430.jpg inflating: dogImages/train/062.English_setter/English_setter_04431.jpg creating: dogImages/train/063.English_springer_spaniel/ inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04432.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04433.jpg inflating: 
dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04435.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04436.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04438.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04439.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04441.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04442.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04443.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04444.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04445.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04447.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04448.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04449.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04451.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04454.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04455.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04456.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04458.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04459.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04460.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04461.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04462.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04463.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04464.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04465.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04467.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04468.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04469.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04470.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04471.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04472.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04473.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04474.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04475.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04476.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04477.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04478.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04479.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04480.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04481.jpg inflating: 
dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04482.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04483.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04485.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04487.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04488.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04489.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04490.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04492.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04493.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04494.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04495.jpg inflating: dogImages/train/063.English_springer_spaniel/English_springer_spaniel_04497.jpg creating: dogImages/train/064.English_toy_spaniel/ inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04498.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04500.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04501.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04502.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04503.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04504.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04505.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04506.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04507.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04510.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04511.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04512.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04514.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04516.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04517.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04519.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04520.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04521.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04522.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04523.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04524.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04525.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04526.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04527.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04528.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04529.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04531.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04533.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04534.jpg inflating: 
dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04535.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04536.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04537.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04538.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04539.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04540.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04543.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04544.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04545.jpg inflating: dogImages/train/064.English_toy_spaniel/English_toy_spaniel_04546.jpg creating: dogImages/train/065.Entlebucher_mountain_dog/ inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04547.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04548.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04549.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04550.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04551.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04552.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04553.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04554.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04555.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04557.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04558.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04560.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04563.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04564.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04565.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04566.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04567.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04569.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04570.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04571.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04572.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04573.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04575.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04576.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04577.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04578.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04579.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04581.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04582.jpg inflating: 
dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04583.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04584.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04585.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04586.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04588.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04589.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04591.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04592.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04593.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04594.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04596.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04598.jpg inflating: dogImages/train/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04599.jpg creating: dogImages/train/066.Field_spaniel/ inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04600.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04601.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04603.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04604.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04605.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04606.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04607.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04609.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04612.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04613.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04615.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04617.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04618.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04620.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04621.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04623.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04624.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04625.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04626.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04627.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04628.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04629.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04630.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04631.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04632.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04633.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04634.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04635.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04636.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04637.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04638.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04639.jpg inflating: dogImages/train/066.Field_spaniel/Field_spaniel_04640.jpg creating: dogImages/train/067.Finnish_spitz/ inflating: 
dogImages/train/067.Finnish_spitz/Finnish_spitz_04641.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04642.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04643.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04644.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04645.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04646.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04647.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04649.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04653.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04654.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04655.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04656.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04657.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04658.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04659.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04660.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04662.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04663.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04664.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04665.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04667.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04668.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04669.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04670.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04671.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04672.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04673.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04674.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04675.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04677.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04678.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04679.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04681.jpg inflating: dogImages/train/067.Finnish_spitz/Finnish_spitz_04682.jpg creating: dogImages/train/068.Flat-coated_retriever/ inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04684.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04687.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04688.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04689.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04690.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04691.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04692.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04693.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04695.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04696.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04697.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04698.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04700.jpg inflating: 
dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04701.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04702.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04706.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04707.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04708.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04709.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04710.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04712.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04713.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04714.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04715.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04716.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04717.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04719.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04720.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04721.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04722.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04723.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04724.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04726.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04727.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04728.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04729.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04730.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04731.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04732.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04733.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04734.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04735.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04736.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04737.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04738.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04739.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04740.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04741.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04743.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04744.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04746.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04747.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04748.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04749.jpg inflating: 
dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04751.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04752.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04753.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04755.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04756.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04757.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04759.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04760.jpg inflating: dogImages/train/068.Flat-coated_retriever/Flat-coated_retriever_04761.jpg creating: dogImages/train/069.French_bulldog/ inflating: dogImages/train/069.French_bulldog/French_bulldog_04762.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04763.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04765.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04766.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04767.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04768.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04769.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04772.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04773.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04774.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04776.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04777.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04778.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04779.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04780.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04781.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04782.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04783.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04785.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04786.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04787.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04788.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04789.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04790.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04791.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04793.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04794.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04795.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04796.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04797.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04798.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04799.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04800.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04801.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04802.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04803.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04805.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04808.jpg inflating: 
dogImages/train/069.French_bulldog/French_bulldog_04809.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04810.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04811.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04812.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04814.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04815.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04817.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04818.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04819.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04820.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04821.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04823.jpg inflating: dogImages/train/069.French_bulldog/French_bulldog_04825.jpg creating: dogImages/train/070.German_pinscher/ inflating: dogImages/train/070.German_pinscher/German_pinscher_04826.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04827.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04828.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04829.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04831.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04832.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04833.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04834.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04835.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04836.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04837.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04839.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04840.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04841.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04842.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04844.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04846.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04847.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04848.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04850.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04851.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04852.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04853.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04855.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04856.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04857.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04858.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04859.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04860.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04864.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04865.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04866.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04867.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04868.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04869.jpg inflating: 
dogImages/train/070.German_pinscher/German_pinscher_04870.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04872.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04873.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04874.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04877.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04878.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04879.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04880.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04881.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04882.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04883.jpg inflating: dogImages/train/070.German_pinscher/German_pinscher_04884.jpg creating: dogImages/train/071.German_shepherd_dog/ inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04885.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04887.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04888.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04889.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04890.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04891.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04892.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04893.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04894.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04895.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04896.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04898.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04899.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04901.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04902.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04903.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04904.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04905.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04906.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04907.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04908.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04909.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04911.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04913.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04914.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04916.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04917.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04919.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04920.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04922.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04923.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04924.jpg inflating: 
dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04925.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04926.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04927.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04928.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04929.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04930.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04932.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04933.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04935.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04937.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04939.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04940.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04941.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04942.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04943.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04944.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04945.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04946.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04948.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04949.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04950.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04951.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04952.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04953.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04954.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04956.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04957.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04958.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04959.jpg inflating: dogImages/train/071.German_shepherd_dog/German_shepherd_dog_04962.jpg creating: dogImages/train/072.German_shorthaired_pointer/ inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04963.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04964.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04968.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04969.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04972.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04973.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04975.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04976.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04977.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04978.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04979.jpg inflating: 
dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04982.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04983.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04984.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04987.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04988.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04989.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04990.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04991.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04992.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04993.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04994.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04995.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04996.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04997.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_04999.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05000.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05001.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05002.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05003.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05004.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05005.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05006.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05008.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05009.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05010.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05011.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05012.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05013.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05014.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05015.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05016.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05017.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05018.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05019.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05020.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05021.jpg inflating: dogImages/train/072.German_shorthaired_pointer/German_shorthaired_pointer_05022.jpg creating: 
dogImages/train/073.German_wirehaired_pointer/ ...
[unzip output truncated: creating dogImages/train/ breed folders 073.German_wirehaired_pointer through 092.Keeshond and inflating their training images]
dogImages/train/092.Keeshond/Keeshond_06260.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06261.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06262.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06263.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06265.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06267.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06269.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06271.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06272.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06273.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06275.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06276.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06277.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06278.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06279.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06281.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06282.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06283.jpg inflating: dogImages/train/092.Keeshond/Keeshond_06284.jpg creating: dogImages/train/093.Kerry_blue_terrier/ inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06285.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06286.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06287.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06288.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06289.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06290.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06292.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06295.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06296.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06297.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06298.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06299.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06300.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06301.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06302.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06303.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06304.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06305.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06306.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06307.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06308.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06311.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06312.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06313.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06314.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06315.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06317.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06318.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06320.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06322.jpg inflating: 
dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06323.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06324.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06325.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06326.jpg inflating: dogImages/train/093.Kerry_blue_terrier/Kerry_blue_terrier_06328.jpg creating: dogImages/train/094.Komondor/ inflating: dogImages/train/094.Komondor/Komondor_06329.jpg inflating: dogImages/train/094.Komondor/Komondor_06330.jpg inflating: dogImages/train/094.Komondor/Komondor_06331.jpg inflating: dogImages/train/094.Komondor/Komondor_06332.jpg inflating: dogImages/train/094.Komondor/Komondor_06333.jpg inflating: dogImages/train/094.Komondor/Komondor_06334.jpg inflating: dogImages/train/094.Komondor/Komondor_06335.jpg inflating: dogImages/train/094.Komondor/Komondor_06336.jpg inflating: dogImages/train/094.Komondor/Komondor_06337.jpg inflating: dogImages/train/094.Komondor/Komondor_06338.jpg inflating: dogImages/train/094.Komondor/Komondor_06339.jpg inflating: dogImages/train/094.Komondor/Komondor_06341.jpg inflating: dogImages/train/094.Komondor/Komondor_06342.jpg inflating: dogImages/train/094.Komondor/Komondor_06343.jpg inflating: dogImages/train/094.Komondor/Komondor_06344.jpg inflating: dogImages/train/094.Komondor/Komondor_06345.jpg inflating: dogImages/train/094.Komondor/Komondor_06347.jpg inflating: dogImages/train/094.Komondor/Komondor_06348.jpg inflating: dogImages/train/094.Komondor/Komondor_06349.jpg inflating: dogImages/train/094.Komondor/Komondor_06350.jpg inflating: dogImages/train/094.Komondor/Komondor_06352.jpg inflating: dogImages/train/094.Komondor/Komondor_06353.jpg inflating: dogImages/train/094.Komondor/Komondor_06354.jpg inflating: dogImages/train/094.Komondor/Komondor_06355.jpg inflating: dogImages/train/094.Komondor/Komondor_06356.jpg inflating: dogImages/train/094.Komondor/Komondor_06357.jpg inflating: dogImages/train/094.Komondor/Komondor_06358.jpg inflating: dogImages/train/094.Komondor/Komondor_06359.jpg inflating: dogImages/train/094.Komondor/Komondor_06360.jpg inflating: dogImages/train/094.Komondor/Komondor_06361.jpg inflating: dogImages/train/094.Komondor/Komondor_06362.jpg inflating: dogImages/train/094.Komondor/Komondor_06363.jpg inflating: dogImages/train/094.Komondor/Komondor_06365.jpg inflating: dogImages/train/094.Komondor/Komondor_06367.jpg inflating: dogImages/train/094.Komondor/Komondor_06368.jpg inflating: dogImages/train/094.Komondor/Komondor_06369.jpg inflating: dogImages/train/094.Komondor/Komondor_06370.jpg inflating: dogImages/train/094.Komondor/Komondor_06371.jpg inflating: dogImages/train/094.Komondor/Komondor_06373.jpg inflating: dogImages/train/094.Komondor/Komondor_06377.jpg inflating: dogImages/train/094.Komondor/Komondor_06379.jpg inflating: dogImages/train/094.Komondor/Komondor_06380.jpg inflating: dogImages/train/094.Komondor/Komondor_06381.jpg inflating: dogImages/train/094.Komondor/Komondor_06383.jpg creating: dogImages/train/095.Kuvasz/ inflating: dogImages/train/095.Kuvasz/Kuvasz_06384.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06385.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06386.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06387.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06389.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06390.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06391.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06392.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06393.jpg 
inflating: dogImages/train/095.Kuvasz/Kuvasz_06394.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06395.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06396.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06397.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06398.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06399.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06400.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06401.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06402.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06403.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06405.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06406.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06407.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06408.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06410.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06411.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06413.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06415.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06416.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06417.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06418.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06419.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06423.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06424.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06425.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06426.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06427.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06428.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06431.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06432.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06434.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06436.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06437.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06438.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06439.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06440.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06441.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06442.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06443.jpg inflating: dogImages/train/095.Kuvasz/Kuvasz_06444.jpg creating: dogImages/train/096.Labrador_retriever/ inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06445.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06446.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06447.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06449.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06450.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06451.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06452.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06454.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06456.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06457.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06458.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06459.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06460.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06461.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06462.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06463.jpg inflating: 
dogImages/train/096.Labrador_retriever/Labrador_retriever_06464.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06467.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06468.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06469.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06471.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06473.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06474.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06476.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06477.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06479.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06480.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06481.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06482.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06484.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06485.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06486.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06487.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06488.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06489.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06490.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06491.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06492.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06494.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06495.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06496.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06497.jpg inflating: dogImages/train/096.Labrador_retriever/Labrador_retriever_06498.jpg creating: dogImages/train/097.Lakeland_terrier/ inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06499.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06500.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06501.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06503.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06504.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06505.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06506.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06507.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06508.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06509.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06510.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06511.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06513.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06514.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06515.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06516.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06519.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06520.jpg inflating: 
dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06522.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06523.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06525.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06526.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06527.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06529.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06530.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06532.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06533.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06534.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06535.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06536.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06537.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06538.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06540.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06541.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06543.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06544.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06545.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06546.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06547.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06548.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06549.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06551.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06552.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06553.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06554.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06555.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06556.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06557.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06559.jpg inflating: dogImages/train/097.Lakeland_terrier/Lakeland_terrier_06560.jpg creating: dogImages/train/098.Leonberger/ inflating: dogImages/train/098.Leonberger/Leonberger_06561.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06562.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06563.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06564.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06565.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06567.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06568.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06570.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06571.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06573.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06574.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06575.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06577.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06579.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06580.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06581.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06582.jpg inflating: 
dogImages/train/098.Leonberger/Leonberger_06583.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06585.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06586.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06587.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06589.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06590.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06592.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06593.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06594.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06596.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06597.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06598.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06599.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06600.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06601.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06602.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06603.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06604.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06605.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06606.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06607.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06608.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06610.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06611.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06612.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06613.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06614.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06615.jpg inflating: dogImages/train/098.Leonberger/Leonberger_06616.jpg creating: dogImages/train/099.Lhasa_apso/ inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06618.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06621.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06622.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06623.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06625.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06626.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06627.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06628.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06629.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06630.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06631.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06632.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06633.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06634.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06635.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06636.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06637.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06639.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06640.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06641.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06642.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06643.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06644.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06646.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06647.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06648.jpg inflating: 
dogImages/train/099.Lhasa_apso/Lhasa_apso_06650.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06652.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06653.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06654.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06655.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06656.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06657.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06658.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06659.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06661.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06662.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06663.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06664.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06665.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06667.jpg inflating: dogImages/train/099.Lhasa_apso/Lhasa_apso_06670.jpg creating: dogImages/train/100.Lowchen/ inflating: dogImages/train/100.Lowchen/Lowchen_06671.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06672.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06673.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06675.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06676.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06677.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06678.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06679.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06680.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06681.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06683.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06686.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06687.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06688.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06689.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06690.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06691.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06692.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06693.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06695.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06697.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06698.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06700.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06701.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06702.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06703.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06704.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06705.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06706.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06707.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06709.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06710.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06711.jpg inflating: dogImages/train/100.Lowchen/Lowchen_06712.jpg creating: dogImages/train/101.Maltese/ inflating: dogImages/train/101.Maltese/Maltese_06713.jpg inflating: dogImages/train/101.Maltese/Maltese_06714.jpg inflating: dogImages/train/101.Maltese/Maltese_06715.jpg inflating: dogImages/train/101.Maltese/Maltese_06717.jpg inflating: dogImages/train/101.Maltese/Maltese_06720.jpg inflating: dogImages/train/101.Maltese/Maltese_06721.jpg inflating: dogImages/train/101.Maltese/Maltese_06722.jpg inflating: dogImages/train/101.Maltese/Maltese_06723.jpg inflating: dogImages/train/101.Maltese/Maltese_06724.jpg inflating: 
dogImages/train/101.Maltese/Maltese_06725.jpg inflating: dogImages/train/101.Maltese/Maltese_06726.jpg inflating: dogImages/train/101.Maltese/Maltese_06727.jpg inflating: dogImages/train/101.Maltese/Maltese_06729.jpg inflating: dogImages/train/101.Maltese/Maltese_06730.jpg inflating: dogImages/train/101.Maltese/Maltese_06731.jpg inflating: dogImages/train/101.Maltese/Maltese_06732.jpg inflating: dogImages/train/101.Maltese/Maltese_06733.jpg inflating: dogImages/train/101.Maltese/Maltese_06734.jpg inflating: dogImages/train/101.Maltese/Maltese_06735.jpg inflating: dogImages/train/101.Maltese/Maltese_06736.jpg inflating: dogImages/train/101.Maltese/Maltese_06738.jpg inflating: dogImages/train/101.Maltese/Maltese_06739.jpg inflating: dogImages/train/101.Maltese/Maltese_06740.jpg inflating: dogImages/train/101.Maltese/Maltese_06741.jpg inflating: dogImages/train/101.Maltese/Maltese_06745.jpg inflating: dogImages/train/101.Maltese/Maltese_06746.jpg inflating: dogImages/train/101.Maltese/Maltese_06747.jpg inflating: dogImages/train/101.Maltese/Maltese_06748.jpg inflating: dogImages/train/101.Maltese/Maltese_06749.jpg inflating: dogImages/train/101.Maltese/Maltese_06750.jpg inflating: dogImages/train/101.Maltese/Maltese_06751.jpg inflating: dogImages/train/101.Maltese/Maltese_06752.jpg inflating: dogImages/train/101.Maltese/Maltese_06753.jpg inflating: dogImages/train/101.Maltese/Maltese_06755.jpg inflating: dogImages/train/101.Maltese/Maltese_06756.jpg inflating: dogImages/train/101.Maltese/Maltese_06758.jpg inflating: dogImages/train/101.Maltese/Maltese_06759.jpg inflating: dogImages/train/101.Maltese/Maltese_06761.jpg inflating: dogImages/train/101.Maltese/Maltese_06762.jpg inflating: dogImages/train/101.Maltese/Maltese_06763.jpg inflating: dogImages/train/101.Maltese/Maltese_06764.jpg inflating: dogImages/train/101.Maltese/Maltese_06765.jpg inflating: dogImages/train/101.Maltese/Maltese_06767.jpg inflating: dogImages/train/101.Maltese/Maltese_06768.jpg inflating: dogImages/train/101.Maltese/Maltese_06769.jpg inflating: dogImages/train/101.Maltese/Maltese_06770.jpg inflating: dogImages/train/101.Maltese/Maltese_06771.jpg inflating: dogImages/train/101.Maltese/Maltese_06772.jpg creating: dogImages/train/102.Manchester_terrier/ inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06774.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06775.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06777.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06779.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06780.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06781.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06782.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06783.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06785.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06786.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06787.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06788.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06789.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06790.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06791.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06792.jpg inflating: 
dogImages/train/102.Manchester_terrier/Manchester_terrier_06793.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06794.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06795.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06796.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06797.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06800.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06801.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06802.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06803.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06804.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06806.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06807.jpg inflating: dogImages/train/102.Manchester_terrier/Manchester_terrier_06808.jpg creating: dogImages/train/103.Mastiff/ inflating: dogImages/train/103.Mastiff/Mastiff_06809.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06811.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06812.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06813.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06814.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06817.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06818.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06819.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06820.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06821.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06822.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06824.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06826.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06828.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06829.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06831.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06832.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06833.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06834.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06835.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06837.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06838.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06839.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06840.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06841.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06842.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06843.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06844.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06845.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06846.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06848.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06849.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06850.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06851.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06852.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06853.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06854.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06856.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06857.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06858.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06860.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06861.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06862.jpg inflating: 
dogImages/train/103.Mastiff/Mastiff_06863.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06864.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06865.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06866.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06867.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06868.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06869.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06870.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06871.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06872.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06874.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06875.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06876.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06877.jpg inflating: dogImages/train/103.Mastiff/Mastiff_06879.jpg creating: dogImages/train/104.Miniature_schnauzer/ inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06881.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06882.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06883.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06884.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06885.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06886.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06887.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06888.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06889.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06890.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06891.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06893.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06894.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06895.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06896.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06897.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06899.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06900.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06902.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06905.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06906.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06907.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06908.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06909.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06911.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06912.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06913.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06914.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06918.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06919.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06920.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06921.jpg inflating: 
dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06922.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06923.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06924.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06925.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06926.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06928.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06929.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06931.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06932.jpg inflating: dogImages/train/104.Miniature_schnauzer/Miniature_schnauzer_06933.jpg creating: dogImages/train/105.Neapolitan_mastiff/ inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06934.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06935.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06936.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06938.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06939.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06940.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06941.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06942.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06943.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06944.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06946.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06947.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06948.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06949.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06953.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06954.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06955.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06956.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06957.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06959.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06960.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06961.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06962.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06964.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06965.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06966.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06967.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06968.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06969.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06970.jpg inflating: dogImages/train/105.Neapolitan_mastiff/Neapolitan_mastiff_06971.jpg creating: dogImages/train/106.Newfoundland/ inflating: dogImages/train/106.Newfoundland/Newfoundland_06973.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06974.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06975.jpg inflating: 
dogImages/train/106.Newfoundland/Newfoundland_06976.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06977.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06978.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06981.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06982.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06983.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06985.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06986.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06988.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06989.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06990.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06991.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06992.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06993.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06994.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06995.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06997.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06998.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_06999.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07000.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07001.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07002.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07003.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07004.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07005.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07006.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07008.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07011.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07012.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07013.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07014.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07016.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07017.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07018.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07019.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07021.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07022.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07023.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07024.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07025.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07026.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07027.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07028.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07030.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07032.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07033.jpg inflating: dogImages/train/106.Newfoundland/Newfoundland_07034.jpg creating: dogImages/train/107.Norfolk_terrier/ inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07035.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07036.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07037.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07038.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07039.jpg 
inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07041.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07042.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07043.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07044.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07045.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07047.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07048.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07049.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07050.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07051.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07053.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07055.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07057.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07058.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07060.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07061.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07062.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07063.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07064.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07065.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07066.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07067.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07068.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07069.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07070.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07071.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07072.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07073.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07074.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07075.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07076.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07078.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07079.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07080.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07082.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07085.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07086.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07088.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07089.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07090.jpg inflating: dogImages/train/107.Norfolk_terrier/Norfolk_terrier_07091.jpg creating: dogImages/train/108.Norwegian_buhund/ inflating: dogImages/train/108.Norwegian_buhund/Norwegian_buhund_07093.jpg inflating: dogImages/train/108.Norwegian_buhund/Norwegian_buhund_07095.jpg inflating: dogImages/train/108.Norwegian_buhund/Norwegian_buhund_07096.jpg inflating: dogImages/train/108.Norwegian_buhund/Norwegian_buhund_07097.jpg inflating: dogImages/train/108.Norwegian_buhund/Norwegian_buhund_07098.jpg inflating: dogImages/train/108.Norwegian_buhund/Norwegian_buhund_07099.jpg inflating: 
[verbose unzip log collapsed: creating dogImages/train/<breed>/ directories and inflating their training images for classes 108.Norwegian_buhund through 133.Yorkshire_terrier; output truncated]
dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08314.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08315.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08316.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08317.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08318.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08319.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08320.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08321.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08322.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08323.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08324.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08327.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08328.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08329.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08330.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08331.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08332.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08335.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08338.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08339.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08340.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08341.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08342.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08343.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08344.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08345.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08347.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08349.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08350.jpg inflating: dogImages/train/133.Yorkshire_terrier/Yorkshire_terrier_08351.jpg creating: dogImages/valid/001.Affenpinscher/ inflating: dogImages/valid/001.Affenpinscher/Affenpinscher_00038.jpg inflating: dogImages/valid/001.Affenpinscher/Affenpinscher_00040.jpg inflating: dogImages/valid/001.Affenpinscher/Affenpinscher_00041.jpg inflating: dogImages/valid/001.Affenpinscher/Affenpinscher_00042.jpg inflating: dogImages/valid/001.Affenpinscher/Affenpinscher_00049.jpg inflating: dogImages/valid/001.Affenpinscher/Affenpinscher_00055.jpg inflating: dogImages/valid/001.Affenpinscher/Affenpinscher_00068.jpg inflating: dogImages/valid/001.Affenpinscher/Affenpinscher_00074.jpg creating: dogImages/valid/002.Afghan_hound/ inflating: dogImages/valid/002.Afghan_hound/Afghan_hound_00095.jpg inflating: dogImages/valid/002.Afghan_hound/Afghan_hound_00100.jpg inflating: dogImages/valid/002.Afghan_hound/Afghan_hound_00106.jpg inflating: dogImages/valid/002.Afghan_hound/Afghan_hound_00115.jpg inflating: dogImages/valid/002.Afghan_hound/Afghan_hound_00120.jpg inflating: dogImages/valid/002.Afghan_hound/Afghan_hound_00129.jpg inflating: dogImages/valid/002.Afghan_hound/Afghan_hound_00132.jpg creating: dogImages/valid/003.Airedale_terrier/ inflating: dogImages/valid/003.Airedale_terrier/Airedale_terrier_00155.jpg inflating: 
dogImages/valid/003.Airedale_terrier/Airedale_terrier_00160.jpg inflating: dogImages/valid/003.Airedale_terrier/Airedale_terrier_00178.jpg inflating: dogImages/valid/003.Airedale_terrier/Airedale_terrier_00182.jpg inflating: dogImages/valid/003.Airedale_terrier/Airedale_terrier_00183.jpg inflating: dogImages/valid/003.Airedale_terrier/Airedale_terrier_00206.jpg inflating: dogImages/valid/003.Airedale_terrier/Airedale_terrier_00208.jpg creating: dogImages/valid/004.Akita/ inflating: dogImages/valid/004.Akita/Akita_00219.jpg inflating: dogImages/valid/004.Akita/Akita_00227.jpg inflating: dogImages/valid/004.Akita/Akita_00235.jpg inflating: dogImages/valid/004.Akita/Akita_00237.jpg inflating: dogImages/valid/004.Akita/Akita_00247.jpg inflating: dogImages/valid/004.Akita/Akita_00252.jpg inflating: dogImages/valid/004.Akita/Akita_00265.jpg inflating: dogImages/valid/004.Akita/Akita_00278.jpg creating: dogImages/valid/005.Alaskan_malamute/ inflating: dogImages/valid/005.Alaskan_malamute/Alaskan_malamute_00298.jpg inflating: dogImages/valid/005.Alaskan_malamute/Alaskan_malamute_00318.jpg inflating: dogImages/valid/005.Alaskan_malamute/Alaskan_malamute_00323.jpg inflating: dogImages/valid/005.Alaskan_malamute/Alaskan_malamute_00324.jpg inflating: dogImages/valid/005.Alaskan_malamute/Alaskan_malamute_00336.jpg inflating: dogImages/valid/005.Alaskan_malamute/Alaskan_malamute_00344.jpg inflating: dogImages/valid/005.Alaskan_malamute/Alaskan_malamute_00369.jpg inflating: dogImages/valid/005.Alaskan_malamute/Alaskan_malamute_00371.jpg inflating: dogImages/valid/005.Alaskan_malamute/Alaskan_malamute_00390.jpg creating: dogImages/valid/006.American_eskimo_dog/ inflating: dogImages/valid/006.American_eskimo_dog/American_eskimo_dog_00410.jpg inflating: dogImages/valid/006.American_eskimo_dog/American_eskimo_dog_00418.jpg inflating: dogImages/valid/006.American_eskimo_dog/American_eskimo_dog_00429.jpg inflating: dogImages/valid/006.American_eskimo_dog/American_eskimo_dog_00434.jpg inflating: dogImages/valid/006.American_eskimo_dog/American_eskimo_dog_00446.jpg inflating: dogImages/valid/006.American_eskimo_dog/American_eskimo_dog_00449.jpg inflating: dogImages/valid/006.American_eskimo_dog/American_eskimo_dog_00460.jpg inflating: dogImages/valid/006.American_eskimo_dog/American_eskimo_dog_00464.jpg creating: dogImages/valid/007.American_foxhound/ inflating: dogImages/valid/007.American_foxhound/American_foxhound_00481.jpg inflating: dogImages/valid/007.American_foxhound/American_foxhound_00485.jpg inflating: dogImages/valid/007.American_foxhound/American_foxhound_00487.jpg inflating: dogImages/valid/007.American_foxhound/American_foxhound_00505.jpg inflating: dogImages/valid/007.American_foxhound/American_foxhound_00523.jpg inflating: dogImages/valid/007.American_foxhound/American_foxhound_00526.jpg creating: dogImages/valid/008.American_staffordshire_terrier/ inflating: dogImages/valid/008.American_staffordshire_terrier/American_staffordshire_terrier_00544.jpg inflating: dogImages/valid/008.American_staffordshire_terrier/American_staffordshire_terrier_00546.jpg inflating: dogImages/valid/008.American_staffordshire_terrier/American_staffordshire_terrier_00551.jpg inflating: dogImages/valid/008.American_staffordshire_terrier/American_staffordshire_terrier_00554.jpg inflating: dogImages/valid/008.American_staffordshire_terrier/American_staffordshire_terrier_00557.jpg inflating: dogImages/valid/008.American_staffordshire_terrier/American_staffordshire_terrier_00561.jpg inflating: 
dogImages/valid/008.American_staffordshire_terrier/American_staffordshire_terrier_00582.jpg inflating: dogImages/valid/008.American_staffordshire_terrier/American_staffordshire_terrier_00594.jpg creating: dogImages/valid/009.American_water_spaniel/ inflating: dogImages/valid/009.American_water_spaniel/American_water_spaniel_00633.jpg inflating: dogImages/valid/009.American_water_spaniel/American_water_spaniel_00642.jpg inflating: dogImages/valid/009.American_water_spaniel/American_water_spaniel_00648.jpg inflating: dogImages/valid/009.American_water_spaniel/American_water_spaniel_00654.jpg creating: dogImages/valid/010.Anatolian_shepherd_dog/ inflating: dogImages/valid/010.Anatolian_shepherd_dog/Anatolian_shepherd_dog_00671.jpg inflating: dogImages/valid/010.Anatolian_shepherd_dog/Anatolian_shepherd_dog_00674.jpg inflating: dogImages/valid/010.Anatolian_shepherd_dog/Anatolian_shepherd_dog_00682.jpg inflating: dogImages/valid/010.Anatolian_shepherd_dog/Anatolian_shepherd_dog_00688.jpg inflating: dogImages/valid/010.Anatolian_shepherd_dog/Anatolian_shepherd_dog_00697.jpg inflating: dogImages/valid/010.Anatolian_shepherd_dog/Anatolian_shepherd_dog_00719.jpg creating: dogImages/valid/011.Australian_cattle_dog/ inflating: dogImages/valid/011.Australian_cattle_dog/Australian_cattle_dog_00733.jpg inflating: dogImages/valid/011.Australian_cattle_dog/Australian_cattle_dog_00735.jpg inflating: dogImages/valid/011.Australian_cattle_dog/Australian_cattle_dog_00763.jpg inflating: dogImages/valid/011.Australian_cattle_dog/Australian_cattle_dog_00767.jpg inflating: dogImages/valid/011.Australian_cattle_dog/Australian_cattle_dog_00786.jpg inflating: dogImages/valid/011.Australian_cattle_dog/Australian_cattle_dog_00787.jpg inflating: dogImages/valid/011.Australian_cattle_dog/Australian_cattle_dog_00798.jpg inflating: dogImages/valid/011.Australian_cattle_dog/Australian_cattle_dog_00799.jpg creating: dogImages/valid/012.Australian_shepherd/ inflating: dogImages/valid/012.Australian_shepherd/Australian_shepherd_00808.jpg inflating: dogImages/valid/012.Australian_shepherd/Australian_shepherd_00820.jpg inflating: dogImages/valid/012.Australian_shepherd/Australian_shepherd_00822.jpg inflating: dogImages/valid/012.Australian_shepherd/Australian_shepherd_00827.jpg inflating: dogImages/valid/012.Australian_shepherd/Australian_shepherd_00847.jpg inflating: dogImages/valid/012.Australian_shepherd/Australian_shepherd_00867.jpg inflating: dogImages/valid/012.Australian_shepherd/Australian_shepherd_00873.jpg inflating: dogImages/valid/012.Australian_shepherd/Australian_shepherd_00879.jpg creating: dogImages/valid/013.Australian_terrier/ inflating: dogImages/valid/013.Australian_terrier/Australian_terrier_00893.jpg inflating: dogImages/valid/013.Australian_terrier/Australian_terrier_00896.jpg inflating: dogImages/valid/013.Australian_terrier/Australian_terrier_00913.jpg inflating: dogImages/valid/013.Australian_terrier/Australian_terrier_00914.jpg inflating: dogImages/valid/013.Australian_terrier/Australian_terrier_00927.jpg inflating: dogImages/valid/013.Australian_terrier/Australian_terrier_00929.jpg creating: dogImages/valid/014.Basenji/ inflating: dogImages/valid/014.Basenji/Basenji_00950.jpg inflating: dogImages/valid/014.Basenji/Basenji_00951.jpg inflating: dogImages/valid/014.Basenji/Basenji_00960.jpg inflating: dogImages/valid/014.Basenji/Basenji_00964.jpg inflating: dogImages/valid/014.Basenji/Basenji_00967.jpg inflating: dogImages/valid/014.Basenji/Basenji_00981.jpg inflating: 
dogImages/valid/014.Basenji/Basenji_00997.jpg inflating: dogImages/valid/014.Basenji/Basenji_00998.jpg creating: dogImages/valid/015.Basset_hound/ inflating: dogImages/valid/015.Basset_hound/Basset_hound_01053.jpg inflating: dogImages/valid/015.Basset_hound/Basset_hound_01063.jpg inflating: dogImages/valid/015.Basset_hound/Basset_hound_01073.jpg inflating: dogImages/valid/015.Basset_hound/Basset_hound_01076.jpg inflating: dogImages/valid/015.Basset_hound/Basset_hound_01093.jpg inflating: dogImages/valid/015.Basset_hound/Basset_hound_01096.jpg inflating: dogImages/valid/015.Basset_hound/Basset_hound_01109.jpg inflating: dogImages/valid/015.Basset_hound/Basset_hound_01117.jpg inflating: dogImages/valid/015.Basset_hound/Basset_hound_01124.jpg creating: dogImages/valid/016.Beagle/ inflating: dogImages/valid/016.Beagle/Beagle_01126.jpg inflating: dogImages/valid/016.Beagle/Beagle_01137.jpg inflating: dogImages/valid/016.Beagle/Beagle_01157.jpg inflating: dogImages/valid/016.Beagle/Beagle_01176.jpg inflating: dogImages/valid/016.Beagle/Beagle_01183.jpg inflating: dogImages/valid/016.Beagle/Beagle_01184.jpg inflating: dogImages/valid/016.Beagle/Beagle_01187.jpg creating: dogImages/valid/017.Bearded_collie/ inflating: dogImages/valid/017.Bearded_collie/Bearded_collie_01222.jpg inflating: dogImages/valid/017.Bearded_collie/Bearded_collie_01223.jpg inflating: dogImages/valid/017.Bearded_collie/Bearded_collie_01225.jpg inflating: dogImages/valid/017.Bearded_collie/Bearded_collie_01248.jpg inflating: dogImages/valid/017.Bearded_collie/Bearded_collie_01257.jpg inflating: dogImages/valid/017.Bearded_collie/Bearded_collie_01267.jpg inflating: dogImages/valid/017.Bearded_collie/Bearded_collie_01268.jpg creating: dogImages/valid/018.Beauceron/ inflating: dogImages/valid/018.Beauceron/Beauceron_01288.jpg inflating: dogImages/valid/018.Beauceron/Beauceron_01293.jpg inflating: dogImages/valid/018.Beauceron/Beauceron_01302.jpg inflating: dogImages/valid/018.Beauceron/Beauceron_01318.jpg inflating: dogImages/valid/018.Beauceron/Beauceron_01326.jpg inflating: dogImages/valid/018.Beauceron/Beauceron_01331.jpg creating: dogImages/valid/019.Bedlington_terrier/ inflating: dogImages/valid/019.Bedlington_terrier/Bedlington_terrier_01347.jpg inflating: dogImages/valid/019.Bedlington_terrier/Bedlington_terrier_01348.jpg inflating: dogImages/valid/019.Bedlington_terrier/Bedlington_terrier_01357.jpg inflating: dogImages/valid/019.Bedlington_terrier/Bedlington_terrier_01367.jpg inflating: dogImages/valid/019.Bedlington_terrier/Bedlington_terrier_01370.jpg inflating: dogImages/valid/019.Bedlington_terrier/Bedlington_terrier_01396.jpg creating: dogImages/valid/020.Belgian_malinois/ inflating: dogImages/valid/020.Belgian_malinois/Belgian_malinois_01409.jpg inflating: dogImages/valid/020.Belgian_malinois/Belgian_malinois_01410.jpg inflating: dogImages/valid/020.Belgian_malinois/Belgian_malinois_01428.jpg inflating: dogImages/valid/020.Belgian_malinois/Belgian_malinois_01436.jpg inflating: dogImages/valid/020.Belgian_malinois/Belgian_malinois_01443.jpg inflating: dogImages/valid/020.Belgian_malinois/Belgian_malinois_01448.jpg inflating: dogImages/valid/020.Belgian_malinois/Belgian_malinois_01459.jpg inflating: dogImages/valid/020.Belgian_malinois/Belgian_malinois_01475.jpg creating: dogImages/valid/021.Belgian_sheepdog/ inflating: dogImages/valid/021.Belgian_sheepdog/Belgian_sheepdog_01485.jpg inflating: dogImages/valid/021.Belgian_sheepdog/Belgian_sheepdog_01489.jpg inflating: 
dogImages/valid/021.Belgian_sheepdog/Belgian_sheepdog_01490.jpg inflating: dogImages/valid/021.Belgian_sheepdog/Belgian_sheepdog_01510.jpg inflating: dogImages/valid/021.Belgian_sheepdog/Belgian_sheepdog_01521.jpg inflating: dogImages/valid/021.Belgian_sheepdog/Belgian_sheepdog_01522.jpg inflating: dogImages/valid/021.Belgian_sheepdog/Belgian_sheepdog_01542.jpg inflating: dogImages/valid/021.Belgian_sheepdog/Belgian_sheepdog_01549.jpg creating: dogImages/valid/022.Belgian_tervuren/ inflating: dogImages/valid/022.Belgian_tervuren/Belgian_tervuren_01571.jpg inflating: dogImages/valid/022.Belgian_tervuren/Belgian_tervuren_01587.jpg inflating: dogImages/valid/022.Belgian_tervuren/Belgian_tervuren_01590.jpg inflating: dogImages/valid/022.Belgian_tervuren/Belgian_tervuren_01599.jpg inflating: dogImages/valid/022.Belgian_tervuren/Belgian_tervuren_01610.jpg inflating: dogImages/valid/022.Belgian_tervuren/Belgian_tervuren_01612.jpg creating: dogImages/valid/023.Bernese_mountain_dog/ inflating: dogImages/valid/023.Bernese_mountain_dog/Bernese_mountain_dog_01626.jpg inflating: dogImages/valid/023.Bernese_mountain_dog/Bernese_mountain_dog_01635.jpg inflating: dogImages/valid/023.Bernese_mountain_dog/Bernese_mountain_dog_01645.jpg inflating: dogImages/valid/023.Bernese_mountain_dog/Bernese_mountain_dog_01661.jpg inflating: dogImages/valid/023.Bernese_mountain_dog/Bernese_mountain_dog_01672.jpg inflating: dogImages/valid/023.Bernese_mountain_dog/Bernese_mountain_dog_01673.jpg inflating: dogImages/valid/023.Bernese_mountain_dog/Bernese_mountain_dog_01679.jpg inflating: dogImages/valid/023.Bernese_mountain_dog/Bernese_mountain_dog_01690.jpg creating: dogImages/valid/024.Bichon_frise/ inflating: dogImages/valid/024.Bichon_frise/Bichon_frise_01708.jpg inflating: dogImages/valid/024.Bichon_frise/Bichon_frise_01710.jpg inflating: dogImages/valid/024.Bichon_frise/Bichon_frise_01734.jpg inflating: dogImages/valid/024.Bichon_frise/Bichon_frise_01740.jpg inflating: dogImages/valid/024.Bichon_frise/Bichon_frise_01745.jpg inflating: dogImages/valid/024.Bichon_frise/Bichon_frise_01748.jpg inflating: dogImages/valid/024.Bichon_frise/Bichon_frise_01769.jpg creating: dogImages/valid/025.Black_and_tan_coonhound/ inflating: dogImages/valid/025.Black_and_tan_coonhound/Black_and_tan_coonhound_01775.jpg inflating: dogImages/valid/025.Black_and_tan_coonhound/Black_and_tan_coonhound_01785.jpg inflating: dogImages/valid/025.Black_and_tan_coonhound/Black_and_tan_coonhound_01788.jpg inflating: dogImages/valid/025.Black_and_tan_coonhound/Black_and_tan_coonhound_01790.jpg inflating: dogImages/valid/025.Black_and_tan_coonhound/Black_and_tan_coonhound_01798.jpg creating: dogImages/valid/026.Black_russian_terrier/ inflating: dogImages/valid/026.Black_russian_terrier/Black_russian_terrier_01825.jpg inflating: dogImages/valid/026.Black_russian_terrier/Black_russian_terrier_01839.jpg inflating: dogImages/valid/026.Black_russian_terrier/Black_russian_terrier_01855.jpg inflating: dogImages/valid/026.Black_russian_terrier/Black_russian_terrier_01859.jpg inflating: dogImages/valid/026.Black_russian_terrier/Black_russian_terrier_01860.jpg creating: dogImages/valid/027.Bloodhound/ inflating: dogImages/valid/027.Bloodhound/Bloodhound_01874.jpg inflating: dogImages/valid/027.Bloodhound/Bloodhound_01887.jpg inflating: dogImages/valid/027.Bloodhound/Bloodhound_01891.jpg inflating: dogImages/valid/027.Bloodhound/Bloodhound_01894.jpg inflating: dogImages/valid/027.Bloodhound/Bloodhound_01927.jpg inflating: 
dogImages/valid/027.Bloodhound/Bloodhound_01931.jpg inflating: dogImages/valid/027.Bloodhound/Bloodhound_01932.jpg inflating: dogImages/valid/027.Bloodhound/Bloodhound_01935.jpg creating: dogImages/valid/028.Bluetick_coonhound/ inflating: dogImages/valid/028.Bluetick_coonhound/Bluetick_coonhound_01962.jpg inflating: dogImages/valid/028.Bluetick_coonhound/Bluetick_coonhound_01965.jpg inflating: dogImages/valid/028.Bluetick_coonhound/Bluetick_coonhound_01966.jpg inflating: dogImages/valid/028.Bluetick_coonhound/Bluetick_coonhound_01978.jpg inflating: dogImages/valid/028.Bluetick_coonhound/Bluetick_coonhound_01992.jpg creating: dogImages/valid/029.Border_collie/ inflating: dogImages/valid/029.Border_collie/Border_collie_01995.jpg inflating: dogImages/valid/029.Border_collie/Border_collie_01998.jpg inflating: dogImages/valid/029.Border_collie/Border_collie_02001.jpg inflating: dogImages/valid/029.Border_collie/Border_collie_02004.jpg inflating: dogImages/valid/029.Border_collie/Border_collie_02030.jpg inflating: dogImages/valid/029.Border_collie/Border_collie_02049.jpg inflating: dogImages/valid/029.Border_collie/Border_collie_02054.jpg inflating: dogImages/valid/029.Border_collie/Border_collie_02078.jpg inflating: dogImages/valid/029.Border_collie/Border_collie_02085.jpg creating: dogImages/valid/030.Border_terrier/ inflating: dogImages/valid/030.Border_terrier/Border_terrier_02100.jpg inflating: dogImages/valid/030.Border_terrier/Border_terrier_02118.jpg inflating: dogImages/valid/030.Border_terrier/Border_terrier_02126.jpg inflating: dogImages/valid/030.Border_terrier/Border_terrier_02130.jpg inflating: dogImages/valid/030.Border_terrier/Border_terrier_02146.jpg inflating: dogImages/valid/030.Border_terrier/Border_terrier_02151.jpg creating: dogImages/valid/031.Borzoi/ inflating: dogImages/valid/031.Borzoi/Borzoi_02173.jpg inflating: dogImages/valid/031.Borzoi/Borzoi_02178.jpg inflating: dogImages/valid/031.Borzoi/Borzoi_02182.jpg inflating: dogImages/valid/031.Borzoi/Borzoi_02193.jpg inflating: dogImages/valid/031.Borzoi/Borzoi_02211.jpg inflating: dogImages/valid/031.Borzoi/Borzoi_02216.jpg inflating: dogImages/valid/031.Borzoi/Borzoi_02222.jpg creating: dogImages/valid/032.Boston_terrier/ inflating: dogImages/valid/032.Boston_terrier/Boston_terrier_02230.jpg inflating: dogImages/valid/032.Boston_terrier/Boston_terrier_02261.jpg inflating: dogImages/valid/032.Boston_terrier/Boston_terrier_02268.jpg inflating: dogImages/valid/032.Boston_terrier/Boston_terrier_02269.jpg inflating: dogImages/valid/032.Boston_terrier/Boston_terrier_02275.jpg inflating: dogImages/valid/032.Boston_terrier/Boston_terrier_02283.jpg inflating: dogImages/valid/032.Boston_terrier/Boston_terrier_02289.jpg inflating: dogImages/valid/032.Boston_terrier/Boston_terrier_02299.jpg creating: dogImages/valid/033.Bouvier_des_flandres/ inflating: dogImages/valid/033.Bouvier_des_flandres/Bouvier_des_flandres_02322.jpg inflating: dogImages/valid/033.Bouvier_des_flandres/Bouvier_des_flandres_02325.jpg inflating: dogImages/valid/033.Bouvier_des_flandres/Bouvier_des_flandres_02329.jpg inflating: dogImages/valid/033.Bouvier_des_flandres/Bouvier_des_flandres_02356.jpg inflating: dogImages/valid/033.Bouvier_des_flandres/Bouvier_des_flandres_02358.jpg inflating: dogImages/valid/033.Bouvier_des_flandres/Bouvier_des_flandres_02359.jpg creating: dogImages/valid/034.Boxer/ inflating: dogImages/valid/034.Boxer/Boxer_02370.jpg inflating: dogImages/valid/034.Boxer/Boxer_02384.jpg inflating: dogImages/valid/034.Boxer/Boxer_02392.jpg inflating: 
dogImages/valid/034.Boxer/Boxer_02406.jpg inflating: dogImages/valid/034.Boxer/Boxer_02409.jpg inflating: dogImages/valid/034.Boxer/Boxer_02429.jpg inflating: dogImages/valid/034.Boxer/Boxer_02430.jpg inflating: dogImages/valid/034.Boxer/Boxer_02438.jpg creating: dogImages/valid/035.Boykin_spaniel/ inflating: dogImages/valid/035.Boykin_spaniel/Boykin_spaniel_02451.jpg inflating: dogImages/valid/035.Boykin_spaniel/Boykin_spaniel_02452.jpg inflating: dogImages/valid/035.Boykin_spaniel/Boykin_spaniel_02465.jpg inflating: dogImages/valid/035.Boykin_spaniel/Boykin_spaniel_02468.jpg inflating: dogImages/valid/035.Boykin_spaniel/Boykin_spaniel_02471.jpg inflating: dogImages/valid/035.Boykin_spaniel/Boykin_spaniel_02485.jpg inflating: dogImages/valid/035.Boykin_spaniel/Boykin_spaniel_02505.jpg creating: dogImages/valid/036.Briard/ inflating: dogImages/valid/036.Briard/Briard_02512.jpg inflating: dogImages/valid/036.Briard/Briard_02518.jpg inflating: dogImages/valid/036.Briard/Briard_02528.jpg inflating: dogImages/valid/036.Briard/Briard_02532.jpg inflating: dogImages/valid/036.Briard/Briard_02534.jpg inflating: dogImages/valid/036.Briard/Briard_02556.jpg inflating: dogImages/valid/036.Briard/Briard_02558.jpg inflating: dogImages/valid/036.Briard/Briard_02574.jpg creating: dogImages/valid/037.Brittany/ inflating: dogImages/valid/037.Brittany/Brittany_02597.jpg inflating: dogImages/valid/037.Brittany/Brittany_02613.jpg inflating: dogImages/valid/037.Brittany/Brittany_02616.jpg inflating: dogImages/valid/037.Brittany/Brittany_02635.jpg inflating: dogImages/valid/037.Brittany/Brittany_02642.jpg inflating: dogImages/valid/037.Brittany/Brittany_02644.jpg creating: dogImages/valid/038.Brussels_griffon/ inflating: dogImages/valid/038.Brussels_griffon/Brussels_griffon_02665.jpg inflating: dogImages/valid/038.Brussels_griffon/Brussels_griffon_02672.jpg inflating: dogImages/valid/038.Brussels_griffon/Brussels_griffon_02693.jpg inflating: dogImages/valid/038.Brussels_griffon/Brussels_griffon_02695.jpg inflating: dogImages/valid/038.Brussels_griffon/Brussels_griffon_02697.jpg inflating: dogImages/valid/038.Brussels_griffon/Brussels_griffon_02703.jpg inflating: dogImages/valid/038.Brussels_griffon/Brussels_griffon_02719.jpg creating: dogImages/valid/039.Bull_terrier/ inflating: dogImages/valid/039.Bull_terrier/Bull_terrier_02732.jpg inflating: dogImages/valid/039.Bull_terrier/Bull_terrier_02736.jpg inflating: dogImages/valid/039.Bull_terrier/Bull_terrier_02747.jpg inflating: dogImages/valid/039.Bull_terrier/Bull_terrier_02774.jpg inflating: dogImages/valid/039.Bull_terrier/Bull_terrier_02780.jpg inflating: dogImages/valid/039.Bull_terrier/Bull_terrier_02790.jpg inflating: dogImages/valid/039.Bull_terrier/Bull_terrier_02797.jpg inflating: dogImages/valid/039.Bull_terrier/Bull_terrier_02799.jpg inflating: dogImages/valid/039.Bull_terrier/Bull_terrier_02806.jpg creating: dogImages/valid/040.Bulldog/ inflating: dogImages/valid/040.Bulldog/Bulldog_02811.jpg inflating: dogImages/valid/040.Bulldog/Bulldog_02812.jpg inflating: dogImages/valid/040.Bulldog/Bulldog_02830.jpg inflating: dogImages/valid/040.Bulldog/Bulldog_02852.jpg inflating: dogImages/valid/040.Bulldog/Bulldog_02857.jpg inflating: dogImages/valid/040.Bulldog/Bulldog_02872.jpg creating: dogImages/valid/041.Bullmastiff/ inflating: dogImages/valid/041.Bullmastiff/Bullmastiff_02875.jpg inflating: dogImages/valid/041.Bullmastiff/Bullmastiff_02880.jpg inflating: dogImages/valid/041.Bullmastiff/Bullmastiff_02897.jpg inflating: 
dogImages/valid/041.Bullmastiff/Bullmastiff_02905.jpg inflating: dogImages/valid/041.Bullmastiff/Bullmastiff_02922.jpg inflating: dogImages/valid/041.Bullmastiff/Bullmastiff_02928.jpg inflating: dogImages/valid/041.Bullmastiff/Bullmastiff_02936.jpg inflating: dogImages/valid/041.Bullmastiff/Bullmastiff_02952.jpg creating: dogImages/valid/042.Cairn_terrier/ inflating: dogImages/valid/042.Cairn_terrier/Cairn_terrier_02963.jpg inflating: dogImages/valid/042.Cairn_terrier/Cairn_terrier_02966.jpg inflating: dogImages/valid/042.Cairn_terrier/Cairn_terrier_02972.jpg inflating: dogImages/valid/042.Cairn_terrier/Cairn_terrier_02977.jpg inflating: dogImages/valid/042.Cairn_terrier/Cairn_terrier_03009.jpg inflating: dogImages/valid/042.Cairn_terrier/Cairn_terrier_03013.jpg inflating: dogImages/valid/042.Cairn_terrier/Cairn_terrier_03023.jpg inflating: dogImages/valid/042.Cairn_terrier/Cairn_terrier_03035.jpg creating: dogImages/valid/043.Canaan_dog/ inflating: dogImages/valid/043.Canaan_dog/Canaan_dog_03051.jpg inflating: dogImages/valid/043.Canaan_dog/Canaan_dog_03054.jpg inflating: dogImages/valid/043.Canaan_dog/Canaan_dog_03067.jpg inflating: dogImages/valid/043.Canaan_dog/Canaan_dog_03070.jpg inflating: dogImages/valid/043.Canaan_dog/Canaan_dog_03091.jpg inflating: dogImages/valid/043.Canaan_dog/Canaan_dog_03095.jpg creating: dogImages/valid/044.Cane_corso/ inflating: dogImages/valid/044.Cane_corso/Cane_corso_03103.jpg inflating: dogImages/valid/044.Cane_corso/Cane_corso_03104.jpg inflating: dogImages/valid/044.Cane_corso/Cane_corso_03122.jpg inflating: dogImages/valid/044.Cane_corso/Cane_corso_03136.jpg inflating: dogImages/valid/044.Cane_corso/Cane_corso_03173.jpg inflating: dogImages/valid/044.Cane_corso/Cane_corso_03176.jpg inflating: dogImages/valid/044.Cane_corso/Cane_corso_03177.jpg inflating: dogImages/valid/044.Cane_corso/Cane_corso_03179.jpg creating: dogImages/valid/045.Cardigan_welsh_corgi/ inflating: dogImages/valid/045.Cardigan_welsh_corgi/Cardigan_welsh_corgi_03195.jpg inflating: dogImages/valid/045.Cardigan_welsh_corgi/Cardigan_welsh_corgi_03201.jpg inflating: dogImages/valid/045.Cardigan_welsh_corgi/Cardigan_welsh_corgi_03206.jpg inflating: dogImages/valid/045.Cardigan_welsh_corgi/Cardigan_welsh_corgi_03207.jpg inflating: dogImages/valid/045.Cardigan_welsh_corgi/Cardigan_welsh_corgi_03232.jpg inflating: dogImages/valid/045.Cardigan_welsh_corgi/Cardigan_welsh_corgi_03239.jpg creating: dogImages/valid/046.Cavalier_king_charles_spaniel/ inflating: dogImages/valid/046.Cavalier_king_charles_spaniel/Cavalier_king_charles_spaniel_03257.jpg inflating: dogImages/valid/046.Cavalier_king_charles_spaniel/Cavalier_king_charles_spaniel_03265.jpg inflating: dogImages/valid/046.Cavalier_king_charles_spaniel/Cavalier_king_charles_spaniel_03273.jpg inflating: dogImages/valid/046.Cavalier_king_charles_spaniel/Cavalier_king_charles_spaniel_03274.jpg inflating: dogImages/valid/046.Cavalier_king_charles_spaniel/Cavalier_king_charles_spaniel_03275.jpg inflating: dogImages/valid/046.Cavalier_king_charles_spaniel/Cavalier_king_charles_spaniel_03280.jpg inflating: dogImages/valid/046.Cavalier_king_charles_spaniel/Cavalier_king_charles_spaniel_03293.jpg inflating: dogImages/valid/046.Cavalier_king_charles_spaniel/Cavalier_king_charles_spaniel_03322.jpg creating: dogImages/valid/047.Chesapeake_bay_retriever/ inflating: dogImages/valid/047.Chesapeake_bay_retriever/Chesapeake_bay_retriever_03333.jpg inflating: dogImages/valid/047.Chesapeake_bay_retriever/Chesapeake_bay_retriever_03369.jpg inflating: 
dogImages/valid/047.Chesapeake_bay_retriever/Chesapeake_bay_retriever_03373.jpg inflating: dogImages/valid/047.Chesapeake_bay_retriever/Chesapeake_bay_retriever_03385.jpg inflating: dogImages/valid/047.Chesapeake_bay_retriever/Chesapeake_bay_retriever_03391.jpg inflating: dogImages/valid/047.Chesapeake_bay_retriever/Chesapeake_bay_retriever_03393.jpg creating: dogImages/valid/048.Chihuahua/ inflating: dogImages/valid/048.Chihuahua/Chihuahua_03397.jpg inflating: dogImages/valid/048.Chihuahua/Chihuahua_03419.jpg inflating: dogImages/valid/048.Chihuahua/Chihuahua_03422.jpg inflating: dogImages/valid/048.Chihuahua/Chihuahua_03445.jpg inflating: dogImages/valid/048.Chihuahua/Chihuahua_03457.jpg inflating: dogImages/valid/048.Chihuahua/Chihuahua_03459.jpg inflating: dogImages/valid/048.Chihuahua/Chihuahua_03464.jpg creating: dogImages/valid/049.Chinese_crested/ inflating: dogImages/valid/049.Chinese_crested/Chinese_crested_03467.jpg inflating: dogImages/valid/049.Chinese_crested/Chinese_crested_03473.jpg inflating: dogImages/valid/049.Chinese_crested/Chinese_crested_03474.jpg inflating: dogImages/valid/049.Chinese_crested/Chinese_crested_03482.jpg inflating: dogImages/valid/049.Chinese_crested/Chinese_crested_03493.jpg inflating: dogImages/valid/049.Chinese_crested/Chinese_crested_03497.jpg inflating: dogImages/valid/049.Chinese_crested/Chinese_crested_03504.jpg creating: dogImages/valid/050.Chinese_shar-pei/ inflating: dogImages/valid/050.Chinese_shar-pei/Chinese_shar-pei_03531.jpg inflating: dogImages/valid/050.Chinese_shar-pei/Chinese_shar-pei_03533.jpg inflating: dogImages/valid/050.Chinese_shar-pei/Chinese_shar-pei_03540.jpg inflating: dogImages/valid/050.Chinese_shar-pei/Chinese_shar-pei_03562.jpg inflating: dogImages/valid/050.Chinese_shar-pei/Chinese_shar-pei_03566.jpg inflating: dogImages/valid/050.Chinese_shar-pei/Chinese_shar-pei_03575.jpg creating: dogImages/valid/051.Chow_chow/ inflating: dogImages/valid/051.Chow_chow/Chow_chow_03606.jpg inflating: dogImages/valid/051.Chow_chow/Chow_chow_03612.jpg inflating: dogImages/valid/051.Chow_chow/Chow_chow_03624.jpg inflating: dogImages/valid/051.Chow_chow/Chow_chow_03628.jpg inflating: dogImages/valid/051.Chow_chow/Chow_chow_03629.jpg inflating: dogImages/valid/051.Chow_chow/Chow_chow_03636.jpg inflating: dogImages/valid/051.Chow_chow/Chow_chow_03657.jpg inflating: dogImages/valid/051.Chow_chow/Chow_chow_03661.jpg creating: dogImages/valid/052.Clumber_spaniel/ inflating: dogImages/valid/052.Clumber_spaniel/Clumber_spaniel_03670.jpg inflating: dogImages/valid/052.Clumber_spaniel/Clumber_spaniel_03672.jpg inflating: dogImages/valid/052.Clumber_spaniel/Clumber_spaniel_03679.jpg inflating: dogImages/valid/052.Clumber_spaniel/Clumber_spaniel_03684.jpg inflating: dogImages/valid/052.Clumber_spaniel/Clumber_spaniel_03698.jpg inflating: dogImages/valid/052.Clumber_spaniel/Clumber_spaniel_03724.jpg creating: dogImages/valid/053.Cocker_spaniel/ inflating: dogImages/valid/053.Cocker_spaniel/Cocker_spaniel_03733.jpg inflating: dogImages/valid/053.Cocker_spaniel/Cocker_spaniel_03742.jpg inflating: dogImages/valid/053.Cocker_spaniel/Cocker_spaniel_03766.jpg inflating: dogImages/valid/053.Cocker_spaniel/Cocker_spaniel_03769.jpg inflating: dogImages/valid/053.Cocker_spaniel/Cocker_spaniel_03775.jpg inflating: dogImages/valid/053.Cocker_spaniel/Cocker_spaniel_03776.jpg creating: dogImages/valid/054.Collie/ inflating: dogImages/valid/054.Collie/Collie_03791.jpg inflating: dogImages/valid/054.Collie/Collie_03797.jpg inflating: 
dogImages/valid/054.Collie/Collie_03833.jpg inflating: dogImages/valid/054.Collie/Collie_03842.jpg inflating: dogImages/valid/054.Collie/Collie_03854.jpg inflating: dogImages/valid/054.Collie/Collie_03855.jpg inflating: dogImages/valid/054.Collie/Collie_03858.jpg creating: dogImages/valid/055.Curly-coated_retriever/ inflating: dogImages/valid/055.Curly-coated_retriever/Curly-coated_retriever_03867.jpg inflating: dogImages/valid/055.Curly-coated_retriever/Curly-coated_retriever_03869.jpg inflating: dogImages/valid/055.Curly-coated_retriever/Curly-coated_retriever_03872.jpg inflating: dogImages/valid/055.Curly-coated_retriever/Curly-coated_retriever_03881.jpg inflating: dogImages/valid/055.Curly-coated_retriever/Curly-coated_retriever_03882.jpg inflating: dogImages/valid/055.Curly-coated_retriever/Curly-coated_retriever_03885.jpg creating: dogImages/valid/056.Dachshund/ inflating: dogImages/valid/056.Dachshund/Dachshund_03926.jpg inflating: dogImages/valid/056.Dachshund/Dachshund_03929.jpg inflating: dogImages/valid/056.Dachshund/Dachshund_03936.jpg inflating: dogImages/valid/056.Dachshund/Dachshund_03960.jpg inflating: dogImages/valid/056.Dachshund/Dachshund_03973.jpg inflating: dogImages/valid/056.Dachshund/Dachshund_03981.jpg inflating: dogImages/valid/056.Dachshund/Dachshund_03985.jpg inflating: dogImages/valid/056.Dachshund/Dachshund_03997.jpg creating: dogImages/valid/057.Dalmatian/ inflating: dogImages/valid/057.Dalmatian/Dalmatian_04005.jpg inflating: dogImages/valid/057.Dalmatian/Dalmatian_04014.jpg inflating: dogImages/valid/057.Dalmatian/Dalmatian_04020.jpg inflating: dogImages/valid/057.Dalmatian/Dalmatian_04051.jpg inflating: dogImages/valid/057.Dalmatian/Dalmatian_04057.jpg inflating: dogImages/valid/057.Dalmatian/Dalmatian_04058.jpg inflating: dogImages/valid/057.Dalmatian/Dalmatian_04066.jpg inflating: dogImages/valid/057.Dalmatian/Dalmatian_04079.jpg inflating: dogImages/valid/057.Dalmatian/Dalmatian_04088.jpg creating: dogImages/valid/058.Dandie_dinmont_terrier/ inflating: dogImages/valid/058.Dandie_dinmont_terrier/Dandie_dinmont_terrier_04096.jpg inflating: dogImages/valid/058.Dandie_dinmont_terrier/Dandie_dinmont_terrier_04117.jpg inflating: dogImages/valid/058.Dandie_dinmont_terrier/Dandie_dinmont_terrier_04124.jpg inflating: dogImages/valid/058.Dandie_dinmont_terrier/Dandie_dinmont_terrier_04126.jpg inflating: dogImages/valid/058.Dandie_dinmont_terrier/Dandie_dinmont_terrier_04144.jpg inflating: dogImages/valid/058.Dandie_dinmont_terrier/Dandie_dinmont_terrier_04149.jpg creating: dogImages/valid/059.Doberman_pinscher/ inflating: dogImages/valid/059.Doberman_pinscher/Doberman_pinscher_04158.jpg inflating: dogImages/valid/059.Doberman_pinscher/Doberman_pinscher_04165.jpg inflating: dogImages/valid/059.Doberman_pinscher/Doberman_pinscher_04178.jpg inflating: dogImages/valid/059.Doberman_pinscher/Doberman_pinscher_04197.jpg inflating: dogImages/valid/059.Doberman_pinscher/Doberman_pinscher_04202.jpg inflating: dogImages/valid/059.Doberman_pinscher/Doberman_pinscher_04208.jpg creating: dogImages/valid/060.Dogue_de_bordeaux/ inflating: dogImages/valid/060.Dogue_de_bordeaux/Dogue_de_bordeaux_04225.jpg inflating: dogImages/valid/060.Dogue_de_bordeaux/Dogue_de_bordeaux_04229.jpg inflating: dogImages/valid/060.Dogue_de_bordeaux/Dogue_de_bordeaux_04265.jpg inflating: dogImages/valid/060.Dogue_de_bordeaux/Dogue_de_bordeaux_04270.jpg inflating: dogImages/valid/060.Dogue_de_bordeaux/Dogue_de_bordeaux_04271.jpg inflating: 
dogImages/valid/060.Dogue_de_bordeaux/Dogue_de_bordeaux_04274.jpg inflating: dogImages/valid/060.Dogue_de_bordeaux/Dogue_de_bordeaux_04282.jpg creating: dogImages/valid/061.English_cocker_spaniel/ inflating: dogImages/valid/061.English_cocker_spaniel/English_cocker_spaniel_04295.jpg inflating: dogImages/valid/061.English_cocker_spaniel/English_cocker_spaniel_04302.jpg inflating: dogImages/valid/061.English_cocker_spaniel/English_cocker_spaniel_04303.jpg inflating: dogImages/valid/061.English_cocker_spaniel/English_cocker_spaniel_04330.jpg inflating: dogImages/valid/061.English_cocker_spaniel/English_cocker_spaniel_04338.jpg inflating: dogImages/valid/061.English_cocker_spaniel/English_cocker_spaniel_04341.jpg inflating: dogImages/valid/061.English_cocker_spaniel/English_cocker_spaniel_04364.jpg creating: dogImages/valid/062.English_setter/ inflating: dogImages/valid/062.English_setter/English_setter_04369.jpg inflating: dogImages/valid/062.English_setter/English_setter_04372.jpg inflating: dogImages/valid/062.English_setter/English_setter_04386.jpg inflating: dogImages/valid/062.English_setter/English_setter_04388.jpg inflating: dogImages/valid/062.English_setter/English_setter_04391.jpg inflating: dogImages/valid/062.English_setter/English_setter_04414.jpg inflating: dogImages/valid/062.English_setter/English_setter_04424.jpg creating: dogImages/valid/063.English_springer_spaniel/ inflating: dogImages/valid/063.English_springer_spaniel/English_springer_spaniel_04434.jpg inflating: dogImages/valid/063.English_springer_spaniel/English_springer_spaniel_04437.jpg inflating: dogImages/valid/063.English_springer_spaniel/English_springer_spaniel_04450.jpg inflating: dogImages/valid/063.English_springer_spaniel/English_springer_spaniel_04466.jpg inflating: dogImages/valid/063.English_springer_spaniel/English_springer_spaniel_04491.jpg inflating: dogImages/valid/063.English_springer_spaniel/English_springer_spaniel_04496.jpg creating: dogImages/valid/064.English_toy_spaniel/ inflating: dogImages/valid/064.English_toy_spaniel/English_toy_spaniel_04499.jpg inflating: dogImages/valid/064.English_toy_spaniel/English_toy_spaniel_04515.jpg inflating: dogImages/valid/064.English_toy_spaniel/English_toy_spaniel_04518.jpg inflating: dogImages/valid/064.English_toy_spaniel/English_toy_spaniel_04530.jpg inflating: dogImages/valid/064.English_toy_spaniel/English_toy_spaniel_04541.jpg creating: dogImages/valid/065.Entlebucher_mountain_dog/ inflating: dogImages/valid/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04556.jpg inflating: dogImages/valid/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04562.jpg inflating: dogImages/valid/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04568.jpg inflating: dogImages/valid/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04574.jpg inflating: dogImages/valid/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04587.jpg inflating: dogImages/valid/065.Entlebucher_mountain_dog/Entlebucher_mountain_dog_04590.jpg creating: dogImages/valid/066.Field_spaniel/ inflating: dogImages/valid/066.Field_spaniel/Field_spaniel_04602.jpg inflating: dogImages/valid/066.Field_spaniel/Field_spaniel_04610.jpg inflating: dogImages/valid/066.Field_spaniel/Field_spaniel_04611.jpg inflating: dogImages/valid/066.Field_spaniel/Field_spaniel_04614.jpg creating: dogImages/valid/067.Finnish_spitz/ inflating: dogImages/valid/067.Finnish_spitz/Finnish_spitz_04650.jpg inflating: dogImages/valid/067.Finnish_spitz/Finnish_spitz_04652.jpg inflating: 
dogImages/valid/067.Finnish_spitz/Finnish_spitz_04666.jpg inflating: dogImages/valid/067.Finnish_spitz/Finnish_spitz_04676.jpg creating: dogImages/valid/068.Flat-coated_retriever/ inflating: dogImages/valid/068.Flat-coated_retriever/Flat-coated_retriever_04683.jpg inflating: dogImages/valid/068.Flat-coated_retriever/Flat-coated_retriever_04685.jpg inflating: dogImages/valid/068.Flat-coated_retriever/Flat-coated_retriever_04694.jpg inflating: dogImages/valid/068.Flat-coated_retriever/Flat-coated_retriever_04699.jpg inflating: dogImages/valid/068.Flat-coated_retriever/Flat-coated_retriever_04705.jpg inflating: dogImages/valid/068.Flat-coated_retriever/Flat-coated_retriever_04718.jpg inflating: dogImages/valid/068.Flat-coated_retriever/Flat-coated_retriever_04750.jpg inflating: dogImages/valid/068.Flat-coated_retriever/Flat-coated_retriever_04758.jpg creating: dogImages/valid/069.French_bulldog/ inflating: dogImages/valid/069.French_bulldog/French_bulldog_04764.jpg inflating: dogImages/valid/069.French_bulldog/French_bulldog_04770.jpg inflating: dogImages/valid/069.French_bulldog/French_bulldog_04775.jpg inflating: dogImages/valid/069.French_bulldog/French_bulldog_04784.jpg inflating: dogImages/valid/069.French_bulldog/French_bulldog_04792.jpg inflating: dogImages/valid/069.French_bulldog/French_bulldog_04807.jpg creating: dogImages/valid/070.German_pinscher/ inflating: dogImages/valid/070.German_pinscher/German_pinscher_04830.jpg inflating: dogImages/valid/070.German_pinscher/German_pinscher_04845.jpg inflating: dogImages/valid/070.German_pinscher/German_pinscher_04849.jpg inflating: dogImages/valid/070.German_pinscher/German_pinscher_04854.jpg inflating: dogImages/valid/070.German_pinscher/German_pinscher_04862.jpg inflating: dogImages/valid/070.German_pinscher/German_pinscher_04871.jpg creating: dogImages/valid/071.German_shepherd_dog/ inflating: dogImages/valid/071.German_shepherd_dog/German_shepherd_dog_04897.jpg inflating: dogImages/valid/071.German_shepherd_dog/German_shepherd_dog_04900.jpg inflating: dogImages/valid/071.German_shepherd_dog/German_shepherd_dog_04915.jpg inflating: dogImages/valid/071.German_shepherd_dog/German_shepherd_dog_04918.jpg inflating: dogImages/valid/071.German_shepherd_dog/German_shepherd_dog_04921.jpg inflating: dogImages/valid/071.German_shepherd_dog/German_shepherd_dog_04934.jpg inflating: dogImages/valid/071.German_shepherd_dog/German_shepherd_dog_04936.jpg inflating: dogImages/valid/071.German_shepherd_dog/German_shepherd_dog_04947.jpg creating: dogImages/valid/072.German_shorthaired_pointer/ inflating: dogImages/valid/072.German_shorthaired_pointer/German_shorthaired_pointer_04965.jpg inflating: dogImages/valid/072.German_shorthaired_pointer/German_shorthaired_pointer_04966.jpg inflating: dogImages/valid/072.German_shorthaired_pointer/German_shorthaired_pointer_04967.jpg inflating: dogImages/valid/072.German_shorthaired_pointer/German_shorthaired_pointer_04985.jpg inflating: dogImages/valid/072.German_shorthaired_pointer/German_shorthaired_pointer_04998.jpg inflating: dogImages/valid/072.German_shorthaired_pointer/German_shorthaired_pointer_05007.jpg creating: dogImages/valid/073.German_wirehaired_pointer/ inflating: dogImages/valid/073.German_wirehaired_pointer/German_wirehaired_pointer_05031.jpg inflating: dogImages/valid/073.German_wirehaired_pointer/German_wirehaired_pointer_05041.jpg inflating: dogImages/valid/073.German_wirehaired_pointer/German_wirehaired_pointer_05054.jpg inflating: 
dogImages/valid/073.German_wirehaired_pointer/German_wirehaired_pointer_05062.jpg inflating: dogImages/valid/073.German_wirehaired_pointer/German_wirehaired_pointer_05070.jpg creating: dogImages/valid/074.Giant_schnauzer/ inflating: dogImages/valid/074.Giant_schnauzer/Giant_schnauzer_05075.jpg inflating: dogImages/valid/074.Giant_schnauzer/Giant_schnauzer_05089.jpg inflating: dogImages/valid/074.Giant_schnauzer/Giant_schnauzer_05099.jpg inflating: dogImages/valid/074.Giant_schnauzer/Giant_schnauzer_05110.jpg inflating: dogImages/valid/074.Giant_schnauzer/Giant_schnauzer_05123.jpg creating: dogImages/valid/075.Glen_of_imaal_terrier/ inflating: dogImages/valid/075.Glen_of_imaal_terrier/Glen_of_imaal_terrier_05132.jpg inflating: dogImages/valid/075.Glen_of_imaal_terrier/Glen_of_imaal_terrier_05135.jpg inflating: dogImages/valid/075.Glen_of_imaal_terrier/Glen_of_imaal_terrier_05152.jpg inflating: dogImages/valid/075.Glen_of_imaal_terrier/Glen_of_imaal_terrier_05153.jpg inflating: dogImages/valid/075.Glen_of_imaal_terrier/Glen_of_imaal_terrier_05156.jpg inflating: dogImages/valid/075.Glen_of_imaal_terrier/Glen_of_imaal_terrier_05161.jpg creating: dogImages/valid/076.Golden_retriever/ inflating: dogImages/valid/076.Golden_retriever/Golden_retriever_05188.jpg inflating: dogImages/valid/076.Golden_retriever/Golden_retriever_05194.jpg inflating: dogImages/valid/076.Golden_retriever/Golden_retriever_05196.jpg inflating: dogImages/valid/076.Golden_retriever/Golden_retriever_05214.jpg inflating: dogImages/valid/076.Golden_retriever/Golden_retriever_05228.jpg inflating: dogImages/valid/076.Golden_retriever/Golden_retriever_05245.jpg inflating: dogImages/valid/076.Golden_retriever/Golden_retriever_05251.jpg inflating: dogImages/valid/076.Golden_retriever/Golden_retriever_05256.jpg creating: dogImages/valid/077.Gordon_setter/ inflating: dogImages/valid/077.Gordon_setter/Gordon_setter_05271.jpg inflating: dogImages/valid/077.Gordon_setter/Gordon_setter_05281.jpg inflating: dogImages/valid/077.Gordon_setter/Gordon_setter_05282.jpg inflating: dogImages/valid/077.Gordon_setter/Gordon_setter_05284.jpg inflating: dogImages/valid/077.Gordon_setter/Gordon_setter_05302.jpg inflating: dogImages/valid/077.Gordon_setter/Gordon_setter_05304.jpg creating: dogImages/valid/078.Great_dane/ inflating: dogImages/valid/078.Great_dane/Great_dane_05328.jpg inflating: dogImages/valid/078.Great_dane/Great_dane_05346.jpg inflating: dogImages/valid/078.Great_dane/Great_dane_05348.jpg inflating: dogImages/valid/078.Great_dane/Great_dane_05355.jpg inflating: dogImages/valid/078.Great_dane/Great_dane_05362.jpg creating: dogImages/valid/079.Great_pyrenees/ inflating: dogImages/valid/079.Great_pyrenees/Great_pyrenees_05368.jpg inflating: dogImages/valid/079.Great_pyrenees/Great_pyrenees_05379.jpg inflating: dogImages/valid/079.Great_pyrenees/Great_pyrenees_05403.jpg inflating: dogImages/valid/079.Great_pyrenees/Great_pyrenees_05406.jpg inflating: dogImages/valid/079.Great_pyrenees/Great_pyrenees_05410.jpg inflating: dogImages/valid/079.Great_pyrenees/Great_pyrenees_05423.jpg inflating: dogImages/valid/079.Great_pyrenees/Great_pyrenees_05431.jpg creating: dogImages/valid/080.Greater_swiss_mountain_dog/ inflating: dogImages/valid/080.Greater_swiss_mountain_dog/Greater_swiss_mountain_dog_05454.jpg inflating: dogImages/valid/080.Greater_swiss_mountain_dog/Greater_swiss_mountain_dog_05459.jpg inflating: dogImages/valid/080.Greater_swiss_mountain_dog/Greater_swiss_mountain_dog_05460.jpg inflating: 
[unzip output truncated: remaining dogImages/valid/* breed folders (080.Greater_swiss_mountain_dog through 133.Yorkshire_terrier) extracted]
###Markdown
> remove unneeded folder
###Code
!rm -r "./__MACOSX"
###Output
_____no_output_____
###Markdown
Imports
###Code
import os
from glob import glob
from tqdm import tqdm

import numpy as np
import cv2
import matplotlib.pyplot as plt

import torch
import torch.optim as optim
import torch.nn as nn
from torchvision import datasets
import torchvision.models as models
import torchvision.transforms as transforms

from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True  # Set PIL to be tolerant of image files that are truncated.
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide.
Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. 
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*

In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("./lfw/*/*"))
dog_files = np.array(glob("./dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))

# check if CUDA is available
use_cuda = torch.cuda.is_available()
###Output
_____no_output_____
###Markdown
Step 1: Detect Humans

In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

show_human_or_dog = True

# extract pre-trained face detector
if run_on_colab:
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
else:
    face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
if show_human_or_dog:
    img = cv2.imread(human_files[0])
else:
    img = cv2.imread(dog_files[0])

# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output
Number of faces detected: 1
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter.

In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.

Write a Human Face Detector

We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. n_detected_humans_in_human_files = 0.0 n_detected_humans_in_dog_files = 0.0 for h in human_files_short: n_detected_humans_in_human_files += face_detector(h) for d in dog_files_short: n_detected_humans_in_dog_files += face_detector(d) human_accuracy = 100*n_detected_humans_in_human_files/len(human_files_short) dog_accuracy = 100*n_detected_humans_in_dog_files/len(dog_files_short) print(f"Human dataset : {human_accuracy:.2f} % humans detected") print(f"Dog dataset : {dog_accuracy:.2f} % humans detected") ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: print("Using CUDA") VGG16 = VGG16.cuda() else: print("Using CPU") ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. 
(IMPLEMENTATION) Making Predictions with a Pre-trained Model

In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.

Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](https://pytorch.org/vision/stable/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## Load and pre-process an image from the given img_path
    img = Image.open(img_path).convert('RGB')
    in_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),  # standard evaluation pre-processing for ImageNet models
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))  # normalize according to pytorch documentation
    ])
    # discard the transparent, alpha channel (that's the :3) and add the batch dimension
    img = in_transform(img)[:3,:,:].unsqueeze(0)
    if use_cuda:  # only move to GPU when one is available
        img = img.cuda()

    ## Return the *index* of the predicted class for that image
    VGG16.eval()  # disable dropout so predictions are deterministic
    with torch.no_grad():
        pred = torch.argmax(VGG16(img).cpu()).item()
    return pred
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector

While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).

Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    pred = VGG16_predict(img_path)
    return 151 <= pred <= 268  # True/False

# @AVTsoof: test VGG16_predict and dog_detector
import ast
with open("imagenet1000_clsidx_to_labels.txt") as f:
    idx2label = ast.literal_eval(f.read())  # the file is a dict literal; literal_eval is safer than eval
dog_file = dog_files[100]
pred = VGG16_predict(dog_file)
is_dog = dog_detector(dog_file)
print(f"{idx2label[pred]} - is a dog? {is_dog}")
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector

__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__ As the output below shows, 1% of the human images are mistakenly detected as dogs, while 100% of the dog images are correctly detected as dogs.
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
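# Note (added for clarity): dog_detector returns a boolean, and Python treats
# True/False as 1/0 in arithmetic, so summing the results counts detections.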
n_dogs_detected_in_human_files = 0.0
n_dogs_detected_in_dog_files = 0.0

for h in human_files_short:
    n_dogs_detected_in_human_files += dog_detector(h)
for d in dog_files_short:
    n_dogs_detected_in_dog_files += dog_detector(d)

human_accuracy = 100*n_dogs_detected_in_human_files/len(human_files_short)
dog_accuracy = 100*n_dogs_detected_in_dog_files/len(dog_files_short)
print(f"Human dataset : {human_accuracy:.2f} % dogs detected")
print(f"Dog dataset   : {dog_accuracy:.2f} % dogs detected")
###Output
Human dataset : 1.00 % dogs detected
Dog dataset   : 100.00 % dogs detected
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)

Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany | Welsh Springer Spaniel
- | -
*(image)* | *(image)*

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever | American Water Spaniel
- | -
*(image)* | *(image)*

Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.

Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -
*(image)* | *(image)* | *(image)*

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.

Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.
If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets
import torchvision.transforms as transforms

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
# prepare data loaders (combine dataset and sampler)
batch_size = 40

train_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# validation and test transforms are deterministic (no random augmentation),
# so the reported metrics are stable across runs
valid_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

test_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

train_data = datasets.ImageFolder("dogImages/train", transform=train_transform)
valid_data = datasets.ImageFolder("dogImages/valid", transform=valid_transform)
test_data = datasets.ImageFolder("dogImages/test", transform=test_transform)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True)

loaders_scratch = {"train": train_loader, "valid": valid_loader, "test": test_loader}
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**: Each image is resized so that its shorter side is 256 pixels and then center-cropped to 224x224, so no stretching occurs. 224x224 is the standard input size for VGG-style architectures and keeps the memory footprint manageable. The training set is augmented with random horizontal flips and random rotations of up to 10 degrees to make the model more robust to pose variation; the validation and test sets only receive the deterministic resize/crop so that evaluation is repeatable. All tensors are normalized to a zero-centered range.

(IMPLEMENTATION) Model Architecture

Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self, imsize=224, n_classes=133):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # note: based on VGG16 arch
        self.layers = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(64, 128, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),  # imsize=112

            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(128, 256, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),  # imsize=56

            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(256, 512, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),  # imsize=28

            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),  # imsize=14

            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.LeakyReLU(),
            nn.MaxPool2d(2, 2),  # imsize=7

            nn.Flatten(),
            nn.Linear(7 * 7 * 512, 4096), nn.LeakyReLU(), nn.Dropout(p=0.2),
            nn.Linear(4096, 4096), nn.LeakyReLU(), nn.Dropout(p=0.2),
            nn.Linear(4096, n_classes)
            # no Softmax here: nn.CrossEntropyLoss expects raw logits (it applies
            # log-softmax internally), so an extra Softmax layer would squash the
            # gradients and hurt training
        )

    def forward(self, x):
        ## Define forward behavior
        return self.layers(x)

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
try:
    model_scratch = None
    del model_scratch
except:
    pass
model_scratch = Net()

# Test model
TEST_SCRATCH_MODEL = False
if TEST_SCRATCH_MODEL:
    data, target = next(iter(train_loader))
    y = model_scratch(data)
    print(data.size(), target.size(), y.size())
    y = None
    del y
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__ The architecture follows VGG-16: stacks of 3x3 convolutions with periodic 2x2 max-pooling that halve the spatial resolution from 224 down to 7, followed by two 4096-unit fully connected layers and a final 133-way output layer. LeakyReLU activations are used in place of ReLU, and dropout in the classifier helps limit overfitting on this relatively small dataset.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
# lr=0.1 is far too large for Adam and tends to diverge; 1e-4 is a safer default
optimizer_scratch = optim.Adam(params=model_scratch.parameters(), lr=1e-4)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0
        correct = 0.0
        total = 0.0

        ###################
        # train the model #
        ###################
        print()  # newline
        n_train_batches = len(loaders['train'])
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            optimizer.zero_grad()
            logits = model(data)
            loss = criterion(logits, target)
            loss.backward()
            optimizer.step()
            ## record the average training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            print('\rEpoch: {}/{} \tTraining Batch: {}/{} \tTraining Loss: {:.6f}     '.format(
                epoch, n_epochs, batch_idx+1, n_train_batches, train_loss
            ), end='')

        ######################
        # validate the model #
        ######################
        print()  # newline
        n_valid_batches = len(loaders['valid'])
        model.eval()
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(loaders['valid']):
                # move to GPU
                if use_cuda:
                    data, target = data.cuda(), target.cuda()
                ## update the average validation loss
                logits = model(data)
                loss = criterion(logits, target)
                valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))
                # convert output probabilities to predicted class
                pred = logits.data.max(1, keepdim=True)[1]
                # compare predictions to true label
                correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
                total += data.size(0)
                print('\rEpoch: {}/{} \tValidation Batch: {}/{} \tValidation Loss: {:.6f}     '.format(
                    epoch, n_epochs, batch_idx+1, n_valid_batches, valid_loss
                ), end='')

        # print training/validation statistics
        # (the accuracy below is computed on the validation set)
        print()  # newline
        print('# Epoch: {}/{} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f} \tValidation Accuracy: {:.2f}% ({}/{})'.format(
            epoch, n_epochs, train_loss, valid_loss,
            100. * correct / total, int(correct), int(total),
        ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            valid_loss_min = valid_loss
            torch.save(model.state_dict(), save_path)
            print(f"Valid Loss decreased - Saved model checkpoint to: {save_path}")

    # return trained model
    return model

# move tensors to GPU if CUDA is available
if use_cuda:
    torch.cuda.empty_cache()
    model_scratch.cuda()

# train the model
model_scratch = train(5, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
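    # (added note) evaluation mode disables dropout so the test metrics are
    # deterministic, and torch.no_grad() below skips gradient bookkeeping,
    # reducing memory use during testing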
    model.eval()
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(loaders['test']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the loss
            loss = criterion(output, target)
            # update average test loss
            test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
            # convert output probabilities to predicted class
            pred = output.data.max(1, keepdim=True)[1]
            # compare predictions to true label
            correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
            total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

del model_scratch
torch.cuda.empty_cache()
###Output
_____no_output_____
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)

You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).

If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
###Code
import os
from torchvision import datasets
import torchvision.transforms as transforms

batch_size = 40

train_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# deterministic transforms for validation and test (no random augmentation)
valid_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

test_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

train_data = datasets.ImageFolder("dogImages/train", transform=train_transform)
valid_data = datasets.ImageFolder("dogImages/valid", transform=valid_transform)
test_data = datasets.ImageFolder("dogImages/test", transform=test_transform)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True)

loaders_transfer = {"train": train_loader, "valid": valid_loader, "test": test_loader}
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture

Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = torch.hub.load('pytorch/vision:v0.10.0', 'resnet50', pretrained=True)
model_transfer

# freeze features
for param in model_transfer.parameters():
    param.requires_grad = False

model_transfer.fc

# replace classifier output to 133 classes (number of dog breeds in the dataset)
n_classes = 133
model_transfer.fc = nn.Linear(in_features=model_transfer.fc.in_features, out_features=n_classes)

# move to GPU
if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.

__Answer:__ The model is a ResNet-50 pretrained on ImageNet. All pretrained parameters are frozen so the network acts as a fixed feature extractor, and the final fully connected layer is replaced with a new 133-way linear layer matching the number of dog breeds. ImageNet already contains over a hundred dog classes, so its learned features transfer well to fine-grained breed classification, and training only the new head keeps the number of trainable parameters small enough to avoid overfitting the relatively small dog dataset.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
import torch.optim as optim

criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.Adam(params=model_transfer.fc.parameters(), lr=0.003)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code
# train the model
n_epochs = 5
model_transfer = train(n_epochs, loaders_transfer, model_transfer,
                       optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')
###Output
Epoch: 1/5 	Training Batch: 167/167 	Training Loss: 1.944324
Epoch: 1/5 	Validation Batch: 21/21 	Validation Loss: 0.889983
# Epoch: 1/5 	Training Loss: 1.944324 	Validation Loss: 0.889983 	Validation Accuracy: 74.37% (621/835)
Valid Loss decreased - Saved model checkpoint to: model_transfer.pt
Epoch: 2/5 	Training Batch: 167/167 	Training Loss: 0.810795
Epoch: 2/5 	Validation Batch: 21/21 	Validation Loss: 0.788475
# Epoch: 2/5 	Training Loss: 0.810795 	Validation Loss: 0.788475 	Validation Accuracy: 79.16% (661/835)
Valid Loss decreased - Saved model checkpoint to: model_transfer.pt
Epoch: 3/5 	Training Batch: 167/167 	Training Loss: 0.640774
Epoch: 3/5 	Validation Batch: 21/21 	Validation Loss: 0.624593
# Epoch: 3/5 	Training Loss: 0.640774 	Validation Loss: 0.624593 	Validation Accuracy: 81.92% (684/835)
Valid Loss decreased - Saved model checkpoint to: model_transfer.pt
Epoch: 4/5 	Training Batch: 167/167 	Training Loss: 0.596373
Epoch: 4/5 	Validation Batch: 21/21 	Validation Loss: 0.993138
# Epoch: 4/5 	Training Loss: 0.596373 	Validation Loss: 0.993138 	Validation Accuracy: 76.65% (640/835)
Epoch: 5/5 	Training Batch: 167/167 	Training Loss: 0.470275
Epoch: 5/5 	Validation Batch: 21/21 	Validation Loss: 1.068081
# Epoch: 5/5 	Training Loss: 0.470275 	Validation Loss: 1.068081 	Validation Accuracy: 76.17% (636/835)
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
# load the model that got the best validation accuracy
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.686025

Test Accuracy: 80% (670/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model

Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
# example to extract class names and number
dir_idx = 0
print(glob("dogImages/train/*"))
print(glob("dogImages/train/*")[dir_idx])
print(glob("dogImages/train/*")[dir_idx].split("/"))
print(glob("dogImages/train/*")[dir_idx].split("/")[-1])
print(glob("dogImages/train/*")[dir_idx].split("/")[-1].split("."))
c = glob("dogImages/train/*")[dir_idx].split("/")[-1].split(".")
c_dict = {int(c[0]): c[1]}
print(c_dict)

# create class names dict, e.g. {1: 'Affenpinscher', ..., 133: 'Yorkshire_terrier'}
class_dirs = glob("dogImages/train/*")
class_names = [c.split("/")[-1].split(".") for c in class_dirs]
class_names = {int(c[0]): c[1] for c in class_names}
class_names

### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
# class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    img = Image.open(img_path).convert('RGB')
    in_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))  # normalize according to pytorch documentation
    ])
    # discard the transparent, alpha channel (that's the :3) and add the batch dimension
    img = in_transform(img)[:3,:,:].unsqueeze(0)
    if use_cuda:
        img = img.cuda()

    ## Return the *index* of the predicted class for that image
    model_transfer.eval()  # make sure dropout/batch-norm are in inference mode
    with torch.no_grad():
        pred = torch.argmax(model_transfer(img).cpu()).item()
    # ImageFolder indexes the sorted folders 001..133 as 0..132, so shift by one
    # to look up the {1..133: breed} dict built above
    return class_names[pred+1]

# test predict_breed_transfer() function
predict_breed_transfer("dogImages/test/001.Affenpinscher/Affenpinscher_00003.jpg")
###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm

Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.

Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
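# Note on the decision logic below: the face check runs first, so an image
# containing both a person and a dog is reported as "human" with a resembling breed.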
def run_app(img_path):
    img_contains = None
    breed = None

    ## handle cases for a human face, dog, and neither
    if face_detector(img_path):
        img_contains = "human"
        breed = predict_breed_transfer(img_path)  # resembling breed for the detected human
    elif dog_detector(img_path):
        img_contains = "dog"
        breed = predict_breed_transfer(img_path)

    if img_contains == "human":
        print(f"This human looks like a '{breed}'")
    elif img_contains == "dog":
        print(f"This dog is a '{breed}'")
    else:
        print("APP ERROR: No dog or human detected!")
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm

In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!

Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ The transfer-learned model performs better than expected, reaching roughly 80% test accuracy after only five epochs. Three possible points of improvement: (1) fine-tune some of the deeper ResNet layers (with a small learning rate) instead of training only the new classifier head; (2) use stronger data augmentation and a learning-rate schedule to stabilize the later epochs, where the validation loss started to rise; (3) replace the Haar-cascade face detector with a CNN-based detector, since the cascade misses non-frontal faces and occasionally fires on dog images.
###Code
n_min_files = min(len(dog_files), len(human_files))
np.random.randint(0, n_min_files-1, (6,1))

## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements.
If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!

The Road Ahead

We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
* [Step 0](step0): Import Datasets
* [Step 1](step1): Detect Humans
* [Step 2](step2): Detect Dogs
* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](step5): Write your Algorithm
* [Step 6](step6): Test Your Algorithm

--- Step 0: Import Datasets

Make sure that you've downloaded the required human and dog datasets:

**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.**

* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`.
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.

*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*

In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("/data/lfw/*/*"))
dog_files = np.array(glob("/data/dog_images/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
There are 13233 total human images.
There are 8351 total dog images.
###Markdown
Step 1: Detect Humans

In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
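## np.average over a list of booleans gives the fraction of images in
## which a face was detected (True counts as 1, False as 0)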
human_detected_as_human = np.average([face_detector(img) for img in tqdm(human_files_short)])
dog_detected_as_human = np.average([face_detector(img) for img in tqdm(dog_files_short)])

print('human_detected_as_human : {}'.format(human_detected_as_human))
print('dog_detected_as_human : {}'.format(dog_detected_as_human))

###Output
100%|██████████| 100/100 [00:02<00:00, 36.13it/s]
100%|██████████| 100/100 [00:30<00:00, 3.30it/s]
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    image = Image.open(img_path).convert('RGB')
    # use a deterministic resize + center crop (a random crop would make the
    # prediction change from call to call) and the ImageNet normalization stats
    transforms_pipeline = transforms.Compose([transforms.Resize(256),
                                              transforms.CenterCrop(224),
                                              transforms.ToTensor(),
                                              transforms.Normalize((0.485, 0.456, 0.406),
                                                                   (0.229, 0.224, 0.225))])
    image_tensor = transforms_pipeline(image)
    image_tensor = image_tensor.unsqueeze(0)

    if torch.cuda.is_available():
        image_tensor = image_tensor.cuda()

    VGG16.eval()  # disable dropout so predictions are deterministic
    prediction = VGG16(image_tensor)

    if torch.cuda.is_available():
        prediction = prediction.cpu()

    index = prediction.data.numpy().argmax()

    return index # predicted class index

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    index = VGG16_predict(img_path)
    # ImageNet dog classes span indices 151-268 inclusive
    return (151 <= index <= 268) # true/false

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector __Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ In the run recorded below, 1% of the images in `human_files_short` and 66% of the images in `dog_files_short` had a detected dog.
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
human_detected_as_dog = np.average([dog_detector(image) for image in human_files_short])
dog_detected_as_dog = np.average([dog_detector(image) for image in dog_files_short])

print('human_detected_as_dog : {}'.format(human_detected_as_dog))
print('dog_detected_as_dog : {}'.format(dog_detected_as_dog))

###Output
human_detected_as_dog : 0.01
dog_detected_as_dog : 0.66
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch) Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. (image comparison: Brittany | Welsh Springer Spaniel) It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). (image comparison: Curly-Coated Retriever | American Water Spaniel) Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. (image comparison: Yellow Labrador | Chocolate Labrador | Black Labrador) We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
# random crops augment the training data; validation and test use a
# deterministic resize + center crop so evaluation is repeatable
transforms_pipeline = transforms.Compose([transforms.RandomResizedCrop(224),
                                          transforms.ToTensor()])
eval_transforms = transforms.Compose([transforms.Resize(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor()])

train_data = datasets.ImageFolder('/data/dog_images/train', transform=transforms_pipeline)
valid_data = datasets.ImageFolder('/data/dog_images/valid', transform=eval_transforms)
test_data = datasets.ImageFolder('/data/dog_images/test', transform=eval_transforms)

batch_size = 10
num_workers = 0

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)

loaders_scratch = {
    'train': train_loader,
    'valid': valid_loader,
    'test': test_loader
}

###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why? - Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: Training images are randomly cropped and resized to 224x224, which both matches the input size used by VGG-style networks and lightly augments the data; validation and test images are resized to 256 and center-cropped to 224x224 so that evaluation is deterministic. (IMPLEMENTATION) Model Architecture Create a CNN to classify dog breed. Use the template in the code cell below.
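As a quick sanity check on the template, the sketch below (assuming a 224x224 input and three 2x2 max-pool stages, as in the architecture that follows) computes the flattened feature size that the first fully connected layer must accept:
###Code
# each 2x2 max pool halves the spatial resolution: 224 -> 112 -> 56 -> 28
size = 224
for _ in range(3):
    size = size // 2
# with 128 output channels, the flattened size is 128 * 28 * 28 = 100352
print(size, 128 * size * size)

###Output
_____no_output_____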
###Code
import torch.nn as nn
import torch.nn.functional as F

total_dog_classes = 133 # total classes of dog

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.norm2d1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        size_linear_layer = 500
        self.fc1 = nn.Linear(128 * 28 * 28, size_linear_layer)
        self.fc2 = nn.Linear(size_linear_layer, total_dog_classes)

    def forward(self, x):
        x = self.pool(F.relu(self.norm2d1(self.conv1(x))))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(-1, 128 * 28 * 28)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()
print(model_scratch)

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch = model_scratch.cuda()

###Output
Net(
  (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (norm2d1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (fc1): Linear(in_features=100352, out_features=500, bias=True)
  (fc2): Linear(in_features=500, out_features=133, bias=True)
)
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ The network stacks three convolutional layers, each with a 3x3 kernel, stride 1, and padding 1, each followed by a ReLU activation and a 2x2 max pool: conv1 (3 -> 32 channels, with batch normalization), conv2 (32 -> 64), and conv3 (64 -> 128). The three pooling stages reduce the 224x224 input to 28x28 feature maps, so the classifier head is a fully connected layer from 128 * 28 * 28 = 100352 features down to 500, followed by a final fully connected layer to the 133 breed classes. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01)

if use_cuda:
    criterion_scratch = criterion_scratch.cuda()

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below.
###Code from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: torch.save(model.state_dict(), save_path) print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...' .format(valid_loss_min, valid_loss)) valid_loss_min = valid_loss return model # train the model model_scratch = train(20, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.876111 Validation Loss: 4.832679 Validation loss decreased (inf --> 4.832679). Saving model ... Epoch: 2 Training Loss: 4.723843 Validation Loss: 4.608108 Validation loss decreased (4.832679 --> 4.608108). Saving model ... Epoch: 3 Training Loss: 4.585763 Validation Loss: 4.527100 Validation loss decreased (4.608108 --> 4.527100). Saving model ... Epoch: 4 Training Loss: 4.520732 Validation Loss: 4.547612 Epoch: 5 Training Loss: 4.437482 Validation Loss: 4.511413 Validation loss decreased (4.527100 --> 4.511413). Saving model ... Epoch: 6 Training Loss: 4.379553 Validation Loss: 4.508663 Validation loss decreased (4.511413 --> 4.508663). Saving model ... Epoch: 7 Training Loss: 4.321283 Validation Loss: 4.342193 Validation loss decreased (4.508663 --> 4.342193). Saving model ... Epoch: 8 Training Loss: 4.268735 Validation Loss: 4.399857 Epoch: 9 Training Loss: 4.212796 Validation Loss: 4.306961 Validation loss decreased (4.342193 --> 4.306961). Saving model ... Epoch: 10 Training Loss: 4.146228 Validation Loss: 4.283576 Validation loss decreased (4.306961 --> 4.283576). Saving model ... Epoch: 11 Training Loss: 4.122817 Validation Loss: 4.250658 Validation loss decreased (4.283576 --> 4.250658). Saving model ... Epoch: 12 Training Loss: 4.036503 Validation Loss: 4.221124 Validation loss decreased (4.250658 --> 4.221124). Saving model ... Epoch: 13 Training Loss: 3.998610 Validation Loss: 4.234061 Epoch: 14 Training Loss: 3.970603 Validation Loss: 4.522322 Epoch: 15 Training Loss: 3.894277 Validation Loss: 4.155758 Validation loss decreased (4.221124 --> 4.155758). Saving model ... Epoch: 16 Training Loss: 3.852717 Validation Loss: 4.125472 Validation loss decreased (4.155758 --> 4.125472). Saving model ... 
Epoch: 17 Training Loss: 3.776484 Validation Loss: 4.206441
Epoch: 18 Training Loss: 3.728687 Validation Loss: 4.267205
Epoch: 19 Training Loss: 3.700622 Validation Loss: 4.145487
Epoch: 20 Training Loss: 3.647172 Validation Loss: 4.098069
Validation loss decreased (4.125472 --> 4.098069). Saving model ...
###Markdown
(IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
###Code
def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output
Test Loss: 4.011537

Test Accuracy: 11% (92/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning) You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
###Code
## TODO: Specify data loaders
loaders_transfer = loaders_scratch.copy()

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
# I chose RESNET!
model_transfer = models.resnet50(pretrained=True)

# freeze all pretrained parameters
for param in model_transfer.parameters():
    param.requires_grad = False

# replace the final fully connected layer with a new 133-way classifier
model_transfer.fc = nn.Linear(2048, 133, bias=True)

# only the new classifier layer is trained
fc_parameters = model_transfer.fc.parameters()
for param in fc_parameters:
    param.requires_grad = True

if use_cuda:
    model_transfer = model_transfer.cuda()

###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ ResNet-50 is relatively slow to train, but residual networks have a strong track record on image classification, so I chose it as the backbone. All pretrained layers are frozen, and the final fully connected layer is replaced by a new 2048 -> 133 classifier, which is the only part that is trained.
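One quick way to confirm that only the new classifier head will receive gradient updates is to count trainable versus frozen parameters. A minimal sketch, assuming the `model_transfer` defined above:
###Code
# trainable: only the new fc layer (2048 * 133 weights + 133 biases = 272517)
trainable = sum(p.numel() for p in model_transfer.parameters() if p.requires_grad)
frozen = sum(p.numel() for p in model_transfer.parameters() if not p.requires_grad)
print('trainable parameters: {}'.format(trainable))
print('frozen parameters   : {}'.format(frozen))

###Output
_____no_output_____
###Markdown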
(IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.fc.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): if use_cuda: data, target = data.cuda(), target.cuda() optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): if use_cuda: data, target = data.cuda(), target.cuda() output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) if valid_loss < valid_loss_min: torch.save(model.state_dict(), save_path) print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...' .format(valid_loss_min, valid_loss)) valid_loss_min = valid_loss return model n_epochs = 20 model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 4.750213 Validation Loss: 4.494010 Validation loss decreased (inf --> 4.494010). Saving model ... Epoch: 2 Training Loss: 4.435421 Validation Loss: 4.143042 Validation loss decreased (4.494010 --> 4.143042). Saving model ... Epoch: 3 Training Loss: 4.145857 Validation Loss: 3.845803 Validation loss decreased (4.143042 --> 3.845803). Saving model ... Epoch: 4 Training Loss: 3.890805 Validation Loss: 3.533295 Validation loss decreased (3.845803 --> 3.533295). Saving model ... Epoch: 5 Training Loss: 3.649848 Validation Loss: 3.270090 Validation loss decreased (3.533295 --> 3.270090). Saving model ... Epoch: 6 Training Loss: 3.434227 Validation Loss: 3.021046 Validation loss decreased (3.270090 --> 3.021046). Saving model ... Epoch: 7 Training Loss: 3.235353 Validation Loss: 2.833658 Validation loss decreased (3.021046 --> 2.833658). Saving model ... Epoch: 8 Training Loss: 3.050240 Validation Loss: 2.620366 Validation loss decreased (2.833658 --> 2.620366). Saving model ... Epoch: 9 Training Loss: 2.888124 Validation Loss: 2.483087 Validation loss decreased (2.620366 --> 2.483087). Saving model ... Epoch: 10 Training Loss: 2.775599 Validation Loss: 2.321706 Validation loss decreased (2.483087 --> 2.321706). Saving model ... 
Epoch: 11 Training Loss: 2.632967 Validation Loss: 2.192671
Validation loss decreased (2.321706 --> 2.192671). Saving model ...
Epoch: 12 Training Loss: 2.515728 Validation Loss: 2.080832
Validation loss decreased (2.192671 --> 2.080832). Saving model ...
Epoch: 13 Training Loss: 2.421598 Validation Loss: 2.027155
Validation loss decreased (2.080832 --> 2.027155). Saving model ...
Epoch: 14 Training Loss: 2.347137 Validation Loss: 1.935270
Validation loss decreased (2.027155 --> 1.935270). Saving model ...
Epoch: 15 Training Loss: 2.230803 Validation Loss: 1.827137
Validation loss decreased (1.935270 --> 1.827137). Saving model ...
Epoch: 16 Training Loss: 2.157842 Validation Loss: 1.743219
Validation loss decreased (1.827137 --> 1.743219). Saving model ...
Epoch: 17 Training Loss: 2.096384 Validation Loss: 1.743431
Epoch: 18 Training Loss: 2.020786 Validation Loss: 1.697538
Validation loss decreased (1.743219 --> 1.697538). Saving model ...
Epoch: 19 Training Loss: 1.978022 Validation Loss: 1.525336
Validation loss decreased (1.697538 --> 1.525336). Saving model ...
Epoch: 20 Training Loss: 1.922164 Validation Loss: 1.509397
Validation loss decreased (1.525336 --> 1.509397). Saving model ...
###Markdown
(IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output
Test Loss: 1.577271

Test Accuracy: 69% (581/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
from PIL import Image
import torchvision.transforms as transforms

### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

data_transfer = loaders_transfer.copy()
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].dataset.classes]

def predict_breed_transfer(img_path):
    global model_transfer
    image = Image.open(img_path).convert('RGB')
    # use the deterministic evaluation transforms so repeated calls on the
    # same image return the same breed
    image = eval_transforms(image)[:3,:,:].unsqueeze(0)

    if use_cuda:
        model_transfer = model_transfer.cuda()
        image = image.cuda()

    model_transfer.eval()
    idx = torch.argmax(model_transfer(image))
    return class_names[idx]

###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then, - if a __dog__ is detected in the image, return the predicted breed. - if a __human__ is detected in the image, return the resembling dog breed. - if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience! ![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
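## Algorithm outline (implemented in run_app below):
##   1. if a human face is detected, report the resembling dog breed
##   2. otherwise, if a dog is detected, report its predicted breed
##   3. otherwise, report that neither a human nor a dog was found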
def run_app(img_path): ## handle cases for a human face, dog, and neither if face_detector(img_path) > 0: breed = predict_breed_transfer(img_path) print('human is ' + breed) elif dog_detector(img_path): breed = predict_breed_transfer(img_path) print('dog is ' + breed) else: print('Neither dog nor human.') ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output human is Chihuahua human is Basenji human is Bedlington terrier dog is Bullmastiff dog is American staffordshire terrier Neither dog nor human. ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. 
If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code ## set environment variable to know we're training on Udacity workspace import numpy as np from glob import glob import cv2 import matplotlib.pyplot as plt %matplotlib inline from tqdm import tqdm import torch import torchvision.models as models from PIL import Image import torchvision.transforms as transforms from PIL import ImageFile import os from torchvision import datasets import torchvision.transforms as transforms import torch.nn as nn import torch.nn.functional as F import torch.optim as optim # check if CUDA is available use_cuda = torch.cuda.is_available() if use_cuda: print('Using GPU...') else: print('Using CPU') udacity = False small = False import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. 
OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ The model detected a human face in 99% of the `human_files` and in 18% of the `dog_files`.However, using the `face_recognition` module, error of the `dog_files` decreased to 10%. ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. 
#-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. count_human = 0 count_dog = 0 for i in tqdm(range(100)): if face_detector(human_files_short[i]): count_human += 1 if face_detector(dog_files_short[i]): count_dog += 1 print('Percentage of Human faces is: {}'.format((count_human/100))) print('Percentage of Dogs with Human faces is: {}'.format((count_dog/100))) ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code try: import face_recognition except ModuleNotFoundError as err: # Error handling !pip install face_recognition ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. count_human = 0 count_dog = 0 # returns "True" if face is detected in image stored at img_path def face_detect(img_path): img = face_recognition.load_image_file(img_path) face_locations = face_recognition.face_locations(img) return len(face_locations) > 0 for i in tqdm(range(100)): if face_detect(human_files_short[i]): count_human += 1 if face_detect(dog_files_short[i]): count_dog += 1 print('Percentage of Human faces is: {}'.format((count_human/100))) print('Percentage of Dogs with Human faces is: {}'.format((count_dog/100))) ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() print('Using GPU...') else: print('Using CPU') ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. 
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# imports needed by the URL branch of load_image below
import requests
from io import BytesIO

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    # loading, resizing, and normalization are handled by load_image below
    image = load_image(img_path)
    if use_cuda:
        image = image.cuda()

    ## Return the *index* of the predicted class for that image
    ## Documentation:
    ## used code from https://www.kaggle.com/carloalbertobarbano/vgg16-transfer-learning-pytorch
    ## to recognize that output was prediction for each of 1000 classes.
    ## used this documentation to convert the single value tensor to a number:
    ## https://pytorch.org/docs/stable/tensors.html
    VGG16.eval()  # disable dropout so predictions are deterministic
    output = VGG16(image)
    _, preds = torch.max(output.data, 1)
    return preds.item() # predicted class index

# preprocess images
# code from Udacity Deep Learning Style Transfer Exercise
def load_image(img_path, max_size=400, shape=(224, 224)):
    ''' Load in and transform an image, making sure the image
       is <= 400 pixels in the x-y dims.'''
    if "http" in img_path:
        response = requests.get(img_path)
        image = Image.open(BytesIO(response.content)).convert('RGB')
    else:
        image = Image.open(img_path).convert('RGB')

    # large images will slow down processing
    if max(image.size) > max_size:
        size = max_size
    else:
        size = max(image.size)

    if shape is not None:
        size = shape

    in_transform = transforms.Compose([
                        transforms.Resize(size),
                        transforms.ToTensor(),
                        transforms.Normalize((0.485, 0.456, 0.406),
                                             (0.229, 0.224, 0.225))])

    # discard the transparent, alpha channel (that's the :3) and add the batch dimension
    image = in_transform(image)[:3,:,:].unsqueeze(0)

    return image

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    output = VGG16_predict(img_path)
    return (output >= 151 and output <= 268) # true/false

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector __Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ Only 1% of `human_files_short` have a detected dog, while 97% of the `dog_files_short` have a dog detected.
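Since this percentage check recurs for each detector, it can be factored into a small helper. A minimal sketch, assuming any detector with the same `img_path -> bool` interface:
###Code
def detection_rate(detector, files):
    """Return the fraction of image paths in `files` for which `detector` returns True."""
    return np.mean([detector(f) for f in files])

# e.g. detection_rate(dog_detector, human_files_short)

###Output
_____no_output_____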
###Code
print(dog_detector(human_files_short[0]))

image = load_image(dog_files_short[0])
print(image.size())

### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
count_human = 0
count_dog = 0

for i in tqdm(range(100)):
    if dog_detector(human_files_short[i]):
        count_human += 1
    if dog_detector(dog_files_short[i]):
        count_dog += 1

print('Percentage of Humans incorrectly classified as dogs is: {}'.format((count_human/100)))
print('Percentage of Dogs correctly classified is: {}'.format((count_dog/100)))

###Output
_____no_output_____
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch) Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. (image comparison: Brittany | Welsh Springer Spaniel) It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). (image comparison: Curly-Coated Retriever | American Water Spaniel) Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. (image comparison: Yellow Labrador | Chocolate Labrador | Black Labrador) We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.
If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets import torchvision.transforms as transforms # from torch.utils.data.sampler import SubsetRandomSampler ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes # number of subprocesses to use for data loading num_workers = 0 # how many samples per batch to load batch_size = 20 # convert data to a normalized torch.FloatTensor # transforming using the n-channels for (mean1, mean2, mean3)... train_transform = transforms.Compose([ transforms.RandomHorizontalFlip(p=0.5), transforms.RandomRotation(10), transforms.Resize((256, 256)), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) transform = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # choose the training, validation, and test datasets if udacity: train_data = datasets.ImageFolder('/data/dog_images/train', transform=train_transform) valid_data = datasets.ImageFolder('/data/dog_images/valid', transform=transform) test_data = datasets.ImageFolder('/data/dog_images/test', transform=transform) else: train_data = datasets.ImageFolder('./dogimages/train', transform=train_transform) valid_data = datasets.ImageFolder('./dogimages/valid', transform=transform) test_data = datasets.ImageFolder('./dogimages/test', transform=transform) # prepare data loaders (combine dataset and sampler) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=False, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers) loaders_scratch = { 'train': train_loader, 'valid': valid_loader, 'test': test_loader} # print length of TVT splits print('There are {} images in the training set.'.format(len(train_data))) print('There are {} images in the validation set.'.format(len(valid_data))) print('There are {} images in the testing set.'.format(len(test_data))) ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:1. I have augmented the dataset using a random horizontal flip with probability p=0.5 and a random rotation of (-10, 10) degrees in order to create a more robust classifier. These are pretty simple but I am hoping they will be enough to work on the limited dataset that is available.2. The code resizes the images by either stretching or downsizing the training images to a 256x256 pixel image, then crops the image to a 224x224 image. This was chosen due to the input size for the VGG16 model as refered to in the Pytorch documentation. Although classifying human images in section 2 worked for larger images (250x250 px), the somewhat random images in the dog faces caused problems. This code could probably be improved by creating a better compression algorithm. 
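A quick way to confirm the pipeline produces tensors of the expected shape is to pull one sample through the training dataset. A minimal sketch, assuming the `train_data` defined above:
###Code
# the transform runs when the sample is accessed
img, label = train_data[0]
print(img.shape)  # expected: torch.Size([3, 224, 224])

###Output
_____no_output_____
###Markdown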
(IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN # input is 3x224x224 image self.conv1 = nn.Conv2d(3, 16, 3, stride=2, padding=1) # 16x56x56 self.bnm1 = nn.BatchNorm2d(16) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) # 32x28x28 self.bnm2 = nn.BatchNorm2d(32) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) # 64x14x14 self.bnm3 = nn.BatchNorm2d(64) self.FC1 = nn.Linear(64*14*14, 1024) # self.FC3 = nn.Linear(1024, 1024) self.FC2 = nn.Linear(1024, 133) self.pool = nn.MaxPool2d(2, 2) self.dropout = nn.Dropout(p=0.4) def forward(self, x): ## Define forward behavior x = self.pool(F.relu(self.conv1(x))) # print(x.shape) x = self.bnm1(x) x = self.pool(F.relu(self.conv2(x))) x = self.bnm2(x) x = self.pool(F.relu(self.conv3(x))) x = self.bnm3(x) # print(x.shape) x = x.view(-1, 64*14*14) x = self.dropout(x) x = F.relu(self.FC1(x)) x = self.FC2(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() print(model_scratch) # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I originally aimed for a similar, yet smaller, instantiation of the VGGNet. That didn't train very well due to the lack of data. I decided to go less deep due to the smaller number of training instances and overall classes.1. Three convolutional layers seemed to provide an appropriate depth. Any more was taking a long time to train without much benefit.2. The CNNs all used a rectified linear unit activation function.2. I pooled each CNN layer to take the dimensions from 224x224 to 14x14. This also helped increase computation time.3. Each layer was then normalized using 2d Batch Normalization. I found this had a minor effect on the training and validation.4. I decided to implement a final two fully connected layers.5. The first fully connected used dropout with a probability of 0.4, which I thought was relatively large, but my network was quickly overfitting and I needed a way to slow it down.6. One fully connected layer with 12544 inputs and 1024 outputs (since 1024 is 1/4 of the VGGNet architecture).7. Finally, a fully connected layer reduces the dimensions to 133, the same number as the number of classes in the dataset.I decided to use a relu activation function because that seems to be the norm for most of the models that I have seen. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.00005) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. 
[Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code
resume = False

if udacity:
    # keep the workspace session alive during long-running work
    from workspace_utils import active_session
    with active_session():
        # do long-running work here (the train() call below can be placed
        # inside this context when running on the Udacity workspace)
        pass

# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    # check if loading from previous training
    if resume:
        checkpoint = torch.load('model_dog_classifier_ckpt.pt')
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        loss = checkpoint['loss']

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            # clear gradients of optimized variables
            optimizer.zero_grad()
            # forward pass: pass inputs to the model
            output = model(data)
            # calculate batch loss
            loss = criterion(output, target)
            # backward pass: compute the gradient of the loss with respect to model parameters
            loss.backward()
            # perform an optimization step
            optimizer.step()
            # update the training loss
            # train_loss += loss.item()*data.size(0)
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            # forward pass
            output = model(data)
            # calculate the loss
            loss = criterion(output, target)
            # update the average validation loss for the batch
            # valid_loss += loss.item()*data.size(0)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # calculate average losses
        # train_loss = train_loss/len(train_loader.dataset)
        # valid_loss = valid_loss/len(valid_loader.dataset)

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        # documentation: reused code from cifar10_cnn_exercise
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
                valid_loss_min,
                valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

            # save checkpoint
            #torch.save({
            #    'epoch': epoch,
            #    'model_state_dict': model.state_dict(),
            #    'optimizer_state_dict': optimizer.state_dict(),
            #    'loss': train_loss,
            #    }, 'model_dog_classifier_ckpt.pt')

    # return trained model
    return model

# train the model
model_scratch = train(20, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch_2.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch_2.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 10%. ###Code
def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))

    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
_____no_output_____
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code
import os
from torchvision import datasets
import torchvision.transforms as transforms
# from torch.utils.data.sampler import SubsetRandomSampler

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 32

# convert data to a normalized torch.FloatTensor
# normalize each channel with the ImageNet means and stds expected by pretrained models
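# note: only the training split gets augmentation below; the validation and
# test transforms are deterministic so the evaluation numbers stay comparable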
train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomRotation(10),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

# choose the training, validation, and test datasets
if udacity:
    train_data = datasets.ImageFolder('/data/dog_images/train', transform=train_transform)
    valid_data = datasets.ImageFolder('/data/dog_images/valid', transform=transform)
    test_data = datasets.ImageFolder('/data/dog_images/test', transform=transform)
else:
    train_data = datasets.ImageFolder('./dogimages/train', transform=train_transform)
    valid_data = datasets.ImageFolder('./dogimages/valid', transform=transform)
    test_data = datasets.ImageFolder('./dogimages/test', transform=transform)

# prepare data loaders (combine dataset and sampler)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)

loaders_transfer = {
    'train': train_loader,
    'valid': valid_loader,
    'test': test_loader}

# print length of TVT splits
print('There are {} images in the training set.'.format(len(train_data)))
print('There are {} images in the validation set.'.format(len(valid_data)))
print('There are {} images in the testing set.'.format(len(test_data)))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed.  Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
# download model
model_transfer = models.vgg16(pretrained=True)

# Freeze training for all "features" layers
for param in model_transfer.features.parameters():
    param.requires_grad = False

n_inputs = model_transfer.classifier[6].in_features  # find number of inputs into final FC layer of VGG16

last_layer = nn.Linear(n_inputs, 133)  # 133 total classes in the dog breed classifier

model_transfer.classifier[6] = last_layer

if use_cuda:
    model_transfer = model_transfer.cuda()
    print('Transferred model to cuda')

print(model_transfer)
print(model_transfer.classifier[6].out_features)
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ I kept it simple and replaced the final fully connected layer with one that outputs to the 133 classes in the dogs dataset. This was the only layer whose parameters I re-optimized, and it worked sufficiently well (63% accuracy).If I wanted to increase the accuracy further, I would probably have re-optimized the final two fully connected layers, but I was running low on time and needed to make a decision based on a performance/timing tradeoff. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html).  Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code import torch.optim as optim criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(model_transfer.classifier.parameters(), lr = 0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = train(20, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in train_data.classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed img = load_image(img_path) if use_cuda: img = img.cuda() output = model_transfer(img) _, preds = torch.max(output.data, 1) lab = class_names[preds.item()] return lab ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. 
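# Plan for run_app (implemented below):
#   1. check dog_detector; if a dog is found, predict and report its breed
#   2. otherwise check the face detector; if a human face is found, say so
#   3. if neither detector fires, ask the user for a different image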
class_names = [item[4:].replace("_", " ") for item in train_data.classes]

def predict_breed_scratch(img_path):
    # load the image and return the predicted breed
    img = load_image(img_path)
    if use_cuda:
        img = img.cuda()
    output = model_scratch(img)
    _, preds = torch.max(output.data, 1)
    lab = class_names[preds.item()]
    return lab

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    # first see if a dog is present
    if dog_detector(img_path):
        # a dog was found, so predict its breed with the scratch CNN
        c = predict_breed_scratch(img_path)
        print('This picture of a dog looks like a {}'.format(c))
        return
    # no dog detected: check for a human face instead
    if face_detector(img_path):
        print('This is a picture of a human')
        return
    print('Error: Please upload an image of either a human or dog.')
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)My output is about as good as I expected. Considering the low accuracy of the model that was trained from scratch (a lowly 13%), I don't expect it to get good results. I am not surprised that it couldn't distinguish my Pug, given that Pug wasn't one of the classes, so it would never have learned it. I am surprised that it didn't default to the Mastiff, which I think pugs sort of look like mini versions of, and instead classified her as a poodle.

**Possible Points of Improvement**
1. Include more training data. With only ~800 photos, it would be very difficult to train an accurate model with 133 classes.
2. Better preprocessing. I think smarter compression algorithms for transforming the images to 224x224 are vital so we don't lose critical information.
3. Better hyperparameter optimization. With my selected batch size of 32, the algorithm quickly converged, even with a low learning rate of 0.0001. This meant that I was quickly overfitting the data and only got to train on 4-5 epochs. Unfortunately, I couldn't get satisfactory training with a smaller batch size, which did confuse me. ###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

kptest = np.array(glob('proj_img/*'))
#print('There are {} total test images'.format(len(kptest)))

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3], kptest)):
    run_app(file)
    img = cv2.imread(file)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(img)
    plt.show()
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested.
Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). 
Unzip the folder and place it in the home directory, at location `/lfw`.  *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
There are 13233 total human images.
There are 8351 total dog images.
###Markdown
Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images.  OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades).  We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output
Number of faces detected: 1
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale.  The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter.  In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face.  Each detected face is a 1D array with four entries that specifies the bounding box of the detected face.  The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box.  The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise.  This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?

Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face.  You will see that our algorithm falls short of this goal, but still gives acceptable performance.  We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__
The percentage of human faces in the first 100 images of the human data set: 99.0%
The percentage of human faces in the first 100 images of the dog data set: 9.0% ###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
human_in_img = [face_detector(k) for k in human_files_short]
dog_in_img = [face_detector(k) for k in dog_files_short]
print(dog_in_img)
plt.imshow(cv2.imread(dog_files_short[1]))
plt.show()
print("The percentage of human faces in the first 100 images of the human data set: {}%".format(100*sum(human_in_img)/100))
print("The percentage of human faces in the first 100 images of the dog data set: {}%".format(100*sum(dog_in_img)/100))
###Output
[False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, True, True, False, False, False, True, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False]
The percentage of human faces in the first 100 images of the human data set: 99.0%
The percentage of human faces in the first 100 images of the dog data set: 9.0%
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :).  Please use the code cell below to design and test your own face detection algorithm.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() print (use_cuda) print (VGG16) ###Output True VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace) (5): Dropout(p=0.5) (6): Linear(in_features=4096, out_features=1000, bias=True) ) ) ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). 
###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    img = Image.open(img_path)
    img_tensor = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])
        ])(img).unsqueeze_(0)
    if use_cuda:
        img_tensor = img_tensor.cuda()
    ## Return the *index* of the predicted class for that image
    tensor_classification = VGG16(img_tensor)
    # print(tensor_classification)
    if use_cuda:
        tensor_classification = tensor_classification.cpu()
    prediction = tensor_classification.data.numpy().argmax()
    # print(prediction)
    # guard against an out-of-range index (valid ImageNet indices are 0-999)
    if prediction < 0 or prediction > 999:
        prediction = 0
    return prediction  # predicted class index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`.  Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    index = VGG16_predict(img_path)
    # ImageNet indices 151-268 (inclusive) are the dog breed categories
    return 151 <= index <= 268
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog? __Answer:__
The percentage of dogs in the first 100 images of the human data set: 1.0%
The percentage of dogs in the first 100 images of the dog data set: 100.0%
The percentage of dogs in the first 1000 images of the human data set: 0.8%
The percentage of dogs in the first 1000 images of the dog data set: 98.2% ###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
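# dog_detector returns a bool, so sum() over the list counts the positive
# detections; dividing by the sample size gives the detection rate in percent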
human_files_short = human_files[:100]
dog_files_short = dog_files[:100]
dog_in_humanfile = [dog_detector(k) for k in human_files_short]
dog_in_dogfile = [dog_detector(k) for k in dog_files_short]
print("The percentage of dogs in the first 100 images of the human data set: {}%".format(100*sum(dog_in_humanfile)/100))
print("The percentage of dogs in the first 100 images of the dog data set: {}%".format(100*sum(dog_in_dogfile)/100))
###Output
The percentage of dogs in the first 100 images of the human data set: 1.0%
The percentage of dogs in the first 100 images of the dog data set: 100.0%
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc).  Please use the code cell below to test other pre-trained PyTorch models.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images.  In this step, you will create a CNN that classifies dog breeds.  You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.  In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging.  To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany | Welsh Springer Spaniel *(example images omitted)*

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever | American Water Spaniel *(example images omitted)*

Likewise, recall that labradors come in yellow, chocolate, and black.  Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.

Yellow Labrador | Chocolate Labrador | Black Labrador *(example images omitted)*

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning.  Experiment with many different architectures, and trust your intuition.  And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively).  You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.
If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
#from PIL import ImageFile
#ImageFile.LOAD_TRUNCATED_IMAGES = True

# how many samples per batch to load
batch_size = 20

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

train = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.RandomRotation(45),
    transforms.ToTensor(),
    normalize
    ])

test = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    normalize
    ])

train_data = datasets.ImageFolder('dogImages/train', transform = train)
valid_data = datasets.ImageFolder('dogImages/valid', transform = test)
test_data = datasets.ImageFolder('dogImages/test', transform = test)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True)

loaders_scratch = {'train': train_loader,
                   'valid': valid_loader,
                   'test': test_loader}
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)?  What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset?  If so, how (through translations, flips, rotations, etc)?  If not, why not? **Answer**: All images were resized to 224 x 224 pixels, which is a widely used resolution for images processed by neural networks. The resulting tensor will be of size 224 x 224 x 3, which provides enough information from the image and can still be processed by a deep neural network in a reasonable amount of time, i.e. the input tensor size was chosen to provide a good trade-off between accuracy and performance.I added random rotation to the training data only (not applied to test and validation data) to make the network more robust and allow it to detect dogs in various positions / rotations. All tensors were normalized with the values provided in the link https://pytorch.org/docs/stable/torchvision/models.html (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 8, 3)
        self.conv2 = nn.Conv2d(8, 32, 3)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.conv4 = nn.Conv2d(64, 128, 3)
        self.conv5 = nn.Conv2d(128, 256, 3)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        self.dropout = nn.Dropout(0.25)
        self.fc1 = nn.Linear(256 * 5 * 5, 500)
        #self.fc2 = nn.Linear(5000, 1000)
        self.fc2 = nn.Linear(500, 133)

    def forward(self, x):
        ## Define forward behavior
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = self.pool(F.relu(self.conv5(x)))
        x = x.view(-1, 256 * 5 * 5)
        # dropout (the result must be assigned back to x; nn.Dropout is not in-place)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  __Answer:__ I applied standard layers to define my network, such as convolutional layers for detecting features of an image.ReLU was used as the non-linear activation function. Max pooling and dropout were applied to avoid overfitting.The details for the various layers can be found below: The 1st convolutional layer received an input tensor of size 224 x 224 x 3 and produced one of size 222 x 222 x 8.The max pooling converts it to a tensor of size 111 x 111 x 8. The 2nd convolutional layer received an input tensor of size 111 x 111 x 8 and produced one of size 109 x 109 x 32.The max pooling converts it to a tensor of size 54 x 54 x 32. The 3rd convolutional layer received an input tensor of size 54 x 54 x 32 and produced one of size 52 x 52 x 64.The max pooling converts it to a tensor of size 26 x 26 x 64. The 4th convolutional layer received an input tensor of size 26 x 26 x 64 and produced one of size 24 x 24 x 128.The max pooling converts it to a tensor of size 12 x 12 x 128. The 5th convolutional layer received an input tensor of size 12 x 12 x 128 and produced one of size 10 x 10 x 256.The max pooling converts it to a tensor of size 5 x 5 x 256. I decided to use two fully connected layers. The input for the first layer is determined by the output of the last convolutional layer, and the output of the second layer is determined by the number of final classes to be detected. The number of nodes in-between was chosen to be somewhere in the middle of 5 x 5 x 256 and 133. Note that dropout was applied to reduce the risk of overfitting. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html).  Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters())
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below.  [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # update average validation loss
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # caution: train_loss and valid_loss are already running averages over
        # batches; the extra division below rescales them by the dataset size,
        # which is why the printed values in the training log are so small
        train_loss = train_loss/len(train_loader.dataset)
        valid_loss = valid_loss/len(valid_loader.dataset)

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
                valid_loss_min,
                valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

# train the model
model_scratch = train(10, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
Epoch: 1 	Training Loss: 0.000469 	Validation Loss: 0.004543
Validation loss decreased (inf --> 0.004543).  Saving model ...
Epoch: 2 	Training Loss: 0.000456 	Validation Loss: 0.004405
Validation loss decreased (0.004543 --> 0.004405).  Saving model ...
Epoch: 3 Training Loss: 0.000442 Validation Loss: 0.004506 Epoch: 4 Training Loss: 0.000425 Validation Loss: 0.004409 Epoch: 5 Training Loss: 0.000414 Validation Loss: 0.004420 Epoch: 6 Training Loss: 0.000396 Validation Loss: 0.004516 Epoch: 7 Training Loss: 0.000382 Validation Loss: 0.004625 Epoch: 8 Training Loss: 0.000367 Validation Loss: 0.004520 Epoch: 9 Training Loss: 0.000353 Validation Loss: 0.004782 Epoch: 10 Training Loss: 0.000344 Validation Loss: 0.004655 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.655655 Test Accuracy: 14% (121/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
###Code
## TODO: Specify data loaders
# copy from above
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
#from PIL import ImageFile
#ImageFile.LOAD_TRUNCATED_IMAGES = True

# how many samples per batch to load
batch_size = 20

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

train = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.RandomRotation(45),
    transforms.ToTensor(),
    normalize
    ])

test = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    normalize
    ])

train_data = datasets.ImageFolder('dogImages/train', transform = train)
valid_data = datasets.ImageFolder('dogImages/valid', transform = test)
test_data = datasets.ImageFolder('dogImages/test', transform = test)

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True)

loaders_transfer = {'train': train_loader,
                    'valid': valid_loader,
                    'test': test_loader}
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed.  Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.resnet50(pretrained=True)

# freeze all pretrained parameters so they are not re-trained
for param in model_transfer.parameters():
    param.requires_grad = False

# adjust the number of output classes
n_inputs = model_transfer.fc.in_features
last_layer = nn.Linear(n_inputs, 133, bias=True)
model_transfer.fc = last_layer

# unfreeze the new final layer so it can be trained
for param in model_transfer.fc.parameters():
    param.requires_grad = True

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem. __Answer:__ ResNet50 is a state-of-the-art architecture with a relatively good trade-off between the time required for training and the resulting accuracy. It was trained on ImageNet, which includes dog classes, so it should have enough information for classifying dog breeds as well if the output layer is changed. I keep all layers unchanged (do not retrain) and only adapt the last layer to the required classification problem.Note, some people report getting good results with VGG-19 (cf. https://mc.ai/dog-breeds-classification-with-cnn-transfer-learning/) but I simply wanted to try out how it works with ResNet50. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html).  Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
# train the model
model_transfer = train(20, loaders_transfer, model_transfer, optimizer_transfer,
                       criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
Epoch: 1 	Training Loss: 0.000380 	Validation Loss: 0.001389
Validation loss decreased (inf --> 0.001389).  Saving model ...
Epoch: 2 	Training Loss: 0.000173 	Validation Loss: 0.000999
Validation loss decreased (0.001389 --> 0.000999).  Saving model ...
Epoch: 3 	Training Loss: 0.000138 	Validation Loss: 0.001033
Epoch: 4 	Training Loss: 0.000128 	Validation Loss: 0.000864
Validation loss decreased (0.000999 --> 0.000864).  Saving model ...
Epoch: 5 	Training Loss: 0.000117 	Validation Loss: 0.000999
Epoch: 6 	Training Loss: 0.000109 	Validation Loss: 0.000941
Epoch: 7 	Training Loss: 0.000102 	Validation Loss: 0.000931
Epoch: 8 	Training Loss: 0.000092 	Validation Loss: 0.000962
Epoch: 9 	Training Loss: 0.000094 	Validation Loss: 0.000979
Epoch: 10 	Training Loss: 0.000088 	Validation Loss: 0.000986
Epoch: 11 	Training Loss: 0.000088 	Validation Loss: 0.001002
Epoch: 12 	Training Loss: 0.000083 	Validation Loss: 0.001017
Epoch: 13 	Training Loss: 0.000085 	Validation Loss: 0.000968
Epoch: 14 	Training Loss: 0.000079 	Validation Loss: 0.001057
Epoch: 15 	Training Loss: 0.000076 	Validation Loss: 0.001067
Epoch: 16 	Training Loss: 0.000077 	Validation Loss: 0.001090
Epoch: 17 	Training Loss: 0.000074 	Validation Loss: 0.000979
Epoch: 18 	Training Loss: 0.000078 	Validation Loss: 0.001001
Epoch: 19 	Training Loss: 0.000075 	Validation Loss: 0.001045
Epoch: 20 	Training Loss: 0.000071 	Validation Loss: 0.000995
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy.  Ensure that your test accuracy is greater than 60%. ###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.680545

Test Accuracy: 78% (660/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.  ###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

data_transfer = {'train': train_data,
                 'valid': valid_data,
                 'test': test_data}

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    img = Image.open(img_path)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    test = transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        normalize
        ])
    img_tensor = test(img)
    img_tensor.unsqueeze_(0)
    if use_cuda:
        img_tensor = img_tensor.cuda()
    output = model_transfer(img_tensor)
    _, prediction = torch.max(output.data, 1)
    # the model's output index maps directly onto class_names (0-132)
    class_result = class_names[prediction.item()]
    return class_result
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.
Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above.  You are __required__ to use your CNN from Step 4 to predict dog breed.  Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    # predict a breed whenever either detector fires; otherwise report an error
    if face_detector(img_path) or dog_detector(img_path):
        pred = predict_breed_transfer(img_path)
        return pred
    else:
        return "error"
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately?  If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer.  Feel free to use any images you like.  Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ?  Or worse :( ?  Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)I have no clue about dog breeds. BUT images 4 and 6 seem to be the same breed and are classified the same way. Similarly, the first two humans have a cap and are also classified the same way. So this seems to be OK. The output is as expected.The algorithm could be improved by:
(1) increasing the number of epochs used for training;
(2) updating the weights of the pretrained downloaded model during training of the dog breeds; this is not done currently, and doing so (at the cost of longer training times) could improve the result;
(3) adding labels that map humans to dog breeds. The network was never trained to classify humans as dogs, so such labels would probably improve the subjective classification of humans as dog breeds. ###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
i = 0
for file in np.hstack((human_files[:3], dog_files[:3])):
    pred = run_app(file)
    print('\nThe picture in the test is {}, and it has {}. '.format(file, pred))
    i = i + 1
    ax = plt.subplot(6, 1, i)
    ax.axis('off')
    plt.imshow(np.asarray(Image.open(file)))
###Output
The picture in the test is lfw/Adam_Scott/Adam_Scott_0002.jpg, and it has Canaan dog. 

The picture in the test is lfw/Adam_Scott/Adam_Scott_0001.jpg, and it has Canaan dog. 

The picture in the test is lfw/Lou_Ye/Lou_Ye_0001.jpg, and it has Airedale terrier. 

The picture in the test is dogImages/valid/122.Pointer/Pointer_07831.jpg, and it has Plott. 

The picture in the test is dogImages/valid/122.Pointer/Pointer_07834.jpg, and it has American eskimo dog. 

The picture in the test is dogImages/valid/122.Pointer/Pointer_07808.jpg, and it has Plott.
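###Markdown
Following up on improvement point (2) above, here is a minimal, untested sketch of how fine-tuning could be added. It assumes the `model_transfer`, `loaders_transfer`, `criterion_transfer`, and `train` objects defined earlier; the names `optimizer_finetune` and `'model_transfer_finetuned.pt'` are made up for illustration. The idea is to unfreeze only the deepest ResNet block plus the classifier head and continue training with a small learning rate so the pretrained weights are only nudged. ###Code
import torch.optim as optim

# unfreeze the last ResNet block ('layer4') and the classifier head ('fc');
# everything else stays frozen
for name, param in model_transfer.named_parameters():
    param.requires_grad = name.startswith('layer4') or name.startswith('fc')

# optimize only the parameters that require gradients, with a small learning rate
optimizer_finetune = optim.Adam(
    (p for p in model_transfer.parameters() if p.requires_grad), lr=1e-4)

# a few extra epochs of fine-tuning, reusing the train() loop from above
model_transfer = train(5, loaders_transfer, model_transfer, optimizer_finetune,
                       criterion_transfer, use_cuda, 'model_transfer_finetuned.pt')
###Output
_____no_output_____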
###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. 
Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Import Datasets
* [Step 1](#step1): Detect Humans
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Write your Algorithm
* [Step 6](#step6): Test Your Algorithm
--- Step 0: Import Datasets
Make sure that you've downloaded the required human and dog datasets:
* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`.
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*
In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
_____no_output_____
###Markdown
Step 1: Detect Humans
In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output
_____no_output_____
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.
Write a Human Face Detector
We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector
__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?
Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.
__Answer:__ (You can print out your results and/or write your percentages in this cell)
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
###Output
_____no_output_____
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
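Before the implementation section below, here is a minimal sketch of how a single image is typically prepared for a torchvision model and turned into a class index. It assumes the standard ImageNet preprocessing statistics documented for torchvision's pretrained models; the helper name `imagenet_class_index` is illustrative, not part of the notebook's required API.
###Code
import torch
import torchvision.transforms as transforms
from PIL import Image

# Standard ImageNet preprocessing: resize, center-crop to 224x224, then
# normalize with the statistics the torchvision models were trained with.
imagenet_preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def imagenet_class_index(img_path, model):
    """Return the 0-999 ImageNet index that `model` predicts for one image."""
    img = Image.open(img_path).convert('RGB')
    batch = imagenet_preprocess(img).unsqueeze(0)  # add a batch dimension
    if next(model.parameters()).is_cuda:
        batch = batch.cuda()                       # keep input on the model's device
    model.eval()
    with torch.no_grad():
        output = model(batch)
    return int(output.argmax(dim=1).item())
###Output
_____no_output_____
###Markdown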
(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.
Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image

    return None # predicted class index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.

    return None # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
###Output
_____no_output_____
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.
*(Comparison images omitted: Brittany | Welsh Springer Spaniel)*
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
*(Comparison images omitted: Curly-Coated Retriever | American Water Spaniel)*
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
*(Comparison images omitted: Yellow Labrador | Chocolate Labrador | Black Labrador)*
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**:
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN

    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None
###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement)
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks
Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**.
Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' 
% len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. 
__Answer:__ (You can print out your results and/or write your percentages in this cell)
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.

# human files: count images with a detected human face
detected_human_face = 0
for file in human_files_short:
    if face_detector(file):
        detected_human_face += 1

# dog files: count images with a (spuriously) detected human face
detected_dog_face = 0
for file in dog_files_short:
    if face_detector(file):
        detected_dog_face += 1

print("Human faces detected in the first 100 human files:")
print(detected_human_face)
print("Human faces detected in the first 100 dog files:")
print(detected_dog_face)
###Output
_____no_output_____
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.
Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms
import random

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    data_transforms = transforms.Compose([transforms.Resize(256),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                               std=[0.229, 0.224, 0.225])])
    # convert to RGB so grayscale images do not break the 3-channel normalization
    img = Image.open(img_path).convert('RGB')
    imtu = data_transforms(img).unsqueeze(0)
    # only move the input to the GPU when the model lives there
    if use_cuda:
        imtu = imtu.cuda()
    idx = VGG16(imtu).cpu().data.numpy().argmax()
    return idx

img_path = random.choice(dog_files)
print("Predicted index for file")
print(img_path)
idx = VGG16_predict(img_path)
print(idx)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    idx = VGG16_predict(img_path)
    if idx >= 151 and idx <= 268:
        return True
    else:
        return False

img_path = random.choice(dog_files)
print("Predicted index for file")
print(img_path)
is_dog = dog_detector(img_path)
print(is_dog)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the dog_detector function
## on the images in human_files_short and dog_files_short.

# human files: count images in which a dog was (spuriously) detected
dogs_in_human_files = 0
for file in human_files_short:
    if dog_detector(file):
        dogs_in_human_files += 1

# dog files: count images in which a dog was detected
dogs_in_dog_files = 0
for file in dog_files_short:
    if dog_detector(file):
        dogs_in_dog_files += 1

print("Dogs detected in the first 100 human files:")
print(dogs_in_human_files)
print("Dogs detected in the first 100 dog files:")
print(dogs_in_dog_files)
###Output
_____no_output_____
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
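As a sketch of this optional task, the cell below swaps the backbone for ResNet-50 while keeping everything else from the cells above: the same ImageNet preprocessing and the same 151-268 dog index range, which applies unchanged because all torchvision ImageNet models share the 1000-class label set. The function name `resnet50_dog_detector` is illustrative, not part of the required template.
###Code
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image

# ResNet-50 pretrained on ImageNet; only the backbone differs from VGG-16.
resnet50 = models.resnet50(pretrained=True)
use_cuda = torch.cuda.is_available()
if use_cuda:
    resnet50 = resnet50.cuda()
resnet50.eval()

_resnet_preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def resnet50_dog_detector(img_path):
    """Same 151-268 index test as dog_detector, backed by ResNet-50."""
    img = Image.open(img_path).convert('RGB')
    batch = _resnet_preprocess(img).unsqueeze(0)
    if use_cuda:
        batch = batch.cuda()
    with torch.no_grad():
        idx = int(resnet50(batch).argmax(dim=1).item())
    return 151 <= idx <= 268
###Output
_____no_output_____
###Markdown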
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.
*(Comparison images omitted: Brittany | Welsh Springer Spaniel)*
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
*(Comparison images omitted: Curly-Coated Retriever | American Water Spaniel)*
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
*(Comparison images omitted: Yellow Labrador | Chocolate Labrador | Black Labrador)*
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
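The next cell is an illustrative sketch only, not the loaders used below: it shows one plausible augmentation pipeline for the training split (random crops, flips, small rotations), while validation and test keep a deterministic resize/crop so evaluation stays comparable. The loaders actually implemented in the following cell use a single deterministic transform for all three splits.
###Code
import torchvision.transforms as transforms

# Augmentation applies only to training; evaluation transforms stay deterministic.
train_transforms = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

eval_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
###Output
_____no_output_____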
###Code
import os
import numpy as np
import torch
import torchvision.transforms as transforms
from torchvision import datasets
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
batch_size = 20

data_transforms = transforms.Compose([transforms.Resize(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                           std=[0.229, 0.224, 0.225])])

# choose the training and test datasets
train_data = datasets.ImageFolder(
    "/data/dog_images/train",
    transform=data_transforms,
)
# shuffle the training loader so each batch mixes breeds; ImageFolder lists
# images sorted by class, and un-shuffled batches would each contain one breed
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=0)

test_data = datasets.ImageFolder(
    "/data/dog_images/test",
    transform=data_transforms,
)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=0)

valid_data = datasets.ImageFolder(
    "/data/dog_images/valid",
    transform=data_transforms,
)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=0)

loaders_scratch = {
    'train': train_loader,
    'valid': valid_loader,
    'test' : test_loader
}
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**:
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.norm1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.norm2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.norm3 = nn.BatchNorm2d(64)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 28 * 28, 133)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        # add sequence of convolutional and max pooling layers
        # input: 224*224
        x = self.norm1(self.pool(F.relu(self.conv1(x))))
        # 112*112
        x = self.norm2(self.pool(F.relu(self.conv2(x))))
        # 56*56
        x = self.norm3(self.pool(F.relu(self.conv3(x))))
        # 28*28
        x = x.view(-1, 64 * 28 * 28)
        x = self.dropout(x)
        x = self.fc1(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()
use_cuda = torch.cuda.is_available()

# move tensors to GPU if CUDA is available
if use_cuda:
    print("Training on GPU")
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() optimizer.zero_grad() output = model(data) loss = criterion(output,target) loss.backward() optimizer.step() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): if use_cuda: data, target = data.cuda(), target.cuda() output = model(data) loss = criterion(output, target) ## update the average validation loss valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased print('Validation loss ({:.6f} --> {:.6f})'.format(valid_loss_min, valid_loss)) if valid_loss <= valid_loss_min: print('Validation loss decreased. Saving model!'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), 'model_scratch.pt') valid_loss_min = valid_loss return model # train the model model_scratch = train(25, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None
###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement)
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks
Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.
--- Why We're Here
In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
![Sample Dog Output](images/sample_dog_output.png)
In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!
The Road Ahead
We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Import Datasets
* [Step 1](#step1): Detect Humans
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Write your Algorithm
* [Step 6](#step6): Test Your Algorithm
--- Step 0: Import Datasets
Make sure that you've downloaded the required human and dog datasets:
* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`.
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*
In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
There are 13233 total human images.
There are 8351 total dog images.
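###Markdown
As a quick sanity check on the dog dataset (not required by the project), the short cell below counts images and breed folders per split. It assumes the `dogImages/<split>/<breed>/<image>` layout used throughout this notebook.
###Code
from glob import glob

# Count images and breed folders in each split of the dog dataset.
for split in ['train', 'valid', 'test']:
    n_images = len(glob("dogImages/%s/*/*" % split))
    n_breeds = len(glob("dogImages/%s/*" % split))
    print('%s: %d images across %d breed folders' % (split, n_images, n_breeds))
###Output
_____no_output_____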
###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. 
__Answer:__ 100% of the first 100 images in `human_files` have a detected human face, and 5% of the first 100 images in `dog_files` do (see the output below).

###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
human_detections = 0
dog_detections = 0
for file in human_files_short:
    if face_detector(file):
        human_detections+=1
for file in dog_files_short:
    if face_detector(file):
        dog_detections+=1

print("Humans and dogs detected as human faces:\t{}% {}%"
      .format(100*(human_detections/len(human_files_short)),
              100*(dog_detections/len(dog_files_short))))

###Output
Humans and dogs detected as human faces: 100.0% 5.0%

###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
print(use_cuda)

###Output
True

###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path)
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    x = transform(img)
    x = x.unsqueeze(0)
    if use_cuda:
        # guard the GPU transfer so the function also runs on CPU-only machines
        x = x.cuda()
    predictions = VGG16(x)
    return predictions.argmax() # predicted class index

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    prediction = VGG16_predict(img_path)
    if 151 <= prediction <= 268:
        return True
    else:
        return False

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__ 1% of the human images are detected as dogs, and 99% of the dog images are detected as dogs.

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

img = Image.open(dog_files_short[14])
print(img.size)
resize = transforms.Resize(256)
crop = transforms.CenterCrop(224)
img = resize(img)
img = crop(img)
plt.imshow(img)
print(img.size)
# A random crop straight to 224 would fail on images smaller than 224x224 (it would
# attempt to cut out a region larger than the image itself). Resizing to 256 first
# and then cropping avoids this, as demonstrated on this single example image.

human_detections = 0
dog_detections = 0
for file in human_files_short:
    if dog_detector(file):
        human_detections+=1
for i, file in enumerate(dog_files_short):
    if dog_detector(file):
        dog_detections+=1

print("\nHumans detected as dogs:\t{}%" .format(100*(human_detections/len(human_files_short))))
print("Dogs detected as dogs:  \t{}%" .format(100*(dog_detections/len(dog_files_short))))

###Output
(400, 400)
(224, 224)

Humans detected as dogs: 1.0%
Dogs detected as dogs:  99.0%

###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
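For instance, ResNet-50 is trained on the same 1000 ImageNet classes with the same input preprocessing as VGG-16, so the 151-268 dog-index check carries over unchanged. A minimal sketch (the function name `resnet50_dog_detector` is introduced here for illustration, and no accuracy figures are claimed for it):

###Code
# (sketch) swap ResNet-50 in as the backbone for a dog detector
resnet50 = models.resnet50(pretrained=True)
resnet50.eval()
if use_cuda:
    resnet50 = resnet50.cuda()

def resnet50_dog_detector(img_path):
    img = Image.open(img_path).convert('RGB')
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    x = transform(img).unsqueeze(0)
    if use_cuda:
        x = x.cuda()
    with torch.no_grad():
        idx = resnet50(x).argmax().item()
    # same ImageNet dog-index range (151-268) as for VGG-16
    return 151 <= idx <= 268

###Output
_____no_output_____

###Markdown
Being a drop-in replacement for `dog_detector`, it can be evaluated with the same loop used above.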
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. (Side-by-side example images of a Brittany and a Welsh Springer Spaniel appear here in the original notebook.) It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). (Example images of a Curly-Coated Retriever and an American Water Spaniel.) Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. (Example images of yellow, chocolate, and black Labradors.) We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes #print(glob("/data/dog_images/*/*/*")) def get_datasets(): dir_train = "/home/adrian/Udacity-Deep-Learning/project-dog-classification/dogImages/train" dir_valid = "/home/adrian/Udacity-Deep-Learning/project-dog-classification/dogImages/valid" dir_test = "/home/adrian/Udacity-Deep-Learning/project-dog-classification/dogImages/test" #print(dir_train) #print(dir_train) #print(os.listdir(dir_train[0])) transform_train = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(0.1), transforms.RandomVerticalFlip(0.1), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) #Found several pictures of dogs that had shape < 224, therefore resizing it to > 224 then random cropping to 224 transform_test = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) target_transform = None train_set = datasets.ImageFolder(dir_train,transform=transform_train, target_transform=target_transform) valid_set = datasets.ImageFolder(dir_valid,transform=transform_test,target_transform=target_transform) test_set = datasets.ImageFolder(dir_train,transform=transform_test) return {'train': train_set, 'valid': valid_set, 'test': test_set} def get_train_loaders(): dir_train = "/home/adrian/Udacity-Deep-Learning/project-dog-classification/dogImages/train" dir_valid = "/home/adrian/Udacity-Deep-Learning/project-dog-classification/dogImages/valid" dir_test = "/home/adrian/Udacity-Deep-Learning/project-dog-classification/dogImages/test" #print(dir_train) #print(dir_train) #print(os.listdir(dir_train[0])) transform_train = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(0.1), transforms.RandomVerticalFlip(0.1), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) #Found several pictures of dogs that had shape < 224, therefore resizing it to > 224 then random cropping to 224 transform_test = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) target_transform = None train_set = datasets.ImageFolder(dir_train,transform=transform_train, target_transform=target_transform) valid_set = datasets.ImageFolder(dir_valid,transform=transform_test,target_transform=target_transform) test_set = datasets.ImageFolder(dir_train,transform=transform_test) train_loader = torch.utils.data.DataLoader(train_set, batch_size=4,num_workers=0,shuffle=True) valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=4,num_workers=0,shuffle=True) test_loader = torch.utils.data.DataLoader(test_set, batch_size=4,num_workers=0,shuffle=True) return train_loader, valid_loader, test_loader train_loader, valid_loader, test_loader = get_train_loaders() print("Number of training images: {}" .format(len(train_loader.dataset))) print("Number of validation images: {}" .format(len(valid_loader.dataset))) print("Number of testing images: {}" .format(len(test_loader.dataset))) ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output Number of training images: 6680 Number of validation images: 835 Number of testing images: 6680 
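###Markdown
Note in passing that `test_set` in the loaders above is built from `dir_train` rather than `dir_test`, which is why the "testing" image count matches the training count (6680). To sanity-check the training transforms themselves, a batch can be pulled and de-normalized for display — a quick sketch (the helper name `show_batch` and the un-normalization step are added here for illustration):

###Code
def show_batch(loader, n=4):
    images, labels = next(iter(loader))
    # undo the ImageNet normalization applied in transform_train above
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    fig = plt.figure(figsize=(12, 4))
    for i in range(min(n, images.size(0))):
        img = images[i].numpy().transpose(1, 2, 0) * std + mean
        ax = fig.add_subplot(1, n, i + 1)
        ax.imshow(np.clip(img, 0, 1))
        ax.axis('off')
    plt.show()

show_batch(train_loader)

###Output
_____no_output_____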
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**: My code starts by augmenting the training images by picking a random square of size (224,224) from the input image. This was chosen in the hope of teaching the model to account for dogs that aren't centered in the images. The size was chosen because it is the "standard" input size for VGG nets. In addition, I added both horizontal and vertical flips, in case some images show dogs upside-down and to get views from both sides of the dogs. This augmentation is not applied to the validation and test data, since I want nothing but resizing when measuring the performance of the model. The architecture uses increasing depths per block, initially going up to a depth of 128 (as in the CIFAR example). Most filters are of size 3x3 with a padding of 1, which keeps the spatial size unchanged when convolving (the first two layers use 5x5 filters with a padding of 2). This is inspired by the VGG net and its simplicity, in the hope that it works here too. The idea is to start with this and keep adding convolution layers or increasing filter sizes to gather more information if the results are insufficient.
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.CNN = nn.Sequential(OrderedDict([
            # input: 224x224x3
            ('conv1', nn.Conv2d(3, 64, 5, padding=2)),
            # 224x224x64
            ('relu1', nn.ReLU()),
            ('pool1', nn.MaxPool2d(2,2)),
            # 112x112x64
            ('conv4', nn.Conv2d(64, 64, 5, padding=2)),
            ('relu4', nn.ReLU()),
            ('pool2', nn.MaxPool2d(2,2)),
            # 56x56x64
            ('conv6', nn.Conv2d(64,128,3,padding=1)),
            ('relu6', nn.ReLU()),
            ('conv7', nn.Conv2d(128, 128, 3, padding=1)),
            ('relu7', nn.ReLU()),
            ('pool3', nn.MaxPool2d(2,2)),
            # 28x28x128
            ('conv9', nn.Conv2d(128,128,3, padding=1)),
            ('relu9', nn.ReLU()),
            ('pool4', nn.MaxPool2d(2,2)),
            # 14x14x128
            ('conv10', nn.Conv2d(128,256,3, padding=1)),
            ('relu10', nn.ReLU()),
            ('pool5', nn.MaxPool2d(2,2)),
            # 7x7x256
            ('conv11', nn.Conv2d(256,512,3, padding=1)),
            ('relu11', nn.ReLU()),
            ('pool6', nn.MaxPool2d(2,2)),
            # 3x3x512
        ]))
        self.classifier = nn.Sequential(OrderedDict([
            ('norm', nn.BatchNorm1d(3*3*512)),
            ('Linear1', nn.Linear(3*3*512, 133)),
            # note: dim=0 normalizes over the batch rather than over the classes (dim=1),
            # and CrossEntropyLoss (used below) applies log-softmax internally, which is
            # the likely reason the scratch-training losses stay near ~19
            ('output', nn.Softmax(dim=0))]))

    def forward(self, x):
        ## Define forward behavior
        x = self.CNN(x)
        x = x.view(-1, 3*3*512)
        x = self.classifier(x)
        return x

def weights_init_normal(m):
    '''Takes in a module and initializes all linear layers with weight
       values taken from a normal distribution.'''
    classname = m.__class__.__name__
    if classname.find('Linear') !=-1:
        y = 1/np.sqrt(m.in_features)
        m.weight.data.normal_(0, y)
        m.bias.data.fill_(0)

#-#-# You do NOT have to modify the code below this line.
#-#-#

# instantiate the CNN
model_scratch = Net()
model_scratch.apply(weights_init_normal)
print(model_scratch)

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output
Net(
  (CNN): Sequential(
    (conv1): Conv2d(3, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (relu1): ReLU()
    (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (conv4): Conv2d(64, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (relu4): ReLU()
    (pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (conv6): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu6): ReLU()
    (conv7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu7): ReLU()
    (pool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (conv9): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu9): ReLU()
    (pool4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (conv10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu10): ReLU()
    (pool5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (conv11): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu11): ReLU()
    (pool6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (norm): BatchNorm1d(4608, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (Linear1): Linear(in_features=4608, out_features=133, bias=True)
    (output): Softmax()
  )
)

###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__ Starting from the approach described in the previous answer, I developed the network structure experimentally. When an architecture gave insufficient results, I would first increase the depths, and if that also proved insufficient, add extra convolutional layers between pooling layers to extract more features. The idea was that more layers and larger filter sizes could capture more information about dog breeds that look alike. I also wanted a low number of inputs to the fully connected classifier so that the classification step would be easier: fewer inputs mean fewer weights to fit, so the feature map is reduced to a 3x3 width and height. This started at 28x28 in the first iteration and, through experimentation, ended up at 3x3. When training stalled, weight initialization came to mind, and initializing the linear layers from a normal distribution (as covered in an earlier chapter) gave a small increase in performance. Early on I struggled with almost no drop in the training/validation loss, which turned out to be caused by gradients close to 0 (magnitudes of 10⁻6). Applying batch normalization to the inputs coming from the CNN drastically improved training. Softmax was used to get a probabilistic output over the multiple classes. A dropout layer could be added, but given the test-set results and the time taken to train the model, I decided to move on without trying one.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html).
Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code #Found that some of the files were truncated... from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx,(data,target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() optimizer.zero_grad() output = model(data) #print(target) #print(output) #print(list(model.parameters())[0].grad) loss = criterion(output, target) loss.backward() # for param in model.parameters(): # print(param.grad.data.sum()) # # start debugger # import pdb; pdb.set_trace() optimizer.step() train_loss += loss.item()*data.size(0) ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx,(data,target) in enumerate(loaders['valid']): #target = torch.Tensor([target]).long() # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() output = model(data) #print("now in validation") loss = criterion(output, target) valid_loss += loss.item()*data.size(0) ## update the average validation loss train_loss = train_loss/len(loaders['train']) valid_loss = valid_loss/len(loaders['valid']) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model loaders_scratch = {} loaders_scratch['train'] = train_loader loaders_scratch['valid'] = valid_loader loaders_scratch['test'] = test_loader #loaders_scratch = [train_loader, valid_loader] # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') ###Output Epoch: 1 Training Loss: 19.285450 Validation Loss: 19.157928 Validation loss decreased (inf --> 19.157928). Saving model ... Epoch: 2 Training Loss: 19.141177 Validation Loss: 19.029250 Validation loss decreased (19.157928 --> 19.029250). Saving model ... Epoch: 3 Training Loss: 19.063891 Validation Loss: 18.960940 Validation loss decreased (19.029250 --> 18.960940). Saving model ... Epoch: 4 Training Loss: 18.970514 Validation Loss: 18.890098 Validation loss decreased (18.960940 --> 18.890098). 
Saving model ... Epoch: 5 Training Loss: 18.947580 Validation Loss: 18.809979 Validation loss decreased (18.890098 --> 18.809979). Saving model ... Epoch: 6 Training Loss: 18.913665 Validation Loss: 18.814597 Epoch: 7 Training Loss: 18.856728 Validation Loss: 18.700400 Validation loss decreased (18.809979 --> 18.700400). Saving model ... Epoch: 8 Training Loss: 18.837283 Validation Loss: 18.650629 Validation loss decreased (18.700400 --> 18.650629). Saving model ... Epoch: 9 Training Loss: 18.777622 Validation Loss: 18.653432 Epoch: 10 Training Loss: 18.762819 Validation Loss: 18.588038 Validation loss decreased (18.650629 --> 18.588038). Saving model ... Epoch: 11 Training Loss: 18.723399 Validation Loss: 18.656323 Epoch: 12 Training Loss: 18.707337 Validation Loss: 18.608146 Epoch: 13 Training Loss: 18.667667 Validation Loss: 18.541907 Validation loss decreased (18.588038 --> 18.541907). Saving model ... Epoch: 14 Training Loss: 18.660917 Validation Loss: 18.597865 Epoch: 15 Training Loss: 18.644737 Validation Loss: 18.547174 Epoch: 16 Training Loss: 18.606807 Validation Loss: 18.519842 Validation loss decreased (18.541907 --> 18.519842). Saving model ... Epoch: 17 Training Loss: 18.615789 Validation Loss: 18.508964 Validation loss decreased (18.519842 --> 18.508964). Saving model ... Epoch: 18 Training Loss: 18.589139 Validation Loss: 18.429721 Validation loss decreased (18.508964 --> 18.429721). Saving model ... Epoch: 19 Training Loss: 18.549689 Validation Loss: 18.452819 Epoch: 20 Training Loss: 18.556715 Validation Loss: 18.421110 Validation loss decreased (18.429721 --> 18.421110). Saving model ... Epoch: 21 Training Loss: 18.528802 Validation Loss: 18.412675 Validation loss decreased (18.421110 --> 18.412675). Saving model ... Epoch: 22 Training Loss: 18.527383 Validation Loss: 18.387088 Validation loss decreased (18.412675 --> 18.387088). Saving model ... Epoch: 23 Training Loss: 18.511452 Validation Loss: 18.343880 Validation loss decreased (18.387088 --> 18.343880). Saving model ... Epoch: 24 Training Loss: 18.487324 Validation Loss: 18.358264 Epoch: 25 Training Loss: 18.446625 Validation Loss: 18.373750 Epoch: 26 Training Loss: 18.467899 Validation Loss: 18.357128 Epoch: 27 Training Loss: 18.443211 Validation Loss: 18.352813 Epoch: 28 Training Loss: 18.449105 Validation Loss: 18.315618 Validation loss decreased (18.343880 --> 18.315618). Saving model ... Epoch: 29 Training Loss: 18.410356 Validation Loss: 18.352482 Epoch: 30 Training Loss: 18.424900 Validation Loss: 18.270390 Validation loss decreased (18.315618 --> 18.270390). Saving model ... Epoch: 31 Training Loss: 18.378414 Validation Loss: 18.229992 Validation loss decreased (18.270390 --> 18.229992). Saving model ... Epoch: 32 Training Loss: 18.381721 Validation Loss: 18.247700 Epoch: 33 Training Loss: 18.371862 Validation Loss: 18.291891 Epoch: 34 Training Loss: 18.353000 Validation Loss: 18.249533 Epoch: 35 Training Loss: 18.357071 Validation Loss: 18.228543 Validation loss decreased (18.229992 --> 18.228543). Saving model ... Epoch: 36 Training Loss: 18.346997 Validation Loss: 18.261340 Epoch: 37 Training Loss: 18.303397 Validation Loss: 18.242759 Epoch: 38 Training Loss: 18.320493 Validation Loss: 18.224498 Validation loss decreased (18.228543 --> 18.224498). Saving model ... Epoch: 39 Training Loss: 18.306130 Validation Loss: 18.177719 Validation loss decreased (18.224498 --> 18.177719). Saving model ... 
Epoch: 40 Training Loss: 18.299671 Validation Loss: 18.173782 Validation loss decreased (18.177719 --> 18.173782). Saving model ... Epoch: 41 Training Loss: 18.293875 Validation Loss: 18.203985 Epoch: 42 Training Loss: 18.288919 Validation Loss: 18.197790 Epoch: 43 Training Loss: 18.276951 Validation Loss: 18.141213 Validation loss decreased (18.173782 --> 18.141213). Saving model ... Epoch: 44 Training Loss: 18.268841 Validation Loss: 18.199162 Epoch: 45 Training Loss: 18.243090 Validation Loss: 18.132989 Validation loss decreased (18.141213 --> 18.132989). Saving model ... Epoch: 46 Training Loss: 18.214124 Validation Loss: 18.115926 Validation loss decreased (18.132989 --> 18.115926). Saving model ... Epoch: 47 Training Loss: 18.244277 Validation Loss: 18.134920 Epoch: 48 Training Loss: 18.200627 Validation Loss: 18.138793 Epoch: 49 Training Loss: 18.201184 Validation Loss: 18.121214 Epoch: 50 Training Loss: 18.221530 Validation Loss: 18.103005 Validation loss decreased (18.115926 --> 18.103005). Saving model ... Epoch: 51 Training Loss: 18.199537 Validation Loss: 18.070423 Validation loss decreased (18.103005 --> 18.070423). Saving model ... Epoch: 52 Training Loss: 18.205439 Validation Loss: 18.050999 Validation loss decreased (18.070423 --> 18.050999). Saving model ... Epoch: 53 Training Loss: 18.190048 Validation Loss: 18.102977 Epoch: 54 Training Loss: 18.173607 Validation Loss: 18.065859 Epoch: 55 Training Loss: 18.168194 Validation Loss: 18.119129 Epoch: 56 Training Loss: 18.183767 Validation Loss: 18.113972 Epoch: 57 Training Loss: 18.140679 Validation Loss: 18.021614 Validation loss decreased (18.050999 --> 18.021614). Saving model ... Epoch: 58 Training Loss: 18.146377 Validation Loss: 18.183114 Epoch: 59 Training Loss: 18.127241 Validation Loss: 18.089194 Epoch: 60 Training Loss: 18.125827 Validation Loss: 18.024051 Epoch: 61 Training Loss: 18.123758 Validation Loss: 18.034941 Epoch: 62 Training Loss: 18.088862 Validation Loss: 18.031458 Epoch: 63 Training Loss: 18.123147 Validation Loss: 18.083892 Epoch: 64 Training Loss: 18.100566 Validation Loss: 18.080648 Epoch: 65 Training Loss: 18.100039 Validation Loss: 18.114502 Epoch: 66 Training Loss: 18.084043 Validation Loss: 18.043342 Epoch: 67 Training Loss: 18.092819 Validation Loss: 18.015209 Validation loss decreased (18.021614 --> 18.015209). Saving model ... Epoch: 68 Training Loss: 18.095321 Validation Loss: 18.005159 Validation loss decreased (18.015209 --> 18.005159). Saving model ... Epoch: 69 Training Loss: 18.073405 Validation Loss: 17.979117 Validation loss decreased (18.005159 --> 17.979117). Saving model ... Epoch: 70 Training Loss: 18.048949 Validation Loss: 17.954130 Validation loss decreased (17.979117 --> 17.954130). Saving model ... Epoch: 71 Training Loss: 18.059715 Validation Loss: 17.962123 Epoch: 72 Training Loss: 18.048001 Validation Loss: 17.998927 Epoch: 73 Training Loss: 18.046277 Validation Loss: 17.952907 Validation loss decreased (17.954130 --> 17.952907). Saving model ... Epoch: 74 Training Loss: 18.031503 Validation Loss: 17.943236 Validation loss decreased (17.952907 --> 17.943236). Saving model ... Epoch: 75 Training Loss: 18.047481 Validation Loss: 17.991571 Epoch: 76 Training Loss: 18.011437 Validation Loss: 17.875773 Validation loss decreased (17.943236 --> 17.875773). Saving model ... Epoch: 77 Training Loss: 18.004537 Validation Loss: 17.894606 Epoch: 78 Training Loss: 18.003862 Validation Loss: 17.872723 Validation loss decreased (17.875773 --> 17.872723). Saving model ... 
Epoch: 79 	Training Loss: 17.987740 	Validation Loss: 17.935979
Epoch: 80 	Training Loss: 17.977783 	Validation Loss: 17.911206
Epoch: 81 	Training Loss: 18.005590 	Validation Loss: 17.943553
Epoch: 82 	Training Loss: 17.993019 	Validation Loss: 17.962283

###Markdown
I decided to stop the training after 82 epochs.
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code
def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(loaders['test']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the loss
            loss = criterion(output, target)
            # update average test loss
            test_loss += loss.item()*data.size(0)
            # convert output probabilities to predicted class
            pred = output.data.max(1, keepdim=True)[1]
            # compare predictions to true label
            correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
            total += data.size(0)

    # note: this is a per-image average (divided by the dataset size), whereas train()
    # divides the same kind of sum by the number of batches, so the two scales differ
    test_loss = test_loss/len(loaders['test'].dataset)
    print('Test Loss: {:.6f}\n'.format(test_loss))

    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

# call test function
# NB: loaders_scratch['test'] was built from the training folder (dir_train) in
# get_train_loaders, so this accuracy is measured on un-augmented training images
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output
Test Loss: 4.414742


Test Accuracy: 17% (1199/6680)

###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders
# using the same function as defined earlier
dog_train_loader, dog_valid_loader, dog_test_loader = get_train_loaders()

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Model Architecture
Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture class DogNet(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(DogNet, self).__init__() ## Define layers of a CNN self.CNN = models.vgg16(pretrained=True).features self.vgg_classifier = models.vgg16(pretrained=True).classifier def forward(self, x): ## Define forward behavior x = self.CNN(x) x = x.view(-1, 25088) x = self.vgg_classifier(x) return x def weights_init_normal(m): '''Takes in a module and initializes all linear layers with weight values taken from a normal distribution.''' classname = m.__class__.__name__ if classname.find('Linear') !=-1: y = 1/np.sqrt(m.in_features) m.weight.data.normal_(0, y) m.bias.data.fill_(0) #-#-# You so NOT have to modify the code below this line. #-#-# # instantiate the CNN model_transfer = DogNet() for param in model_transfer.parameters(): param.requires_grad = False model_transfer.vgg_classifier[6]=nn.Linear(4096,133) # check if CUDA is available use_cuda = torch.cuda.is_available() if use_cuda: model_transfer = model_transfer.cuda() for name, param in model_transfer.named_parameters(): if param.requires_grad: print(name, param.data) print(model_transfer) ###Output DogNet( (CNN): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (vgg_classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace) (5): Dropout(p=0.5) (6): Linear(in_features=4096, out_features=133, bias=True) ) ) ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I decided to use the model provided by VGG as a starting point and map the output layer to 133 output nodes. 
If this proved to be insufficient, I would try removing a linear layer from the classifier and altering the number of nodes in that layer. An important part was freezing the weights of the pre-trained model, so that its layers are used as feature extractors instead of being trained. Changing only the last layer to fit this dataset proved to be more than enough to reach the required accuracy for this task. This seems realistic to me: because the dataset is so small, the classifier layers don't have to change much. Had the dataset been larger, larger changes to the classification layers might have been needed.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.vgg_classifier.parameters(), lr=0.001)

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model
n_epochs = 15

loaders_transfer = dict()
loaders_transfer['train'] = dog_train_loader
loaders_transfer['valid'] = dog_valid_loader
loaders_transfer['test'] = dog_test_loader

model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output
Epoch: 1 	Training Loss: 11.739035 	Validation Loss: 4.990242
Validation loss decreased (inf --> 4.990242). Saving model ...
Epoch: 2 	Training Loss: 6.732201 	Validation Loss: 3.230567
Validation loss decreased (4.990242 --> 3.230567). Saving model ...
Epoch: 3 	Training Loss: 5.633938 	Validation Loss: 2.574361
Validation loss decreased (3.230567 --> 2.574361). Saving model ...
Epoch: 4 	Training Loss: 4.992476 	Validation Loss: 2.262355
Validation loss decreased (2.574361 --> 2.262355). Saving model ...
Epoch: 5 	Training Loss: 4.697567 	Validation Loss: 2.070114
Validation loss decreased (2.262355 --> 2.070114). Saving model ...
Epoch: 6 	Training Loss: 4.583724 	Validation Loss: 1.973183
Validation loss decreased (2.070114 --> 1.973183). Saving model ...
Epoch: 7 	Training Loss: 4.322685 	Validation Loss: 1.858107
Validation loss decreased (1.973183 --> 1.858107). Saving model ...
Epoch: 8 	Training Loss: 4.259547 	Validation Loss: 1.774686
Validation loss decreased (1.858107 --> 1.774686). Saving model ...
Epoch: 9 	Training Loss: 3.960752 	Validation Loss: 1.713563
Validation loss decreased (1.774686 --> 1.713563). Saving model ...
Epoch: 10 	Training Loss: 4.059480 	Validation Loss: 1.666813
Validation loss decreased (1.713563 --> 1.666813). Saving model ...
Epoch: 11 	Training Loss: 3.920043 	Validation Loss: 1.635183
Validation loss decreased (1.666813 --> 1.635183). Saving model ...
Epoch: 12 	Training Loss: 3.839542 	Validation Loss: 1.619647
Validation loss decreased (1.635183 --> 1.619647). Saving model ...
Epoch: 13 	Training Loss: 3.751638 	Validation Loss: 1.586891
Validation loss decreased (1.619647 --> 1.586891). Saving model ...
Epoch: 14 	Training Loss: 3.704416 	Validation Loss: 1.553504
Validation loss decreased (1.586891 --> 1.553504). Saving model ...
Epoch: 15 	Training Loss: 3.686408 	Validation Loss: 1.562830

###Markdown
I decided to run the training for only 15 epochs because the results were already strong. Training longer would likely push the accuracy even higher, but I chose to save some time.
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output
Test Loss: 0.255862


Test Accuracy: 93% (6245/6680)

###Markdown
Note that the test loss is on a different scale than the training and validation losses above: `test()` averages the summed loss over the number of images, while `train()` divides it by the number of batches, so the training/validation figures are larger by roughly a factor of the batch size (4). Keep in mind as well that the test loader was built from the training folder, so this accuracy is measured on un-augmented training images rather than the held-out test split.
(IMPLEMENTATION) Predict Dog Breed with the Model
Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
data_transfer = get_datasets()

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    img = Image.open(img_path)
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    img = transform(img)
    img = img.unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    output = model_transfer(img)
    _, predictions = torch.max(output,dim=1)
    breed = class_names[predictions]
    return breed

predict_breed_transfer(dog_files[7])

###Output
_____no_output_____

###Markdown
--- Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if face_detector(img_path) and dog_detector(img_path):
        print("Detected both human and dog in this picture, the dog breed is: {}" .format(predict_breed_transfer(img_path)))
    elif face_detector(img_path):
        print("It's a human! He/she looks like a {}!" .format(predict_breed_transfer(img_path)))
    elif dog_detector(img_path):
        print("It's a dog! The breed is: {}" .format(predict_breed_transfer(img_path)))
    else:
        print("Neither a dog nor a human was detected, something is fishy!")

run_app(dog_files[6])

###Output
It's a dog! The breed is: Australian cattle dog
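###Markdown
The run above only reports the single most likely breed. To make near-miss predictions easier to inspect, the class scores can be turned into probabilities and the top three breeds printed — a small optional sketch (the helper name `top3_breeds` is introduced here for illustration and is not part of the project template):

###Code
import torch.nn.functional as F

def top3_breeds(img_path):
    # reuse the same preprocessing as predict_breed_transfer
    img = Image.open(img_path)
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    x = transform(img).unsqueeze(0)
    if use_cuda:
        x = x.cuda()
    with torch.no_grad():
        probs = F.softmax(model_transfer(x), dim=1).squeeze(0)
    top_p, top_i = probs.topk(3)
    for p, i in zip(top_p, top_i):
        print("{}: {:.1%}".format(class_names[i.item()], p.item()))

# e.g. top3_breeds(dog_files[6])

###Output
_____no_output_____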
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images from your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ It does a pretty good job, though it makes some mistakes. The husky was predicted as an Alaskan Malamute, a very similar breed, so the confusion is understandable. The dachshund, however, was mistaken for a German Pinscher, which is quite different. It also recognized that Shrek is neither a dog nor a human, which is nice! Jennifer Aniston, according to the program, is neither a human nor a dog, which is news to me. The algorithm also does a decent job of detecting the dog breed in the image containing both a dog and a human, although I cannot validate that prediction since I am not sure of the breed myself.
A few points for improvement: better classification of the proper breed, even when breeds are similar; better detection of humans; and the ability to handle a dog and a human in the same picture by classifying both the dog's breed and the breed the human most resembles.

###Code
import fnmatch

path = '/home/adrian/Udacity-Deep-Learning/project-dog-classification/test_images/'
imagefiles = [os.path.join(subdir,f)
              for subdir, dirs, files in os.walk(path)
              for f in fnmatch.filter(files, '*.jpg')]
print(imagefiles)

content = []
for item in imagefiles:
    content.append(item[:item.find('.jpg')].replace(path,""))
print(content)

fig=plt.figure(figsize=(8, 8))
columns = 4
rows = 3
for i in range(1, columns*rows +1):
    img = Image.open(imagefiles[i-1])
    fig.add_subplot(rows, columns, i)
    plt.imshow(img)
plt.show()

## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for i, file in enumerate(imagefiles):
    print("The content is: {}" .format(content[i]))
    run_app(file)
    print()

###Output
The content is: corgi
It's a dog! The breed is: Pembroke welsh corgi

The content is: dachs
It's a dog! The breed is: German pinscher

The content is: shrek
Neither a dog nor a human was detected, something is fishy!

The content is: labrador
It's a dog! The breed is: Labrador retriever

The content is: robertJr
It's a human! He/she looks like a Nova scotia duck tolling retriever!

The content is: english_bulldog
It's a dog! The breed is: Bulldog

The content is: golden_retriver
It's a dog! The breed is: Golden retriever

The content is: dog_and_human
Detected both human and dog in this picture, the dog breed is: Borzoi

The content is: jennifer
Neither a dog nor a human was detected, something is fishy!

The content is: husky
It's a dog! The breed is: Alaskan malamute

###Markdown
Convolutional Neural Networks
Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested.
Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). 
Unzip the folder and place it in the home diretcory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. 
###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ Detection rate of human faces in human_files: __96%__ Detection rate of human faces in dog_files: __18%__ ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. num_h_in_h = 0 num_h_in_d = 0 for img in human_files_short: num_h_in_h += face_detector(img) for img in dog_files_short: num_h_in_d += face_detector(img) print("num_h_in_h: ", num_h_in_h) print("num_h_in_d: ", num_h_in_d) ###Output num_h_in_h: 96 num_h_in_d: 18 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() print("cuda available") print(VGG16) ###Output cuda available VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace) (5): Dropout(p=0.5) (6): Linear(in_features=4096, out_features=1000, bias=True) ) ) ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
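    ## The pre-trained torchvision models expect 224x224 RGB inputs that are
    ## normalized with mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225]
    ## (the ImageNet statistics) and carry a leading batch dimension, i.e. a
    ## tensor of shape (1, 3, 224, 224); the steps below follow that recipe.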
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image img = Image.open(img_path) normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose([ transforms.Resize((224,224)), transforms.ToTensor(), normalize]) #transform image to tensor and add batch additional dimension img_tensor = transform(img).unsqueeze_(0) if use_cuda: img_tensor = img_tensor.cuda() VGG16.eval() # disable dropout so the prediction is deterministic output = VGG16(img_tensor) _, pred = torch.max(output, 1) return pred.item() # predicted class index VGG16_predict('dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg') ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. pred_class_idx = VGG16_predict(img_path) if pred_class_idx >= 151 and pred_class_idx <= 268: return True else: return False ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ Detection rate of dogs in human_files: __0%__ Detection rate of dogs in dog_files: __93%__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. num_d_in_d = 0 num_d_in_h = 0 for img in human_files_short: num_d_in_h += dog_detector(img) for img in dog_files_short: num_d_in_d += dog_detector(img) print("num_d_in_d: ", num_d_in_d) print("num_d_in_h: ", num_d_in_h) ###Output num_d_in_d: 93 num_d_in_h: 0 ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code import os from torchvision import datasets from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes # number of subprocesses to use for data loading data_dir = 'dogImages/' # each split loads from its matching folder train_dir = os.path.join(data_dir, 'train/') valid_dir = os.path.join(data_dir, 'valid/') test_dir = os.path.join(data_dir, 'test/') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform_train = transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.ColorJitter(.3,.3,.3), transforms.ToTensor(), normalize ]) transform_test = transforms.Compose([ transforms.Resize((224,224)), transforms.ToTensor(), normalize ]) train_data = datasets.ImageFolder(train_dir, transform=transform_train) valid_data = datasets.ImageFolder(valid_dir, transform=transform_test) test_data = datasets.ImageFolder(test_dir, transform=transform_test) # print out some data stats print('Num training images: ', len(train_data)) print('Num valid images: ', len(valid_data)) print('Num test images: ', len(test_data)) num_workers = 0 batch_size = 20 # prepare data loaders; the validation loader draws from valid_data train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,shuffle=True, num_workers=num_workers) valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size,shuffle=True, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,shuffle=True, num_workers=num_workers) loaders_scratch = {"train" : train_loader, "valid" : valid_loader, "test" : test_loader} ###Output Num training images: 6680 Num valid images: 836 Num test images: 835 ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: * I **cropped** the images in the train dataset randomly to a size of 224x224 pixels. I did this because this is the required input size for the VGG16 and ResNet-18 architectures.* Additionally I decided to augment the training dataset with a **horizontal flip, a rotation and a color jitter** to have a higher variation in the images.* For the validation and test datasets I have **only resized** the images to 224x224 pixels. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d( 3, 16, 3, padding=1) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.conv4 = nn.Conv2d(64, 128, 3, padding=1) self.conv5 = nn.Conv2d(128, 256, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(256*7*7, 1254) self.fc2 = nn.Linear(1254, 133) self.dropout = nn.Dropout(0.3) def forward(self, x): ## Define forward behavior x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = self.pool(F.relu(self.conv3(x))) x = self.pool(F.relu(self.conv4(x))) x = self.pool(F.relu(self.conv5(x))) x = x.view(-1, 256*7*7) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) # return raw logits; nn.CrossEntropyLoss applies log-softmax itself return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ __STEP 1 - Convolutional layers:__ Distinguishing between different dog breeds is a tough task, sometimes even for humans. From this point of view it makes sense to have many convolutional layers in the network, so it can pick up the fine patterns inside the images that help to tell different dog breeds apart. But to be able to finish the training in a finite amount of time, I've limited the network to 5 convolutional layers. Each layer increases the depth of the feature maps. That's why I decided to halve the spatial size after each layer by using max pooling layers. Input shape: 3x224x224 Shape after pool(F.relu(self.conv1(x))): 16x112x112 Shape after pool(F.relu(self.conv2(x))): 32x56x56 Shape after pool(F.relu(self.conv3(x))): 64x28x28 Shape after pool(F.relu(self.conv4(x))): 128x14x14 Shape after pool(F.relu(self.conv5(x))): 256x7x7 __STEP 2: Fully connected layers:__ Finally I used 2 fully connected layers to classify the images. Therefore I flattened the output of the last convolutional layer (256x7x7 = 12544 values) into a vector and fed it to fc1, which maps it down to 1254 nodes. There is no deep reason for choosing 1254; it is simply about 10% of the flattened convolutional output. At the end I added 133 output nodes because there are 133 different labels/classes. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code import time def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): timer_start = time.time() # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() # prep model for training for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update running training loss train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() # prep model for evaluation for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update running validation loss valid_loss += (1 / (batch_idx + 1)) * (loss.data - valid_loss) # timer runtime = time.time() - timer_start # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f} \tRuntime{:7.2f}s'.format( epoch, train_loss, valid_loss, runtime )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.889908 Validation Loss: 4.888586 Runtime 107.15s Validation loss decreased (inf --> 4.888586). Saving model ... Epoch: 2 Training Loss: 4.888313 Validation Loss: 4.886773 Runtime 96.36s Validation loss decreased (4.888586 --> 4.886773). Saving model ... Epoch: 3 Training Loss: 4.885562 Validation Loss: 4.881721 Runtime 96.97s Validation loss decreased (4.886773 --> 4.881721). Saving model ... Epoch: 4 Training Loss: 4.881635 Validation Loss: 4.875771 Runtime 95.74s Validation loss decreased (4.881721 --> 4.875771). Saving model ... Epoch: 5 Training Loss: 4.876317 Validation Loss: 4.868211 Runtime 96.43s Validation loss decreased (4.875771 --> 4.868211). Saving model ... Epoch: 6 Training Loss: 4.869969 Validation Loss: 4.858658 Runtime 96.46s Validation loss decreased (4.868211 --> 4.858658). Saving model ... Epoch: 7 Training Loss: 4.863662 Validation Loss: 4.844830 Runtime 95.90s Validation loss decreased (4.858658 --> 4.844830). Saving model ... 
Epoch: 8 Training Loss: 4.848485 Validation Loss: 4.825490 Runtime 96.94s Validation loss decreased (4.844830 --> 4.825490). Saving model ... Epoch: 9 Training Loss: 4.828049 Validation Loss: 4.807055 Runtime 96.28s Validation loss decreased (4.825490 --> 4.807055). Saving model ... Epoch: 10 Training Loss: 4.808070 Validation Loss: 4.784428 Runtime 96.81s Validation loss decreased (4.807055 --> 4.784428). Saving model ... Epoch: 11 Training Loss: 4.785473 Validation Loss: 4.758725 Runtime 96.84s Validation loss decreased (4.784428 --> 4.758725). Saving model ... Epoch: 12 Training Loss: 4.776535 Validation Loss: 4.742924 Runtime 96.49s Validation loss decreased (4.758725 --> 4.742924). Saving model ... Epoch: 13 Training Loss: 4.761412 Validation Loss: 4.729467 Runtime 96.85s Validation loss decreased (4.742924 --> 4.729467). Saving model ... Epoch: 14 Training Loss: 4.746068 Validation Loss: 4.712743 Runtime 96.67s Validation loss decreased (4.729467 --> 4.712743). Saving model ... Epoch: 15 Training Loss: 4.725291 Validation Loss: 4.690923 Runtime 96.65s Validation loss decreased (4.712743 --> 4.690923). Saving model ... Epoch: 16 Training Loss: 4.717753 Validation Loss: 4.671410 Runtime 97.47s Validation loss decreased (4.690923 --> 4.671410). Saving model ... Epoch: 17 Training Loss: 4.692985 Validation Loss: 4.659957 Runtime 96.99s Validation loss decreased (4.671410 --> 4.659957). Saving model ... Epoch: 18 Training Loss: 4.679786 Validation Loss: 4.644569 Runtime 96.91s Validation loss decreased (4.659957 --> 4.644569). Saving model ... Epoch: 19 Training Loss: 4.669496 Validation Loss: 4.606017 Runtime 98.17s Validation loss decreased (4.644569 --> 4.606017). Saving model ... Epoch: 20 Training Loss: 4.626706 Validation Loss: 4.583323 Runtime 96.78s Validation loss decreased (4.606017 --> 4.583323). Saving model ... Epoch: 21 Training Loss: 4.594278 Validation Loss: 4.520393 Runtime 96.89s Validation loss decreased (4.583323 --> 4.520393). Saving model ... Epoch: 22 Training Loss: 4.550929 Validation Loss: 4.503693 Runtime 97.00s Validation loss decreased (4.520393 --> 4.503693). Saving model ... Epoch: 23 Training Loss: 4.527341 Validation Loss: 4.455176 Runtime 96.68s Validation loss decreased (4.503693 --> 4.455176). Saving model ... Epoch: 24 Training Loss: 4.490376 Validation Loss: 4.453137 Runtime 96.46s Validation loss decreased (4.455176 --> 4.453137). Saving model ... Epoch: 25 Training Loss: 4.465386 Validation Loss: 4.387620 Runtime 95.97s Validation loss decreased (4.453137 --> 4.387620). Saving model ... Epoch: 26 Training Loss: 4.446713 Validation Loss: 4.405120 Runtime 96.46s Epoch: 27 Training Loss: 4.398166 Validation Loss: 4.334788 Runtime 96.59s Validation loss decreased (4.387620 --> 4.334788). Saving model ... Epoch: 28 Training Loss: 4.403996 Validation Loss: 4.318040 Runtime 96.48s Validation loss decreased (4.334788 --> 4.318040). Saving model ... Epoch: 29 Training Loss: 4.356249 Validation Loss: 4.278036 Runtime 96.45s Validation loss decreased (4.318040 --> 4.278036). Saving model ... Epoch: 30 Training Loss: 4.342297 Validation Loss: 4.266442 Runtime 97.59s Validation loss decreased (4.278036 --> 4.266442). Saving model ... Epoch: 31 Training Loss: 4.313317 Validation Loss: 4.315054 Runtime 96.79s Epoch: 32 Training Loss: 4.276438 Validation Loss: 4.177726 Runtime 96.65s Validation loss decreased (4.266442 --> 4.177726). Saving model ... 
Epoch: 33 Training Loss: 4.263157 Validation Loss: 4.167035 Runtime 97.31s Validation loss decreased (4.177726 --> 4.167035). Saving model ... Epoch: 34 Training Loss: 4.235760 Validation Loss: 4.194150 Runtime 96.58s Epoch: 35 Training Loss: 4.192644 Validation Loss: 4.094257 Runtime 96.81s Validation loss decreased (4.167035 --> 4.094257). Saving model ... Epoch: 36 Training Loss: 4.198614 Validation Loss: 4.094866 Runtime 96.85s Epoch: 37 Training Loss: 4.143361 Validation Loss: 4.086080 Runtime 96.87s Validation loss decreased (4.094257 --> 4.086080). Saving model ... Epoch: 38 Training Loss: 4.123297 Validation Loss: 4.089072 Runtime 96.73s Epoch: 39 Training Loss: 4.105720 Validation Loss: 3.994169 Runtime 97.04s Validation loss decreased (4.086080 --> 3.994169). Saving model ... Epoch: 40 Training Loss: 4.047090 Validation Loss: 3.965398 Runtime 97.35s Validation loss decreased (3.994169 --> 3.965398). Saving model ... Epoch: 41 Training Loss: 4.022511 Validation Loss: 3.895825 Runtime 96.44s Validation loss decreased (3.965398 --> 3.895825). Saving model ... Epoch: 42 Training Loss: 4.011472 Validation Loss: 3.870903 Runtime 96.55s Validation loss decreased (3.895825 --> 3.870903). Saving model ... Epoch: 43 Training Loss: 3.986262 Validation Loss: 3.846519 Runtime 95.99s Validation loss decreased (3.870903 --> 3.846519). Saving model ... Epoch: 44 Training Loss: 3.941542 Validation Loss: 3.843732 Runtime 96.46s Validation loss decreased (3.846519 --> 3.843732). Saving model ... Epoch: 45 Training Loss: 3.926810 Validation Loss: 3.795060 Runtime 97.14s Validation loss decreased (3.843732 --> 3.795060). Saving model ... Epoch: 46 Training Loss: 3.914898 Validation Loss: 3.725168 Runtime 97.45s Validation loss decreased (3.795060 --> 3.725168). Saving model ... Epoch: 47 Training Loss: 3.870280 Validation Loss: 3.790414 Runtime 97.17s Epoch: 48 Training Loss: 3.835246 Validation Loss: 3.729936 Runtime 96.68s Epoch: 49 Training Loss: 3.832854 Validation Loss: 3.689187 Runtime 97.06s Validation loss decreased (3.725168 --> 3.689187). Saving model ... Epoch: 50 Training Loss: 3.786196 Validation Loss: 3.623253 Runtime 96.73s Validation loss decreased (3.689187 --> 3.623253). Saving model ... Epoch: 51 Training Loss: 3.761487 Validation Loss: 3.598570 Runtime 96.86s Validation loss decreased (3.623253 --> 3.598570). Saving model ... Epoch: 52 Training Loss: 3.736285 Validation Loss: 3.553773 Runtime 96.76s Validation loss decreased (3.598570 --> 3.553773). Saving model ... Epoch: 53 Training Loss: 3.734404 Validation Loss: 3.563005 Runtime 96.98s Epoch: 54 Training Loss: 3.705612 Validation Loss: 3.510515 Runtime 96.98s Validation loss decreased (3.553773 --> 3.510515). Saving model ... Epoch: 55 Training Loss: 3.679149 Validation Loss: 3.458274 Runtime 96.61s Validation loss decreased (3.510515 --> 3.458274). Saving model ... Epoch: 56 Training Loss: 3.641408 Validation Loss: 3.536416 Runtime 96.79s Epoch: 57 Training Loss: 3.635644 Validation Loss: 3.382309 Runtime 96.78s Validation loss decreased (3.458274 --> 3.382309). Saving model ... Epoch: 58 Training Loss: 3.598734 Validation Loss: 3.419606 Runtime 96.68s Epoch: 59 Training Loss: 3.529464 Validation Loss: 3.350229 Runtime 97.23s Validation loss decreased (3.382309 --> 3.350229). Saving model ... Epoch: 60 Training Loss: 3.544816 Validation Loss: 3.340127 Runtime 96.88s Validation loss decreased (3.350229 --> 3.340127). Saving model ... 
Epoch: 61 Training Loss: 3.522575 Validation Loss: 3.388253 Runtime 96.96s Epoch: 62 Training Loss: 3.485910 Validation Loss: 3.228291 Runtime 97.03s Validation loss decreased (3.340127 --> 3.228291). Saving model ... Epoch: 63 Training Loss: 3.481927 Validation Loss: 3.256554 Runtime 96.33s Epoch: 64 Training Loss: 3.418118 Validation Loss: 3.171918 Runtime 97.20s Validation loss decreased (3.228291 --> 3.171918). Saving model ... Epoch: 65 Training Loss: 3.396686 Validation Loss: 3.164618 Runtime 96.70s Validation loss decreased (3.171918 --> 3.164618). Saving model ... Epoch: 66 Training Loss: 3.393944 Validation Loss: 3.153268 Runtime 96.99s Validation loss decreased (3.164618 --> 3.153268). Saving model ... Epoch: 67 Training Loss: 3.382804 Validation Loss: 3.144147 Runtime 97.28s Validation loss decreased (3.153268 --> 3.144147). Saving model ... Epoch: 68 Training Loss: 3.323323 Validation Loss: 3.104668 Runtime 96.79s Validation loss decreased (3.144147 --> 3.104668). Saving model ... Epoch: 69 Training Loss: 3.351583 Validation Loss: 3.052796 Runtime 96.96s Validation loss decreased (3.104668 --> 3.052796). Saving model ... Epoch: 70 Training Loss: 3.288629 Validation Loss: 3.003000 Runtime 96.77s Validation loss decreased (3.052796 --> 3.003000). Saving model ... Epoch: 71 Training Loss: 3.271273 Validation Loss: 3.044428 Runtime 96.62s Epoch: 72 Training Loss: 3.274087 Validation Loss: 2.977538 Runtime 96.76s Validation loss decreased (3.003000 --> 2.977538). Saving model ... Epoch: 73 Training Loss: 3.219478 Validation Loss: 2.949657 Runtime 97.61s Validation loss decreased (2.977538 --> 2.949657). Saving model ... Epoch: 74 Training Loss: 3.207169 Validation Loss: 2.913297 Runtime 96.46s Validation loss decreased (2.949657 --> 2.913297). Saving model ... Epoch: 75 Training Loss: 3.178570 Validation Loss: 2.883404 Runtime 97.16s Validation loss decreased (2.913297 --> 2.883404). Saving model ... Epoch: 76 Training Loss: 3.143545 Validation Loss: 2.806651 Runtime 97.09s Validation loss decreased (2.883404 --> 2.806651). Saving model ... Epoch: 77 Training Loss: 3.116005 Validation Loss: 2.879760 Runtime 96.32s Epoch: 78 Training Loss: 3.099874 Validation Loss: 2.813783 Runtime 96.66s Epoch: 79 Training Loss: 3.055431 Validation Loss: 2.898245 Runtime 96.81s Epoch: 80 Training Loss: 3.050157 Validation Loss: 2.775574 Runtime 97.15s Validation loss decreased (2.806651 --> 2.775574). Saving model ... Epoch: 81 Training Loss: 3.051364 Validation Loss: 2.802267 Runtime 96.52s Epoch: 82 Training Loss: 2.984555 Validation Loss: 2.656491 Runtime 96.64s Validation loss decreased (2.775574 --> 2.656491). Saving model ... Epoch: 83 Training Loss: 2.987965 Validation Loss: 2.754565 Runtime 96.50s Epoch: 84 Training Loss: 2.958195 Validation Loss: 2.629728 Runtime 96.51s Validation loss decreased (2.656491 --> 2.629728). Saving model ... Epoch: 85 Training Loss: 2.920316 Validation Loss: 2.601881 Runtime 96.57s Validation loss decreased (2.629728 --> 2.601881). Saving model ... Epoch: 86 Training Loss: 2.927987 Validation Loss: 2.651368 Runtime 96.87s Epoch: 87 Training Loss: 2.874266 Validation Loss: 2.490459 Runtime 96.23s Validation loss decreased (2.601881 --> 2.490459). Saving model ... 
Epoch: 88 Training Loss: 2.859510 Validation Loss: 2.666244 Runtime 97.18s Epoch: 89 Training Loss: 2.869393 Validation Loss: 2.681479 Runtime 96.97s Epoch: 90 Training Loss: 2.843240 Validation Loss: 2.516311 Runtime 96.87s Epoch: 91 Training Loss: 2.779912 Validation Loss: 2.532178 Runtime 97.27s Epoch: 92 Training Loss: 2.805839 Validation Loss: 2.492191 Runtime 96.30s Epoch: 93 Training Loss: 2.728307 Validation Loss: 2.416302 Runtime 96.72s Validation loss decreased (2.490459 --> 2.416302). Saving model ... Epoch: 94 Training Loss: 2.732085 Validation Loss: 2.327367 Runtime 96.51s Validation loss decreased (2.416302 --> 2.327367). Saving model ... Epoch: 95 Training Loss: 2.708487 Validation Loss: 2.313818 Runtime 96.23s Validation loss decreased (2.327367 --> 2.313818). Saving model ... Epoch: 96 Training Loss: 2.703447 Validation Loss: 2.331677 Runtime 96.80s Epoch: 97 Training Loss: 2.680568 Validation Loss: 2.258813 Runtime 96.44s Validation loss decreased (2.313818 --> 2.258813). Saving model ... Epoch: 98 Training Loss: 2.651840 Validation Loss: 2.253940 Runtime 96.91s Validation loss decreased (2.258813 --> 2.253940). Saving model ... Epoch: 99 Training Loss: 2.615215 Validation Loss: 2.271412 Runtime 96.72s Epoch: 100 Training Loss: 2.601213 Validation Loss: 2.255941 Runtime 96.86s ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.547741 Test Accuracy: 20% (173/835) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
###Code ## TODO: Specify data loaders train_data = datasets.ImageFolder(train_dir, transform=transform_train) valid_data = datasets.ImageFolder(valid_dir, transform=transform_test) test_data = datasets.ImageFolder(test_dir, transform=transform_test) # print out some data stats print('Num training images: ', len(train_data)) print('Num valid images: ', len(valid_data)) print('Num test images: ', len(test_data)) num_workers = 0 batch_size = 10 # prepare data loaders; the validation loader draws from valid_data train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,shuffle=True, num_workers=num_workers) valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size,shuffle=True, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,shuffle=True, num_workers=num_workers) loaders_scratch = {"train" : train_loader, "valid" : valid_loader, "test" : test_loader} loaders_transfer = {"train" : train_loader, "valid" : valid_loader, "test" : test_loader} ###Output Num training images: 6680 Num valid images: 836 Num test images: 835 ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.resnet18(pretrained=True) for param in model_transfer.parameters(): param.requires_grad = False # freeze the pre-trained feature extractor (the attribute is requires_grad) #print(model_transfer) last_input = model_transfer.fc.in_features last_layer = nn.Linear(last_input, 133, bias=True) model_transfer.fc = last_layer for param in model_transfer.fc.parameters(): param.requires_grad = True # only the new classification layer is trained # check to see that your last layer produces the expected number of outputs print(model_transfer) if use_cuda: model_transfer = model_transfer.cuda() ###Output ResNet( (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) (layer1): Sequential( (0): BasicBlock( (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) (1): BasicBlock( (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (layer2): Sequential( (0): BasicBlock( (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (downsample): Sequential( (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True,
track_running_stats=True) ) ) (1): BasicBlock( (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (layer3): Sequential( (0): BasicBlock( (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (downsample): Sequential( (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): BasicBlock( (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (layer4): Sequential( (0): BasicBlock( (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (downsample): Sequential( (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): BasicBlock( (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (avgpool): AvgPool2d(kernel_size=7, stride=1, padding=0) (fc): Linear(in_features=512, out_features=133, bias=True) ) ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I decided to use the pre-trained ResNet-18 network, because my internet research suggested that ResNet-18 should be faster than the VGG-16 network. I don't think that it is the best solution, but it seems to be a good starting point to gain more experience with other architectures. Furthermore I have replaced the last fc layer of the ResNet-18 network with my own fc layer to be able to classify the 133 dog breeds. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code import torch.optim as optim criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.parameters(), lr=0.01) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = train(20, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 3.405353 Validation Loss: 1.851149 Runtime 120.79s Validation loss decreased (inf --> 1.851149). Saving model ... Epoch: 2 Training Loss: 1.987495 Validation Loss: 1.284058 Runtime 122.25s Validation loss decreased (1.851149 --> 1.284058). Saving model ... Epoch: 3 Training Loss: 1.630859 Validation Loss: 1.131401 Runtime 122.66s Validation loss decreased (1.284058 --> 1.131401). Saving model ... Epoch: 4 Training Loss: 1.450967 Validation Loss: 0.988890 Runtime 122.16s Validation loss decreased (1.131401 --> 0.988890). Saving model ... Epoch: 5 Training Loss: 1.339684 Validation Loss: 0.914267 Runtime 121.98s Validation loss decreased (0.988890 --> 0.914267). Saving model ... Epoch: 6 Training Loss: 1.261668 Validation Loss: 0.934066 Runtime 121.11s Epoch: 7 Training Loss: 1.171580 Validation Loss: 0.837448 Runtime 121.72s Validation loss decreased (0.914267 --> 0.837448). Saving model ... Epoch: 8 Training Loss: 1.122521 Validation Loss: 0.809336 Runtime 121.77s Validation loss decreased (0.837448 --> 0.809336). Saving model ... Epoch: 9 Training Loss: 1.106454 Validation Loss: 0.794289 Runtime 121.39s Validation loss decreased (0.809336 --> 0.794289). Saving model ... Epoch: 10 Training Loss: 1.055529 Validation Loss: 0.783606 Runtime 122.04s Validation loss decreased (0.794289 --> 0.783606). Saving model ... Epoch: 11 Training Loss: 1.009490 Validation Loss: 0.733466 Runtime 122.78s Validation loss decreased (0.783606 --> 0.733466). Saving model ... Epoch: 12 Training Loss: 0.988301 Validation Loss: 0.692766 Runtime 121.73s Validation loss decreased (0.733466 --> 0.692766). Saving model ... Epoch: 13 Training Loss: 0.963164 Validation Loss: 0.682056 Runtime 121.83s Validation loss decreased (0.692766 --> 0.682056). Saving model ... Epoch: 14 Training Loss: 0.983526 Validation Loss: 0.639600 Runtime 122.14s Validation loss decreased (0.682056 --> 0.639600). Saving model ... Epoch: 15 Training Loss: 0.912481 Validation Loss: 0.631604 Runtime 120.85s Validation loss decreased (0.639600 --> 0.631604). Saving model ... Epoch: 16 Training Loss: 0.901907 Validation Loss: 0.624866 Runtime 122.01s Validation loss decreased (0.631604 --> 0.624866). Saving model ... Epoch: 17 Training Loss: 0.862247 Validation Loss: 0.619594 Runtime 121.57s Validation loss decreased (0.624866 --> 0.619594). Saving model ... Epoch: 18 Training Loss: 0.867266 Validation Loss: 0.590032 Runtime 121.28s Validation loss decreased (0.619594 --> 0.590032). Saving model ... Epoch: 19 Training Loss: 0.851384 Validation Loss: 0.589342 Runtime 121.47s Validation loss decreased (0.590032 --> 0.589342). Saving model ... 
Epoch: 20 	Training Loss: 0.851866 	Validation Loss: 0.606390 	Runtime 121.59s ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.691584 Test Accuracy: 78% (659/835) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. data_transfer = {'train': train_data, 'valid': valid_data, 'test' : test_data} # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed img = Image.open(img_path) normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose([ transforms.Resize((224,224)), transforms.ToTensor(), normalize]) #transform image to tensor and add batch additional dimension img_tensor = transform(img).unsqueeze_(0) if use_cuda: img_tensor = img_tensor.cuda() model_transfer.eval() # make sure the network is in inference mode output = model_transfer(img_tensor) _, pred = torch.max(output.data, 1) return class_names[pred.item()] # pred is already the 0-based index into class_names ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither if dog_detector(img_path) or face_detector(img_path): return predict_breed_transfer(img_path) else: return "No dogs or humans were detected in the picture." ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement) The output is unfortunately worse than expected. __Possible improvement points:__ 1. Using another pre-trained network instead of ResNet-18, e.g. ResNet-50 with more layers, or even one with a different architecture. 2. Try out other optimizers (e.g. Adam) and experiment with various learning rates. 3. Increasing the number of epochs. 4. Experiment with different types of transformations. 5. Try to use more fc layers and/or nodes inside the fc layers. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. import matplotlib.pyplot as plt my_images = np.array(glob('my_images/*')) idx = 0 for img in my_images: print('\nPicture: {} -> {}'.format(img, run_app(img))) idx += 1 ax = plt.subplot(1, 6, idx) ax.axis('off') plt.imshow(Image.open(img)) ###Output Picture: my_images\american-staffordshire-terrier.jpg -> American foxhound Picture: my_images\beagel.jpg -> Basset hound Picture: my_images\boy.jpg -> Smooth fox terrier Picture: my_images\doberman-pinscher.jpg -> French bulldog Picture: my_images\girl.jpg -> Irish water spaniel Picture: my_images\rhino.jpg -> No dogs or humans were detected in the picture. ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input.
If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ 98.00% of the first 100 images in `human_files` have a detected human face, while 8.00% of the first 100 images in `dog_files` do (see the output below). ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short.
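## A simple benchmark: run face_detector over each list of 100 image paths and
## report the share of images with at least one detected face as a percentage.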
def perf_human(images): count = 0 for i in images: if face_detector(i): count += 1 # report the rate over however many images were passed in, not a hard-coded 100 return '{:.2%} human faces detected'.format(count/len(images)) print(perf_human(human_files_short) + ' in human_files') print(perf_human(dog_files_short) + ' in dog_files') ###Output 98.00% human faces detected in human_files 8.00% human faces detected in dog_files ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /home/ivi/.cache/torch/checkpoints/vgg16-397923af.pth ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function.
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image ## defining how to transform the image to the sizes and normalization that VGG16 recognizes transform_img = transforms.Compose([ transforms.Resize(size=(224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) ## open the image and then apply the transformation img = Image.open(img_path) img = transform_img(img) ## Add a 4th dimension to the image so it's equal to the batches that VGG16 takes as input img = img.unsqueeze(0) ## put the image as input to the pre-trained model and return a class prediction between 0 and 999 (if it is between 151 and 268 it's a dog) if use_cuda: img = img.cuda() prediction = VGG16(img) prediction = torch.argmax(prediction) return prediction #see one prediction prediction = VGG16_predict('dogImages/test/004.Akita/Akita_00244.jpg') print(prediction) ###Output tensor(248) ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. prediction = VGG16_predict(img_path) if prediction >= 151 and prediction <=268: return True else: return False dog_detector('dogImages/test/004.Akita/Akita_00244.jpg') dog_detector('lfw/Aaron_Eckhart/Aaron_Eckhart_0001.jpg') ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. def perf_dog (images): count = 0 for i in images: if (dog_detector(i) == True): count += 1 return ( '{:.2%} dogs detected'.format( count/100)) print (perf_dog(human_files_short) + ' in human_files') print (perf_dog(dog_files_short) + ' in dog_files') ###Output 0.00% dogs detected in human_files 98.00% dogs detected in dog_files ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. 
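# --- Hedged example (added; not part of the original template) ---
# One possible alternative backbone for this optional task: ResNet-50. It
# reuses the same idea as dog_detector above (ImageNet's dog categories
# occupy indices 151-268), so only the pre-trained model changes. This is a
# sketch of one approach, not a definitive implementation.
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image

resnet50 = models.resnet50(pretrained=True)
resnet50.eval()  # inference mode: fixes batch-norm statistics

def resnet50_dog_detector(img_path):
    # same preprocessing as VGG16_predict above
    transform = transforms.Compose([
        transforms.Resize(size=(224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    img = transform(Image.open(img_path).convert('RGB')).unsqueeze(0)
    with torch.no_grad():
        idx = torch.argmax(resnet50(img)).item()
    # ImageNet dog categories span indices 151-268 inclusive
    return 151 <= idx <= 268

# To report performance, one could count detections over each short list, e.g.:
# print(sum(resnet50_dog_detector(f) for f in dog_files_short), 'of 100 dog images detected')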
###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line.
#-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. 
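# Added commentary on the template below: model.eval() switches dropout and
# batch-norm layers to inference behavior before looping over the test set.
# The update test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
# keeps test_loss equal to the mean of the batch losses seen so far, computed
# incrementally rather than summed and divided at the end.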
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. 
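In this template, the cells above leave `model_transfer`, `criterion_transfer`, and `optimizer_transfer` unfilled, so the test call below cannot run until they are defined. As an illustration only, a minimal hedged sketch (assuming a VGG-16 backbone with just its final classifier layer replaced and trained; not the graded solution) could look like the following. ###Code # Hedged sketch, not the required solution: reuse pre-trained VGG-16 features
# and swap the last classifier layer for a 133-way dog-breed output.
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models

model_transfer = models.vgg16(pretrained=True)
# freeze the convolutional feature extractor; only the classifier will train
for param in model_transfer.features.parameters():
    param.requires_grad = False
# classifier[6] is the final Linear(4096, 1000) layer in torchvision's VGG-16
model_transfer.classifier[6] = nn.Linear(model_transfer.classifier[6].in_features, 133)

criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.001)
if use_cuda:
    model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown With definitions along these lines (plus the transfer data loaders), the cell below computes the test loss and accuracy.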
###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! 
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. 
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. 
- What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. count_humans, count_dogs = 0, 0 for file in human_files_short: if face_detector(file): count_humans += 1 for file in dog_files_short: if face_detector(file): count_dogs += 1 # the counts double as percentages here because each list holds exactly 100 images print('%.1f%% of the first 100 images in human_files have a detected human face.' % count_humans) print('%.1f%% of the first 100 images in dog_files have a detected human face.' % count_dogs) ###Output 98.0% of the first 100 images in human_files have a detected human face. 17.0% of the first 100 images in dog_files have a detected human face. ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available #if use_cuda: # VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model.
The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image VGG16.eval() loader = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model (0.229, 0.224, 0.225))]) image = Image.open(img_path) image = loader(image).float() # needs to be Variable to be accepted by VGG16 #image = Variable(image) image = image.unsqueeze(0) # get predictions, squeeze out, and return as numpy prediction = VGG16(image).squeeze().data.numpy() # np.argmax returns the position of the largest value return np.argmax(prediction) # predicted class index VGG16_predict("/data/dog_images/train/001.Affenpinscher/Affenpinscher_00001.jpg") ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. imagenet_class = VGG16_predict(img_path) if imagenet_class >= 151 and imagenet_class <= 268: return True #true, detected a dog else: return False #false, didn't detect a dog dog_detector("/data/dog_images/train/001.Affenpinscher/Affenpinscher_00001.jpg") ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. 
## accessing function below human_files_short = human_files[:100] dog_files_short = dog_files[:100] def assess_dog_detector(h_files, d_files): # assumes h_files and d_files have the same length (both 100 here) hf_detected = 0 df_detected = 0 for i in range(len(d_files)): if dog_detector(h_files[i]): hf_detected += 1 if dog_detector(d_files[i]): df_detected += 1 #print(f'Model: vgg16') print('Dogs detected in:') percent_human_faces = (hf_detected/len(h_files))*100 percent_dog_faces = (df_detected/len(d_files))*100 #printing answers to this question print(percent_human_faces,'percent of the images in human_files_short have a detected dog') print(percent_dog_faces,'percent of the images in dog_files_short have a detected dog') assess_dog_detector(human_files_short, dog_files_short) ###Output Dogs detected in: 0.0 percent of the images in human_files_short have a detected dog 100.0 percent of the images in dog_files_short have a detected dog ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
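To make that low bar concrete: with 133 classes, a uniform random guess is correct with probability $1/133 \approx 0.0075$, i.e. about $0.75\%$ accuracy, so the 10% target above already demands a model more than ten times better than chance.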
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes # defining training, validation and test data directories data_dir = '/data/dog_images/' train_dir = os.path.join(data_dir, 'train') valid_dir = os.path.join(data_dir, 'valid') test_dir = os.path.join(data_dir, 'test') # creating the transforms data_transforms = { 'train': transforms.Compose([ transforms.RandomAffine(10, translate=[0.1, 0.1], shear=10), transforms.Resize((256,256)), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter( brightness=0.4, contrast=0.4, saturation=0.4,), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'val': transforms.Compose([ transforms.Resize((230,230)), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'test': transforms.Compose([ transforms.Resize((230,230)), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } # creating the datasets from folders train_data = datasets.ImageFolder(train_dir, transform=data_transforms['train']) valid_data = datasets.ImageFolder(valid_dir, transform=data_transforms['val']) test_data = datasets.ImageFolder(test_dir, transform=data_transforms['test']) # print out some information on the datasets print(f'Number of training images: {len(train_data)}') print(f'Number of validation images: {len(valid_data)}') print(f'Number of test images: {len(test_data)}') ###Output Number of training images: 6680 Number of validation images: 835 Number of test images: 836 ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? ###Code from torch import utils # define dataloader parameters batch_size = 64 num_workers = 0 # prepare data loaders train_loader = utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) valid_loader = utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) test_loader = utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) loaders_scratch = {'train':train_loader,'valid':valid_loader, 'test':test_loader} ###Output _____no_output_____ ###Markdown **Answer**: First we use RandomAffine(10, translate=[0.1, 0.1], shear=10), which rotates the image by up to $\pm$10 degrees, translates it horizontally and vertically by up to a fraction (0.1, 0.1) of its size, and shears it by up to $\pm$10 degrees.
Then we resize the image to (256,256) using Resize((256,256)) and then resize again to (224,224) using RandomResizedCrop(224). Then we use RandomHorizontalFlip to randomly flip the image with a probability of 0.5, and then ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4) randomly changes the brightness, contrast, and saturation of the image. The reason for first resizing the image to (256,256) is so that the RandomResizedCrop has a consistent size to crop from. The images are cropped to (224,224) because this is the input size the torchvision models expect. We have just 6680 training images for 133 dog breeds, which is only about 50 images per breed. By augmenting the dataset we can artificially increase the number of training images we have. For example, by using horizontal flip we double the number of images from 6680 to 13360. The augmentation of our images helps the network generalize to the data better and thus prevents overfitting. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self, block, layers, num_classes=133): super(Net, self).__init__() ## Define layers of a CNN self.inplanes = 16 self.conv1 = nn.Conv2d(3, 16, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = nn.BatchNorm2d(16) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 16, layers[0]) self.layer2 = self._make_layer(block, 32, layers[1], stride=2) self.layer3 = self._make_layer(block, 64, layers[2], stride=2) self.layer4 = self._make_layer(block, 128, layers[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(128, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes: downsample = nn.Sequential( conv1x1(self.inplanes, planes, stride), nn.BatchNorm2d(planes), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes for _ in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): ## Define forward behavior x = self.conv1(x) x = self.bn1(x) x =
self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net(BasicBlock, [2, 2, 2, 2]) # move tensors to GPU if CUDA is available #if use_cuda: # model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I have implemented the ResNet18 architecture. The whole code is inspired by the PyTorch implementation of ResNet. I chose to implement the ResNet architecture to get a deeper understanding of its design. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(params=model_scratch.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code class EarlyStopping: """Stops the training early if the validation loss doesn't improve within a given patience.""" def __init__(self, patience=5): """ Args: patience (int): How long to wait after last time validation loss improved. """ self.patience = patience self.counter = 0 self.best_score = None self.early_stop = False self.val_loss_min = np.Inf def __call__(self, val_loss, model): score = -val_loss if self.best_score is None: self.best_score = score elif score < self.best_score: self.counter += 1 print(f'EarlyStopping counter: {self.counter} out of {self.patience}') if self.counter >= self.patience: self.early_stop = True else: self.best_score = score self.save_checkpoint(val_loss, model) self.counter = 0 def save_checkpoint(self, val_loss, model): '''Saves the model when the validation loss decreases.''' print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).
Saving model ...') torch.save(model.state_dict(), 'model_scratch.pt') self.val_loss_min = val_loss from tqdm import trange from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path,patience=2): early_stopping = EarlyStopping(patience=patience) # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU #if use_cuda: # data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to the model loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update training loss train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU #if use_cuda: # data, target = data.cuda(), target.cuda() ## update the average validation loss # forward pass to get net output output = model(data) # calculate the batch loss loss = criterion(output, target) # update validation loss valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased early_stopping(valid_loss, model) if early_stopping.early_stop: print("Early stopping!") break # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt',patience=2) # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 2.780166 Validation Loss: 3.023004 Epoch: 2 Training Loss: 2.781566 Validation Loss: 2.782256 Validation loss decreased (inf --> 2.782256). Saving model ... Epoch: 3 Training Loss: 2.710113 Validation Loss: 2.849597 EarlyStopping counter: 1 out of 2 Epoch: 4 Training Loss: 2.692914 Validation Loss: 3.284783 EarlyStopping counter: 2 out of 2 Early stopping! ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. 
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU #if use_cuda: # data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 2.636368 Test Accuracy: 33% (281/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders # define dataloader parameters batch_size = 64 num_workers= 0 # prepare data loaders train_loader = utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) valid_loader = utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) test_loader = utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) loaders_transfer = {'train':train_loader,'valid':valid_loader, 'test':test_loader} ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture class squeezenet(nn.Module): def __init__(self): super(squeezenet, self).__init__() self.num_classes = 133 squeezenet1_1 = models.squeezenet1_1(pretrained=True).features # freeze training for all layers for param in squeezenet1_1.parameters(): param.requires_grad_(False) modules = list(squeezenet1_1.children()) self.features = nn.Sequential(*modules) self.classifier = nn.Sequential( nn.Dropout(0.1), nn.Conv2d(512, self.num_classes, kernel_size=(1, 1), stride=(1, 1)), nn.ReLU(inplace=True), nn.AvgPool2d(kernel_size=13, stride=1, padding=0)) def forward(self, x): x = self.features(x) x = self.classifier(x) return x.view(x.size(0), self.num_classes) ## model architecture model_transfer = squeezenet() #if use_cuda: # model_transfer = model_transfer.cuda() ###Output /opt/conda/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/squeezenet.py:94: UserWarning: nn.init.kaiming_uniform is now deprecated in favor of nn.init.kaiming_uniform_. 
/opt/conda/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/squeezenet.py:92: UserWarning: nn.init.normal is now deprecated in favor of nn.init.normal_. ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I have chosen squeezenet1_1 as our pre-trained model because it offers good speed as well as good accuracy when detecting dogs in images, as shown in our assessment earlier. The only things I change from the original squeezenet are the final classifier convolution, which goes from Conv2d(512, 1000, kernel_size=(1, 1), stride=(1, 1)) to nn.Conv2d(512, 133, kernel_size=(1, 1), stride=(1, 1)) to match our number of classes, and the forward function, where the output is reshaped accordingly with return x.view(x.size(0), 133). I have also frozen all the layers in self.features, so only the layers in self.classifier will be trained. The reasoning: since our dataset is very small, we would risk overfitting the feature extractor to it, and since squeezenet1_1 is already pre-trained on ImageNet, which contains dog breeds, its feature extractor should already be reasonably good for our dataset. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(params=model_transfer.classifier.parameters()) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = train(5, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 1.538007 Validation Loss: 1.089367 Epoch: 2 Training Loss: 1.443567 Validation Loss: 0.976692 Validation loss decreased (inf --> 0.976692). Saving model ... Epoch: 3 Training Loss: 1.349352 Validation Loss: 0.996474 EarlyStopping counter: 1 out of 2 Epoch: 4 Training Loss: 1.306788 Validation Loss: 1.180408 EarlyStopping counter: 2 out of 2 Early stopping! ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 1.171459 Test Accuracy: 66% (558/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e.
a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in train_data.classes] #data_transfers['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed model_transfer.eval() loader = transforms.Compose([transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model (0.229, 0.224, 0.225))]) image = Image.open(img_path) image = loader(image).float() # needs to be Variable to be accepted by model #image = Variable(image) # makes a mini-batch of size 1 image = image.unsqueeze(0) # get predictions, squeeze it out of the 'mini-batch', and return as numpy prediction = model_transfer(image).squeeze().data.numpy() # np.argmax returns the position of the largest value predicted_label_idx = np.argmax(prediction) correct_classification = False true_label_idx = -1 if re.search(r"\d+", img_path) != None: true_label_idx = int(re.search(r"\d+", img_path).group(0)) - 1 return class_names[np.argmax(prediction)], predicted_label_idx, true_label_idx predict_breed_transfer('/data/dog_images/train/001.Affenpinscher/Affenpinscher_00001.jpg') ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. import re def run_app(img_path): ## handle cases for a human face, dog, and neither img = cv2.imread(img_path) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(cv_rgb) ## handle cases for a human face, dog, and neither if face_detector(img_path): dog_breed,predicted_label_idx ,_ = predict_breed_transfer(img_path) print("You are a human") plt.show() print("It is a ... \n" + dog_breed) img = cv2.imread(get_path_to_breed(predicted_label_idx)) cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(cv_rgb) plt.show() elif dog_detector(img_path): dog_breed, predicted_label_idx, true_label_idx = predict_breed_transfer(img_path) print(f"The breed is {class_names[true_label_idx]}") plt.show() if true_label_idx == predicted_label_idx: print("It is a ... \n" + dog_breed) else: print("It is a ... \n" + dog_breed) img = cv2.imread(get_path_to_breed(predicted_label_idx)) cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(cv_rgb) plt.show() else: return "error" print("\n-----------------------------------------------------\n") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? 
If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ?  Provide at least three possible points of improvement for your algorithm. __Answer:__ The output is as expected, although the classifier seems to struggle most when dog breeds look similar. Sometimes it classifies a dog as human, but then goes on to tell the "human" that it looks like the correct dog breed. Overall I think the algorithm performs above expectations. The model can also be used for fun: when it tells a human what dog breed they look like, it tends to return dogs that actually resemble them. Suggested improvements:- We could add a separate neural network to better detect humans in the images.- In case there are both a dog and a human in the image, the neural net should be able to detect both.- The algorithm will always output a predicted dog breed, but there should be one more class for when no dog is present.- Increasing the size of the training dataset should also give better predictions. 
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
def get_path_to_breed(breed_idx=0):
    # return one training image path for the given breed index,
    # used to display an example of the predicted breed
    for img_path, label_idx in train_data.imgs:
        if label_idx == breed_idx:
            return img_path

human_files = []  # np.array(glob("computer_hm_images/*"))  (human test images left out of this run)
dog_files = np.array(glob("computer_dg_images/*"))

## suggested code, below
for file in np.hstack((dog_files, human_files)):
    run_app(file)
###Output
The breed is Affenpinscher
###Markdown
 Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. 
Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](https://github.com/cleysonl/deep-learning-v2-pytorch/blob/master/project-dog-classification/images/sample_dog_output.png?raw=1)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code ! git clone https://github.com/cleysonl/deep-learning-v2-pytorch.git %cd deep-learning-v2-pytorch/project-dog-classification import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. 
In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
def face_detection_test(files): detection_cnt = 0; total_cnt = len(files) for file in files: detection_cnt += face_detector(file) return detection_cnt, total_cnt print("detect face in human_files: {} / {}".format(face_detection_test(human_files_short)[0], face_detection_test(human_files_short)[1])) print("detect face in dog_files: {} / {}".format(face_detection_test(dog_files_short)[0], face_detection_test(dog_files_short)[1])) ###Output detect face in human_files: 100 / 100 detect face in dog_files: 10 / 100 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.cache/torch/checkpoints/vgg16-397923af.pth ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
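    # A minimal hedged sketch of the steps described in the scaffold comments
    # below (not the notebook's official solution): apply the standard ImageNet
    # resize/normalization used elsewhere in this document, run a forward pass,
    # and take the index of the largest of the 1000 class scores.
    sketch_transform = transforms.Compose([transforms.Resize((224, 224)),
                                           transforms.ToTensor(),
                                           transforms.Normalize((0.485, 0.456, 0.406),
                                                                (0.229, 0.224, 0.225))])
    image = Image.open(img_path).convert('RGB')
    image = sketch_transform(image).unsqueeze(0)  # mini-batch of size 1
    if use_cuda:
        image = image.cuda()
    VGG16.eval()
    prediction = VGG16(image)
    return int(prediction.data.max(1)[1].item())
    # (once the sketch above returns, the template scaffold below is unreachable)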
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. 
Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
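For orientation, here first is a hedged, self-contained sketch of the pieces those TODOs ask for — a loss, an optimizer, one gradient step, the running-average bookkeeping, and saving on improved validation loss. Every name below (e.g. `toy_model`, `criterion_demo`) is an illustrative stand-in, not part of the project template; the template cell itself follows.
###Code
# Hedged sketch: one training step plus checkpoint-on-improvement, on a toy model.
import torch
import torch.nn as nn
import torch.optim as optim

toy_model = nn.Linear(10, 3)                        # stand-in for the CNN
criterion_demo = nn.CrossEntropyLoss()              # a common loss for classification
optimizer_demo = optim.SGD(toy_model.parameters(), lr=0.01)

data = torch.randn(4, 10)                           # fake mini-batch of 4 samples
target = torch.tensor([0, 2, 1, 0])                 # fake class labels

optimizer_demo.zero_grad()                          # clear accumulated gradients
output = toy_model(data)                            # forward pass
loss = criterion_demo(output, target)               # compute the loss
loss.backward()                                     # backpropagate
optimizer_demo.step()                               # update the parameters

# running-average update in the form hinted at by the template comments
batch_idx, train_loss = 0, 0.0
train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.item() - train_loss))

# "save the model if validation loss has decreased" pattern
valid_loss, valid_loss_min = train_loss, float('inf')
if valid_loss < valid_loss_min:
    torch.save(toy_model.state_dict(), 'toy_checkpoint.pt')
    valid_loss_min = valid_loss

print('demo train_loss:', train_loss)
###Output
_____no_output_____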
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. 
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](https://github.com/cleysonl/deep-learning-v2-pytorch/blob/master/project-dog-classification/images/sample_human_output.png?raw=1) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. 
If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. 
#-#-# res_human = np.array([face_detector(i) for i in human_files_short]) print('% of first 100 images in human_files detected human face: ', sum(res_human)/100) res_dog = np.array([face_detector(i) for i in dog_files_short]) print('% of first 100 images in dog_files detected human face: ', sum(res_dog)/100) ###Output % of first 100 images in dog_files detected human face: 0.18 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /Users/ciellagrange/.cache/torch/checkpoints/vgg16-397923af.pth ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    # pre-process with the resize/normalize pipeline expected by models pre-trained on ImageNet
    transform = transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.ToTensor(),
                                    transforms.Normalize((0.485, 0.456, 0.406),
                                                         (0.229, 0.224, 0.225))])
    img = Image.open(img_path).convert('RGB')
    img = transform(img).unsqueeze(0)  # add a batch dimension
    if use_cuda:
        img = img.cuda()
    
    # forward pass; the largest score marks the predicted ImageNet class
    pred = int(VGG16(img).data.max(1)[1].item())
    
    return pred # predicted class index (an integer between 0 and 999)
###Output
_____no_output_____
###Markdown
 (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`.  Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    
    return None # true/false
###Output
_____no_output_____
###Markdown
 (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.  - What percentage of the images in `human_files_short` have a detected dog?  - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ 
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
###Output
_____no_output_____
###Markdown
 We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc).  Please use the code cell below to test other pre-trained PyTorch models.  If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
 --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images.  In this step, you will create a CNN that classifies dog breeds.  You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.  In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging.  To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.  Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).  Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black.  Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. 
Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
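Before the template cell below, here is a hedged, self-contained sketch of the validation half of the loop (all names are illustrative stand-ins, not part of the template): switch to evaluation mode, accumulate the running-average loss without gradient tracking, and apply the checkpoint-on-improvement test the TODO asks for.
###Code
# Hedged sketch: a validation pass over fake batches, on a toy model.
import torch
import torch.nn as nn

toy_model = nn.Linear(10, 3)                        # stand-in for the CNN
criterion_demo = nn.CrossEntropyLoss()
fake_valid_batches = [(torch.randn(4, 10), torch.tensor([0, 1, 2, 0]))
                      for _ in range(3)]

toy_model.eval()                                    # evaluation mode (dropout/batch-norm frozen)
valid_loss = 0.0
with torch.no_grad():                               # no gradients needed for validation
    for batch_idx, (data, target) in enumerate(fake_valid_batches):
        output = toy_model(data)
        loss = criterion_demo(output, target)
        # same running-average form the template suggests for train_loss
        valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.item() - valid_loss))

valid_loss_min = float('inf')
if valid_loss < valid_loss_min:                     # "save if validation loss decreased"
    torch.save(toy_model.state_dict(), 'toy_checkpoint.pt')
    valid_loss_min = valid_loss

print('demo valid_loss:', valid_loss)
###Output
_____no_output_____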
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. 
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. 
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
detected_humans = sum([face_detector(f) for f in human_files_short]) / len(human_files_short)
detected_dogs = sum([face_detector(f) for f in dog_files_short]) / len(dog_files_short)
# human-face detection rate on the human images
print(f'{round(detected_humans*100,2)}% of images detected human face')
# false-positive rate: human faces "detected" on the dog images
print(f'{round(detected_dogs*100,2)}% of images detected human face') ###Output 98.0% of images detected human face 17.0% of images detected human face ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:14<00:00, 38411072.25it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path).convert('RGB')
    # inference uses deterministic pre-processing plus the ImageNet
    # normalization that the pretrained weights expect (no random augmentation)
    img_transform = transforms.Compose([transforms.Resize(256),
                                        transforms.CenterCrop(224),
                                        transforms.ToTensor(),
                                        transforms.Normalize([0.485, 0.456, 0.406],
                                                             [0.229, 0.224, 0.225])])
    img = img_transform(img).unsqueeze_(0)
    if use_cuda:
        img = img.cuda()
    with torch.no_grad():
        prediction = VGG16(img)
    class_index = int(prediction[0].argmax())
    return class_index # predicted class index

# example: VGG16_predict(dog_files_short[0]) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. if 151 <= VGG16_predict(img_path) <= 268: return True else: return False ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. detected_humans = sum([dog_detector(f) for f in human_files_short]) / len(human_files_short) detected_dogs = sum([dog_detector(f) for f in dog_files_short]) / len(dog_files_short) print(f'Dog detected in {round(detected_humans*100,2)}% of images') print(f'Dog detected in {round(detected_dogs*100,2)}% of images') ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging.
To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1% (1/133 ≈ 0.75%). Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code
import numpy as np
import torch
import torchvision.models as models
from PIL import Image
import torchvision.transforms as transforms
use_cuda = torch.cuda.is_available()
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
# the training set gets light augmentation; validation/test stay deterministic
train_transform = transforms.Compose([transforms.Resize((224,224)),
                                      transforms.CenterCrop(224),
                                      transforms.RandomRotation(15),
                                      transforms.RandomHorizontalFlip(),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                           std=(0.229, 0.224, 0.225))])
valid_transform = transforms.Compose([transforms.Resize((224,224)),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                           std=(0.229, 0.224, 0.225))])
test_transform = transforms.Compose([transforms.Resize((224,224)),
                                     transforms.ToTensor(),
                                     transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                                          std=(0.229, 0.224, 0.225))])

train_data = datasets.ImageFolder('/data/dog_images/train', transform=train_transform)
valid_data = datasets.ImageFolder('/data/dog_images/valid', transform=valid_transform)
test_data = datasets.ImageFolder('/data/dog_images/test', transform=test_transform)
print('Num train images: ', len(train_data))
print('Num valid images: ', len(valid_data))
print('Num test images: ', len(test_data))

# define dataloader parameters
batch_size = 32
num_workers = 0

# prepare data loaders; only the training loader needs shuffling
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)

loaders_scratch = {'train': train_loader, 'valid': valid_loader, 'test': test_loader}

import matplotlib.pyplot as plt
# sanity check: display one (normalized) channel of a training image
plt.imshow(train_data[55][0].numpy()[0], cmap='gray') ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:1. The images are resized and center-cropped to a 224x224 square, because VGG-style networks expect inputs of this dimension.2. Yes — the training set is augmented with random rotations of up to 15 degrees and random horizontal flips, on top of the resize, tensor conversion, and ImageNet normalization; the validation and test sets only get the deterministic resize and normalization. The validation and test loaders also use shuffle=False, since shuffling is only needed during training. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv4 = nn.Conv2d(128, 256, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        # after four 2x2 max-pools, 224 -> 112 -> 56 -> 28 -> 14
        self.fc1 = nn.Linear(14*14*256, 500)
        self.fc2 = nn.Linear(500, 250)
        self.fc3 = nn.Linear(250, 133)
        self.dropout = nn.Dropout(p=0.35)

    def forward(self, x):
        ## Define forward behavior
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = self.dropout(x)
        x = F.relu(self.conv2(x))
        x = self.pool1(x)
        x = self.dropout(x)
        x = F.relu(self.conv3(x))
        x = self.pool1(x)
        x = self.dropout(x)
        x = F.relu(self.conv4(x))
        x = self.pool1(x)
        x = self.dropout(x)
        x = x.view(-1, x.shape[-1]**2 *x.shape[1])
        x = F.relu(self.dropout(self.fc1(x)))
        x = F.relu(self.dropout(self.fc2(x)))
        x = self.fc3(x)
        return x

#-#-# You do NOT have to modify the code below this line.
#-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
torch.cuda.empty_cache()

model_scratch ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I have read through a couple of ML papers dedicated to image classification using CNNs and took inspiration from the architectures the researchers used. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    from PIL import ImageFile
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            ## record the average training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # gradients are not needed for validation
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, target)
            ## update the average validation loss
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print(f'Validation loss decreased ({valid_loss_min} --> {valid_loss}). Saving model ...')
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

# train the model
model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 4.857617 Test Accuracy: 1% (10/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
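One minimal way to take up that suggestion (a sketch — simply reusing the Step 3 loaders and dataset objects; `data_transfer` is the name the later template cells expect): ###Code
## sketch: reuse the scratch loaders and datasets from Step 3
loaders_transfer = loaders_scratch
data_transfer = {'train': train_data, 'valid': valid_data, 'test': test_data} ###Output _____no_output_____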
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture

if use_cuda:
    model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model (uncomment and complete the line below)
# model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    pass  # TODO: implement ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin!
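(For reference, the `predict_breed_transfer` and `run_app` stubs above could be completed along the lines sketched below — a sketch only, which assumes that `model_transfer`, `class_names`, `face_detector`, and `dog_detector` exist as described earlier in the notebook.) ###Code
## sketch only: one possible completion of the two stubs above
import torch
import torchvision.transforms as transforms
from PIL import Image

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    tf = transforms.Compose([transforms.Resize(256),
                             transforms.CenterCrop(224),
                             transforms.ToTensor(),
                             transforms.Normalize([0.485, 0.456, 0.406],
                                                  [0.229, 0.224, 0.225])])
    img = tf(Image.open(img_path).convert('RGB')).unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    model_transfer.eval()
    with torch.no_grad():
        idx = int(model_transfer(img).argmax())
    return class_names[idx]

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if dog_detector(img_path):
        print('Dog detected! Predicted breed:', predict_breed_transfer(img_path))
    elif face_detector(img_path):
        print('Human detected! Resembling dog breed:', predict_breed_transfer(img_path))
    else:
        print('Error: neither a dog nor a human face was detected.') ###Output _____no_output_____ ###Markdown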
What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code
# Installing Pillow 5.4.1, as newer versions throw the 'photoshop' variable assignment issue
!pip install Pillow==5.4.1

import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("/data/lfw/*/*"))
dog_files = np.array(glob("/data/dog_images/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ Percentage of human faces detected in the 100 human images: 98%. Percentage of human faces detected in the 100 dog images: 17%. ###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
# with 100 images per set, the raw count equals the percentage
c=0
for i in tqdm(human_files_short):
    if face_detector(i):
        c+=1
print("Percentage of human faces detected in the 100 human images: {}%".format(c))
c=0
for i in tqdm(dog_files_short):
    if face_detector(i):
        c+=1
print("Percentage of human faces detected in the 100 dog images: {}%".format(c)) ###Output 100%|██████████| 100/100 [00:08<00:00, 12.45it/s] 0%| | 0/100 [00:00<?, ?it/s] ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:17<00:00, 32357846.89it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code
from PIL import Image, ImageFile
import torchvision.transforms as transforms
ImageFile.LOAD_TRUNCATED_IMAGES = True
from tqdm import tqdm

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    # inference uses a deterministic resize/crop plus the ImageNet
    # normalization; random augmentation belongs in training, not prediction
    image_transform = transforms.Compose([transforms.Resize(256),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])
    image = image_transform(Image.open(img_path).convert('RGB'))
    image = image.unsqueeze(0)
    if use_cuda:
        image = image.to('cuda')
    VGG16.eval()
    with torch.no_grad():
        class_index = int(VGG16(image).argmax())
    return class_index # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    value = VGG16_predict(img_path)
    if 151 <= value <= 268:
        return True
    else:
        return False ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ Percentage of dogs detected in the 100 human images: 0.0%. Percentage of dogs detected in the 100 dog images: 82.0%. ###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
c=0
for i in tqdm(human_files_short):
    if dog_detector(i):
        c+=1
print("Percentage of dogs detected in the human images: {}%".format((c/len(human_files_short))*100))
c=0
for i in tqdm(dog_files_short):
    if dog_detector(i):
        c+=1
print("Percentage of dogs detected in the dog images: {}%".format((c/len(dog_files_short))*100)) ###Output 100%|██████████| 100/100 [01:33<00:00, 1.08it/s] 0%| | 0/100 [00:00<?, ?it/s] ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
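# One possible sketch for this optional TODO (an assumption of how it *could*
# be done, not a required solution): swap in ResNet-50, reusing the same
# deterministic pre-processing and the same 151-268 dog index range.
resnet50 = models.resnet50(pretrained=True)
if use_cuda:
    resnet50 = resnet50.cuda()
resnet50.eval()

def resnet50_predict(img_path):
    # same deterministic ImageNet pre-processing as VGG16_predict above
    tf = transforms.Compose([transforms.Resize(256),
                             transforms.CenterCrop(224),
                             transforms.ToTensor(),
                             transforms.Normalize([0.485, 0.456, 0.406],
                                                  [0.229, 0.224, 0.225])])
    image = tf(Image.open(img_path).convert('RGB')).unsqueeze(0)
    if use_cuda:
        image = image.cuda()
    with torch.no_grad():
        return int(resnet50(image).argmax())

def resnet50_dog_detector(img_path):
    return 151 <= resnet50_predict(img_path) <= 268

# performance on human_files_short / dog_files_short could then be
# measured with the same tqdm loops used above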
###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! Checking the image database. Pillow was downgraded earlier so that loading does not hit the 'photoshop' issue. ###Code
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
%matplotlib inline

# Check if CUDA is available
use_cuda = torch.cuda.is_available()

import torchvision.transforms as transforms
# Declare the transforms for train, valid and test sets.
# Imitate the VGG-16 model.
# Resize images because the input size of VGG-16 is 224x224
# Convert to Tensor
# Normalize images because the values of images should be loaded between [0 - 1]
# creating the dict of per-split transforms (a separate name keeps the
# torchvision `transforms` module usable)
data_transforms = {
    # using the random resized crop and horizontal flip to augment the training data
    'train' : transforms.Compose([transforms.Resize(256),
                                  transforms.RandomResizedCrop(224),
                                  transforms.RandomHorizontalFlip(),
                                  transforms.ToTensor(),
                                  transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                       std=[0.229, 0.224, 0.225])]),
    'valid' : transforms.Compose([transforms.Resize(256),
                                  transforms.CenterCrop(224),
                                  transforms.ToTensor(),
                                  transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                       std=[0.229, 0.224, 0.225])]),
    'test' : transforms.Compose([transforms.Resize(256),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                                 transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                      std=[0.229, 0.224, 0.225])])
}

from torchvision import datasets
from torchvision import utils
import os

num_workers = 0
batch_size = 40

# Creating the image_dataset dictionary from the images in /data/dog_images/*
image_datasets = {x: datasets.ImageFolder(os.path.join('/data/dog_images', x), data_transforms[x])
                  for x in ['train', 'valid', 'test']}

# Data_loaders dictionary
data_loaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,
                                               shuffle=True, num_workers=num_workers)
                for x in ['train', 'valid', 'test']}

# Decreased the test loader batch size, as suggested in the student community
test_loader = torch.utils.data.DataLoader(image_datasets['test'], shuffle=True, batch_size=15) ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:- The code keeps the train, valid, and test transforms in a single dictionary. Following the PyTorch documentation, images are resized to 256 and then cropped to the 224x224 input size the architecture expects, and finally normalized, which helps the learning process of the network.- I decided to augment the data: with only about 6,680 training images, that did not seem enough to reach the 10% test accuracy, so a random resized crop and a random horizontal flip are applied to the training data, while the validation and test sets use only the deterministic resize and 224x224 center crop. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn

# define the CNN architecture
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        ## VGG-16 Architecture
        # Size 224
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        # Size 112
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        # Size 56
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        # Size 28
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        # Size 14
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)

        # batch normalization layers normalize each layer's output,
        # which improves training
        self.batch_norm64 = nn.BatchNorm2d(64)
        self.batch_norm128 = nn.BatchNorm2d(128)
        self.batch_norm256 = nn.BatchNorm2d(256)
        self.batch_norm512 = nn.BatchNorm2d(512)

        self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout()
        self.fc1 = nn.Linear(512 * 7 * 7, 4096)
        self.fc2 = nn.Linear(4096, 4096)
        self.fc3 = nn.Linear(4096, 133) # number of logits is 133, one per breed

    # Feedforward
    def forward(self, x):
        x = self.relu(self.batch_norm64(self.conv1_1(x)))
        x = self.relu(self.batch_norm64(self.conv1_2(x)))
        x = self.max_pool(x)
        x = self.relu(self.batch_norm128(self.conv2_1(x)))
        x = self.relu(self.batch_norm128(self.conv2_2(x)))
        x = self.max_pool(x)
        x = self.relu(self.batch_norm256(self.conv3_1(x)))
        x = self.relu(self.batch_norm256(self.conv3_2(x)))
        x = self.relu(self.batch_norm256(self.conv3_3(x)))
        x = self.max_pool(x)
        x = self.relu(self.batch_norm512(self.conv4_1(x)))
        x = self.relu(self.batch_norm512(self.conv4_2(x)))
        x = self.relu(self.batch_norm512(self.conv4_3(x)))
        x = self.max_pool(x)
        x = self.relu(self.batch_norm512(self.conv5_1(x)))
        x = self.relu(self.batch_norm512(self.conv5_2(x)))
        x = self.relu(self.batch_norm512(self.conv5_3(x)))
        x = self.max_pool(x)
        # view returns a new tensor with the same data but a different shape;
        # -1 infers that dimension from the remaining ones
        x = x.view(x.size(0), -1)
        x = self.dropout(self.relu(self.fc1(x)))
        x = self.dropout(self.relu(self.fc2(x)))
        x = self.fc3(x)
        return x

# Create CNN instance!
model_scratch = Net() # If CUDA is available, move tensors to GPU if use_cuda: model_scratch.cuda() # Printing the network architecture print(model_scratch) ###Output Net( (conv1_1): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv1_2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv2_1): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv2_2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv3_1): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv3_2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv3_3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv4_1): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv4_2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv4_3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv5_1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv5_2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv5_3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (batch_norm64): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (batch_norm128): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (batch_norm256): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (batch_norm512): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (max_pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (relu): ReLU(inplace) (dropout): Dropout(p=0.5) (fc1): Linear(in_features=25088, out_features=4096, bias=True) (fc2): Linear(in_features=4096, out_features=4096, bias=True) (fc3): Linear(in_features=4096, out_features=133, bias=True) ) ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I first designed an architecture of my own with multiple convolution and max-pooling layers and trained it; the network learned, but it was not able to achieve the required 10% test accuracy, ranging between 4 and 6 percent. After doing some research on the internet I implemented the VGG-16 architecture (with batch normalization added) and was able to obtain a 17% accuracy using this model. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.001, momentum=0.9) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
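As an optional extension to the training cell below (a hedged sketch, not part of the graded template; `checkpoint.pt` is an assumed filename), a checkpoint can also store the optimizer state, so an interrupted run on a time-limited workspace can be resumed instead of restarted:
###Code
# Hedged sketch: save and restore both the model weights and the optimizer state.
checkpoint = {
    'model_state': model_scratch.state_dict(),
    'optimizer_state': optimizer_scratch.state_dict(),
}
torch.save(checkpoint, 'checkpoint.pt')   # assumed filename

restored = torch.load('checkpoint.pt')
model_scratch.load_state_dict(restored['model_state'])
optimizer_scratch.load_state_dict(restored['optimizer_state'])
###Output
_____no_output_____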
###Code def train(n_epochs, train_loader, valid_loader, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() # training mode: dropout is active and batch norm updates its running statistics print("Training in epoch: ", epoch) for batch_idx, (data, target) in tqdm(enumerate(train_loader)): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass output = model(data) # calculate batch loss loss = criterion(output, target) # backward pass loss.backward() # parameter update optimizer.step() # update training loss train_loss += loss.item() * data.size(0) ###################### # validate the model # ###################### model.eval() # evaluation mode: dropout is disabled and batch norm uses its running statistics print("Validating in epoch: ", epoch) for batch_idx, (data, target) in tqdm(enumerate(valid_loader)): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # forward pass (no parameter updates happen here) output = model(data) # batch loss loss = criterion(output, target) # update validation loss valid_loss += loss.item() * data.size(0) # calculate average losses train_loss = train_loss / len(train_loader.dataset) valid_loss = valid_loss / len(valid_loader.dataset) # print training/validation statistics print('Epoch: {}\tTraining Loss: {:.6f}\t Validation Loss: {:.6f}'. format(epoch, train_loss, valid_loss)) # save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model...'. format(valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model n_epochs = 20 # train the model model_scratch = train(n_epochs, data_loaders['train'], data_loaders['valid'], model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loader, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() # evaluation mode for testing for batch_idx, (data, target) in enumerate(loader): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % (100.
* correct / total, correct, total)) model_scratch.load_state_dict(torch.load('model_scratch.pt')) test(test_loader, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.112371 Test Accuracy: 21% (180/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning) You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders # importing the modules import torchvision.transforms as transforms from torchvision import datasets from torchvision import utils import os # using the same data loaders created for the model_scratch part of the project loaders_transfer = data_loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model Architecture Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.resnet50(pretrained=True) # Freeze the pretrained parameters so only the new classifier is trained for param in model_transfer.parameters(): param.requires_grad = False # Replace the final fully connected layer of ResNet-50 (2048 inputs) with a new 133-class layer model_transfer.fc = nn.Linear(2048, 133) for param in model_transfer.fc.parameters(): param.requires_grad = True if use_cuda: model_transfer = model_transfer.cuda() # printing the resnet model print(model_transfer) ###Output ResNet( (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False) (layer1): Sequential( (0): Bottleneck( (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(256, 64,
kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (layer2): Sequential( (0): Bottleneck( (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (3): Bottleneck( (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (layer3): Sequential( (0): Bottleneck( (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(1024, eps=1e-05, 
momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (3): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (4): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (5): Bottleneck( (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (layer4): Sequential( (0): Bottleneck( (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) (downsample): Sequential( (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ) (1): Bottleneck( (conv1): 
Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) (2): Bottleneck( (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False) (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (relu): ReLU(inplace) ) ) (avgpool): AvgPool2d(kernel_size=7, stride=1, padding=0) (fc): Linear(in_features=2048, out_features=133, bias=True) ) ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ After some research on the internet I chose the ResNet-50 architecture for the following reasons:- It won 1st place in the ILSVRC 2015 classification competition with a top-5 error rate of 3.57% (as an ensemble model).- It won 1st place in the ILSVRC and COCO 2015 competitions in ImageNet detection, ImageNet localization, COCO detection and COCO segmentation.- Replacing the VGG-16 layers in Faster R-CNN with ResNet-101 gave a relative improvement of 28%.- Residual networks can be trained efficiently even at depths of 100 and 1000 layers.I had already implemented a VGG-16 architecture for model_scratch, so I wanted to try the ResNet architecture, which has proven results on image classification problems. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(model_transfer.fc.parameters(), lr=0.0001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model n_epochs = 20 model_transfer = train(n_epochs, loaders_transfer['train'], loaders_transfer['valid'], model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
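One detail worth noting before the test cell below (a hedged suggestion, not a template requirement; `sample_batch` is an illustrative name): because the network contains batch-norm and dropout layers, inference should run in eval mode, and gradient tracking is unnecessary during evaluation.
###Code
# Hedged sketch: run one batch through the network in inference mode.
model_transfer.eval()                 # freeze batch-norm statistics, disable dropout
with torch.no_grad():                 # gradients are not needed for evaluation
    sample_batch, _ = next(iter(loaders_transfer['test']))
    if use_cuda:
        sample_batch = sample_batch.cuda()
    logits = model_transfer(sample_batch)
print(logits.shape)                   # (batch_size, 133)
###Output
_____no_output_____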
###Code test(test_loader, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.698367 Test Accuracy: 82% (693/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the Model Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. import torchvision.transforms as transforms from torch.autograd import Variable # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in image_datasets['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed image = Image.open(img_path) # use the deterministic test-time preprocessing; random crops and flips at inference time would make the prediction unstable image_transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) image = image_transform(image) image = image.unsqueeze(0) # switch to evaluation mode before predicting model_transfer.eval() if use_cuda: output = model_transfer(Variable(image.cuda())).cpu() else: output = model_transfer(Variable(image)) value = class_names[output.data.numpy().argmax()] return value value = predict_breed_transfer(dog_files[700]) ###Output _____no_output_____ ###Markdown --- Step 5: Write your Algorithm Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither detect_dog = dog_detector(img_path) detect_human = face_detector(img_path) image = Image.open(img_path) plt.imshow(image) plt.show() if detect_dog: print("Dog detected.\nPredicted breed is {}".format(predict_breed_transfer(img_path))) elif detect_human: print("Human face detected.\nYou look similar to {}".format(predict_breed_transfer(img_path))) else: print("Error: neither a human nor a dog was detected in this picture") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ The model gave an 82% accuracy on the test data set.
The output is not better than expected. 1. One improvement would be to train model_transfer with more data, so it reaches a better accuracy and predicts more reliably. 2. The model should have been trained for more than 20 epochs; each epoch takes a long time and the Udacity workspace has time limits, but the error could be reduced by training for more epochs. 3. The model failed to detect a dog in some images that actually contain one; this needs to be investigated. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[3:6], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
![Sample Dog Output](images/sample_dog_output.png) In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road Ahead We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import Datasets Make sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect Humans In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale.
The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face Detector We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector __Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect Dogs In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
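Before the download cell below, it may help to note how inputs to ImageNet-pretrained torchvision models are usually prepared (a hedged sketch; `imagenet_preprocess` is an illustrative name): images are resized, center-cropped to 224x224, converted to tensors, and normalized with the ImageNet channel statistics.
###Code
import torchvision.transforms as transforms

# Hedged sketch: the standard preprocessing pipeline for ImageNet-pretrained models.
imagenet_preprocess = transforms.Compose([
    transforms.Resize(256),                      # shorter side to 256 pixels
    transforms.CenterCrop(224),                  # deterministic 224x224 crop
    transforms.ToTensor(),                       # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
###Output
_____no_output_____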
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. 
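For reference, one possible shape of the two functions described above (a hedged sketch that assumes the `VGG16` model and `use_cuda` flag defined earlier are in scope; the names `vgg16_predict_sketch` and `dog_detector_sketch` are illustrative and this is not the graded solution):
###Code
from PIL import Image
import torch
import torchvision.transforms as transforms

def vgg16_predict_sketch(img_path):
    # Hedged sketch: load, preprocess, and classify one image with the pretrained VGG-16.
    image = Image.open(img_path).convert('RGB')
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    batch = preprocess(image).unsqueeze(0)   # add a batch dimension
    if use_cuda:
        batch = batch.cuda()
    VGG16.eval()
    with torch.no_grad():
        logits = VGG16(batch)
    return int(logits.max(1)[1].item())      # index in [0, 999]

def dog_detector_sketch(img_path):
    # ImageNet indices 151-268 (inclusive) correspond to dog breeds, as noted above.
    idx = vgg16_predict_sketch(img_path)
    return 151 <= idx <= 268
###Output
_____no_output_____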
###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch) Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. [example images: Brittany | Welsh Springer Spaniel] It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). [example images: Curly-Coated Retriever | American Water Spaniel] Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. [example images: Yellow Labrador | Chocolate Labrador | Black Labrador] We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model Architecture Create a CNN to classify dog breed. Use the template in the code cell below.
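Before filling in the template below, a deliberately small from-scratch layout is sketched here for orientation (hedged; `TinyNetSketch` and its layer sizes are assumptions, not the graded answer): a couple of conv/max-pool stages followed by a linear classifier over the flattened feature maps.
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyNetSketch(nn.Module):
    # Hedged sketch: a minimal conv -> pool -> linear classifier for 224x224 inputs.
    def __init__(self, num_classes=133):
        super(TinyNetSketch, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)                   # halves height and width
        self.fc = nn.Linear(32 * 56 * 56, num_classes)   # 224 -> 112 -> 56

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))   # 224 -> 112
        x = self.pool(F.relu(self.conv2(x)))   # 112 -> 56
        x = x.view(x.size(0), -1)              # flatten the feature maps
        return self.fc(x)

print(TinyNetSketch()(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 133])
###Output
_____no_output_____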
###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0.
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning) You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model Architecture Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model (uncomment the lines below once the pieces above are implemented) # model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the Model Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your Algorithm Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither pass ###Output _____no_output_____ ###Markdown --- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png) In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road Ahead We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import Datasets Make sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. 
You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.
__Answer:__ (You can print out your results and/or write your percentages in this cell)
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
def assess_face_detector_performance(images):
    """
    Given an iterable of image paths, returns the percentage of images
    in which a face was detected.
    """
    count = 0
    for image in images:
        count += face_detector(image)  # a boolean behaves like an int in Python
    return count / len(images) * 100

def print_face_detector_performance(percentage, dataset_name):
    """
    Given a performance percentage (float) and a dataset name, prints a
    message with these data to the standard output.
    """
    print(f'Performance of the face detector algorithm on the images in {dataset_name} is {percentage}.')

print_face_detector_performance(assess_face_detector_performance(human_files_short), 'human_files_short')
print_face_detector_performance(assess_face_detector_performance(dog_files_short), 'dog_files_short')
###Output
_____no_output_____
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model.
The output should always be an integer between 0 and 999, inclusive.
Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image

    return None # predicted class index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.

    return None # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
###Output
_____no_output_____
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging.
To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.
Brittany | Welsh Springer Spaniel
- | -
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Curly-Coated Retriever | American Water Spaniel
- | -
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**:
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN

    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = None

### TODO: select optimizer
optimizer_scratch = None
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased

    # return trained model
    return model

# train the model
model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
_____no_output_____
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
###Code
## TODO: Specify data loaders
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture
Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.
__Answer:__
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
criterion_transfer = None
optimizer_transfer = None
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code
# train the model (set n_epochs, then uncomment the line below)
# model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model
Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None
###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.
Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    ## (one possible minimal sketch using the detectors defined above)
    if dog_detector(img_path):
        print('Dog detected! Predicted breed:', predict_breed_transfer(img_path))
    elif face_detector(img_path):
        print('Human detected! Resembling dog breed:', predict_breed_transfer(img_path))
    else:
        print('Error: neither a dog nor a human face was detected.')
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement)
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks
Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**.
Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.
--- Why We're Here
In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
![Sample Dog Output](images/sample_dog_output.png)
In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!
The Road Ahead
We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Import Datasets
* [Step 1](#step1): Detect Humans
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Write your Algorithm
* [Step 6](#step6): Test Your Algorithm
--- Step 0: Import Datasets
Make sure that you've downloaded the required human and dog datasets:
* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`.
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*
In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
_____no_output_____
###Markdown
Step 1: Detect Humans
In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory.
In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output
_____no_output_____
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.
Write a Human Face Detector
We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector
__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?
Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.
__Answer:__ (You can print out your results and/or write your percentages in this cell)
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
###Output
_____no_output_____
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :).
Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.
Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image

    return None # predicted class index
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks
Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document.
Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.
--- Why We're Here
In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
![Sample Dog Output](images/sample_dog_output.png)
In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!
The Road Ahead
We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Import Datasets
* [Step 1](#step1): Detect Humans
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Write your Algorithm
* [Step 6](#step6): Test Your Algorithm
--- Step 0: Import Datasets
Make sure that you've downloaded the required human and dog datasets:
**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.**
* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`.
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*
In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("/data/lfw/*/*"))
dog_files = np.array(glob("/data/dog_images/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))

dog_files
###Output
_____no_output_____
###Markdown
Step 1: Detect Humans
In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output
Number of faces detected: 1
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.
Write a Human Face Detector
We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector
__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?
Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face.
You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.
__Answer:__ A human face was detected in 98% of the human images and in about 17% of the dog images.
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
true_human = 0
true_dog = 0
for i in tqdm(range(100)):
    if face_detector(human_files_short[i]):
        true_human += 1
    if face_detector(dog_files_short[i]):
        true_dog += 1

print('Human Files: %human faces detected = {:5f}'.format(true_human/100))
print('Dog Files: %human faces detected = {:5f}'.format(true_dog/100))
###Output
100%|██████████| 100/100 [00:31<00:00, 3.18it/s]
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth
100%|██████████| 553433881/553433881 [00:07<00:00, 78052158.36it/s]
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.
Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image

    # load the image with PIL (RGB)
    img = Image.open(img_path)
    # resize and normalize
    data_transforms = transforms.Compose([
        transforms.Resize((224, 224)),  # to make non-square images fit
        transforms.ToTensor(),          # standardizes to [0,1] as well
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    img = data_transforms(img)
    # add batch dimension as dummy
    img = img.unsqueeze(0)
    # prediction step and extract class index
    if use_cuda:
        img = img.cuda()
    prediction = VGG16(img)
    if use_cuda:
        prediction = prediction.cpu()
    idx = prediction.data.numpy().argmax()

    return idx # predicted class index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).
Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    breed = VGG16_predict(img_path)
    return (breed >= 151) and (breed <= 268)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
__Answer:__ A dog was detected in about 1% of the images in `human_files_short` and in 100% of the images in `dog_files_short`.
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
true_human = 0
true_dog = 0
for i in tqdm(range(100)):
    if dog_detector(human_files_short[i]):
        true_human += 1
    if dog_detector(dog_files_short[i]):
        true_dog += 1

print('Human Files: %dogs detected = {:5f}'.format(true_human/100))
print('Dog Files: %dogs detected = {:5f}'.format(true_dog/100))
###Output
100%|██████████| 100/100 [00:07<00:00, 14.17it/s]
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
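
### One possible sketch for this optional task (an assumption added for
### illustration, not the author's code): a ResNet-50 based dog detector
### analogous to dog_detector above, reusing the ImageNet dog index range
### 151-268 and the pre-processing from VGG16_predict.
resnet50 = models.resnet50(pretrained=True)
resnet50.eval()
if use_cuda:
    resnet50 = resnet50.cuda()

def resnet50_dog_detector(img_path):
    # same pre-processing as in VGG16_predict above
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    img = transform(Image.open(img_path)).unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    idx = resnet50(img).data.cpu().numpy().argmax()
    # ImageNet dog categories span indices 151-268 (inclusive)
    return 151 <= idx <= 268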
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.
Brittany | Welsh Springer Spaniel
- | -
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Curly-Coated Retriever | American Water Spaniel
- | -
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import torch
import glob
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from torchvision import datasets, transforms

# to avoid truncation errors
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# check if CUDA is available
use_cuda = torch.cuda.is_available()

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
# torch.manual_seed(1984)
data_dir = '/data/dog_images/'
train_dir = os.path.join(data_dir, 'train/')
valid_dir = os.path.join(data_dir, 'valid/')
test_dir = os.path.join(data_dir, 'test/')

# transformations to augment the images
train_data_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
test_data_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
raw_data_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor()
])

train_data = datasets.ImageFolder(train_dir, transform=train_data_transform)
valid_data = datasets.ImageFolder(valid_dir, transform=test_data_transform)
test_data = datasets.ImageFolder(test_dir, transform=test_data_transform)
raw_data = datasets.ImageFolder(train_dir, transform=raw_data_transform)

# define dataloader
batch_size = 64
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False)
raw_loader = torch.utils.data.DataLoader(raw_data, batch_size=batch_size, shuffle=False)

loaders_scratch = {'train': train_loader, 'valid': valid_loader, 'test': test_loader}
###Output
_____no_output_____
###Markdown
check shape of images
raw_loader = torch.utils.data.DataLoader(raw_data, batch_size=batch_size, shuffle=False)
too_small = 0
for img_path in dog_files:
    img = cv2.imread(img_path)
    if img.shape[0] < 199 or img.shape[1] < 199:
        print(img.shape, img_path)
        too_small += 1
print('Total too small', too_small)
###Code
classes = [item[27:-1] for item in sorted(glob.glob('/data/dog_images/train/*/'))]
# classes

dataiter = iter(raw_loader)
images, labels = next(dataiter)
images = images.numpy()  # convert images to numpy for display

# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(min(batch_size, 20)):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
    plt.imshow(np.transpose(images[idx], (1, 2, 0)))
    ax.set_title(classes[labels[idx]])
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**: Resize to 224x224, which is also the VGG-16 input size, to allow a better performance comparison (resizing is needed because the images come in very different shapes). Augmentation by horizontal flipping and random rotation adds some degree of variation while avoiding upside-down images (which would be very different from the test cases). Finally, the pixel values are normalized.
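###Markdown
A short aside on the `Normalize()` constants used above: the (0.5, 0.5, 0.5) values are a fixed guess. As a minimal sketch (an assumption, not code from the original run), per-channel statistics could instead be estimated from the training images themselves, using the un-normalized `raw_loader` defined above:
###Code
# minimal sketch (assumption): estimate per-channel mean/std of the training
# images; raw_loader yields un-normalized tensors with values in [0, 1]
mean = torch.zeros(3)
std = torch.zeros(3)
n_batches = 0
for images, _ in raw_loader:
    # flatten H and W, then reduce over pixels and over the batch
    flat = images.view(images.size(0), 3, -1)
    mean += flat.mean(2).mean(0)
    std += flat.std(2).mean(0)
    n_batches += 1
print('per-channel mean:', mean / n_batches)
print('per-channel std: ', std / n_batches)
###Output
_____no_output_____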
###Markdown
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # input (224, 224)
        self.conv1 = nn.Conv2d(  3,  64, 3, padding=1)
        self.conv2 = nn.Conv2d( 64, 128, 3, padding=1)
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv4 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv5 = nn.Conv2d(256, 256, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(7 * 7 * 256, 1000)
        self.fc2 = nn.Linear(1000, 500)
        self.fc3 = nn.Linear(500, 133)
        self.drop = nn.Dropout(p=0.3)

    def forward(self, x):
        # Convolution Layers - Feature Maps
        x = self.conv1(x)
        x = F.relu(self.pool(x))  # 112 x 112
        x = self.conv2(x)
        x = F.relu(self.pool(x))  # 56 x 56
        x = self.conv3(x)
        x = F.relu(self.pool(x))  # 28 x 28
        x = self.conv4(x)
        x = F.relu(self.pool(x))  # 14 x 14
        x = self.conv5(x)
        x = F.relu(self.pool(x))  # 7 x 7
        # Classification Layers
        x = x.view(-1, 7 * 7 * 256)
        x = F.relu(self.fc1(x))
        x = self.drop(x)
        x = F.relu(self.fc2(x))
        x = self.drop(x)
        x = self.fc3(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__ The network has the typical structure: first, several convolutional layers (with an increasing number of channels) combined with max pooling to create feature maps; second, a classification part implemented with three dense layers. The topology is guided by the paper of Simonyan and Zisserman, 2015, 'Very Deep Convolutional Networks for Large-Scale Image Recognition'. Dropout layers reduce the tendency to overfit.
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### select loss function
criterion_scratch = nn.CrossEntropyLoss()

### select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
from tqdm import tqdm

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            optimizer.zero_grad()
            # forward pass
            output = model(data)
            loss = criterion(output, target)
            # backward pass
            loss.backward()
            optimizer.step()
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            output = model(data)
            loss = criterion(output, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                valid_loss), save_path)
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

# train the model
model_scratch = train(20, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
Epoch: 1 Training Loss: 4.882775 Validation Loss: 4.874159
Validation loss decreased (inf --> 4.874159). Saving model ... model_scratch.pt
Epoch: 2 Training Loss: 4.758156 Validation Loss: 4.699181
Validation loss decreased (4.874159 --> 4.699181). Saving model ... model_scratch.pt
Epoch: 3 Training Loss: 4.575248 Validation Loss: 4.565222
Validation loss decreased (4.699181 --> 4.565222). Saving model ... model_scratch.pt
Epoch: 4 Training Loss: 4.418465 Validation Loss: 4.384127
Validation loss decreased (4.565222 --> 4.384127). Saving model ... model_scratch.pt
Epoch: 5 Training Loss: 4.266993 Validation Loss: 4.276762
Validation loss decreased (4.384127 --> 4.276762). Saving model ... model_scratch.pt
Epoch: 6 Training Loss: 4.128971 Validation Loss: 4.196285
Validation loss decreased (4.276762 --> 4.196285). Saving model ... model_scratch.pt
Epoch: 7 Training Loss: 4.001355 Validation Loss: 4.093141
Validation loss decreased (4.196285 --> 4.093141). Saving model ... model_scratch.pt
Epoch: 8 Training Loss: 3.902557 Validation Loss: 3.986703
Validation loss decreased (4.093141 --> 3.986703). Saving model ... model_scratch.pt
Epoch: 9 Training Loss: 3.808915 Validation Loss: 3.939519
Validation loss decreased (3.986703 --> 3.939519). Saving model ... model_scratch.pt
Epoch: 10 Training Loss: 3.734483 Validation Loss: 3.913157
Validation loss decreased (3.939519 --> 3.913157). Saving model ... model_scratch.pt
Epoch: 11 Training Loss: 3.628653 Validation Loss: 3.913089
Validation loss decreased (3.913157 --> 3.913089). Saving model ... model_scratch.pt
Epoch: 12 Training Loss: 3.540115 Validation Loss: 3.771245
Validation loss decreased (3.913089 --> 3.771245). Saving model ... model_scratch.pt
Epoch: 13 Training Loss: 3.473625 Validation Loss: 3.712603
Validation loss decreased (3.771245 --> 3.712603). Saving model ... model_scratch.pt
Epoch: 14 Training Loss: 3.398351 Validation Loss: 3.704624
Validation loss decreased (3.712603 --> 3.704624). Saving model ... model_scratch.pt
Epoch: 15 Training Loss: 3.295624 Validation Loss: 3.832102
Epoch: 16 Training Loss: 3.207484 Validation Loss: 3.619578
Validation loss decreased (3.704624 --> 3.619578). Saving model ... model_scratch.pt
Epoch: 17 Training Loss: 3.113571 Validation Loss: 3.616456
Validation loss decreased (3.619578 --> 3.616456). Saving model ... model_scratch.pt
Epoch: 18 Training Loss: 3.005864 Validation Loss: 3.668482
Epoch: 19 Training Loss: 2.931728 Validation Loss: 3.611060
Validation loss decreased (3.616456 --> 3.611060). Saving model ... model_scratch.pt
Epoch: 20 Training Loss: 2.834457 Validation Loss: 3.736923
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
Test Loss: 3.716374

Test Accuracy: 15% (126/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
###Code
## TODO: Specify data loaders
import torch
import os
import numpy as np
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from PIL import ImageFile
%matplotlib inline

# to avoid truncate error
ImageFile.LOAD_TRUNCATED_IMAGES = True

# check if CUDA is available
use_cuda = torch.cuda.is_available()

data_dir = '/data/dog_images/'
train_dir = os.path.join(data_dir, 'train/')
valid_dir = os.path.join(data_dir, 'valid/')
test_dir = os.path.join(data_dir, 'test/')

# transformations to augment the images
train_data_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_data_transform = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

train_data = datasets.ImageFolder(train_dir, transform=train_data_transform)
valid_data = datasets.ImageFolder(valid_dir, transform=test_data_transform)
test_data = datasets.ImageFolder(test_dir, transform=test_data_transform)

# define dataloader
batch_size = 64
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False)

loaders_transfer = {'train': train_loader, 'valid': valid_loader, 'test': test_loader}
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture

Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## Specify model architecture
model_transfer = models.vgg16(pretrained=True)

# Freezing training for all "features" layers
for param in model_transfer.features.parameters():
    param.requires_grad = False

# adjust final classification layer to expected size
model_transfer.classifier[6] = nn.Linear(4096, 133)
for param in model_transfer.classifier.parameters():
    param.requires_grad = True

if use_cuda:
    model_transfer = model_transfer.cuda()

model_transfer
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem.

__Answer:__ The basis is the pretrained VGG16 model, which shows good overall performance in image recognition. The weights of the model's 'features' part are frozen to avoid retraining them, leaving weight changes only to the classification part. The final classification layer (output layer) is changed to the number of dog breed classes (133).

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
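Before that, an optional check (a minimal sketch, assuming the `model_transfer` defined above) can confirm that only the classifier parameters will be updated:

###Code
# Optional check (sketch): count trainable vs. frozen parameters to confirm
# that only the classifier part of the network will receive gradient updates.
trainable = sum(p.numel() for p in model_transfer.parameters() if p.requires_grad)
frozen = sum(p.numel() for p in model_transfer.parameters() if not p.requires_grad)
print('trainable parameters: {:,}'.format(trainable))
print('frozen parameters:    {:,}'.format(frozen))
###Output
_____no_output_____
###Markdown
With the parameter freeze confirmed, the loss function and optimizer are specified next.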
###Code import torch.optim as optim ### select loss function criterion_transfer = nn.CrossEntropyLoss() ### select optimizer / train only classifier parameters optimizer_transfer = optim.Adam(model_transfer.classifier.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model n_epochs = 5 model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 1.963844 Validation Loss: 1.106330 Validation loss decreased (inf --> 1.106330). Saving model ... model_transfer.pt Epoch: 2 Training Loss: 1.917062 Validation Loss: 0.989824 Validation loss decreased (1.106330 --> 0.989824). Saving model ... model_transfer.pt Epoch: 3 Training Loss: 1.801205 Validation Loss: 1.082989 Epoch: 4 Training Loss: 1.803990 Validation Loss: 1.135736 Epoch: 5 Training Loss: 1.876270 Validation Loss: 1.024796 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 1.062647 Test Accuracy: 73% (614/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. from PIL import Image # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) model_transfer.eval() # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in train_data.classes] # class_names = [item for item in train_data.classes] data_transforms = transforms.Compose([ transforms.Resize((224, 224)), # to make non-square images fit transforms.ToTensor(), # standardizes to [0,1] as well transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) def predict_breed_transfer(img_path): # load the image and return the predicted breed img = Image.open(img_path) # normalize img = data_transforms(img) # add batch dimension as dummy img = img.unsqueeze(0) # prediciton step and extract class index if use_cuda: img = img.cuda() prediction = model_transfer(img) if use_cuda: prediction = prediction.cpu() idx = prediction.data.numpy().argmax() return class_names[idx] # predicted class index predict_breed_transfer('/data/dog_images/train/059.Doberman_pinscher/Doberman_pinscher_04199.jpg') ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. 
Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.

Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
%matplotlib inline

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    print("-------------------------------")
    img = Image.open(img_path)
    plt.imshow(img)
    plt.show()
    if face_detector(img_path):
        print("Hello, Human!")
    elif dog_detector(img_path):
        print("Hello, Dog!")
    else:
        print("Oh, who are you? I cannot recognize you :-(")
        return
    print("You look like a ", predict_breed_transfer(img_path))
    return
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm

In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!

Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ The overall distinction between humans and dogs seems to work fairly reliably. However, the dog breed identification is still not good enough. We suggest the following improvements:

* More labelled dog breed images would certainly be beneficial to improve the performance of model_transfer. At the moment we use only a limited number of images for training.
* The training data could also be further cleaned. Some images include humans next to dogs, which might create noise for the CNN.
* Further tuning (number of units per layer, number of layers) of the classification layers within the model_transfer CNN could also lead to a performance increase.
* Other image-recognition CNNs could be tested for performance comparison to find a better base model for transfer learning.
* Fine-tuning could be another approach to improve the existing model_transfer CNN. This would however need more compute time and would also benefit from more training data.
* More radically, a complete re-training of the adjusted VGG network could also lead to better performance. For this approach a larger training dataset is a must.

###Code
from glob import glob

new_images = glob('./upload/*')
for img_path in new_images:
    run_app(img_path)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks

Project: Write an Algorithm for a Dog Identification App

---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested.
Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). 
Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. 
###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ * 98 % of images in `human_files_short` have a detected human face* 17 % of images in `dog_files_short` have a detected human face ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. face_detected_human_files = 0 face_detected_dog_files = 0 for i in range(len(human_files_short)): if (face_detector(human_files_short[i])): face_detected_human_files+=1 if (face_detector(dog_files_short[i])): face_detected_dog_files+=1 print(face_detected_human_files, '% of images in `human_files_short` have a detected human face') print(face_detected_dog_files, '% of images in `dog_files_short` have a detected human face') ###Output 98 % of images in `human_files_short` have a detected human face 17 % of images in `dog_files_short` have a detected human face ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of anotherface detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). 
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image img = Image.open(img_path) data_transform = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) img = data_transform(img) img = img.unsqueeze(0) if use_cuda: img=img.cuda() prediction = VGG16(img) max_value, max_index = torch.max(prediction,1) return max_index.item() # predicted class index print(VGG16_predict ('images/Curly-coated_retriever_03896.jpg')) ###Output 206 ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. item_index = VGG16_predict(img_path) min_dog_index =151 max_dog_index =268 if (item_index>=min_dog_index and item_index <=max_dog_index): return True return False print(dog_detector('images/Curly-coated_retriever_03896.jpg')) ###Output True ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? 
__Answer:__
* 1 % of images in `human_files_short` have a detected dog
* 100 % of images in `dog_files_short` have a detected dog

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
dog_detected_human_files = 0
dog_detected_dog_files = 0

for i in range(len(human_files_short)):
    if (dog_detector(human_files_short[i])):
        dog_detected_human_files+=1
    if (dog_detector(dog_files_short[i])):
        dog_detected_dog_files+=1

print(dog_detected_human_files, '% of images in `human_files_short` have a detected dog')
print(dog_detected_dog_files, '% of images in `dog_files_short` have a detected dog')
###Output
1 % of images in `human_files_short` have a detected dog
100 % of images in `dog_files_short` have a detected dog
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)

Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany | Welsh Springer Spaniel (comparison images omitted)

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever | American Water Spaniel (comparison images omitted)

Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.

Yellow Labrador | Chocolate Labrador | Black Labrador (comparison images omitted)

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.

Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset

Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively).
You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
batch_size = 20
num_workers=0

train_dir = '/data/dog_images/train'
valid_dir = '/data/dog_images/valid'
test_dir = '/data/dog_images/test'

data_transform = transforms.Compose([transforms.Resize(size=(224, 224)),
                                     transforms.ToTensor(),
                                     transforms.Normalize((0.485, 0.456, 0.406),
                                                          (0.229, 0.224, 0.225))])

train_data = datasets.ImageFolder(train_dir, transform=data_transform)
valid_data = datasets.ImageFolder(valid_dir, transform=data_transform)
test_data = datasets.ImageFolder(test_dir, transform=data_transform)

#prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**:
* I resized every image to 224x224 with no cropping; this only stretches (scales) the image to the dimensions expected by the model (224x224 is the standard input size for VGG-style networks).
* I did not use data augmentation to increase the dataset size and make the model more robust; the model worked as expected with the data provided.

(IMPLEMENTATION) Model Architecture

Create a CNN to classify dog breed. Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 16, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, stride=2, padding=1)
        self.conv4 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv4_bn = nn.BatchNorm2d(128)
        self.fc5 = nn.Linear(128 * 3 * 3, 512)
        self.fc6_bn = nn.BatchNorm1d(512)
        self.fc7 = nn.Linear(512, 133)
        self.dropout = nn.Dropout(0.25)
        self.pool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        ## Define forward behavior
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        #print (x.shape)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        #print (x.shape)
        x = F.relu(self.conv3(x))
        x = self.pool(x)
        #print (x.shape)
        x = F.relu(self.conv4(x))
        x = self.conv4_bn(x)
        #print (x.shape)
        # flatten
        x = x.view(-1, 128 * 3 * 3)
        #print (x.shape)
        x = self.dropout(x)
        x = self.fc5(x)
        x = F.relu(self.fc6_bn(x))
        x = self.dropout(x)
        x = self.fc7(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#
# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

model_scratch
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__
* I first checked some bigger architectures (a smaller version of VGG16, with max-pooling after the second layer of each block where the size changed); however, training from scratch was taking a lot of time.
* I then checked a smaller architecture, in this case a stack of convolutional layers and max-pooling, applying batch normalization (2d) to standardize the inputs and hopefully accelerate training before the fully connected layers, plus batch normalization (1d) before the output.
* As activation function I used ReLU (rectified linear unit) because it is cheap to compute and it prevents the vanishing gradient problem.
* During training I observed overfitting (the training loss kept decreasing while the validation loss increased). This could be improved with an update to the architecture or the hyperparameters.

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(), lr = 0.005)

loaders_scratch = {'train':train_loader, 'valid':valid_loader, 'test':test_loader}
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
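A note on the loss bookkeeping in the training loop below: the update $\bar{L}_k = \bar{L}_{k-1} + \frac{1}{k}\left(L_k - \bar{L}_{k-1}\right)$ is the incremental form of the arithmetic mean, so after the last batch `train_loss` (and likewise `valid_loss`) holds the average per-batch loss for the epoch. This single update is sufficient on its own; additionally accumulating `loss.item() * data.size(0)` on top of it would double-count the loss.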
###Code
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            ## record the average training loss (incremental mean over batches)
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            output = model(data)
            loss = criterion(output, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('The validation loss has decreased from {:.6f} to {:.6f}; saving the model to {}'.format(
                valid_loss_min, valid_loss, save_path))
            valid_loss_min = valid_loss
            torch.save(model.state_dict(), save_path)

    # return trained model
    return model

# train the model
model_scratch = train(20, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code
def test(loaders, model, criterion, use_cuda):

    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.

    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100.
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.854588 Test Accuracy: 12% (103/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = loaders_scratch.copy() ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.vgg16(pretrained=True) # Freeze training for all "features" layers for param in model_transfer.features.parameters(): param.requires_grad = False ## new layers automatically have requires_grad = True n_inputs = model_transfer.classifier[6].in_features last_layer = nn.Linear(n_inputs, 133) model_transfer.classifier[6] = last_layer if use_cuda: model_transfer = model_transfer.cuda() print(model_transfer) ###Output VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, 
bias=True)
    (4): ReLU(inplace)
    (5): Dropout(p=0.5)
    (6): Linear(in_features=4096, out_features=133, bias=True)
  )
)
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem.

__Answer:__ I used VGG16: it matches the previously preprocessed data (224x224 RGB) and it has very good performance (it is a small CNN architecture compared to ResNet). It is also suitable for this problem because my final output is 133 classes, which is fewer than the original 1000 classes. I replaced the last fully-connected layer, so the final layer is: `Linear(in_features=4096, out_features=133, bias=True)`

(IMPLEMENTATION) Specify Loss Function and Optimizer

Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model

Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
# train the model
model_transfer = train(5, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the Model

Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.513443

Test Accuracy: 87% (730/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model

Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
data_transfer = {"train" : train_data, "valid" : valid_data, "test" : test_data}
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    data_transform = transforms.Compose([transforms.Resize(size=(224, 224)),
                                         transforms.ToTensor(),
                                         transforms.Normalize((0.485, 0.456, 0.406),
                                                              (0.229, 0.224, 0.225))])
    img = Image.open(img_path)
    img = data_transform(img)
    img = img.unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    prediction = model_transfer(img)
    breed_class = prediction.argmax()
    output = class_names[breed_class]
    return output
###Output
_____no_output_____
###Markdown
--- Step 5: Write your Algorithm

Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.
Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.

Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
from pylab import imread

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    image = imread(img_path)
    label = ''
    if dog_detector(img_path) == True:
        label = "Hello, dog!"
        breed = predict_breed_transfer(img_path)
        label += "\nYour predicted breed is... \n" + breed + "."
        classification = 'dog'
    elif face_detector(img_path) == True:
        label = "Hello, human!"
        breed = predict_breed_transfer(img_path)
        label += "\nYou look like... \n" + breed + "."
        classification = 'human'
    else:
        label = "Neither a dog nor a human in this image :("
        classification = 'none'
    return image, classification, label

img, classification, label = run_app('myImages/0.jpg')
print(label)
plt.imshow(img)
###Output
Hello, human!
You look like...
American water spaniel.
###Markdown
--- Step 6: Test Your Algorithm

In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!

Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ The results were good (I tried to use images similar to the ones provided); however, there is a bias toward the kind of images provided, and I didn't apply any augmentation technique that could help build a more robust model. If I test with more varied data, the results will not be as good. Some points of improvement are:

1. Use a face detector based on deep learning instead of OpenCV; it could be MTCNN.
2. Improve the training dataset by applying some augmentation techniques to the dog breed images.
3. Reduce the learning rate and run more epochs; this could help to get more accuracy (while validating that there is no overfitting).
4. Test other architectures for transfer learning (for instance ResNet) for performance comparison.
5. The app currently runs VGG16 twice: once to check whether the image contains a dog (based on the original ImageNet training data) and once to classify the breed; this could be joined into only one CNN.

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
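# The loop below runs run_app over the local myImages folders; each subplot
# title is colored green when the detected class matches the folder, red otherwise.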
## suggested code, below
human_files = np.array(glob("myImages/human_files/*"))
dog_files = np.array(glob("myImages/dog_files/*"))

fig = plt.figure(figsize=(50, 3))
for idx in np.arange(len(human_files)):
    img, classification, label = run_app(human_files[idx])
    ax = fig.add_subplot(1, 20//2, idx+1, xticks=[], yticks=[])
    plt.imshow(img)
    ax.set_title("{}".format(label),
                 color=("green" if 'human'==classification else "red"))

fig = plt.figure(figsize=(50, 3))
for idx in np.arange(len(dog_files)):
    img, classification, label = run_app(dog_files[idx])
    ax = fig.add_subplot(1, 20//2, idx+1, xticks=[], yticks=[])
    plt.imshow(img)
    ax.set_title("{}".format(label),
                 color=("green" if 'dog'==classification else "red"))
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks

Project: Write an Algorithm for a Dog Identification App

---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.

>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.

The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.

--- Why We're Here

In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output
Number of faces detected: 1
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter.

In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box.

Write a Human Face Detector

We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.

###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector

__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?

Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.

__Answer:__ As the output below shows, 98% of the first 100 human images and 17% of the first 100 dog images have a detected human face.

###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
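# Count, over the first 100 images of each set, how many trigger the Haar face detector.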
human_faces_detected = 0
for human_file in human_files_short:
    face_detected = face_detector(human_file)
    if face_detected:
        human_faces_detected += 1
print("Detected ", human_faces_detected, "% faces in humans, goal 100%")

human_faces_detected = 0
for dog_file in dog_files_short:
    face_detected = face_detector(dog_file)
    if face_detected:
        human_faces_detected += 1
print("Detected ", human_faces_detected, "% faces in dogs, goal 0%")

###Output

Detected 98 % faces in humans, goal 100%
Detected 17 % faces in dogs, goal 0%

###Markdown

We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code

### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output

_____no_output_____

###Markdown

--- Step 2: Detect Dogs In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code

import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output

_____no_output_____

###Markdown

Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code

from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image transform = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) img_pil = Image.open(img_path) img_tensor = transform(img_pil) img_tensor.unsqueeze_(0) if use_cuda: img_tensor = img_tensor.cuda() result = VGG16(img_tensor) if use_cuda: return result.cpu().data.numpy().argmax() else: return result.data.numpy().argmax() ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. index = VGG16_predict(img_path) return (index >= 151 and index <= 268) # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. dogs_detected = 0 for file in human_files_short: dog_detected = dog_detector(file) if dog_detected: dogs_detected += 1 print("Detected ", dogs_detected, "% dogs in humans, goal 0%") dogs_detected = 0 for file in dog_files_short: dog_detected = dog_detector(file) if dog_detected: dogs_detected += 1 print("Detected ", dogs_detected, "% dogs in dogs, goal 100%") ###Output Detected 0 % dogs in humans, goal 0% Detected 100 % dogs in dogs, goal 100% ###Markdown WOW 0_0 GPU > CPU We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. 
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. (Comparison images of a Brittany and a Welsh Springer Spaniel omitted.) It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). (Comparison images of a Curly-Coated Retriever and an American Water Spaniel omitted.) Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. (Images of yellow, chocolate, and black Labradors omitted.) We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code

import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

train_dataset = datasets.ImageFolder('dogImages/train', transform)
valid_dataset = datasets.ImageFolder('dogImages/valid', transform)
test_dataset = datasets.ImageFolder('dogImages/test', transform)

# a batch size of 20 and shuffling for training are reasonable defaults
# (assumed here; any modest batch size works)
train_data = torch.utils.data.DataLoader(train_dataset, batch_size=20, shuffle=True)
valid_data = torch.utils.data.DataLoader(valid_dataset, batch_size=20)
test_data = torch.utils.data.DataLoader(test_dataset, batch_size=20)

loaders_scratch = {
    'train': train_data,
    'valid': valid_data,
    'test': test_data
}

###Output

_____no_output_____

###Markdown

**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model Architecture Create a CNN to classify dog breed. Use the template in the code cell below.
###Code

import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN

    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output

_____no_output_____

###Markdown

__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code

import torch.optim as optim

### TODO: select loss function
criterion_scratch = None

### TODO: select optimizer
optimizer_scratch = None

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.

###Code

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased

    # return trained model
    return model

# train the model
model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code

def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output

_____no_output_____

###Markdown

--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning) You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code

## TODO: Specify data loaders

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Model Architecture Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code

import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture

if use_cuda:
    model_transfer = model_transfer.cuda()

###Output

_____no_output_____

###Markdown

__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code

criterion_transfer = None
optimizer_transfer = None

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code

# train the model (define n_epochs, then uncomment the line below)
# model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
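Before running the test cell, an optional hedged aside (not part of the required template): overall accuracy can hide weak breeds, so once `loaders_transfer` and a trained `model_transfer` have been filled in above, a per-class breakdown shows which breeds the network struggles with. The helper below is a sketch under that assumption; the name `per_class_accuracy` is illustrative, not part of the project API.

###Code

# Hedged sketch: per-breed accuracy, assuming a trained model and the
# dictionary-of-loaders structure used elsewhere in this notebook.
def per_class_accuracy(loader, model, n_classes=133):
    correct = np.zeros(n_classes)
    total = np.zeros(n_classes)
    model.eval()
    for data, target in loader:
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # predicted class index for each image in the batch
        pred = model(data).data.max(1)[1]
        for t, p in zip(target.data.cpu().numpy(), pred.cpu().numpy()):
            total[t] += 1
            correct[t] += int(t == p)
    # avoid division by zero for classes absent from the loader
    return correct / np.maximum(total, 1)

# example usage (uncomment once the transfer model is trained):
# acc = per_class_accuracy(loaders_transfer['test'], model_transfer)
# print('Weakest breed index:', acc.argmin())

###Output

_____no_output_____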
###Code

test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Predict Dog Breed with the Model Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code

### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None

###Output

_____no_output_____

###Markdown

--- Step 5: Write your Algorithm Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code

### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    pass  # placeholder so the template cell runs before the TODO is filled in

###Output

_____no_output_____

###Markdown

--- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)

###Code

## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)

###Output

_____no_output_____

###Markdown

Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App --- In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. 
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. 
- What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell)

###Code

from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
human_count, dog_count = 0, 0
for i in range(100):
    if face_detector(human_files_short[i]):
        human_count += 1
    if face_detector(dog_files_short[i]):
        dog_count += 1
print("Percentage of human images with a detected face: {}%".format(human_count))
print("Percentage of dog images with a detected face: {}%".format(dog_count))

###Output

Percentage of human images with a detected face: 98%
Percentage of dog images with a detected face: 17%

###Markdown

We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code

### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output

_____no_output_____

###Markdown

--- Step 2: Detect Dogs In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code

import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output

Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth
100%|██████████| 553433881/553433881 [00:05<00:00, 100193294.93it/s]

###Markdown

Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model.
The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code

from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    # force 3 channels so grayscale/RGBA files do not break the normalization
    image = Image.open(img_path).convert('RGB')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    normalize])
    img_t = transform(image)
    if use_cuda:  # only move the tensor to the GPU when one is available
        img_t = img_t.cuda()
    data = img_t.unsqueeze(0)
    VGG16.eval()
    predict_v = VGG16(data)
    predict = predict_v.data.cpu().argmax()
    return predict # predicted class index

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Write a Dog Detector While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code

### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    result = VGG16_predict(img_path)
    return result >= 151 and result <= 268 # true/false

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Assess the Dog Detector __Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ - Percentage of human images with a detected dog: 0%- Percentage of dog images with a detected dog: 100%

###Code

print(human_files_short[0])

### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
dog_in_human = 0
dog_in_dog = 0
for i in range(100):
    if dog_detector(human_files_short[i]):
        dog_in_human += 1
    if dog_detector(dog_files_short[i]):
        dog_in_dog += 1
print("Percentage of human images with a detected dog: {}%".format(dog_in_human))
print("Percentage of dog images with a detected dog: {}%".format(dog_in_dog))

###Output

Percentage of human images with a detected dog: 0%
Percentage of dog images with a detected dog: 100%

###Markdown

We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc).
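As a hedged sketch of one such alternative (not part of the original submission), the same index-range check can be wrapped around ResNet-50, since every torchvision classifier shares the ImageNet class indices and therefore the same 151-268 dog range. The function name `resnet50_dog_detector` is illustrative.

###Code

# Hedged sketch: a ResNet-50 variant of dog_detector, assuming the same
# ImageNet preprocessing and the 151-268 dog index range used above.
resnet50 = models.resnet50(pretrained=True)
if use_cuda:
    resnet50 = resnet50.cuda()
resnet50.eval()

def resnet50_dog_detector(img_path):
    image = Image.open(img_path).convert('RGB')
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    tensor = transform(image).unsqueeze(0)
    if use_cuda:
        tensor = tensor.cuda()
    idx = resnet50(tensor).data.cpu().argmax().item()
    return 151 <= idx <= 268

###Output

_____no_output_____

###Markdown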
Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code

### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output

_____no_output_____

###Markdown

--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch) Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. (Comparison images of a Brittany and a Welsh Springer Spaniel omitted.) It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). (Comparison images of a Curly-Coated Retriever and an American Water Spaniel omitted.) Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. (Images of yellow, chocolate, and black Labradors omitted.) We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
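Before writing the loaders, a short hedged aside: one way to eyeball what an augmentation pipeline will do is to apply the PIL-level transforms (everything before `ToTensor`) to a single training image several times and plot the results. The particular flip/rotation combination below is an illustrative guess, not the project's required configuration.

###Code

# Hedged sketch: preview a candidate augmentation pipeline on one dog image.
# Without ToTensor(), the composed transforms map a PIL image to a PIL image,
# so the samples can be passed straight to plt.imshow.
preview = transforms.Compose([transforms.Resize(256),
                              transforms.CenterCrop(224),
                              transforms.RandomHorizontalFlip(),
                              transforms.RandomRotation(15)])

sample = Image.open(dog_files[0]).convert('RGB')
fig = plt.figure(figsize=(12, 3))
for i in range(4):
    ax = fig.add_subplot(1, 4, i + 1)
    ax.imshow(preview(sample))  # each call draws new random flip/rotation parameters
    ax.axis('off')
plt.show()

###Output

_____no_output_____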
###Code

import os
import torchvision.transforms as transforms
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

train_dir = '/data/dog_images/train/'
test_dir = '/data/dog_images/test/'
valid_dir = '/data/dog_images/valid/'

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
image_size = 224

data_transforms = {}
data_transforms['train'] = transforms.Compose([transforms.Resize(image_size),
                                               transforms.CenterCrop(image_size),
                                               transforms.RandomHorizontalFlip(),
                                               transforms.RandomRotation(15),
                                               transforms.ToTensor(),
                                               normalize])
data_transforms['valid'] = transforms.Compose([transforms.Resize(image_size),
                                               transforms.CenterCrop(image_size),
                                               transforms.ToTensor(),
                                               normalize])
data_transforms['test'] = transforms.Compose([transforms.Resize(image_size),
                                              transforms.CenterCrop(image_size),
                                              transforms.ToTensor(),
                                              normalize])

data_datasets = {}
data_datasets['train'] = datasets.ImageFolder(train_dir, data_transforms['train'])
data_datasets['valid'] = datasets.ImageFolder(valid_dir, data_transforms['valid'])
data_datasets['test'] = datasets.ImageFolder(test_dir, data_transforms['test'])

batch_size = 10
num_workers = 0
data_loaders = {}
data_loaders['train'] = torch.utils.data.DataLoader(data_datasets['train'], batch_size = batch_size, num_workers = num_workers, shuffle=True)
data_loaders['valid'] = torch.utils.data.DataLoader(data_datasets['valid'], batch_size = batch_size, num_workers = num_workers, shuffle=True)
data_loaders['test'] = torch.utils.data.DataLoader(data_datasets['test'], batch_size = batch_size, num_workers = num_workers, shuffle=True)

print(len(data_datasets['train']))
print(len(data_datasets['valid']))
print(len(data_datasets['test']))

print(data_transforms)

###Output

{'train': Compose(
    Resize(size=224, interpolation=PIL.Image.BILINEAR)
    CenterCrop(size=(224, 224))
    RandomHorizontalFlip(p=0.5)
    RandomRotation(degrees=(-15, 15), resample=False, expand=False)
    ToTensor()
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
), 'valid': Compose(
    Resize(size=224, interpolation=PIL.Image.BILINEAR)
    CenterCrop(size=(224, 224))
    ToTensor()
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
), 'test': Compose(
    Resize(size=224, interpolation=PIL.Image.BILINEAR)
    CenterCrop(size=(224, 224))
    ToTensor()
    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
)}

###Markdown

**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:- I resize each image and then take a center crop. I picked 224x224 pixels as the input size because it is a good balance between accuracy and performance (and matches the standard ImageNet input size).- Yes, I augment the training set with random horizontal flips and random rotations of up to 15 degrees. (IMPLEMENTATION) Model Architecture Create a CNN to classify dog breed. Use the template in the code cell below.
###Code

import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2))
        self.drop_out = nn.Dropout(0.2)
        self.fc1 = nn.Linear(28*28*64, 1000)
        self.fc2 = nn.Linear(1000, 133)

    def forward(self, x):
        ## Define forward behavior
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = x.view(x.size(0), -1)
        x = self.drop_out(x)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.drop_out(x)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output

_____no_output_____

###Markdown

__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ - `layer1`, `layer2`, and `layer3` are three convolutional blocks. Each one increases the depth (3 -> 16 -> 32 -> 64), applies a ReLU activation, and then halves the spatial size of the feature maps with a max-pooling layer.- `view` flattens the feature maps before the fully connected layers.- A dropout layer with p=0.2 reduces overfitting.- At the end, two linear layers reduce the output to 133 values, one score per breed. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code

import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01)

###Output

_____no_output_____

###Markdown

(IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
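Before training, a quick hedged sanity check (assuming `model_scratch` and `use_cuda` from the cells above): three 2x2 max-pools shrink a 224x224 input to 112, then 56, then 28 pixels, so the flattened feature vector has 64*28*28 = 50176 entries, which is exactly what `nn.Linear(28*28*64, 1000)` expects. Passing one random batch through the network confirms the shapes line up.

###Code

# Hedged sketch: verify the forward pass with a dummy batch of two images.
dummy = torch.randn(2, 3, 224, 224)
if use_cuda:
    dummy = dummy.cuda()
model_scratch.eval()  # disable dropout for the shape check
output = model_scratch(dummy)
print(output.shape)  # expected: torch.Size([2, 133])

###Output

_____no_output_____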
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # clear the gradients of all optimized variables, initialize weights to zero optimizer.zero_grad() # forward pass output = model(data) # calculate batch loss loss = criterion(output, target) # backward pass loss.backward() # parameter update optimizer.step() # update training loss train_loss += loss.item() * data.size(0) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # forward pass output = model(data) # batch loss loss = criterion(output, target) # update validation loss valid_loss += loss.item() * data.size(0) # calculate average losses train_loss = train_loss/len(loaders['train'].dataset) valid_loss = valid_loss/len(loaders['valid'].dataset) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f})'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(15, data_loaders, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.391469 Validation Loss: 4.349211 Validation loss decreased (inf --> 4.349211) Epoch: 2 Training Loss: 4.246900 Validation Loss: 4.269958 Validation loss decreased (4.349211 --> 4.269958) Epoch: 3 Training Loss: 4.126513 Validation Loss: 4.269004 Validation loss decreased (4.269958 --> 4.269004) Epoch: 4 Training Loss: 4.015634 Validation Loss: 4.172959 Validation loss decreased (4.269004 --> 4.172959) Epoch: 5 Training Loss: 3.904706 Validation Loss: 4.094007 Validation loss decreased (4.172959 --> 4.094007) Epoch: 6 Training Loss: 3.763593 Validation Loss: 4.055620 Validation loss decreased (4.094007 --> 4.055620) Epoch: 7 Training Loss: 3.615709 Validation Loss: 4.193440 Epoch: 8 Training Loss: 3.456630 Validation Loss: 4.227403 Epoch: 9 Training Loss: 3.288442 Validation Loss: 4.194191 Epoch: 10 Training Loss: 3.092517 Validation Loss: 4.147779 Epoch: 11 Training Loss: 2.883555 Validation Loss: 4.159392 Epoch: 12 Training Loss: 2.679666 Validation Loss: 4.307968 Epoch: 13 Training Loss: 2.443467 Validation Loss: 4.452675 Epoch: 14 Training Loss: 2.205491 Validation Loss: 4.724467 Epoch: 15 Training Loss: 1.977079 Validation Loss: 4.856021 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset 
of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(data_loaders, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 4.056561 Test Accuracy: 9% (79/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
###Code ## TODO: Specify data loaders loaders_transfer = data_loaders data_transfer = data_datasets print(VGG16) ###Output VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (classifier): Sequential( (0): Linear(in_features=25088, out_features=4096, bias=True) (1): ReLU(inplace) (2): Dropout(p=0.5) (3): Linear(in_features=4096, out_features=4096, bias=True) (4): ReLU(inplace) (5): Dropout(p=0.5) (6): Linear(in_features=4096, out_features=1000, bias=True) ) ) ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.vgg16(pretrained=True) for param in model_transfer.features.parameters(): param.requires_grad = False model_transfer.classifier[6] = nn.Linear(model_transfer.classifier[6].in_features, 133) if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ - I found the classifier[6] is the last linear funtion in vgg16, so I just replace it with my linear function with 133 as output.- vgg16 is a good model to classify images, and I can use the pretrained model in vgg16. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. 
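One more hedged aside before choosing the loss and optimizer (not required by the template): counting trainable versus frozen parameters is a cheap way to confirm that setting `requires_grad = False` really froze the convolutional features and that only the classifier will be updated.

###Code

# Hedged sketch: tally frozen vs. trainable parameters in model_transfer.
trainable = sum(p.numel() for p in model_transfer.parameters() if p.requires_grad)
frozen = sum(p.numel() for p in model_transfer.parameters() if not p.requires_grad)
print('Trainable parameters: {}'.format(trainable))
print('Frozen parameters:    {}'.format(frozen))

###Output

_____no_output_____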
###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.01) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model n_epochs = 15 model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 1.563395 Validation Loss: 0.829224 Validation loss decreased (inf --> 0.829224) Epoch: 2 Training Loss: 0.876681 Validation Loss: 0.666382 Validation loss decreased (0.829224 --> 0.666382) Epoch: 3 Training Loss: 0.712009 Validation Loss: 0.686167 Epoch: 4 Training Loss: 0.603544 Validation Loss: 0.675482 Epoch: 5 Training Loss: 0.515811 Validation Loss: 0.687620 Epoch: 6 Training Loss: 0.476161 Validation Loss: 0.599185 Validation loss decreased (0.666382 --> 0.599185) Epoch: 7 Training Loss: 0.432296 Validation Loss: 0.596716 Validation loss decreased (0.599185 --> 0.596716) Epoch: 8 Training Loss: 0.358963 Validation Loss: 0.630651 Epoch: 9 Training Loss: 0.361185 Validation Loss: 0.577483 Validation loss decreased (0.596716 --> 0.577483) Epoch: 10 Training Loss: 0.315476 Validation Loss: 0.573029 Validation loss decreased (0.577483 --> 0.573029) Epoch: 11 Training Loss: 0.306634 Validation Loss: 0.703429 Epoch: 12 Training Loss: 0.275553 Validation Loss: 0.687414 Epoch: 13 Training Loss: 0.257099 Validation Loss: 0.627873 Epoch: 14 Training Loss: 0.272152 Validation Loss: 0.591930 Epoch: 15 Training Loss: 0.234158 Validation Loss: 0.651825 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.731236 Test Accuracy: 82% (693/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
# a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    image = Image.open(img_path).convert('RGB')
    prediction_transform = transforms.Compose([transforms.Resize(size=(224, 224)),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                    std=[0.229, 0.224, 0.225])])
    image = prediction_transform(image)[:3,:,:].unsqueeze(0)
    if use_cuda:
        image = image.cuda()
    predict = model_transfer(image)
    predict_idx = predict.data.cpu().argmax()
    return class_names[predict_idx]

predict_breed_transfer(data_transfer['train'].imgs[0][0])

###Output

_____no_output_____

###Markdown

--- Step 5: Write your Algorithm Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code

### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    image = Image.open(img_path)
    plt.imshow(image)
    plt.show()
    predicted_breed = predict_breed_transfer(img_path)
    if dog_detector(img_path):
        print("Dog detected! It looks like a {}".format(predicted_breed))
    elif face_detector(img_path):
        print("Human detected! You look like a {}".format(predicted_breed))
    else:
        print("Error! Neither a human nor a dog was detected.")

###Output

_____no_output_____

###Markdown

--- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) The detection results are better than I expected. Three possible points for improvement:- Add more convolutional layers- Use more augmentation (vertical flips, translations, crops toward the corners of the image, ...)- Train on a wider and more varied set of images

###Code

human_files = np.array(glob("./human/*"))
dog_files = np.array(glob("./dogs/*"))

## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. 
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. 
The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell)- 96% Human faces detected in first 100 of human_files- 18% Human faces detected in first 100 of dog_files ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. human_faces_detected = 0 for human in human_files_short: if face_detector(human): human_faces_detected += 1 print("Human Faces detected in human_files_short - {}%".format((human_faces_detected/len(human_files_short)*100))) dog_faces_detected = 0 for dog in dog_files_short: if face_detector(dog): dog_faces_detected += 1 print("Human Faces detected in dog_files_short {}%".format((dog_faces_detected/len(dog_files_short)*100))) ###Output Human Faces detected in human_files_short - 96.0% Human Faces detected in dog_files_short 18.0% ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). 
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code # check if CUDA is available train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('CUDA is not available. Training on CPU ...') else: print('CUDA is available! Training on GPU ...') from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image # first load image image = Image.open(img_path) # define transforms preprocess = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) img_tensor = preprocess(image) # resize tensor img_tensor.unsqueeze_(0) # check if gpu available if train_on_gpu: img_tensor = img_tensor.cuda() output = VGG16(img_tensor) # convert output probabilities to predicted class predicted_indx = output.data.numpy().argmax() if not train_on_gpu else output.data.cpu().numpy().argmax() return predicted_indx # predicted class index !bash -c "ls -ltr dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg" VGG16_predict('dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg') ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. 
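    # Note: ImageNet class indices 151-268 (inclusive) are the dog categories,
    # 'Chihuahua' through 'Mexican hairless', so any prediction in that range counts as a dog.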
    index = VGG16_predict(img_path)
    return 151 <= index <= 268 # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
dogs_in_human_files = 0
for human in human_files_short:
    if dog_detector(human):
        dogs_in_human_files += 1
print("Dog detected in human_files_short - {}%".format((dogs_in_human_files/len(human_files_short)*100)))

dogs_in_dog_files = 0
for dog in dog_files_short:
    if dog_detector(dog):
        dogs_in_dog_files += 1
print("Dog detected in dog_files_short {}%".format((dogs_in_dog_files/len(dog_files_short)*100)))
###Output
Dog detected in human_files_short - 0.0%
Dog detected in dog_files_short 92.0%
###Markdown
__Answer:__
- 0% of the images in human_files_short have a detected dog
- 92% of the images in dog_files_short have a detected dog
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

# Clear VGG16 from memory
del VGG16
import gc
import GPUtil
import torch
gc.collect()
torch.cuda.empty_cache()
GPUtil.showUtilization()
###Output
| ID | GPU | MEM |
------------------
|  0 |  0% | 15% |
###Markdown
---
Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.
We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.
Brittany | Welsh Springer Spaniel
- | -
It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).
Curly-Coated Retriever | American Water Spaniel
- | -
Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
from PIL import Image
im = Image.open('dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg')
im.size

from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
# convert data to a normalized torch.FloatTensor
# https://pytorch.org/vision/stable/transforms.html
# Resize - Resize the input image to the given size.
# CenterCrop - Crops the given image at the center.
# RandomHorizontalFlip - Horizontally flip the given image randomly with a given probability (default 0.5).
# ToTensor - Convert a PIL Image or numpy.ndarray to tensor.
# Normalize - Normalize a tensor image with mean and standard deviation. This transform does not support PIL Image.
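# Note: every loader defined below yields batches of shape [batch_size, 3, 224, 224]:
# RandomResizedCrop(224) (train) and Resize(256)+CenterCrop(224) (valid/test) fix the
# spatial size at 224x224, and ToTensor moves the channel dimension first.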
train_transforms = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

val_test_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

# load dataset
train_dataset = datasets.ImageFolder(root='dogImages/train/',transform=train_transforms)
validation_dataset = datasets.ImageFolder(root='dogImages/valid',transform=val_test_transforms)
test_dataset = datasets.ImageFolder(root='dogImages/test',transform=val_test_transforms)

print(f"{len(train_dataset)} Images available for training")
print(f"{len(validation_dataset)} Images available for validation")
print(f"{len(test_dataset)} Images available for testing")

# create loaders with the chosen batch_size & shuffle
train_loader = DataLoader(train_dataset,batch_size=10,num_workers=0,shuffle=True)
validation_loader = DataLoader(validation_dataset,batch_size=10,num_workers=0,shuffle=True)
test_loader = DataLoader(test_dataset,batch_size=10,num_workers=0,shuffle=True)

import os
# get classes
names = [name for name in os.listdir("./dogImages/train/")]
classes = {int(name.split('.')[0]): name.split('.')[1].replace('_',' ') for name in names}
classes[1]

dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()

fig = plt.figure(figsize=(25,4))
for i in np.arange(10):
    ax = fig.add_subplot(2, 10//2, i+1, xticks=[], yticks=[])
    x = np.array((images[i] - np.min(images[i])) / (np.max(images[i]) - np.min(images[i])))
    plt.imshow(np.transpose(x, (1,2,0)))
    #print(">>",classes[int(labels[i])])
    ax.set_title(classes[int(labels[i])])

images[0].shape

validiter = iter(validation_loader)
valid_images, labels = next(validiter)
valid_images[0].shape

testiter = iter(test_loader)
test_images, labels = next(testiter)
test_images[0].shape

# total number of classes
len(classes)

import gc
import GPUtil
import torch
gc.collect()
torch.cuda.empty_cache()
GPUtil.showUtilization()
###Output
| ID | GPU | MEM |
------------------
|  0 |  1% | 15% |
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**:
- I resized and cropped the images to 224x224, so that repeated (2,2) max pooling downsamples them evenly.
- I used CenterCrop (assuming most of the dogs fall within this central region) and RandomHorizontalFlip to avoid overfitting.
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # convolutional layer (sees 224x224x3 image tensor)
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        # above will be converted to 224x224x64 by conv1
        # after applying max pooling layer (2,2) we get 112x112x64
        # this layer sees 112x112x64
        self.conv2 = nn.Conv2d(64,128,3, padding=1)
        # conv2 outputs = 112x112x128
        # max pool outputs = 56x56x128
        # this layer sees 56x56x128 tensor
        self.conv3 = nn.Conv2d(128, 128, 3, padding=1)
        # conv3 outputs = 56x56x128
        # max pool outputs = 28x28x128
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (28*28*128 -> 1000)
        self.fc1 = nn.Linear(28 * 28 * 128, 1000)
        # linear layer (1000 -> 500)
        self.fc2 = nn.Linear(1000, 500)
        # linear layer (500 -> 133)
        self.fc3 = nn.Linear(500, 133)
        # dropout layer (p=0.25)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        ## Define forward behavior
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        # flatten image input
        _,d, h, w = x.shape
        x = x.view(-1, d*h*w)
        # add 1st hidden layer, with relu activation function
        x = F.relu(self.fc1(x))
        # add dropout layer
        x = self.dropout(x)
        # add 2nd hidden layer, with relu activation function
        x = F.relu(self.fc2(x))
        # add dropout layer
        x = self.dropout(x)
        # final layer returns raw class scores; no ReLU here, since
        # nn.CrossEntropyLoss applies LogSoftmax to the logits itself
        x = self.fc3(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
model_scratch

import gc
import GPUtil
import torch
gc.collect()
torch.cuda.empty_cache()
GPUtil.showUtilization()
###Output
| ID | GPU | MEM |
------------------
|  0 |  0% | 20% |
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
# This criterion combines LogSoftmax and NLLLoss in one single class.
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
# Implements stochastic gradient descent (optionally with momentum).
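# Note: with momentum=0.9 below, SGD keeps a running velocity v = 0.9*v + grad and steps
# the parameters by lr*v, which smooths noisy mini-batch gradients and usually converges
# faster than plain SGD when training from scratch.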
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01, momentum=0.9)

!conda --version
!nvcc --version

import torch
print(torch.__version__)

!pip install GPUtil

def pretty_size(size):
    """Pretty prints a torch.Size object"""
    assert(isinstance(size, torch.Size))
    return " × ".join(map(str, size))

def dump_tensors(gpu_only=True):
    """Prints a list of the Tensors being tracked by the garbage collector."""
    import gc
    total_size = 0
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj):
                if not gpu_only or obj.is_cuda:
                    print("%s:%s%s %s" % (type(obj).__name__,
                                          " GPU" if obj.is_cuda else "",
                                          " pinned" if obj.is_pinned() else "",
                                          pretty_size(obj.size())))
                    total_size += obj.numel()
            elif hasattr(obj, "data") and torch.is_tensor(obj.data):
                if not gpu_only or obj.is_cuda:
                    print("%s → %s:%s%s%s%s %s" % (type(obj).__name__,
                                                   type(obj.data).__name__,
                                                   " GPU" if obj.is_cuda else "",
                                                   " pinned" if obj.data.is_pinned() else "",
                                                   " grad" if obj.requires_grad else "",
                                                   " volatile" if getattr(obj, "volatile", False) else "",
                                                   pretty_size(obj.data.size())))
                    total_size += obj.data.numel()
        except Exception as e:
            pass
    print("Total size:", total_size)

import gc
def del_tensors():
    total_size = 0
    for obj in gc.get_objects():
        if torch.is_tensor(obj):
            # note: `del obj` only drops the loop's local reference; the tensor is
            # actually freed once no other references remain and gc.collect() runs
            print("deleting obj {}".format(pretty_size(obj.size())))
            del obj
    gc.collect()
    torch.cuda.empty_cache()
    dump_tensors()

import gc
import GPUtil
import torch
gc.collect()
torch.cuda.empty_cache()
GPUtil.showUtilization()
###Output
| ID | GPU | MEM |
------------------
|  0 |  0% | 20% |
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # forward pass - compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # update the average validation loss
            valid_loss = valid_loss + ( (1/(batch_idx
+ 1)) * (loss.data - valid_loss )) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print("Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...".format( valid_loss_min,valid_loss)) # save model torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model loaders_scratch = { 'train': train_loader, 'valid': validation_loader, 'test': test_loader } # epochs epochs = 20 # train the model model_scratch = train(epochs, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.304570 Validation Loss: 4.096349 Validation loss decreased (inf --> 4.096349). Saving model ... Epoch: 2 Training Loss: 4.267185 Validation Loss: 4.216056 Epoch: 3 Training Loss: 4.253623 Validation Loss: 4.025999 Validation loss decreased (4.096349 --> 4.025999). Saving model ... Epoch: 4 Training Loss: 4.240735 Validation Loss: 4.096261 Epoch: 5 Training Loss: 4.230799 Validation Loss: 4.087922 Epoch: 6 Training Loss: 4.184386 Validation Loss: 4.044747 Epoch: 7 Training Loss: 4.176115 Validation Loss: 4.000530 Validation loss decreased (4.025999 --> 4.000530). Saving model ... Epoch: 8 Training Loss: 4.165697 Validation Loss: 4.016138 Epoch: 9 Training Loss: 4.147571 Validation Loss: 3.895378 Validation loss decreased (4.000530 --> 3.895378). Saving model ... Epoch: 10 Training Loss: 4.170587 Validation Loss: 3.944375 Epoch: 11 Training Loss: 4.118514 Validation Loss: 3.940952 Epoch: 12 Training Loss: 4.113554 Validation Loss: 3.914011 Epoch: 13 Training Loss: 4.084664 Validation Loss: 3.973187 Epoch: 14 Training Loss: 4.102456 Validation Loss: 3.914055 Epoch: 15 Training Loss: 4.069668 Validation Loss: 3.988956 Epoch: 16 Training Loss: 4.061467 Validation Loss: 3.896282 Epoch: 17 Training Loss: 4.097472 Validation Loss: 3.984277 Epoch: 18 Training Loss: 4.074789 Validation Loss: 3.848127 Validation loss decreased (3.895378 --> 3.848127). Saving model ... Epoch: 19 Training Loss: 4.104012 Validation Loss: 3.914968 Epoch: 20 Training Loss: 4.082767 Validation Loss: 3.888077 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) dump_tensors() import gc import GPUtil import torch gc.collect() torch.cuda.empty_cache() GPUtil.showUtilization() ###Output | ID | GPU | MEM | ------------------ | 0 | 0% | 31% | ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = loaders_scratch loaders_transfer ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn # using vgg16 model_transfer = models.vgg16(pretrained=True) print(f"Last layer input {model_transfer.classifier[6].in_features}") print(f"Last layer output {model_transfer.classifier[6].out_features}") # Freeze training for all "features" layers for param in model_transfer.features.parameters(): param.requires_grad = False # get last layer input n_inputs = model_transfer.classifier[6].in_features # set last layer output to 133 dog breed classes, 4096=>133 last_layer = nn.Linear(n_inputs, len(classes)) # assign the last layer to the transferred model model_transfer.classifier[6] = last_layer # model_transfer.classifier.add_module('7', nn.ReLU(inplace=True)) # print last layer output print(model_transfer.classifier[6].out_features) if use_cuda: model_transfer = model_transfer.cuda() print(model_transfer) import gc import GPUtil import torch gc.collect() torch.cuda.empty_cache() GPUtil.showUtilization() ###Output | ID | GPU | MEM | ------------------ | 0 | 0% | 50% | ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.parameters(), lr=0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. 
###Code
# train the model
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

n_epochs = 20
model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

import gc
import GPUtil
import torch
gc.collect()
torch.cuda.empty_cache()
GPUtil.showUtilization()
###Output
| ID | GPU | MEM |
------------------
|  0 |  1% | 49% |
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.369894

Test Accuracy: 87% (732/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model
Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
# class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes]
print(classes)

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    # first load image
    image = Image.open(img_path)
    # define transforms
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    img_tensor = preprocess(image)
    # resize tensor
    img_tensor.unsqueeze_(0)
    # check if gpu available
    if train_on_gpu:
        img_tensor = img_tensor.cuda()
    output = model_transfer(img_tensor)
    # convert output probabilities to predicted class
    predicted_indx = output.data.numpy().argmax() if not train_on_gpu else output.data.cpu().numpy().argmax()
    return classes[predicted_indx+1]

predict_breed_transfer('dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg')
###Output
_____no_output_____
###Markdown
---
Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.
You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!
![Sample Human Output](images/sample_human_output.png)
(IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
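# Note: the order of the checks in run_app matters - dog_detector runs first because the
# Haar cascade above reported faces on 18% of dog_files_short (Question 1), while the
# VGG-16 dog detector fired on 0% of the human images (Question 2), so ruling out dogs
# first avoids misrouting dog photos to the human branch.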
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if dog_detector(img_path):
        print(f"Dog detected, breed of the dog is {predict_breed_transfer(img_path)}")
    elif face_detector(img_path):
        print(f"Human Face detected, resembling dog breed is {predict_breed_transfer(img_path)}")
    else:
        print("Neither human nor dog detected")
###Output
_____no_output_____
###Markdown
---
Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?
(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.
__Answer:__ (Three possible points for improvement)
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:5], dog_files[:5])):
    print(f"file - {file}")
    run_app(file)
###Output
file - lfw\Aaron_Eckhart\Aaron_Eckhart_0001.jpg
Human Face detected, resembling dog breed is Chinese crested
file - lfw\Aaron_Guiel\Aaron_Guiel_0001.jpg
Human Face detected, resembling dog breed is Dachshund
file - lfw\Aaron_Patterson\Aaron_Patterson_0001.jpg
Neither human nor dog detected
file - lfw\Aaron_Peirsol\Aaron_Peirsol_0001.jpg
Human Face detected, resembling dog breed is Dachshund
file - lfw\Aaron_Peirsol\Aaron_Peirsol_0002.jpg
Human Face detected, resembling dog breed is Dachshund
file - dogImages\test\001.Affenpinscher\Affenpinscher_00003.jpg
Dog detected, breed of the dog is Affenpinscher
file - dogImages\test\001.Affenpinscher\Affenpinscher_00023.jpg
Dog detected, breed of the dog is Affenpinscher
file - dogImages\test\001.Affenpinscher\Affenpinscher_00036.jpg
Dog detected, breed of the dog is Affenpinscher
file - dogImages\test\001.Affenpinscher\Affenpinscher_00047.jpg
Dog detected, breed of the dog is Affenpinscher
file - dogImages\test\001.Affenpinscher\Affenpinscher_00048.jpg
Dog detected, breed of the dog is Affenpinscher
###Markdown
Convolutional Neural Networks
Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**.
Include the finished document along with this notebook as your submission.
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.
---
Why We're Here
In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
![Sample Dog Output](images/sample_dog_output.png)
In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience!
The Road Ahead
We break the notebook into separate steps. Feel free to use the links below to navigate the notebook.
* [Step 0](#step0): Import Datasets
* [Step 1](#step1): Detect Humans
* [Step 2](#step2): Detect Dogs
* [Step 3](#step3): Create a CNN to Classify Dog Breeds (from Scratch)
* [Step 4](#step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)
* [Step 5](#step5): Write your Algorithm
* [Step 6](#step6): Test Your Algorithm
---
Step 0: Import Datasets
Make sure that you've downloaded the required human and dog datasets:
* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`.
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`.
*Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*
In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob

# load filenames for human and dog images
human_files = np.array(glob("lfw/*/*"))
dog_files = np.array(glob("dogImages/*/*/*"))

# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.'
% len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. 
__Answer:__ (You can print out your results and/or write your percentages in this cell)
###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
###Output
_____no_output_____
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
---
Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.
Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.
Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
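    # One possible approach (a sketch, not the only valid solution; it mirrors the
    # standard ImageNet preprocessing used elsewhere in this file - uncomment to use):
    # image = Image.open(img_path).convert('RGB')
    # preprocess = transforms.Compose([transforms.Resize(256),
    #                                  transforms.CenterCrop(224),
    #                                  transforms.ToTensor(),
    #                                  transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                                                       std=[0.229, 0.224, 0.225])])
    # img_tensor = preprocess(image).unsqueeze(0)
    # if use_cuda:
    #     img_tensor = img_tensor.cuda()
    # return VGG16(img_tensor).data.cpu().numpy().argmax()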
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. 
Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -
We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.
Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?
**Answer**:
(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN

    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.
__Answer:__
(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = None

### TODO: select optimizer
optimizer_scratch = None
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
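One TODO left open in the training template above is saving the model whenever the validation loss improves. A minimal sketch of that logic, written as a self-contained helper so it can be tested in isolation: ###Code
import torch

def save_if_improved(model, valid_loss, valid_loss_min, save_path):
    """Save model weights when validation loss improves; return the new minimum."""
    if valid_loss < valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model...'.format(
            valid_loss_min, valid_loss))
        torch.save(model.state_dict(), save_path)
        return valid_loss
    return valid_loss_min

# inside train(), after the validation loop, one could then write:
# valid_loss_min = save_if_improved(model, valid_loss, valid_loss_min, save_path)
###Output _____no_output_____ ###Markdown The same pattern works unchanged for the transfer-learning loop in the next section.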
###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model (uncomment and run the line below) # model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm.
### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images stored on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app.
If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. 
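As one hedged way to answer Question 1 above, a small helper can report the detection rate on both short file lists; `tqdm` only provides a progress bar and is optional. ###Code
from tqdm import tqdm

def detection_rate(detector, files):
    """Percentage of files for which `detector` returns True."""
    hits = sum(1 for f in tqdm(files) if detector(f))
    return 100.0 * hits / len(files)

print('Faces detected in human_files_short: %.1f%%' % detection_rate(face_detector, human_files_short))
print('Faces detected in dog_files_short: %.1f%%' % detection_rate(face_detector, dog_files_short))
###Output _____no_output_____ ###Markdown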
If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. 
return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. |Yellow Labrador | Chocolate Labrador | Black Labrador||- | -|-|| | | |We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource.
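Before the loader details continue below, it is worth closing out the two Step 2 TODO cells above; a minimal sketch, assuming the standard ImageNet preprocessing used with torchvision's pre-trained models: ###Code
from PIL import Image
import torch
import torchvision.transforms as transforms

# standard ImageNet preprocessing for torchvision's pre-trained models
imagenet_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])])

def VGG16_predict(img_path):
    img = Image.open(img_path).convert('RGB')
    x = imagenet_transform(img).unsqueeze(0)  # add a batch dimension
    if use_cuda:
        x = x.cuda()
    with torch.no_grad():
        logits = VGG16(x)
    return int(logits.argmax(dim=1).item())  # predicted class index, 0-999

def dog_detector(img_path):
    # ImageNet indices 151-268 (inclusive) are the dog categories
    return 151 <= VGG16_predict(img_path) <= 268
###Output _____no_output_____ ###Markdown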
If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
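For the loader TODO and Question 3 above, one illustrative setup follows; the 224x224 input size matches ImageNet-style CNNs, and the augmentation choices (flip, small rotation) are assumptions rather than the course answer. ###Code
import os
import torch
from torchvision import datasets, transforms

# augmented pipeline for training; deterministic pipeline for valid/test
train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(10),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

eval_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

data_dir = 'dogImages'  # layout as described above: train/, valid/, test/
data_scratch = {
    'train': datasets.ImageFolder(os.path.join(data_dir, 'train'), train_transform),
    'valid': datasets.ImageFolder(os.path.join(data_dir, 'valid'), eval_transform),
    'test':  datasets.ImageFolder(os.path.join(data_dir, 'test'),  eval_transform)}
loaders_scratch = {
    split: torch.utils.data.DataLoader(ds, batch_size=32, shuffle=(split == 'train'))
    for split, ds in data_scratch.items()}
###Output _____no_output_____ ###Markdown With `loaders_scratch` defined this way, the training template in the next cell can be used as-is.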
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
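Jumping ahead briefly to the model this section asks for (the loader discussion continues just below): one common transfer-learning recipe, assumed here rather than mandated by the notebook, reuses a pre-trained ResNet-50, freezes its convolutional features, and replaces the final fully-connected layer with one sized for the 133 dog breeds. ###Code
import torchvision.models as models
import torch.nn as nn

model_transfer = models.resnet50(pretrained=True)
for param in model_transfer.parameters():
    param.requires_grad = False  # freeze the pre-trained features
# replace the classifier head with one output per dog breed (133 classes)
model_transfer.fc = nn.Linear(model_transfer.fc.in_features, 133)

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output _____no_output_____ ###Markdown Only the new `fc` layer has trainable parameters, so the optimizer should be given `model_transfer.fc.parameters()`.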
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model (uncomment and run the line below) # model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed.
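To make the Step 5 requirements concrete, here is a sketch of `predict_breed_transfer` and `run_app` built from the pieces above; `eval_transform` refers to the loader sketch earlier and is an assumption, as is the exact wording of the messages. ###Code
import torch
from PIL import Image

def predict_breed_transfer(img_path):
    img = Image.open(img_path).convert('RGB')
    x = eval_transform(img).unsqueeze(0)  # eval_transform from the loader sketch above (an assumption)
    if use_cuda:
        x = x.cuda()
    model_transfer.eval()
    with torch.no_grad():
        idx = model_transfer(x).argmax(dim=1).item()
    return class_names[idx]  # class_names as in the template cell above

def run_app(img_path):
    if dog_detector(img_path):
        print('Hello, dog! Predicted breed:', predict_breed_transfer(img_path))
    elif face_detector(img_path):
        print('Hello, human! You resemble a', predict_breed_transfer(img_path))
    else:
        print('Error: neither a dog nor a human face was detected.')
###Output _____no_output_____ ###Markdown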
Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. 
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. 
If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image return None # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. 
return None # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. 
If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. 
Some sample output for our algorithm is provided below, but feel free to design your own user experience! ![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither pass  # TODO: implement ###Output _____no_output_____ ###Markdown --- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode. The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook. --- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. 
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
human_faces_count = 0 for human in human_files_short: if face_detector(human): human_faces_count += 1 dog_faces_count = 0 for dog in dog_files_short: if face_detector(dog): dog_faces_count += 1 # each count equals a percentage because there are exactly 100 images print("{}% of the first 100 images in human_files have a detected human face".format(human_faces_count)) print("{}% of the first 100 images in dog_files have a detected human face".format(dog_faces_count)) ###Output 98% of the first 100 images in human_files have a detected human face 17% of the first 100 images in dog_files have a detected human face ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect Dogs In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:05<00:00, 101914350.66it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
## Load and pre-process an image from the given img_path im = Image.open(img_path) im_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) im = im_transforms(im) # add a batch dimension, as the model expects batches of images im = im.unsqueeze(0) if use_cuda: im = im.cuda() ## Return the *index* of the predicted class for that image # Set model to eval mode VGG16.eval() # Turn off gradients with torch.no_grad(): output = VGG16(im) probability, class_idx = output.max(1) return class_idx.item() # predicted class index VGG16_predict('/data/dog_images/train/001.Affenpinscher/Affenpinscher_00001.jpg') ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog Detector While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. class_idx = VGG16_predict(img_path) return 151 <= class_idx <= 268  # ImageNet dog categories ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector __Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ See printed results! ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. dogs_in_human_files = 0 for human in human_files_short: if dog_detector(human): dogs_in_human_files += 1 dogs_in_dog_files = 0 for dog in dog_files_short: if dog_detector(dog): dogs_in_dog_files += 1 # each count equals a percentage because there are exactly 100 images print("{}% of the first 100 images in human_files have a detected dog".format(dogs_in_human_files)) print("{}% of the first 100 images in dog_files have a detected dog".format(dogs_in_dog_files)) ###Output 0% of the first 100 images in human_files have a detected dog 100% of the first 100 images in dog_files have a detected dog ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. 
###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch) Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. *(side-by-side example images of a Brittany and a Welsh Springer Spaniel omitted)* It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). *(example images of a Curly-Coated Retriever and an American Water Spaniel omitted)* Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. *(example images of yellow, chocolate, and black Labradors omitted)* We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! 
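When tuning augmentation choices like those in the next cell, it helps to eyeball a transformed batch. The short sketch below is an editorial addition, not part of the original submission; run it after the next cell so that `trainloader` exists (the normalization statistics are the same ImageNet values used there). ###Code
# Visual check of the augmented training data (run after trainloader is defined):
# grabs one batch and undoes the ImageNet normalization for display.
import numpy as np
import matplotlib.pyplot as plt

images, labels = next(iter(trainloader))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
fig, axes = plt.subplots(1, 8, figsize=(16, 2))
for ax, img in zip(axes, images[:8]):
    img = img.numpy().transpose(1, 2, 0)        # CHW -> HWC for matplotlib
    ax.imshow(np.clip(img * std + mean, 0, 1))  # invert the normalization
    ax.axis('off')
plt.show()
###Output _____no_output_____ ###Markdown The loaders themselves follow. 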
###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes data_dir = '/data/dog_images' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' # Define transforms for the training, validation, and testing sets train_transforms = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) test_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) # Load the datasets with ImageFolder train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms) valid_datasets = datasets.ImageFolder(valid_dir, transform=test_transforms) test_datasets = datasets.ImageFolder(test_dir, transform=test_transforms) # Using the image datasets and the transforms, define the dataloaders trainloader = torch.utils.data.DataLoader(train_datasets, batch_size=32, shuffle=True) validloader = torch.utils.data.DataLoader(valid_datasets, batch_size=32) testloader = torch.utils.data.DataLoader(test_datasets, batch_size=32) loaders_scratch = {"train":trainloader, "valid":validloader, "test":testloader} ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: - The images are resized by cropping. - I picked 224 x 224 as the input size because I want to reuse the transforms for transfer learning, and the PyTorch models "expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224" (https://pytorch.org/docs/stable/torchvision/models.html). - To avoid overfitting I augmented the training set with random resized crop, random rotation, and random horizontal flip. (IMPLEMENTATION) Model Architecture Create a CNN to classify dog breed. Use the template in the code cell below. 
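A quick sanity check on the sizes used in that template: with 224x224 inputs and five 2x2 max-pooling stages, the spatial resolution entering the classifier is 224 / 2^5 = 7 pixels per side, so the first fully-connected layer must expect 7 * 7 * 512 = 25088 inputs. A sketch of that arithmetic (an editorial aside, matching the architecture defined in the next cell): ###Code
# Sanity check for the classifier input size used in the architecture below:
# each of the five 2x2 max-pooling stages halves the 224x224 spatial resolution.
side = 224
for _ in range(5):              # five max-pooling stages
    side //= 2
print(side, side * side * 512)  # 7 25088 -> matches nn.Linear(7*7*512, 4096)
###Output _____no_output_____ ###Markdown The architecture itself is defined next. 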
###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.features = nn.Sequential( # input: 224 x 224 x 3 image nn.Conv2d(3, 64, 3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True), nn.Conv2d(64, 64, 3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2), nn.Conv2d(64, 128, 3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 128, 3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2), nn.Conv2d(128, 256, 3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Conv2d(256, 256, 3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2), nn.Conv2d(256, 512, 3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.Conv2d(512, 512, 3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2), nn.Conv2d(512, 512, 3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.Conv2d(512, 512, 3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2), ) self.classifier = nn.Sequential( nn.Dropout(0.25), nn.Linear(7*7*512, 4096), nn.ReLU(inplace=True), nn.Dropout(0.25), nn.Linear(4096, 4096), nn.ReLU(inplace=True), nn.Linear(4096, len(train_datasets.classes)), ) def forward(self, x): ## Define forward behavior x = self.features(x) x = x.view(-1, 7*7*512) x = self.classifier(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ At first my model looked like this: Net( (features): Sequential( (0): Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (3): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (4): ReLU(inplace) (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (6): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (7): ReLU(inplace) ) (classifier): Sequential( (0): Linear(in_features=12544, out_features=1024, bias=True) (1): ReLU(inplace) (2): Linear(in_features=1024, out_features=133, bias=True) ) ) For feature detection it had 3 convolutional layers with ReLU activation. Each convolutional layer got a higher number of output channels than the previous layer. Thus with each layer the network gets deeper and can extract more and more complex patterns and features that help identify the content and objects in the image. I used 3x3 kernels, stride 1 and padding 1 in every layer. Each convolutional layer was followed by a pooling layer to decrease the dimensions and avoid overfitting. I used 2x2 kernels and stride 2, which halved the x and y dimensions. The features were then flattened into a feature vector and passed to a classifier with fully-connected layers. As criterion I chose CrossEntropyLoss because it is a multi-class criterion and it combines softmax and negative log-likelihood.
Because it performs softmax I didn't add an activation function to the output layer of the classifier. As optimizer I chose SGD with a learning rate of 0.001. After this initial setup I trained the network for a few epochs, changed hyperparameters and trained again and again and again. But I was not satisfied with the training progress. I started researching and read some papers about convolutional networks for image recognition. I found "Very Deep Convolutional Networks for Large-Scale Image Recognition" (Simonyan & Zisserman, 2014) particularly interesting. In the paper the VGG network architecture is described and I was fascinated by the rather simple yet effective approach. For feature detection only 3x3 convolutional layers with increasing depth and 2x2 max-pooling layers are used. Not all convolutional layers are followed by max-pooling. There are blocks of convolutional layers with the same filter size. The authors showed that multiple stacked layers with smaller kernels were better than one layer with a larger kernel. The approach allowed networks with more layers, and thus deeper networks, which enabled them to learn more complex features. As classifier, three fully-connected layers were used. In the paper different configurations of the VGG architecture were described. The configurations differed only in depth (number of layers). The deeper the configuration, the better it performed. I decided to implement the VGG configuration with 13 weight layers. In this configuration the convolutional layers are organized in blocks of two layers with the same filter size followed by a max-pooling layer. I trained the network and got much better results. I was able to reach the required 10% accuracy but decided to tune the learning process further. - I increased the learning rate to 0.1, which resulted in a faster decrease of training and validation loss. - I inserted batch normalization after each convolutional layer to allow the use of a higher learning rate and to add some noise to the network to avoid overfitting. - I inserted Dropout in the fully-connected layers to avoid overfitting. The final architecture is shown below. With this architecture and hyperparameters I trained the network for 50 epochs. As a result the network was able to achieve a test accuracy of 49%. The final architecture has many more layers and filters than my initial approach. The network is much deeper and can learn more complex features. ###Code model_scratch ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.1) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. 
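One brief aside before the training cell: as noted in the answer above, `nn.CrossEntropyLoss` fuses `LogSoftmax` with `NLLLoss`, which is why the network can emit raw logits without a final softmax. A minimal sketch with random tensors (an editorial addition; it makes no assumptions about the model): ###Code
import torch
import torch.nn as nn

logits = torch.randn(4, 133)            # fake raw class scores for a batch of 4
targets = torch.randint(0, 133, (4,))   # fake ground-truth breed indices

ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
print(torch.allclose(ce, nll))          # True: the two formulations agree
###Output _____no_output_____ ###Markdown The training and validation loop follows. 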
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### # Set model to train mode model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly # Clear the gradients (gradients are accumulated) optimizer.zero_grad() # Feed forward outputs = model(data) # Calculate loss loss = criterion(outputs, target) # Pass backward to calculate the gradients loss.backward() # Take a step with the optimizer to update the weights optimizer.step() ## record the average training loss train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### # Set model to eval mode model.eval() # Turn off gradients with torch.no_grad(): for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # forward pass: compute predicted outputs by passing inputs to the model outputs = model(data) # calculate the batch loss loss = criterion(outputs, target) # update average validation loss valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(50, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. # Set model to eval mode model.eval() # Turn off gradients with torch.no_grad(): for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 1.885015 Test Accuracy: 49% (411/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning) You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = loaders_scratch ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model Architecture Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.densenet121(pretrained=True) # Freeze model parameters for param in model_transfer.features.parameters(): param.requires_grad = False # Replace last layer to change the number of classes model_transfer.classifier = nn.Linear(model_transfer.classifier.in_features, len(train_datasets.classes)) if use_cuda: model_transfer = model_transfer.cuda() ###Output /opt/conda/lib/python3.6/site-packages/torchvision-0.2.1-py3.6.egg/torchvision/models/densenet.py:212: UserWarning: nn.init.kaiming_normal is now deprecated in favor of nn.init.kaiming_normal_. Downloading: "https://download.pytorch.org/models/densenet121-a639ec97.pth" to /root/.torch/models/densenet121-a639ec97.pth 100%|██████████| 32342954/32342954 [00:00<00:00, 46130480.69it/s] ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ For the transfer learning part I wanted to use a newer network architecture. I decided to give Densenet a try. As preparation I did the following steps: 1. I loaded the pretrained densenet121 model from torchvision.models. 2. I froze all model parameters because I didn't want to train the entire network. 3. I replaced the last layer of the fully connected classifier to let it produce as many outputs as there are classes in the dog breed dataset. 4. I chose CrossEntropyLoss as criterion, as it is suitable for multi-class classification and it applies softmax to the model output. 5. I chose SGD as optimizer and told it to optimize only the parameters of the classifier, so that the parameters of the feature extractor wouldn't be changed. 6. As learning rate I chose 0.001. Then I trained the network for a few epochs and noticed that the training and validation loss decreased very little. So I increased the learning rate to 0.01. This resulted in a larger decrease in training and validation loss. I trained the model for 30 epochs and was able to achieve a test accuracy of 87%. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). 
Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.01) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = train(30, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.497775 Test Accuracy: 87% (733/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the Model Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc.) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in train_datasets.classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed im = Image.open(img_path) im_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) im = im_transforms(im) # add a batch dimension, as the model expects batches of images im = im.unsqueeze(0) # move to GPU if use_cuda: im = im.cuda() ## Return the predicted class for that image # Set model to eval mode model_transfer.eval() # Turn off gradients with torch.no_grad(): output = model_transfer(im) _, class_idx = output.max(1) return class_names[class_idx.item()] # predicted class predict_breed_transfer('/data/dog_images/train/001.Affenpinscher/Affenpinscher_00001.jpg') ###Output _____no_output_____ ###Markdown --- Step 5: Write your Algorithm Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then, - if a __dog__ is detected in the image, return the predicted breed. - if a __human__ is detected in the image, return the resembling dog breed. - if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience! ![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. 
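## run_app below chains the three models built in this notebook: the Haar-cascade face detector (Step 1), the VGG-16 dog detector (Step 2), and the transfer-learned breed classifier (Step 4).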
def run_app(img_path): ## handle cases for a human face, dog, and neither im = Image.open(img_path) human_detected = face_detector(img_path) dog_detected = dog_detector(img_path) if dog_detected: breed = predict_breed_transfer(img_path) print("This dog ...") plt.imshow(im) plt.show() print("... is a {}\n".format(breed)) elif human_detected: breed = predict_breed_transfer(img_path) print("Hello, human!") plt.imshow(im) plt.show() print("You look like a {}\n".format(breed)) else: print("A picture of a dog or a human is required.") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ I didn't expect the model to perform so well in predicting dog breeds from human faces. To test my algorithm I used human faces that looked like specific dog breeds, and I was really surprised by the output. Points for improvement: - The face_detector could be improved. It detected 98 human faces in the first 100 pictures of humans. - The dog_detector could also be improved. It only uses the dog breeds that are present in the ImageNet dataset. If the dog_detector doesn't detect a dog, then no prediction is done. - Instead of just providing the most probable dog breed, the top 5 most probable dog breeds could be provided. - A typical representative of the predicted dog breed could be shown to allow the user to evaluate the output. - The model could be trained longer to obtain a better accuracy. - The optimizer could be adjusted to use momentum and an adaptive learning rate. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below test_files = ["images/bulldog_human.jpeg", "images/cocker_spaniel_human.jpeg", "images/pudel_human.jpeg", "images/french_bulldog.jpg", "images/cattle.jpg", "images/german_shepard.jpg"] for file in test_files: run_app(file) ###Output Hello, human! ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. 
You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. 
###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. 
You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ 98% of the human faces were correctly detected in the first 100 images of the human dataset. 17% of the dog images were wrongly classified as human faces in the first 100 images of the dog dataset. ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. human_count = 0 dog_count = 0 for img in human_files_short: if face_detector(img): human_count += 1 for img in dog_files_short: if face_detector(img): dog_count += 1 print("Human images correctly classified as human faces: ", human_count) print("Dog images wrongly classified as human faces: ", dog_count) ###Output Human images correctly classified as human faces: 98 Dog images wrongly classified as human faces: 17 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect Dogs In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models from torchvision.models.vgg import model_urls model_urls['vgg16'] = model_urls['vgg16'].replace('https://', 'http://') # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "http://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:14<00:00, 37603133.61it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. 
The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image, ImageFile import torchvision.transforms as transforms ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image image = Image.open(img_path).convert('RGB') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transformations = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), normalize]) transformed_image = transformations(image)[:3, :, :].unsqueeze(0) # keep new_image defined even when CUDA is unavailable new_image = transformed_image.cuda() if use_cuda else transformed_image out = VGG16(new_image) return torch.max(out, 1)[1].item() ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog Detector While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. predict_index = VGG16_predict(img_path) return 151 <= predict_index <= 268 # True/False ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector __Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ All dog images are correctly detected in dog_files_short, i.e. 100% of the images. 1% of the human images were wrongly classified as dogs in human_files_short. 
Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! 
###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes batch_size = 20 n = 0 data_dir = '/data/dog_images/' train_path = data_dir + 'train' validation_path = data_dir + 'valid' test_path = data_dir + 'test' normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_dataset = datasets.ImageFolder(train_path, transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomRotation(15), transforms.ToTensor(), normalize, ])) validation_dataset = datasets.ImageFolder(validation_path, transforms.Compose([ transforms.Resize(size=(224,224)), transforms.ToTensor(), normalize, ])) test_dataset = datasets.ImageFolder(test_path, transforms.Compose([ transforms.Resize(size=(224,224)), transforms.ToTensor(), normalize, ])) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size= batch_size, num_workers = n, shuffle = True) validation_loader = torch.utils.data.DataLoader(validation_dataset, batch_size= batch_size, num_workers = n) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size= batch_size, num_workers = n) loaders_scratch = { 'train': train_loader, 'valid': validation_loader, 'test': test_loader } ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why? - Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: I have used a size of 224x224 because most models like VGG16 use this input size. Image augmentation has been done for the training data to avoid overfitting of the model. Transforms used: random resized crop to 224, random horizontal flipping, and random rotation. Only image resizing has been done for the validation and test data, while normalization has been applied to all datasets. (IMPLEMENTATION) Model Architecture Create a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 36, 3, padding=1) self.conv2 = nn.Conv2d(36, 64, 3, padding=1) self.conv3 = nn.Conv2d(64, 128, 3, padding=1) self.fc1 = nn.Linear(28*28*128, 512) self.fc2 = nn.Linear(512, 133) self.pool = nn.MaxPool2d(2, 2) self.dropout = nn.Dropout(0.25) self.batch_norm = nn.BatchNorm1d(512) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = self.pool(F.relu(self.conv3(x))) x = x.view(-1, 28*28*128) x = F.relu(self.batch_norm(self.fc1(x))) x = F.relu(self.fc2(x)) x = self.dropout(x) return x #-#-# You do NOT have to modify the code below this line.
#-#-# # instantiate the CNN model_scratch = Net() print(model_scratch) # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output Net( (conv1): Conv2d(3, 36, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv2): Conv2d(36, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (fc1): Linear(in_features=100352, out_features=512, bias=True) (fc2): Linear(in_features=512, out_features=133, bias=True) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (dropout): Dropout(p=0.25) (batch_norm): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) ) ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ The model has 3 convolutional layers. All layers have a kernel size of 3 and stride 1. The first layer takes a 224x224 image, and the final convolutional layer gives an output depth of 128. The ReLU activation function is used. A (2, 2) max-pooling layer is used to halve the spatial size after each convolution. The two fully connected layers produce the 133-dimensional output. Dropout of 0.25 has also been applied. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.02) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
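For reference, a minimal sketch of the save/restore pattern used below (kept commented out so nothing is saved before training):
###Code
# a sketch of the checkpointing pattern applied inside the training loop below
# save only the learned parameters, not the whole module object
# torch.save(model_scratch.state_dict(), 'model_scratch.pt')
# later, rebuild the architecture and load the weights back into it
# model = Net()
# model.load_state_dict(torch.load('model_scratch.pt'))
###Output
_____no_output_____
###Markdown
The training loop in the next cell applies this pattern whenever the validation loss improves.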
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving the model'.format(valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(15, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.839548 Validation Loss: 4.741594 Validation loss decreased (inf --> 4.741594). Saving the model Epoch: 2 Training Loss: 4.744754 Validation Loss: 4.645944 Validation loss decreased (4.741594 --> 4.645944). Saving the model Epoch: 3 Training Loss: 4.693456 Validation Loss: 4.584108 Validation loss decreased (4.645944 --> 4.584108). Saving the model Epoch: 4 Training Loss: 4.646242 Validation Loss: 4.556639 Validation loss decreased (4.584108 --> 4.556639). Saving the model Epoch: 5 Training Loss: 4.616246 Validation Loss: 4.529830 Validation loss decreased (4.556639 --> 4.529830). Saving the model Epoch: 6 Training Loss: 4.574251 Validation Loss: 4.463962 Validation loss decreased (4.529830 --> 4.463962). Saving the model Epoch: 7 Training Loss: 4.547513 Validation Loss: 4.427684 Validation loss decreased (4.463962 --> 4.427684). Saving the model Epoch: 8 Training Loss: 4.513374 Validation Loss: 4.397489 Validation loss decreased (4.427684 --> 4.397489). Saving the model Epoch: 9 Training Loss: 4.467683 Validation Loss: 4.363077 Validation loss decreased (4.397489 --> 4.363077). Saving the model Epoch: 10 Training Loss: 4.444207 Validation Loss: 4.303313 Validation loss decreased (4.363077 --> 4.303313). Saving the model Epoch: 11 Training Loss: 4.423120 Validation Loss: 4.268695 Validation loss decreased (4.303313 --> 4.268695). 
Saving the model Epoch: 12 Training Loss: 4.393801 Validation Loss: 4.218059 Validation loss decreased (4.268695 --> 4.218059). Saving the model Epoch: 13 Training Loss: 4.336800 Validation Loss: 4.209589 Validation loss decreased (4.218059 --> 4.209589). Saving the model Epoch: 14 Training Loss: 4.331115 Validation Loss: 4.240541 Epoch: 15 Training Loss: 4.280185 Validation Loss: 4.129276 Validation loss decreased (4.209589 --> 4.129276). Saving the model ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 4.109389 Test Accuracy: 12% (101/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning) You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = loaders_scratch.copy() ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model Architecture Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.resnet101(pretrained=True) if use_cuda: model_transfer = model_transfer.cuda() print(model_transfer) for param in model_transfer.parameters(): param.requires_grad = False model_transfer.fc = nn.Linear(2048, 133, bias=True) if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I used the ResNet-101 architecture, which is pre-trained on the ImageNet dataset. I expected better accuracy with ResNet-101 and therefore used it.
The architecture is 101 layers deep; within just 5 epochs, the model reached 79% accuracy, and training for more epochs can improve the accuracy further. Steps: 1. Import the pre-trained ResNet-101 model. 2. Change the out_features of the fully connected layer to 133 to match the classification problem. 3. Choose CrossEntropyLoss as the loss function. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.fc.parameters(), lr=0.02) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = train(5, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 3.547851 Validation Loss: 1.949211 Validation loss decreased (inf --> 1.949211). Saving the model Epoch: 2 Training Loss: 2.037324 Validation Loss: 1.149551 Validation loss decreased (1.949211 --> 1.149551). Saving the model Epoch: 3 Training Loss: 1.572055 Validation Loss: 0.899921 Validation loss decreased (1.149551 --> 0.899921). Saving the model Epoch: 4 Training Loss: 1.355519 Validation Loss: 0.747146 Validation loss decreased (0.899921 --> 0.747146). Saving the model Epoch: 5 Training Loss: 1.237200 Validation Loss: 0.644254 Validation loss decreased (0.747146 --> 0.644254). Saving the model ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.691277 Test Accuracy: 79% (668/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the Model Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. data_transfer = loaders_transfer # list of class names by index, i.e.
a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].dataset.classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed image = Image.open(img_path).convert('RGB') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]) transformations = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), normalize]) transformed_image = transformations(image)[:3,:,:].unsqueeze(0) if use_cuda: transformed_image = transformed_image.cuda() output = model_transfer(transformed_image) pred_index = torch.max(output,1)[1].item() return class_names[pred_index] ###Output _____no_output_____ ###Markdown --- Step 5: Write your Algorithm Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then, - if a __dog__ is detected in the image, return the predicted breed. - if a __human__ is detected in the image, return the resembling dog breed. - if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience! ![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def load_image(img_path): img = Image.open(img_path) plt.imshow(img) plt.show() def run_app(img_path): ## handle cases for a human face, dog, and neither if face_detector(img_path): print ("Human!") predicted_breed = predict_breed_transfer(img_path) print("Predicted breed: ",predicted_breed) load_image(img_path) elif dog_detector(img_path): print ("Dog!") predicted_breed = predict_breed_transfer(img_path) print("Predicted breed: ",predicted_breed) load_image(img_path) else: print ("Invalid Image") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ Yes, the model performed better than my expectation. The following points could help improve my algorithm: 1. Using more training data. 2. Doing hyperparameter tuning. 3. Trying more image augmentation. 4. A different architecture than ResNet-101 may work better. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output Human!
Predicted breed: Beagle ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App --- In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode. The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook. --- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png) In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road Ahead We break the notebook into separate steps.
Feel free to use the links below to navigate the notebook. * [Step 0](step0): Import Datasets * [Step 1](step1): Detect Humans * [Step 2](step2): Detect Dogs * [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch) * [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning) * [Step 5](step5): Write your Algorithm * [Step 6](step6): Test Your Algorithm --- Step 0: Import Datasets Make sure that you've downloaded the required human and dog datasets: * Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.* In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect Humans In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face Detector We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector __Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) Human faces detected in human images : 99.0 % No human face detected in human images : 1.0 % Human faces detected in dog images : 4.0 % No human face detected in dog images : 96.0 % ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# pos_human = 0 neg_human = 0 ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. for image_path in tqdm(human_files_short): if face_detector(image_path): pos_human += 1 else: neg_human += 1 pos_dog = 0 neg_dog = 0 for image_path in tqdm(dog_files_short): if face_detector(image_path): pos_dog += 1 else: neg_dog += 1 ## Reporting human_percent_correct = 100 * pos_human / (pos_human + neg_human) human_percent_wrong = 100 * neg_human / (pos_human + neg_human) dog_percent_correct = 100 * pos_dog / (pos_dog + neg_dog) dog_percent_wrong = 100 * neg_dog / (pos_dog + neg_dog) print('Human faces detected in human images : ', human_percent_correct, '%') print('No human face detected in human images : ', human_percent_wrong, '%') print('Human faces detected in dog images : ', dog_percent_correct, '%') print('No human face detected in dog images : ', dog_percent_wrong, '%') ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. Face Detection Using Deep Learning Steps: 1. EDA: 1. Calculate the mean height, width, and channels of the images. 2. Display 9 random images. 2. Based on the dimensions of the images, use either a custom CNN or transfer learning. 3. Train and test, finding the best model. 4. Train on the entire dataset and use it for the rest of the project.
EDA def getMeanWidthHeightChannels(files): count = 0 width = 0 height = 0 channels = 0 for image_path in tqdm(files): img = cv2.imread(image_path) h, w, c = img.shape width = (width * count + w) / (count + 1) height = (height * count + h) / (count + 1) channels = (channels * count + c) / (count + 1) count += 1 return height, width, channels h_height, h_width, h_channels = getMeanWidthHeightChannels(human_files) print(h_height) print(h_width) print(h_channels) d_height, d_width, d_channels = getMeanWidthHeightChannels(dog_files) print(d_height) print(d_width) print(d_channels) Viewing a few images import matplotlib.image as mpimg h_img = [] d_img = [] for i in range(9, 18): h_img.append(mpimg.imread(human_files_short[i])) d_img.append(mpimg.imread(dog_files_short[i])) def plotImages(images): fig, axeslist = plt.subplots(ncols=3, nrows=3) for index, image in enumerate(images): axeslist.ravel()[index].imshow(image) axeslist.ravel()[index].set_axis_off() plotImages(h_img) plotImages(d_img) --- Step 2: Detect Dogs In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available from PIL import Image from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') VGG16 = VGG16.to(device) ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function.
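## Note (added remark): torchvision's ImageNet-pretrained models generally expect inputs normalized with mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225]; a transforms.Normalize step with those constants could be added to the pipeline below.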
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image with torch.no_grad(): img = Image.open(img_path) trans1 = transforms.Resize((224, 224)) trans2 = transforms.ToTensor() img = trans1(img) img = trans2(img) img = img.view(1, 3, 224, 224) img = img.to(device) out = VGG16(img) index = torch.topk(out, 1)[1].item() return index # predicted class index VGG16_predict(dog_files_short[9]) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog Detector While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. index = VGG16_predict(img_path) if index >= 151 and index <= 268: return True return False ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector __Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ Dogs detected in human images : 0.0 % No dog detected in human images : 100.0 % Dogs detected in dog images : 93.0 % No dog detected in dog images : 7.0 % ###Code #-#-# Do NOT modify the code above this line. #-#-# yes_human = 0 no_human = 0 ## TODO: Test the performance of the dog_detector algorithm ## on the images in human_files_short and dog_files_short. for image_path in tqdm(human_files_short): if dog_detector(image_path): yes_human += 1 else: no_human += 1 pos_dog = 0 neg_dog = 0 for image_path in tqdm(dog_files_short): if dog_detector(image_path): pos_dog += 1 else: neg_dog += 1 ## Reporting human_percent_correct = 100 * yes_human / (yes_human + no_human) human_percent_wrong = 100 * no_human / (yes_human + no_human) dog_percent_correct = 100 * pos_dog / (pos_dog + neg_dog) dog_percent_wrong = 100 * neg_dog / (pos_dog + neg_dog) print('Dogs detected in human images : ', human_percent_correct, '%') print('No dog detected in human images : ', human_percent_wrong, '%') print('Dogs detected in dog images : ', dog_percent_correct, '%') print('No dog detected in dog images : ', dog_percent_wrong, '%') ###Output _____no_output_____ ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed.
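### A minimal sketch (an addition, assuming the same pre-processing style as VGG16_predict above);
### ResNet-50 is one alternative, and the ImageNet dog classes still span indices 151-268:
# resnet50 = models.resnet50(pretrained=True).to(device)
# resnet50.eval()
# def resnet50_dog_detector(img_path):
#     with torch.no_grad():
#         img = transforms.ToTensor()(transforms.Resize((224, 224))(Image.open(img_path)))
#         img = img.view(1, 3, 224, 224).to(device)
#         return 151 <= torch.topk(resnet50(img), 1)[1].item() <= 268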
###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch) Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel - | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel - | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador - | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
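For instance, a richer training-time augmentation pipeline might look like the sketch below (an added illustration; the transform choices are examples, not the ones this notebook actually trains with):
###Code
# illustrative only: a few of the many torchvision.transforms augmentation options
# augmented = transforms.Compose([
#     transforms.RandomResizedCrop(224),
#     transforms.RandomHorizontalFlip(),
#     transforms.RandomRotation(10),
#     transforms.ColorJitter(brightness=0.2, contrast=0.2),
#     transforms.ToTensor(),
#     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# ])
###Output
_____no_output_____
###Markdown
The data loaders actually used for training are defined next.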
###Code import torchvision import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes transformations = transforms.Compose([ transforms.Resize((224, 224)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) train_dataset = torchvision.datasets.ImageFolder('./dogImages/train/', transform=transformations) val_dataset = datasets.ImageFolder('./dogImages/valid/', transform=transformations) test_dataset = datasets.ImageFolder('./dogImages/test/', transform=transformations) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, shuffle=True, batch_size=32) val_loader = torch.utils.data.DataLoader(dataset=val_dataset, shuffle=True, batch_size=32) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, shuffle=True, batch_size=32) loaders_scratch = { 'train':train_loader, 'valid':val_loader, 'test':test_loader } ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why? - Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: The code resizes the images using PyTorch's built-in Resize transform. The size of the input tensors is the same as that of AlexNet's architecture, 3x224x224. Yes, I augmented the dataset with a random horizontal flip. (IMPLEMENTATION) Model Architecture Create a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.features = nn.Sequential( nn.Conv2d(3, 64, 11, 4), nn.ReLU(), nn.MaxPool2d(3, 2), nn.Conv2d(64, 192, 5), nn.ReLU(), nn.MaxPool2d(3, 2), nn.Conv2d(192, 384, 3), nn.ReLU(), nn.Conv2d(384, 256, 3), nn.ReLU(), nn.MaxPool2d(3, 2) ) self.classifier = nn.Sequential( nn.Dropout(), nn.Linear(256*6*6, 1008), nn.ReLU(), nn.Dropout(), nn.Linear(1008, 512), nn.ReLU(), nn.Linear(512, 133) ) self.avgPool = nn.AdaptiveAvgPool2d((6, 6)) def forward(self, x): ## Define forward behavior x = self.features(x) x = self.avgPool(x) x = x.view(x.size(0), 256*6*6) # pass the pooled features through the classifier head to obtain 133 raw logits x = self.classifier(x) # x = F.softmax(x, dim=1) # left commented: CrossEntropyLoss expects raw logits return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available model_scratch = model_scratch.to(device) ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I tried using a normal CNN from scratch and got a terrible accuracy, bordering on 1%. In order to improve the evaluation metrics, I decided to use a tried and tested architecture, that of AlexNet. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
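As a reminder of the interface (a small added sketch, separate from the project code): `nn.CrossEntropyLoss` combines `LogSoftmax` and `NLLLoss`, so the network should emit raw logits and the targets are integer class indices:
###Code
# illustrative only: a batch of 4 random 133-way logit vectors and 4 class labels
# logits = torch.randn(4, 133)
# targets = torch.tensor([0, 5, 42, 132])
# loss = nn.CrossEntropyLoss()(logits, targets)
###Output
_____no_output_____
###Markdown
The loss function and optimizer actually used are selected in the next cell.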
###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.00001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 train_acc = 0 val_acc = 0 ################### # train the model # ################### model.train() for data, target in tqdm(loaders['train']): # move to GPU data, target = data.to(device), target.to(device) optimizer.zero_grad() out = model(data) # print(target) # print(out) out_idx = torch.topk(out, 1)[1] out_idx = out_idx.view(out_idx.size()[0]) train_acc += torch.sum(out_idx == target).item() loss = criterion(out, target) loss.backward() optimizer.step() train_loss += loss.item() train_loss = train_loss / (len(loaders['train']) * 32) train_acc = train_acc / (len(loaders['train']) * 32) train_acc *= 100 # train_loss = ((train_loss * batch_idx) + loss.detach().item()) / (batch_idx + 1) ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for data, target in tqdm(loaders['valid']): # move to GPU data, target = data.to(device), target.to(device) ## update the average validation loss out = model(data) loss = criterion(out, target) valid_loss += loss.item() out_idx = torch.topk(out, 1)[1] out_idx = out_idx.view(out_idx.size()[0]) val_acc += torch.sum(out_idx == target).item() valid_loss = valid_loss / (len(loaders['valid']) * 32) val_acc = val_acc / (len(loaders['valid']) * 32) val_acc *= 100 # valid_loss = ((valid_loss * batch_idx) + loss.detach().item()) / (batch_idx + 1) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) print('Epoch : {} \tTraining Acc: {:.6f} \tValidation Acc : {:.6f}'.format( epoch, train_acc, val_acc )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: valid_loss_min = valid_loss torch.save(model.state_dict(), './' + save_path) # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0.
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning) You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code transformations = transforms.Compose([ transforms.Resize((224, 224)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) train_dataset = torchvision.datasets.ImageFolder('./dogImages/train/', transform=transformations) val_dataset = datasets.ImageFolder('./dogImages/valid/', transform=transformations) test_dataset = datasets.ImageFolder('./dogImages/test/', transform=transformations) train_loader = torch.utils.data.DataLoader(dataset=train_dataset, shuffle=True, batch_size=32) val_loader = torch.utils.data.DataLoader(dataset=val_dataset, shuffle=True, batch_size=32) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, shuffle=True, batch_size=32) loaders_transfer = { 'train':train_loader, 'valid':val_loader, 'test':test_loader } ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model Architecture Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.resnet50(pretrained=True) # ResNet exposes its final fully connected layer as .fc; replace it with a 133-way head model_transfer.fc = nn.Linear(2048, 133, bias=True) model_transfer = model_transfer.to(device) ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I decided to go with ResNet. While I do not have the math to back me up, I intuitively feel that there are subtle differences between certain dog breeds. These differences might be washed away as a larger feature map is built. ResNet works by feeding the feature maps from previous layers into each subsequent layer.
This, I feel, will allow the model to identify and make good predictions for images where there are subtle differences. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adagrad(model_transfer.parameters(), lr=0.0005) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = train(100, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the Model Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in train_dataset.classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed img = Image.open(img_path) transformation = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] ) ]) img = transformation(img) img = img.view(1, *img.size()) img = img.to(device) out = model_transfer(img) out = torch.topk(out, 1)[1] # .item() converts the single-element index tensor to a plain int return class_names[out.item()] predict_breed_transfer(dog_files_short[9]) ###Output _____no_output_____ ###Markdown --- Step 5: Write your Algorithm Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then, - if a __dog__ is detected in the image, return the predicted breed. - if a __human__ is detected in the image, return the resembling dog breed. - if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience! ![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed.
def run_app(img_path): ## handle cases for a human face, dog, and neither if face_detector(img_path): breed = predict_breed_transfer(img_path) print('You Look like a ...') print(breed) elif dog_detector(img_path): breed = predict_breed_transfer(img_path) print('The dog breed is : ', breed) else: print('Error: neither a human face nor a dog was detected in the image.') ###Output _____no_output_____ ###Markdown --- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App --- In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode. The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook. --- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input.
If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png) In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road Ahead We break the notebook into separate steps. Feel free to use the links below to navigate the notebook. * [Step 0](step0): Import Datasets * [Step 1](step1): Detect Humans * [Step 2](step2): Detect Dogs * [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch) * [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning) * [Step 5](step5): Write your Algorithm * [Step 6](step6): Test Your Algorithm --- Step 0: Import Datasets Make sure that you've downloaded the required human and dog datasets: **Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.** * Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.* In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("data/lfw/*/*")) dog_files = np.array(glob("data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect Humans In this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
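If the detector later proves too permissive or too strict, `detectMultiScale` accepts tuning parameters; a small added sketch (the values are illustrative starting points, not ones used in this notebook):
###Code
# illustrative only: common knobs for Haar-cascade detection
# faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
###Output
_____no_output_____
###Markdown
The demonstration below uses the default parameters.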
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
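# (Added sketch.) Any boolean detector can be scored the same way; the helper
# name `detection_rate` is introduced here purely for illustration and is not
# part of the template. The lines below compute the same quantity inline.
def detection_rate(detector, img_paths):
    # fraction of paths on which the detector returns True
    return np.mean([detector(p) for p in img_paths])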
human_faces_detected = np.sum([face_detector(img_file) for img_file in human_files_short])
print('Percentage of human images with a detected human face: {}%'.format(human_faces_detected))

dog_faces_detected = np.sum([face_detector(img_file) for img_file in dog_files_short])
print('Percentage of dog images with a detected human face: {}%'.format(dog_faces_detected))
###Output
Percentage of human images with a detected human face: 98%
Percentage of dog images with a detected human face: 8%
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
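    # (Added note) The steps below follow the standard torchvision preprocessing
    # for ImageNet-trained models: resize, center-crop to 224x224, convert to a
    # [0, 1] tensor, normalize with the ImageNet channel mean/std, and add a
    # batch dimension with unsqueeze(0).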
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path)
    transform = transforms.Compose([transforms.Resize(255),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225]),
                                   ])
    img = transform(img).unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    predictions = VGG16(img)
    predicted_class = predictions.argmax().item()
    return predicted_class # predicted class index

VGG16_predict(dog_files_short[0])
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    predicted_class = VGG16_predict(img_path)
    # the ImageNet dog categories occupy indices 151-268, inclusive
    return predicted_class >= 151 and predicted_class <= 268 # true/false

print('Dog detected in image: ', dog_detector(dog_files_short[0]))
print('Dog detected in image: ', dog_detector(human_files_short[0]))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
dog_files_dog_detected = np.sum([dog_detector(image) for image in dog_files_short])
human_files_dog_detected = np.sum([dog_detector(image) for image in human_files_short])
print('Percentage of human images with a detected dog: {}%'.format(human_files_dog_detected))
print('Percentage of dog images with a detected dog: {}%'.format(dog_files_dog_detected))
###Output
Percentage of human images with a detected dog: 3%
Percentage of dog images with a detected dog: 99%
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds.
You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
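A note on the normalization constants used in the transforms below (added for clarity; this is the standard torchvision convention rather than anything specific to this project): pre-trained ImageNet models expect each channel to be scaled to [0, 1] and then standardized as `x' = (x - mean) / std`, with `mean = [0.485, 0.456, 0.406]` and `std = [0.229, 0.224, 0.225]`; the same constants are applied to the training, validation, and test transforms.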
###Code
import os
import torch
from torchvision import datasets
from PIL import Image
import torchvision.transforms as transforms

# check if CUDA is available
use_cuda = torch.cuda.is_available()

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
# TODO: Define transforms for the training data and testing data
data_dir = 'data/dog_images'

train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                            std=[0.229, 0.224, 0.225])])

valid_transforms = transforms.Compose([transforms.Resize(255),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                            std=[0.229, 0.224, 0.225])])

test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                           std=[0.229, 0.224, 0.225])])

# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
valid_data = datasets.ImageFolder(data_dir + '/valid', transform=valid_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)

trainloader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=32, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=True)
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:- For training, the images are randomly resized and cropped to 224x224 pixels, randomly rotated by up to 30 degrees, and randomly flipped horizontally. For the validation and test images, I resize the shorter side to 255 pixels and then center-crop to 224x224; this is the usual convention for models of this kind, and it gives the network an adequate input size to train and learn from.- Yes, I decided to augment the data so that there is effectively more data for every breed; a horizontal flip seemed enough, since random rotation is already applied on top of it. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code
num_classes = len(glob('data/dog_images/train/*'))

import torch.nn as nn

# define the CNN architecture
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        ## Follow the architecture of VGG-16
        # Size 224
        self.conv1 = nn.Conv2d(3, 64, kernel_size=5, padding=2)
        self.conv2 = nn.Conv2d(64, 128, kernel_size=5, padding=2)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=5, padding=2)
        self.conv4 = nn.Conv2d(256, 512, kernel_size=5, padding=2)
        self.conv5 = nn.Conv2d(512, 1024, kernel_size=5, padding=2)

        # Batch Normalization is a technique to improve the performance and
        # stability of an artificial neural network.
        # It provides zero-mean, unit-variance inputs to the following layers.
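        # Concretely, for each channel c, BatchNorm2d computes
        #   x_hat = (x - batch_mean_c) / sqrt(batch_var_c + eps)
        #   y     = gamma_c * x_hat + beta_c
        # with learnable per-channel parameters gamma_c and beta_c.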
        self.batch_norm64 = nn.BatchNorm2d(64)
        self.batch_norm128 = nn.BatchNorm2d(128)
        self.batch_norm256 = nn.BatchNorm2d(256)
        self.batch_norm512 = nn.BatchNorm2d(512)
        self.batch_norm1024 = nn.BatchNorm2d(1024)

        self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(0.3)

        self.fc1 = nn.Linear(1024 * 7 * 7, 1024)
        self.fc2 = nn.Linear(1024, 1024)
        self.fc3 = nn.Linear(1024, 133)

    # Feedforward
    def forward(self, x):
        x = self.relu(self.batch_norm64(self.conv1(x)))
        x = self.max_pool(x)
        x = self.relu(self.batch_norm128(self.conv2(x)))
        x = self.max_pool(x)
        x = self.relu(self.batch_norm256(self.conv3(x)))
        x = self.max_pool(x)
        x = self.relu(self.batch_norm512(self.conv4(x)))
        x = self.max_pool(x)
        x = self.relu(self.batch_norm1024(self.conv5(x)))
        x = self.max_pool(x)
        # view() returns a new tensor with the same data but a different shape;
        # the -1 means that dimension is inferred from the others.
        x = x.view(x.size(0), -1)
        # x = self.dropout(x)
        x = self.dropout(self.relu(self.fc1(x)))
        x = self.dropout(self.relu(self.fc2(x)))
        x = self.fc3(x)
        return x

# Create CNN instance!
model_scratch = Net()

# If CUDA is available, move the model to the GPU
if use_cuda:
    model_scratch.cuda()

model_scratch
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ My motivation was to imitate the VGG-16 model, so initially, as an experiment, I duplicated the convolutional layers in the last two stages. But because of the large number of parameters the model took a long time per epoch, so I reduced the number of layers to cut the training time. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = torch.optim.SGD(model_scratch.parameters(), lr=0.01, momentum=0.9)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
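A side note on the loss bookkeeping in the cell below (added for clarity): the update `loss_avg = loss_avg + (1 / (batch_idx + 1)) * (loss - loss_avg)` is an incremental mean; after n batches it equals (L_1 + ... + L_n) / n, the plain average of the batch losses, so no separate running sum is needed.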
###Code
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

loaders_scratch = {
    'train': trainloader,
    'valid': validloader,
    'test': testloader
}

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            # Clear the previously accumulated gradients
            optimizer.zero_grad()
            # Forward pass (use the `model` argument, not the global model_scratch)
            output = model(data)
            # Calculate loss
            loss = criterion(output, target)
            # Backward pass
            loss.backward()
            # Update parameters
            optimizer.step()
            # Update training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            # Forward pass
            with torch.no_grad():
                output = model(data)
            # Calculate loss
            loss = criterion(output, target)
            # Update validation loss
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            print('Validation loss decreased ({:.6f} ----> {:.6f}). Saving model...'.format(valid_loss_min, valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

# train the model
n_epochs = 20

model_scratch = train(n_epochs, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
Epoch: 1 	Training Loss: 4.838781 	Validation Loss: 4.830793
Validation loss decreased (inf ----> 4.830793). Saving model...
Epoch: 2 Training Loss: 4.643821 Validation Loss: 4.851259 Epoch: 3 Training Loss: 4.546237 Validation Loss: 4.940435 Epoch: 4 Training Loss: 4.484255 Validation Loss: 4.907978 Epoch: 5 Training Loss: 4.408974 Validation Loss: 4.910291 Epoch: 6 Training Loss: 4.389276 Validation Loss: 5.579831 Epoch: 7 Training Loss: 4.321309 Validation Loss: 5.543218 Epoch: 8 Training Loss: 4.297254 Validation Loss: 5.484969 Epoch: 9 Training Loss: 4.239452 Validation Loss: 5.154432 Epoch: 10 Training Loss: 4.190886 Validation Loss: 5.114458 Epoch: 12 Training Loss: 4.115775 Validation Loss: 5.318768 Epoch: 13 Training Loss: 4.065929 Validation Loss: 5.060655 Epoch: 14 Training Loss: 4.025662 Validation Loss: 4.888445 Epoch: 15 Training Loss: 3.973153 Validation Loss: 5.855287 Epoch: 16 Training Loss: 3.952958 Validation Loss: 4.946625 Epoch: 17 Training Loss: 3.887287 Validation Loss: 5.491249 Epoch: 18 Training Loss: 3.844754 Validation Loss: 5.246060 Epoch: 19 Training Loss: 3.769247 Validation Loss: 5.386840 Epoch: 20 Training Loss: 3.725348 Validation Loss: 4.883481 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 4.820420 Test Accuracy: 2% (17/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders # Using the same data loaders from previous step ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. 
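In the cell below, ResNet-50 is used as a fixed feature extractor: all pre-trained parameters are frozen by setting `requires_grad = False`, so the optimizer only updates the replacement final layer. ResNet-50's final fully-connected layer consumes a 2048-dimensional feature vector, which is why the new head is `nn.Linear(2048, 133)`, mapping those features to the 133 breed classes.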
###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
# Download the pretrained ResNet-50 model
model_transfer = models.resnet50(pretrained=True)

# Freeze the parameters of the model to avoid backpropagating through them
for param in model_transfer.parameters():
    param.requires_grad = False

model_transfer.fc = nn.Linear(2048, 133, bias=True)

# # Define dog breed classifier part of model_transfer
# classifier = nn.Sequential(nn.Linear(25088, 4096),
#                            nn.ReLU(),
#                            nn.Dropout(0.5),
#                            nn.Linear(4096, 512),
#                            nn.ReLU(),
#                            nn.Dropout(0.5),
#                            nn.Linear(512, num_classes))

# Replace the original classifier with the dog breed classifier from above
# model_transfer.classifier = classifier

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.Adam(model_transfer.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code
# train the model (uncomment the train() call below to run training)
loaders_transfer = loaders_scratch  # reuse the data loaders from the previous step
# model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in train_data.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    return None
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither.
Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    pass  # TODO: not implemented in this draft; a completed run_app appears earlier in this document
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**.
Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. 
In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm num_files = 20 human_files_short = human_files[:num_files] dog_files_short = dog_files[:num_files] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
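# (Added note) The loops below estimate each detector's hit rate empirically:
# rate = (images on which the detector returns True) / (images tested).
# Only num_files = 20 images per set are sampled here, so the estimates are coarse.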
human_result = [] dog_result = [] for image in human_files_short: human_result.append(face_detector(image)) for image in dog_files_short: dog_result.append(face_detector(image)) human_percentage = human_result.count(True) / len(human_files_short) dog_percentage = dog_result.count(True) / len(dog_files_short) print('Human Percentage {}'.format(human_percentage)) print('Dog Percentage {}'.format(dog_percentage)) ###Output Human Percentage 1.0 Dog Percentage 0.1 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() print('Is cuda available {}'.format(use_cuda)) # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Is cuda available True ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from torch.autograd import Variable from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
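    # (Added note) The manual pipeline below mirrors what
    # transforms.Resize(256) + CenterCrop(224) + ToTensor() + Normalize(mean, std)
    # would do: resize the shorter side to 256, crop a 224x224 window, scale
    # pixels to [0, 1], standardize with the ImageNet mean/std, and reorder the
    # array to the NCHW layout the network expects.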
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])

    image = Image.open(img_path).convert('RGB')  # ensure 3 channels
    width, height = image.size

    if width < height:
        new_width = 256
        new_height = 256 * height / width
    else:
        new_height = 256
        new_width = 256 * width / height

    new_width = int(new_width)
    new_height = int(new_height)

    # PIL's resize returns a new image, so the result must be assigned
    image = image.resize((new_width, new_height))

    offset = 0.5 * (256 - 224)

    image = image.crop((offset, offset, 256 - offset, 256 - offset))

    # 8-bit pixel values lie in [0, 255]
    image = np.array(image) / 255
    image = (image - mean) / std

    image = image.transpose((2, 0, 1))
    image = np.expand_dims(image, axis=0)
    image = torch.from_numpy(image).float()
    image = Variable(image)

    if use_cuda:
        image = image.cuda()

    output = VGG16(image)

    return output # raw model output; dog_detector extracts the top class index below
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    output = VGG16_predict(img_path)
    probs = torch.exp(output).data
    top_prob, index = probs.topk(1, 1, sorted=True)
    index = index[0, 0]
    return index >= 151 and index <= 268 # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
human_result = []
dog_result = []

for image in human_files_short:
    human_result.append(dog_detector(image))
for image in dog_files_short:
    dog_result.append(dog_detector(image))

human_percentage = human_result.count(True) / len(human_files_short)
dog_percentage = dog_result.count(True) / len(dog_files_short)

print('Human Percentage {}'.format(human_percentage))
print('Dog Percentage {}'.format(dog_percentage))
###Output
Human Percentage 0.0
Dog Percentage 0.85
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
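# (Optional sketch, added; not part of the original submission.) The same
# 151-268 index check works with any torchvision ImageNet classifier, since
# they all share the 1000-class ImageNet label list. For example, with ResNet-50:
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image

resnet50 = models.resnet50(pretrained=True).eval()
if use_cuda:
    resnet50 = resnet50.cuda()

imagenet_tf = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

def resnet50_dog_detector(img_path):
    # preprocess, predict, and check the ImageNet dog index range 151-268
    img = imagenet_tf(Image.open(img_path).convert('RGB')).unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    with torch.no_grad():
        idx = resnet50(img).argmax(dim=1).item()
    return 151 <= idx <= 268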
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets
from torch.autograd import Variable
from PIL import Image
import torchvision.transforms as transforms

data_dir = 'dogImages'
loaders_scratch = {}
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'

train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean, std)])

valid_transforms = transforms.Compose([transforms.Resize(256),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean, std)])

test_transforms = transforms.Compose([transforms.Resize(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean, std)])

traindatasets = datasets.ImageFolder(train_dir, transform=train_transforms)
validdatasets = datasets.ImageFolder(valid_dir, transform=valid_transforms)
testdatasets = datasets.ImageFolder(test_dir, transform=test_transforms)

train_loader = torch.utils.data.DataLoader(traindatasets, batch_size=64, shuffle=True)
valid_loader = torch.utils.data.DataLoader(validdatasets, batch_size=64)
test_loader = torch.utils.data.DataLoader(testdatasets, batch_size=64)

loaders_scratch['train'] = train_loader
loaders_scratch['valid'] = valid_loader
loaders_scratch['test'] = test_loader
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: My code resizes each image to 3 x 224 x 224 by cropping it. I chose this input size because it is a typical size for image-classification CNNs, though I think 3 x 128 x 128 could also be a good choice, based on the Inception128px paper. I augment the dataset with random rotation and random horizontal flips. With these transformations I effectively get a larger dataset with spatial variance across the images, so my CNN can learn patterns in the images better. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code
import os
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict

CLASSIFIER_INPUT_SIZE = 128 * 14 * 14
OUTPUT_SIZE = len(os.listdir(train_dir))

print('Classifier input size {}'.format(CLASSIFIER_INPUT_SIZE))

class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        self.features = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(3, 32, 3, 1, 1)),
            ('relu1', nn.ReLU(True)),
            ('batch1', nn.BatchNorm2d(32)),
            ('maxpooling1', nn.MaxPool2d(2)),
            ('conv2', nn.Conv2d(32, 64, 3, 1, 1)),
            ('relu2', nn.ReLU(True)),
            ('batch2', nn.BatchNorm2d(64)),
            ('maxpooling2', nn.MaxPool2d(2)),
            ('conv3', nn.Conv2d(64, 128, 3, 1, 1)),
            ('relu3', nn.ReLU(True)),
            ('batch3', nn.BatchNorm2d(128)),
            ('maxpooling3', nn.MaxPool2d(2)),
            ('conv4', nn.Conv2d(128, 128, 3, 1, 1)),
            ('relu4', nn.ReLU(True)),
            ('batch4', nn.BatchNorm2d(128)),
            ('maxpooling4', nn.MaxPool2d(2)),
        ]))

        self.classifier = nn.Sequential(OrderedDict([
            ('linear5', nn.Linear(CLASSIFIER_INPUT_SIZE, 1024)),
            ('relu5', nn.ReLU(True)),
            ('linear7', nn.Linear(1024, OUTPUT_SIZE)),
        ]))

    def forward(self, x):
        ## Define forward behavior
        output = self.features(x)
        output = output.view(output.size(0), -1)
        output = self.classifier(output)
        return output

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
    print('Model successfully moved to cuda')
else:
    print('No cuda available')
###Output
Classifier input size 25088
Model successfully moved to cuda
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ My CNN has 4 convolutional layers in total, which gives a light model that is simple to train from scratch. I chose the convolutional output to have the same size as VGG-16's, 25088 features. I started with a very deep model, similar to the VGG-16 architecture, but it was too deep and would not learn in a reasonable time. I then restarted with only two layers and a stride of 2, but that downsampled the image too fast and lost a lot of "detail". In the end I chose "only" 4 layers, each followed by max pooling with stride 2, to downsample the image gradually without "losing" details in the earlier layers. I chose to increase the number of output filters up to 128 and shrink the feature map to 128 x 14 x 14, to avoid an overly memory-expensive network without losing the most important details. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim

folders = os.listdir(train_dir)
num_items = 0
for folder in folders:
    num_items += len(os.listdir(os.path.join(train_dir, folder)))
print('Num items {}'.format(num_items))

### TODO: select loss function
learning_rate = 0.02
print('Learning rate {}'.format(learning_rate))
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=learning_rate, momentum=0.9, weight_decay=1e-3)
###Output
Num items 6680
Learning rate 0.02
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below.
[Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile import numpy as np ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU data, target = Variable(data), Variable(target) optimizer.zero_grad() # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.item() - train_loss)) ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() correct = 0. total = 0. for batch_idx, (data, target) in enumerate(loaders['valid']): data, target = Variable(data), Variable(target) # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() output = model(data) loss = criterion(output, target).item() valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss - valid_loss)) pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) print('\nValidation Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total)) if valid_loss < valid_loss_min: torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss ## TODO: save the model if validation loss has decreased training_loss = 0 valid_loss = 0 # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.777310 Validation Loss: 4.519900 Validation Accuracy: 5% (42/835) Epoch: 2 Training Loss: 4.534286 Validation Loss: 4.479089 Validation Accuracy: 7% (65/835) Epoch: 3 Training Loss: 4.410173 Validation Loss: 4.195355 Validation Accuracy: 7% (59/835) Epoch: 4 Training Loss: 4.259764 Validation Loss: 4.030461 Validation Accuracy: 8% (68/835) Epoch: 5 Training Loss: 4.105577 Validation Loss: 4.265612 Validation Accuracy: 7% (60/835) Epoch: 6 Training Loss: 3.992488 Validation Loss: 3.924752 Validation Accuracy: 12% (104/835) Epoch: 7 Training Loss: 3.861263 Validation Loss: 3.868459 Validation Accuracy: 11% (97/835) Epoch: 8 Training Loss: 3.729037 Validation Loss: 3.822270 Validation Accuracy: 13% (111/835) Epoch: 9 Training Loss: 3.633309 Validation Loss: 4.092292 Validation Accuracy: 11% (97/835) Epoch: 10 Training Loss: 3.547998 Validation Loss: 3.696106 Validation Accuracy: 15% (132/835) Epoch: 11 Training Loss: 3.459809 Validation Loss: 3.593946 Validation Accuracy: 17% (142/835) Epoch: 12 Training Loss: 3.409940 Validation Loss: 3.675973 Validation Accuracy: 17% (144/835) Epoch: 13 Training Loss: 3.359961 Validation Loss: 3.640369 Validation Accuracy: 19% (160/835) Epoch: 14 Training Loss: 3.258942 Validation Loss: 3.476987 Validation Accuracy: 20% (168/835) Epoch: 15 Training Loss: 3.191183 Validation Loss: 3.629390 Validation Accuracy: 19% (159/835) Epoch: 16 Training Loss: 3.122905 Validation Loss: 3.626160 Validation Accuracy: 19% (160/835) Epoch: 17 Training Loss: 3.098581 Validation Loss: 3.503741 Validation Accuracy: 21% (177/835) Epoch: 18 Training Loss: 3.028547 Validation Loss: 3.459014 Validation Accuracy: 20% (169/835) Epoch: 19 Training Loss: 2.976369 Validation Loss: 3.559885 Validation Accuracy: 21% (176/835) Epoch: 20 Training Loss: 2.924535 Validation Loss: 3.927740 Validation Accuracy: 18% (152/835) Epoch: 21 Training Loss: 2.880781 Validation Loss: 3.680494 Validation Accuracy: 18% (153/835) Epoch: 22 Training Loss: 2.816296 Validation Loss: 3.234332 Validation Accuracy: 23% (194/835) Epoch: 23 Training Loss: 2.765469 Validation Loss: 3.208399 Validation Accuracy: 23% (196/835) Epoch: 24 Training Loss: 2.698041 Validation Loss: 3.374178 Validation Accuracy: 25% (209/835) Epoch: 25 Training Loss: 2.706004 Validation Loss: 3.312982 Validation Accuracy: 25% (211/835) Epoch: 26 Training Loss: 2.662891 Validation Loss: 3.420150 Validation Accuracy: 22% (184/835) Epoch: 27 Training Loss: 2.608623 Validation Loss: 3.373385 Validation Accuracy: 25% (213/835) Epoch: 28 Training Loss: 2.590501 Validation Loss: 3.643311 Validation Accuracy: 22% (185/835) Epoch: 29 Training Loss: 2.487463 Validation Loss: 3.462132 Validation Accuracy: 26% (218/835) Epoch: 30 Training Loss: 2.472647 Validation Loss: 3.695313 Validation Accuracy: 24% (202/835) Epoch: 31 Training Loss: 2.453033 Validation Loss: 3.410683 Validation Accuracy: 23% (199/835) Epoch: 32 Training Loss: 2.449828 Validation Loss: 3.706874 Validation Accuracy: 24% (204/835) 
Epoch: 33 Training Loss: 2.361570 Validation Loss: 3.772785 Validation Accuracy: 22% (188/835) Epoch: 34 Training Loss: 2.366759 Validation Loss: 3.865519 Validation Accuracy: 24% (203/835) Epoch: 35 Training Loss: 2.346021 Validation Loss: 3.608007 Validation Accuracy: 23% (195/835) Epoch: 36 Training Loss: 2.316890 Validation Loss: 3.764065 Validation Accuracy: 22% (189/835) Epoch: 37 Training Loss: 2.229207 Validation Loss: 3.512529 Validation Accuracy: 22% (185/835) Epoch: 38 Training Loss: 2.220559 Validation Loss: 3.480146 Validation Accuracy: 25% (212/835) Epoch: 39 Training Loss: 2.180157 Validation Loss: 3.861901 Validation Accuracy: 23% (195/835) Epoch: 40 Training Loss: 2.152197 Validation Loss: 3.635415 Validation Accuracy: 25% (213/835) Epoch: 41 Training Loss: 2.152969 Validation Loss: 3.620613 Validation Accuracy: 26% (221/835) Epoch: 42 Training Loss: 2.103179 Validation Loss: 3.740241 Validation Accuracy: 24% (208/835) Epoch: 43 Training Loss: 2.107478 Validation Loss: 3.744149 Validation Accuracy: 24% (205/835) Epoch: 44 Training Loss: 2.061158 Validation Loss: 3.622105 Validation Accuracy: 25% (213/835) Epoch: 45 Training Loss: 2.035155 Validation Loss: 3.891294 Validation Accuracy: 23% (199/835) Epoch: 46 Training Loss: 2.072779 Validation Loss: 3.619996 Validation Accuracy: 25% (217/835) Epoch: 47 Training Loss: 2.034036 Validation Loss: 3.341177 Validation Accuracy: 27% (227/835) Epoch: 48 Training Loss: 2.030651 Validation Loss: 3.428611 Validation Accuracy: 24% (208/835) Epoch: 49 Training Loss: 1.938107 Validation Loss: 3.786107 Validation Accuracy: 26% (220/835) Epoch: 50 Training Loss: 1.909426 Validation Loss: 3.789727 Validation Accuracy: 27% (229/835) Epoch: 51 Training Loss: 1.945745 Validation Loss: 4.114004 Validation Accuracy: 24% (205/835) Epoch: 52 Training Loss: 1.918756 Validation Loss: 3.903389 Validation Accuracy: 19% (162/835) Epoch: 53 Training Loss: 1.914105 Validation Loss: 4.120264 Validation Accuracy: 24% (206/835) Epoch: 54 Training Loss: 1.943195 Validation Loss: 3.973119 Validation Accuracy: 26% (220/835) Epoch: 55 Training Loss: 1.882392 Validation Loss: 3.775207 Validation Accuracy: 25% (213/835) Epoch: 56 Training Loss: 1.878068 Validation Loss: 3.976165 Validation Accuracy: 26% (220/835) Epoch: 57 Training Loss: 1.851898 Validation Loss: 4.042262 Validation Accuracy: 25% (216/835) Epoch: 58 Training Loss: 1.879067 Validation Loss: 4.148463 Validation Accuracy: 25% (209/835) Epoch: 59 Training Loss: 1.848723 Validation Loss: 3.964169 Validation Accuracy: 26% (225/835) Epoch: 60 Training Loss: 1.748424 Validation Loss: 3.742666 Validation Accuracy: 25% (215/835) Epoch: 61 Training Loss: 1.787618 Validation Loss: 4.057632 Validation Accuracy: 20% (167/835) Epoch: 62 Training Loss: 1.792708 Validation Loss: 4.042075 Validation Accuracy: 27% (227/835) Epoch: 63 Training Loss: 1.762781 Validation Loss: 4.142543 Validation Accuracy: 25% (216/835) Epoch: 64 Training Loss: 1.783208 Validation Loss: 3.793295 Validation Accuracy: 26% (221/835) Epoch: 65 Training Loss: 1.747741 Validation Loss: 3.941714 Validation Accuracy: 26% (220/835) Epoch: 66 Training Loss: 1.661829 Validation Loss: 3.654822 Validation Accuracy: 28% (235/835) Epoch: 67 Training Loss: 1.770068 Validation Loss: 4.017632 Validation Accuracy: 26% (222/835) Epoch: 68 Training Loss: 1.730649 Validation Loss: 3.898750 Validation Accuracy: 26% (222/835) Epoch: 69 Training Loss: 1.727663 Validation Loss: 4.249949 Validation Accuracy: 25% (217/835) Epoch: 70 Training Loss: 1.675278 
Validation Loss: 4.029518 Validation Accuracy: 25% (210/835) Epoch: 71 Training Loss: 1.688793 Validation Loss: 4.426994 Validation Accuracy: 23% (195/835) Epoch: 72 Training Loss: 1.684037 Validation Loss: 4.171834 Validation Accuracy: 25% (211/835) Epoch: 73 Training Loss: 1.714822 Validation Loss: 4.411053 Validation Accuracy: 27% (227/835) Epoch: 74 Training Loss: 1.691088 Validation Loss: 3.927490 Validation Accuracy: 24% (201/835) Epoch: 75 Training Loss: 1.614402 Validation Loss: 4.414209 Validation Accuracy: 24% (208/835) Epoch: 76 Training Loss: 1.658148 Validation Loss: 4.129785 Validation Accuracy: 25% (212/835) Epoch: 77 Training Loss: 1.660147 Validation Loss: 4.120088 Validation Accuracy: 25% (213/835) Epoch: 78 Training Loss: 1.605301 Validation Loss: 3.767071 Validation Accuracy: 25% (210/835) Epoch: 79 Training Loss: 1.622279 Validation Loss: 4.740960 Validation Accuracy: 23% (196/835) Epoch: 80 Training Loss: 1.647179 Validation Loss: 4.139406 Validation Accuracy: 23% (195/835) Epoch: 81 Training Loss: 1.552050 Validation Loss: 4.011592 Validation Accuracy: 24% (202/835) Epoch: 82 Training Loss: 1.640856 Validation Loss: 4.076022 Validation Accuracy: 25% (209/835) Epoch: 83 Training Loss: 1.579748 Validation Loss: 4.434347 Validation Accuracy: 26% (224/835) Epoch: 84 Training Loss: 1.615021 Validation Loss: 4.278351 Validation Accuracy: 25% (215/835) Epoch: 85 Training Loss: 1.645976 Validation Loss: 4.368806 Validation Accuracy: 26% (223/835) Epoch: 86 Training Loss: 1.624756 Validation Loss: 4.413261 Validation Accuracy: 24% (203/835) Epoch: 87 Training Loss: 1.616026 Validation Loss: 4.924155 Validation Accuracy: 20% (170/835) Epoch: 88 Training Loss: 1.576312 Validation Loss: 4.400606 Validation Accuracy: 25% (214/835) Epoch: 89 Training Loss: 1.576409 Validation Loss: 3.980802 Validation Accuracy: 25% (215/835) Epoch: 90 Training Loss: 1.520435 Validation Loss: 4.045828 Validation Accuracy: 26% (220/835) Epoch: 91 Training Loss: 1.572095 Validation Loss: 4.694325 Validation Accuracy: 23% (199/835) Epoch: 92 Training Loss: 1.472982 Validation Loss: 4.987889 Validation Accuracy: 24% (205/835) Epoch: 93 Training Loss: 1.530399 Validation Loss: 4.133497 Validation Accuracy: 28% (240/835) Epoch: 94 Training Loss: 1.546266 Validation Loss: 4.487242 Validation Accuracy: 24% (205/835) Epoch: 95 Training Loss: 1.536525 Validation Loss: 4.647429 Validation Accuracy: 21% (180/835) Epoch: 96 Training Loss: 1.547354 Validation Loss: 4.120974 Validation Accuracy: 23% (197/835) Epoch: 97 Training Loss: 1.525024 Validation Loss: 4.417398 Validation Accuracy: 24% (207/835) Epoch: 98 Training Loss: 1.520941 Validation Loss: 4.912024 Validation Accuracy: 22% (192/835) Epoch: 99 Training Loss: 1.508975 Validation Loss: 4.539204 Validation Accuracy: 23% (195/835) Epoch: 100 Training Loss: 1.458140 Validation Loss: 4.439117 Validation Accuracy: 23% (195/835) ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. 
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.143963 Test Accuracy: 26% (225/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning) You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = {} loaders_transfer['train'] = train_loader loaders_transfer['valid'] = valid_loader loaders_transfer['test'] = test_loader ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model Architecture Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import os # used below to count the breed folders from collections import OrderedDict # used to name the classifier layers import torchvision.models as models import torch.nn as nn OUTPUT_SIZE = len(os.listdir(train_dir)) use_cuda = torch.cuda.is_available() ## TODO: Specify model architecture model_transfer = models.vgg16(pretrained=True) # replace only the classifier head; the pretrained convolutional features are kept model_transfer.classifier = nn.Sequential(OrderedDict([ ('linear5', nn.Linear(25088, 1024)), ('relu5', nn.ReLU(True)), ('linear6', nn.Linear(1024, 1024)), ('relu6', nn.ReLU(True)), ('linear7', nn.Linear(1024, OUTPUT_SIZE)) ])) if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I chose VGG16 and replaced its classifier with a new head: two hidden linear layers of 1024 units each (with ReLU activations) and a final linear layer whose output size equals the number of dog breed classes. I think this architecture is suitable for the current problem because the pretrained convolutional layers already provide strong ImageNet features, so with relatively little training effort the new classifier can reach good accuracy. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
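Before specifying the loss and optimizer, a quick sanity check (an added sketch, not part of the original notebook) shows how small the trainable classifier head is relative to the full network, assuming `model_transfer` from the cell above: ###Code
# Sketch: compare the parameter count of the new classifier head with the
# whole network (assumes model_transfer as defined in the cell above).
classifier_params = sum(p.numel() for p in model_transfer.classifier.parameters())
total_params = sum(p.numel() for p in model_transfer.parameters())
print('classifier parameters: {:,}'.format(classifier_params))
print('total parameters: {:,}'.format(total_params))
###Output _____no_output_____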
###Code import torch.optim as optim criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.01, momentum=0.9) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model n_epochs = 10 model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 3.025239 Validation Loss: 1.118613 Validation Accuracy: 62% (522/835) Epoch: 2 Training Loss: 1.589267 Validation Loss: 1.279551 Validation Accuracy: 70% (590/835) Epoch: 3 Training Loss: 1.322781 Validation Loss: 0.734560 Validation Accuracy: 75% (630/835) Epoch: 4 Training Loss: 1.237176 Validation Loss: 0.709053 Validation Accuracy: 75% (631/835) Epoch: 5 Training Loss: 1.168923 Validation Loss: 0.648575 Validation Accuracy: 79% (660/835) Epoch: 6 Training Loss: 1.088965 Validation Loss: 0.799079 Validation Accuracy: 78% (652/835) Epoch: 7 Training Loss: 1.043683 Validation Loss: 0.602357 Validation Accuracy: 80% (673/835) Epoch: 8 Training Loss: 1.018101 Validation Loss: 0.659810 Validation Accuracy: 79% (667/835) Epoch: 9 Training Loss: 0.960181 Validation Loss: 0.716184 Validation Accuracy: 80% (668/835) Epoch: 10 Training Loss: 0.955277 Validation Loss: 0.709732 Validation Accuracy: 77% (651/835) ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.640797 Test Accuracy: 80% (673/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0] model_transfer.load_state_dict(torch.load('model_transfer.pt')) class_names = [item[4:].replace("_", " ") for item in traindatasets.classes] def preprocess_image(image): mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) width, height = image.size if width < height: new_width = 256 new_height = 256 * height / width else: new_height = 256 new_width = 256 * width / height new_width = int(new_width) new_height = int(new_height) image = image.resize((new_width, new_height)) # resize returns a new image, so the result must be assigned offset = 0.5 * (256 - 224) image = image.crop((offset, offset, 256 - offset, 256 - offset)) image = np.array(image) / 255 # scale pixel values to [0, 1] image = (image - mean) / std image = image.transpose((2, 0, 1)) image = np.expand_dims(image, axis=0) image = torch.from_numpy(image).float() if use_cuda: image = image.cuda() return image def predict_breed_transfer(img_path): # load the image and return the predicted breed image = Image.open(img_path).convert('RGB') # convert so grayscale/RGBA inputs also work image = preprocess_image(image) output = model_transfer(image) pred = output.data.max(1, keepdim=True)[1] return pred ###Output _____no_output_____ ###Markdown --- Step 5: Write your Algorithm Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): print('image path {}'.format(img_path)) image = Image.open(img_path) plt.imshow(image) plt.show() if dog_detector(img_path): prediction = predict_breed_transfer(img_path) print('dog prediction {}'.format(prediction)) print('You look like a ...') print(class_names[prediction[0][0]]) elif face_detector(img_path): prediction = predict_breed_transfer(img_path) print('human prediction {}'.format(prediction)) print('You look like a ...') print(class_names[prediction[0][0]]) else: print('There was an error. This image is not supported') # display the image, along with bounding box ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) The output is worse than I expected.
1. I could use a deeper classifier whose first layer has 4096 output units, which could learn additional patterns and classify images more accurately. 2. I could add dropout to the classifier to generalize better. 3. I could add batch normalization to improve performance. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. my_images = np.array(glob("my_images/*")) ## suggested code, below for file in my_images: run_app(file) ###Output image path my_images/educazione-gentile-cane-640x425.jpg ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App --- In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode. The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook. --- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png) In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists.
Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. 
In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face Detector We can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector __Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. count_human_face = 0 for file in human_files_short: if face_detector(file): count_human_face += 1 # with 100 images per set, these raw counts equal percentages print("Percentage human face in human_files: {}".format(count_human_face)) count_human_face = 0 for file in dog_files_short: if face_detector(file): count_human_face += 1 print("Percentage human face in dog_files: {}".format(count_human_face)) ###Output Percentage human face in human_files: 98 Percentage human face in dog_files: 17 ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect Dogs In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 Model The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks.
ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # switch to evaluation mode so dropout layers are disabled during inference VGG16.eval() # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:06<00:00, 83963402.46it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained Model In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image image = Image.open(img_path) image = image.convert('RGB') normalize = transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)) transform = transforms.Compose([ transforms.Resize(size=(224, 224)), transforms.ToTensor(), normalize]) image = transform(image) image = image.unsqueeze(0) if use_cuda: image = image.cuda() result = VGG16(image) return torch.argmax(result).item() # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog Detector While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. result = VGG16_predict(img_path) # ImageNet indices 151-268 (inclusive) are the dog categories return 151 <= result <= 268 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector __Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ 0% of the human images and 100% of the dog images have a detected dog (see the output below). ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. dog_image_count_in_human_file = 0 dog_image_count_in_dog_file = 0 for file in human_files_short: if dog_detector(file): dog_image_count_in_human_file += 1 for file in dog_files_short: if dog_detector(file): dog_image_count_in_dog_file += 1 percentage_dog_image_in_human_file = dog_image_count_in_human_file / len(human_files_short) * 100 percentage_dog_image_in_dog_file = dog_image_count_in_dog_file / len(dog_files_short) * 100 print("Percentage of dog image in human file: {}".format(percentage_dog_image_in_human_file)) print("Percentage of dog image in dog file: {}".format(percentage_dog_image_in_dog_file)) ###Output Percentage of dog image in human file: 0.0 Percentage of dog image in dog file: 100.0 ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch) Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. (Brittany vs. Welsh Springer Spaniel comparison images omitted.) It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). (Curly-Coated Retriever vs. American Water Spaniel comparison images omitted.) Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. (Yellow, Chocolate, and Black Labrador comparison images omitted.) We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!
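As a side note, the mild class imbalance mentioned above can be checked directly. The sketch below is an addition (not part of the original notebook) and assumes the dataset lives at `/data/dog_images/train`, as elsewhere in this notebook: ###Code
# Sketch: count training images per breed to gauge the class imbalance
# mentioned above (the dataset path is assumed from earlier cells).
import os
train_root = '/data/dog_images/train'
counts = {breed: len(os.listdir(os.path.join(train_root, breed)))
          for breed in sorted(os.listdir(train_root))}
print('number of classes:', len(counts))
print('smallest class size:', min(counts.values()))
print('largest class size:', max(counts.values()))
###Output _____no_output_____ ###Markdown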
(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes batch_size = 16 num_workers = 0 data_dir = '/data/dog_images/' train_dir = os.path.join(data_dir, 'train/') valid_dir = os.path.join(data_dir, 'valid/') test_dir = os.path.join(data_dir,'test/') normalize = transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)) transform_train = transforms.Compose([ transforms.Resize(size=(224, 224)), transforms.CenterCrop((224,224)), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.ToTensor(), normalize]) # validation and test data get no random augmentation, so their results stay deterministic transform_valid = transforms.Compose([ transforms.Resize(size=(224, 224)), transforms.CenterCrop((224,224)), transforms.ToTensor(), normalize]) transform_test = transforms.Compose([ transforms.Resize(size=(224, 224)), transforms.CenterCrop((224,224)), transforms.ToTensor(), normalize]) train_data = datasets.ImageFolder(train_dir, transform=transform_train) valid_data = datasets.ImageFolder(valid_dir, transform=transform_valid) test_data = datasets.ImageFolder(test_dir, transform=transform_test) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=False) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False) ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: The images are resized to 224x224 and center-cropped. The input tensor size is 224x224 because that is the input size the VGG-style architecture expects. The training data is augmented with random horizontal flips and random rotations; augmentation is applied to the training set only. (IMPLEMENTATION) Model Architecture Create a CNN to classify dog breed. Use the template in the code cell below.
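As a quick sanity check for the size of the first fully connected layer used below (a sketch added here, assuming 224x224 inputs and three 2x2 max-pool stages as in the following cell), the flattened feature size can be derived directly: ###Code
# Sketch: derive the flattened feature size that feeds the first fully
# connected layer (assumes 224x224 inputs and three 2x2 max-pools).
side = 224
for _ in range(3):  # each max-pool halves the spatial resolution
    side //= 2
print(side)              # 28
print(64 * side * side)  # 50176 = 64 channels * 28 * 28
###Output _____no_output_____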
###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 16, 3, padding = 1) self.conv2 = nn.Conv2d(16, 32, 3, padding = 1) self.conv3 = nn.Conv2d(32, 64, 3, padding = 1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(64 * 28 * 28, 800) self.fc2 = nn.Linear(800, 133) # 133 dog breed classes in the dataset self.batch_norm = nn.BatchNorm1d(num_features=800) self.dropout = nn.Dropout(0.2) def forward(self, x): ## Define forward behavior x = self.pool(F.relu(self.conv1(x))) x = self.dropout(x) x = self.pool(F.relu(self.conv2(x))) x = self.dropout(x) x = self.pool(F.relu(self.conv3(x))) x = self.dropout(x) x = x.view(x.size(0), -1) x = F.relu(self.batch_norm(self.fc1(x))) x = self.fc2(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ I used three convolutional layers followed by two fully connected layers. ReLU activations are used throughout, each convolutional layer is followed by 2x2 max pooling, and dropout is applied after every convolutional block to reduce overfitting; the first fully connected layer is also batch-normalized. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr = 0.02) loaders_scratch = { 'train': train_loader, 'valid': valid_loader, 'test': test_loader } ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the Model Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
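One optional refinement, sketched below as an addition (it is not used in the recorded training run that follows): a learning-rate scheduler could decay the SGD learning rate once the validation loss plateaus. ###Code
# Sketch: optional learning-rate decay for the SGD optimizer defined above
# (not used in the recorded run below; shown for illustration only).
from torch.optim import lr_scheduler
scheduler_scratch = lr_scheduler.StepLR(optimizer_scratch, step_size=10, gamma=0.5)
# a training loop would then call scheduler_scratch.step() once per epoch
###Output _____no_output_____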
###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss with torch.no_grad(): output = model(data) loss = criterion(output, target) valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(30, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.867288 Validation Loss: 4.848532 Epoch: 2 Training Loss: 4.412717 Validation Loss: 4.558363 Epoch: 3 Training Loss: 4.168945 Validation Loss: 4.696062 Epoch: 4 Training Loss: 3.978247 Validation Loss: 4.291984 Epoch: 5 Training Loss: 3.788543 Validation Loss: 4.207588 Epoch: 6 Training Loss: 3.608011 Validation Loss: 4.159845 Epoch: 7 Training Loss: 3.419348 Validation Loss: 4.083534 Epoch: 8 Training Loss: 3.208256 Validation Loss: 4.023393 Epoch: 9 Training Loss: 2.989136 Validation Loss: 3.953406 Epoch: 10 Training Loss: 2.745501 Validation Loss: 3.936363 Epoch: 11 Training Loss: 2.495452 Validation Loss: 4.259219 Epoch: 12 Training Loss: 2.246160 Validation Loss: 3.740906 Epoch: 13 Training Loss: 1.977466 Validation Loss: 3.872247 Epoch: 14 Training Loss: 1.715116 Validation Loss: 3.791591 Epoch: 15 Training Loss: 1.507339 Validation Loss: 4.054789 Epoch: 16 Training Loss: 1.260053 Validation Loss: 3.783102 Epoch: 17 Training Loss: 1.062026 Validation Loss: 4.029415 Epoch: 18 Training Loss: 0.901284 Validation Loss: 3.877983 Epoch: 19 Training Loss: 0.739113 Validation Loss: 3.897521 Epoch: 20 Training Loss: 0.598171 Validation Loss: 3.924863 Epoch: 21 Training Loss: 0.515742 Validation Loss: 3.931738 Epoch: 22 Training Loss: 0.428331 Validation Loss: 3.933689 Epoch: 23 Training Loss: 0.364203 Validation Loss: 3.935229 Epoch: 24 Training Loss: 0.298396 Validation Loss: 4.085920 Epoch: 25 Training Loss: 0.274021 Validation Loss: 3.995818 Epoch: 26 Training Loss: 0.238851 Validation Loss: 4.139762 Epoch: 27 Training Loss: 0.193249 Validation Loss: 4.037052 Epoch: 28 Training Loss: 0.173754 
Validation Loss: 4.286242 Epoch: 29 Training Loss: 0.162235 Validation Loss: 4.168129 Epoch: 30 Training Loss: 0.144060 Validation Loss: 3.980262 ###Markdown (IMPLEMENTATION) Test the Model Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.733293 Test Accuracy: 13% (115/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning) You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog Dataset Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = loaders_scratch ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model Architecture Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.resnet50(pretrained=True) # replace the 1000-class ImageNet head with a new 133-class dog breed head model_transfer.fc = nn.Linear(model_transfer.fc.in_features, 133) if use_cuda: model_transfer = model_transfer.cuda() model_transfer ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I used the pretrained ResNet-50 model and replaced its final fully connected layer with a new 133-class output layer. ResNet-50 is a strong architecture that has performed well on many benchmarks, so I expect its pretrained ImageNet features to transfer well and give good results on this problem. (IMPLEMENTATION) Specify Loss Function and Optimizer Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
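Since only the parameters of the new `fc` head are passed to the optimizer below, the pretrained backbone can optionally be frozen as well. The sketch below is an addition (not part of the original run) that skips computing backbone gradients: ###Code
# Sketch: optionally freeze the pretrained ResNet-50 backbone so that only
# the new fc head is trained (the optimizer below already updates fc
# parameters only; freezing additionally avoids backbone gradient work).
for name, param in model_transfer.named_parameters():
    if not name.startswith('fc'):
        param.requires_grad = False
###Output _____no_output_____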
###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(model_transfer.fc.parameters(), lr=0.002) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model #model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') model_transfer = train(10, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch: 1 Training Loss: 2.738163 Validation Loss: 1.083485 Epoch: 2 Training Loss: 1.124861 Validation Loss: 1.236399 Epoch: 3 Training Loss: 0.946256 Validation Loss: 1.185159 Epoch: 4 Training Loss: 0.885808 Validation Loss: 1.053156 Epoch: 5 Training Loss: 0.788521 Validation Loss: 1.153058 Epoch: 6 Training Loss: 0.762749 Validation Loss: 1.286854 Epoch: 7 Training Loss: 0.726474 Validation Loss: 1.120108 Epoch: 8 Training Loss: 0.670105 Validation Loss: 1.167427 Epoch: 9 Training Loss: 0.709908 Validation Loss: 1.198924 Epoch: 10 Training Loss: 0.634178 Validation Loss: 1.279406 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) from PIL import Image import torchvision.transforms as transforms def load_image(img_path): image = Image.open(img_path).convert('RGB') transform = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), normalize]) image = transform(image)[:3,:,:].unsqueeze(0) return image ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] #class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed image = load_image(img_path) if use_cuda: image = image.cuda() output = model_transfer(image) _, pred = torch.max(output, 1) prediction = np.squeeze(pred.numpy()) if not use_cuda else np.squeeze(pred.cpu().numpy()) return class_names[prediction] ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. 
Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error. You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither # check for a human face first, then a dog if face_detector(img_path): breed = predict_breed_transfer(img_path) print("Resembling dog breed: {}".format(breed)) elif dog_detector(img_path): breed = predict_breed_transfer(img_path) print("Dog breed: {}".format(breed)) else: print("Neither dog nor human!") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your Algorithm In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images! Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) 1. Train the model for more iterations. 2. Try different network architectures and choose the best one. 3. Try different hyperparameters, such as the learning rate, batch size, and optimizer. ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output Resembling dog breed: French bulldog Resembling dog breed: Dachshund Resembling dog breed: Portuguese water dog Dog breed: Bullmastiff Dog breed: Bullmastiff Dog breed: Bullmastiff ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App --- In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output.
You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. 
###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. 
You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ Given below.
###Code from tqdm import tqdm pool_size = 100 human_files_short = human_files[:pool_size] dog_files_short = dog_files[:pool_size] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. n_human_faces = 0 n_dog_faces = 0 for human_img in human_files_short: if face_detector(human_img): n_human_faces += 1 for dog_img in dog_files_short: if face_detector(dog_img): n_dog_faces += 1 print("Percentage of human faces recognized: {}%".format(n_human_faces / pool_size * 100)) print("Percentage of dogs recognized as human faces: {}%".format(n_dog_faces / pool_size * 100))
###Output Percentage of human faces recognized: 98.0% Percentage of dogs recognized as human faces: 17.0%
###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed.
###Output _____no_output_____
###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda()
###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:05<00:00, 109010654.84it/s]
###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image image = Image.open(img_path).convert('RGB') in_transform = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) image = in_transform(image)[:3,:,:].unsqueeze(0) if use_cuda: image = image.cuda() VGG16.eval() # put the network in evaluation mode so dropout is disabled during inference scores = VGG16(image) _, pred = torch.max(scores, 1) return pred.item() # predicted class index VGG16_predict('/data/dog_images/train/001.Affenpinscher/Affenpinscher_00001.jpg')
###Output _____no_output_____
###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. idx = VGG16_predict(img_path) if idx >= 151 and idx <= 268: return True else: return False
###Output _____no_output_____
###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__
###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. n_human_dogs = 0 n_dog_dogs = 0 for human_img in human_files_short: if dog_detector(human_img): n_human_dogs += 1 for dog_img in dog_files_short: if dog_detector(dog_img): n_dog_dogs += 1 print("Percentage of humans recognized as dogs: {}%".format(n_human_dogs / pool_size * 100)) print("Percentage of dogs recognized: {}%".format(n_dog_dogs / pool_size * 100))
###Output Percentage of humans recognized as dogs: 2.0% Percentage of dogs recognized: 100.0%
###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code ### (Optional) ### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed. import torch import torchvision.models as models def img_net_predict(img_net, img_path): # the network is passed in explicitly so this works for any pre-trained model image = Image.open(img_path).convert('RGB') in_transform = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) image = in_transform(image)[:3,:,:].unsqueeze(0) if use_cuda: image = image.cuda() scores = img_net(image) _, pred = torch.max(scores, 1) return pred.item() # predicted class index def img_net_dog_detector(img_net, img_path): idx = img_net_predict(img_net, img_path) # query the passed-in network if idx >= 151 and idx <= 268: return True else: return False def test_net_dog_recognition_perf(img_net): # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: img_net = img_net.cuda() img_net.eval() # evaluation mode: disables dropout and freezes batch-norm statistics n_human_dogs = 0 n_dog_dogs = 0 for human_img in human_files_short: if img_net_dog_detector(img_net, human_img): n_human_dogs += 1 for dog_img in dog_files_short: if img_net_dog_detector(img_net, dog_img): n_dog_dogs += 1 print("Percentage of humans recognized as dogs: {}%".format(n_human_dogs / pool_size * 100)) print("Percentage of dogs recognized: {}%".format(n_dog_dogs / pool_size * 100)) print("RESNET-50 performance") test_net_dog_recognition_perf(models.resnet50(pretrained=True)) print("Inception-v3 performance") # note: Inception-v3 expects 299x299 inputs, so its transform may need a larger Resize test_net_dog_recognition_perf(models.inception_v3(pretrained=True))
###Output RESNET-50 performance
###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively).
You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes dog_data_dir = '/data/dog_images' # TODO: Define transforms for the training data and testing data train_transforms = transforms.Compose([transforms.RandomRotation(10), transforms.Resize((255, 255)), transforms.CenterCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) test_transforms = transforms.Compose([transforms.Resize((255, 255)), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # Pass transforms in here, then run the next cell to see how the transforms look train_data = datasets.ImageFolder(dog_data_dir + '/train', transform=train_transforms) validation_data = datasets.ImageFolder(dog_data_dir + '/valid', transform=test_transforms) test_data = datasets.ImageFolder(dog_data_dir + '/test', transform=test_transforms) batch_size = 50 trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True) validationloader = torch.utils.data.DataLoader(validation_data, batch_size=batch_size) testloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) loaders_scratch = {'train': trainloader, 'valid': validationloader, 'test': testloader}
###Output _____no_output_____
###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:For the resizing part I first resize to 255x255 and then center-crop to 224x224. These transforms were shown earlier in the lessons, and looking at the actual pictures in the dataset they seem very reasonable here as well. We always want all images to be the same size, and 224x224 is what the pretrained models accept. We can also see that most pictures have a lot of useless (or even misleading) background around the dog, so it's a good idea to get rid of at least part of it.For the augmentation part I also used transforms seen earlier in the classes - random rotation and horizontal flip. I limited the rotation angle to at most 10 degrees, because I haven't seen pictures that were more skewed, and therefore I didn't think the network needed to learn to recognize rotated dogs. On the other hand, other available transforms, like jitter, grayscale, vertical flip and image corrections, didn't seem to promise any boost in network performance; they'd rather introduce unnecessary noise. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code import torch.nn as nn import torch.nn.functional as F def weights_init_normal(m): '''Takes in a module and initializes all linear layers with weight values taken from a normal distribution.''' classname = m.__class__.__name__ # for every Linear layer in a model # m.weight.data should be taken from a normal distribution # m.bias.data should be 0 if classname.find('Linear') != -1: # get the number of the inputs n = m.in_features y = 1.0/np.sqrt(n) m.weight.data.normal_(0, y) m.bias.data.fill_(0) # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 16, 3, padding=1) self.conv2 = nn.Conv2d(16, 32, 3, padding=1) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(64 * 28 * 28, 4096) self.fc2 = nn.Linear(4096, 133) self.dropout = nn.Dropout(0.2) def forward(self, x): ## Define forward behavior x = F.relu(self.pool(self.conv1(x))) x = F.relu(self.pool(self.conv2(x))) x = F.relu(self.pool(self.conv3(x))) x = x.view(-1, 64 * 28 * 28) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() model_scratch.apply(weights_init_normal) # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda()
###Output _____no_output_____
###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ First I tested more complex structures - with 5 convolutional layers and 3 fully connected layers. After every convolutional layer I put max pooling, because I needed pretty small feature maps to feed into the fc layers. This was because my GPU (6GB) could not handle bigger fc layers. Later I gave up and moved to the workspace, because those complex networks weren't learning at all, no matter the learning rate and number of epochs. In the workspace I tested the above architecture with 3 convolutional layers, each one followed by max pooling, and 2 fc layers with 20% dropout, and with batches of size 50. I used ReLU throughout, as the literature suggests that other activation functions can slow down learning several-fold. To get a solution that satisfies the condition of >10% accuracy I needed to tune the learning rate and to use weight decay. Interestingly, without the weight decay the network got its best validation loss in the fifth epoch and then overfitted heavily. With the weight decay the best model was found in the 11th epoch. EDIT: After the workspace reloaded I had to run some cells again and accidentally lost my results. I started to train the same net again and found out that sometimes it doesn't learn for many epochs. One time the network started to really learn something only after 16 epochs. Then I increased the number of epochs to 30, but this time the network started learning right from the start. It seems that when the starting training loss is above 4.5, the network has problems learning. If it starts below, everything is fine. However, 30 epochs was not enough, so I ran an additional 10 and then another 10 for the same model, 50 epochs in total. This is why below you'll see losses from only 10 epochs. The final network needed 48 epochs to get the best validation loss of 3.762568 and an accuracy of 13%.
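The slow start described above (a training loss stuck near -ln(1/133) ≈ 4.89 for many epochs) is often an initialization issue: `weights_init_normal` only touches the `Linear` layers, so the `Conv2d` layers keep PyTorch's defaults. A possible remedy, not part of the original submission, is Kaiming (He) initialization for all ReLU-fed layers; the minimal sketch below shows one way it could be wired in (the name `weights_init_kaiming` is hypothetical).
###Code
import torch.nn as nn

def weights_init_kaiming(m):
    '''Initializes Conv2d and Linear layers with Kaiming (He) normal
    initialization, which is scaled for ReLU activations and tends to let
    deep ReLU networks start learning from the very first epoch.'''
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
        if m.bias is not None:
            m.bias.data.fill_(0)

# hypothetical usage on the scratch model defined above:
# model_scratch.apply(weights_init_kaiming)
###Output
_____no_output_____
###Markdown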
(IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.001, weight_decay=0.01) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path, valid_loss_min=np.Inf): """returns trained model""" # initialize tracker for minimum validation loss for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.item() - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.item() - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## Save the model if validation loss has decreased if valid_loss < valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(10, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt', valid_loss_min=3.788655) ###Output Epoch: 1 Training Loss: 3.597042 Validation Loss: 3.810074 Validation loss decreased (3.878192 --> 3.810074). Saving model ... Epoch: 2 Training Loss: 3.592975 Validation Loss: 3.859040 Epoch: 3 Training Loss: 3.571597 Validation Loss: 3.819179 Epoch: 4 Training Loss: 3.547706 Validation Loss: 3.832856 Epoch: 5 Training Loss: 3.532144 Validation Loss: 3.828675 Epoch: 6 Training Loss: 3.517211 Validation Loss: 3.818338 Epoch: 7 Training Loss: 3.496079 Validation Loss: 3.788655 Validation loss decreased (3.810074 --> 3.788655). Saving model ... Epoch: 8 Training Loss: 3.492988 Validation Loss: 3.762568 Validation loss decreased (3.788655 --> 3.762568). Saving model ... 
Epoch: 9 Training Loss: 3.480653 Validation Loss: 3.778163 Epoch: 10 Training Loss: 3.479877 Validation Loss: 3.813257 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.779476 Test Accuracy: 13% (112/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
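Since the transforms in this step are identical to those from Step 3, a minimal alternative (assuming the Step 3 loader objects are still in scope) would be to reuse them directly rather than re-declare everything:
###Code
# reuse the Step 3 loaders for transfer learning instead of rebuilding them;
# this assumes trainloader, validationloader and testloader are still defined
loaders_transfer = {'train': trainloader, 'valid': validationloader, 'test': testloader}
###Output
_____no_output_____
###Markdown The cell below nevertheless rebuilds the loaders explicitly, which keeps this step self-contained.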
###Code ## TODO: Specify data loaders dog_data_dir = '/data/dog_images' # TODO: Define transforms for the training data and testing data train_transforms = transforms.Compose([transforms.RandomRotation(10), transforms.Resize((255, 255)), transforms.CenterCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) test_transforms = transforms.Compose([transforms.Resize((255, 255)), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # Pass transforms in here, then run the next cell to see how the transforms look train_data = datasets.ImageFolder(dog_data_dir + '/train', transform=train_transforms) validation_data = datasets.ImageFolder(dog_data_dir + '/valid', transform=test_transforms) test_data = datasets.ImageFolder(dog_data_dir + '/test', transform=test_transforms) batch_size = 50 trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True) validationloader = torch.utils.data.DataLoader(validation_data, batch_size=batch_size) testloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size) loaders_transfer = {'train': trainloader, 'valid': validationloader, 'test': testloader} ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## Specify model architecture model_transfer = models.vgg16(pretrained=True) for param in model_transfer.features.parameters(): param.requires_grad = False class Classifier(nn.Module): def __init__(self): super(Classifier, self).__init__() self.fc1 = nn.Linear(25088, 4096) self.fc2 = nn.Linear(4096, 1024) self.fc3 = nn.Linear(1024, 133) self.dropout = nn.Dropout(0.25) def forward(self, x): x = x.view(-1, 25088) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = F.relu(self.fc2(x)) x = self.dropout(x) x = self.fc3(x) return x model_transfer.classifier = Classifier() model_transfer.apply(weights_init_normal) print(model_transfer) if use_cuda: model_transfer = model_transfer.cuda() ###Output VGG( (features): Sequential( (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (1): ReLU(inplace) (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3): ReLU(inplace) (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (6): ReLU(inplace) (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (8): ReLU(inplace) (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (11): ReLU(inplace) (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (13): ReLU(inplace) (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (15): ReLU(inplace) (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (18): ReLU(inplace) (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (20): ReLU(inplace) (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (22): ReLU(inplace) (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, 
ceil_mode=False) (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (25): ReLU(inplace) (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (27): ReLU(inplace) (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (29): ReLU(inplace) (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) ) (classifier): Classifier( (fc1): Linear(in_features=25088, out_features=4096, bias=True) (fc2): Linear(in_features=4096, out_features=1024, bias=True) (fc3): Linear(in_features=1024, out_features=133, bias=True) (dropout): Dropout(p=0.25) ) )
###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ In the first phase I was experimenting just to make the net learn at all. I started by replacing only the final fc layer to get an appropriate output of 133 classes. Then I experimented with replacing the entire classifier with one containing two or three fc layers. None of those networks learned. I finally found a bug in my code - in the train method I had hardcoded model_scratch instead of the model passed in as an argument. As soon as I had bug-free code, I first tested a very simple classifier with just two fc layers and dropout. It turned out too simple to actually learn the needed patterns. Then I used a rule-of-thumb classifier - three fc layers with around a 4-8 times decay in the number of neurons (typically the range is rather 1-8 or even 1-4, but I didn't want the network to be too large) and a reasonable dropout after every layer (25%). On the second try with the learning rate (the first was 0.1, but the validation loss started increasing from the second epoch, so I finally used 0.001) and with weight decay, I got it working with 76% accuracy. Probably I could obtain an even better model with more training epochs. I used 20, but the downward trend in the validation loss, although very noisy, was still there at the very end of learning. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(model_transfer.classifier.parameters(), lr=0.001, weight_decay=0.01)
###Output _____no_output_____
###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code # train the model n_epochs = 20 model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')
###Output Epoch: 1 Training Loss: 3.393542 Validation Loss: 1.162795 Validation loss decreased (inf --> 1.162795). Saving model ... Epoch: 2 Training Loss: 1.299239 Validation Loss: 1.117902 Validation loss decreased (1.162795 --> 1.117902). Saving model ... Epoch: 3 Training Loss: 1.039870 Validation Loss: 0.909252 Validation loss decreased (1.117902 --> 0.909252). Saving model ... Epoch: 4 Training Loss: 0.972300 Validation Loss: 0.820686 Validation loss decreased (0.909252 --> 0.820686). Saving model ...
Epoch: 5 Training Loss: 0.969592 Validation Loss: 0.823599 Epoch: 6 Training Loss: 0.930910 Validation Loss: 0.835809 Epoch: 7 Training Loss: 0.850796 Validation Loss: 0.911110 Epoch: 8 Training Loss: 0.871943 Validation Loss: 0.783921 Validation loss decreased (0.820686 --> 0.783921). Saving model ... Epoch: 9 Training Loss: 0.862556 Validation Loss: 0.763478 Validation loss decreased (0.783921 --> 0.763478). Saving model ... Epoch: 10 Training Loss: 0.882461 Validation Loss: 0.858286 Epoch: 11 Training Loss: 0.834243 Validation Loss: 0.936590 Epoch: 12 Training Loss: 0.840143 Validation Loss: 0.760842 Validation loss decreased (0.763478 --> 0.760842). Saving model ... Epoch: 13 Training Loss: 0.819983 Validation Loss: 0.754218 Validation loss decreased (0.760842 --> 0.754218). Saving model ... Epoch: 14 Training Loss: 0.830542 Validation Loss: 0.871964 Epoch: 15 Training Loss: 0.832333 Validation Loss: 0.899776 Epoch: 16 Training Loss: 0.860489 Validation Loss: 0.754542 Epoch: 17 Training Loss: 0.838359 Validation Loss: 0.917032 Epoch: 18 Training Loss: 0.799759 Validation Loss: 0.724765 Validation loss decreased (0.754218 --> 0.724765). Saving model ... Epoch: 19 Training Loss: 0.797012 Validation Loss: 0.933001 Epoch: 20 Training Loss: 0.824906 Validation Loss: 0.773712 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.777468 Test Accuracy: 76% (643/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. from PIL import Image # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in train_data.classes] def load_image(img_path): image = Image.open(img_path).convert('RGB') in_transform = transforms.Compose([ transforms.Resize((255, 255)), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # discard the transparent, alpha channel (that's the :3) and add the batch dimension image = in_transform(image)[:3,:,:].unsqueeze(0) if use_cuda: image = image.cuda() return image def predict_breed_transfer(img_path): # load the image and return the predicted breed img = load_image(img_path) scores = model_transfer(img) _, pred = torch.max(scores, 1) return class_names[pred.item()] ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. 
Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm
###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def print_image(img_path): img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB) plt.imshow(img) plt.show() def run_app(img_path): print("Welcome in the dog breed recognition app!") ## handle cases for a human face, dog, and neither is_face = face_detector(img_path) is_dog = dog_detector(img_path) if is_face or is_dog: breed = predict_breed_transfer(img_path) print_image(img_path) if is_face: print("You look like a {}".format(breed)) else: print("This is a {}".format(breed)) else: # neither detected, so there is no breed to report print("Error! Neither a human face nor a dog picture provided.") print("Thank you for using our software!") print()
###Output _____no_output_____
###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) - The accuracy was better than I expected - I got 76% compared to the required 60%. But I tested just a couple of classifier configurations, because it takes so long to train and get results. Definitely this result can be improved by using hyperparameter tuning - either grid or random search or something more sophisticated like simulated annealing. Furthermore, it would be good to run a genetic algorithm to find the best architecture in terms of the number of layers and the number of neurons in each, as well as the best dropout percentages.- As for the app, it would be much, much better if the app showed examples of dogs of a given breed when a picture of a human is provided. Otherwise, just the name of the breed will not tell most people much. Maybe one could also try to use style transfer to transform the human face into a dog of the given breed. Unfortunately, I had no time to play with these ideas.- Possibly some further improvements could be made by using other input image augmentations and by using additional types of layers in the network, like the batchnorm mentioned in the student hub by mentors.
###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed.
## suggested code, below for file in np.hstack((human_files[np.random.randint(0, len(human_files), size=3)], dog_files[np.random.randint(0, len(dog_files), size=3)])): run_app(file)
###Output Welcome in the dog breed recognition app!
###Markdown Reference Links from Project Review FeedbackIf you are curious/want to learn more, I recommend you take a look at the links mentioned below:- CNN's for Visual Recognition http://cs231n.github.io/- Building an Image Classifier https://medium.com/towards-data-science/learning-about-data-science-building-an-image-classifier-part-2-a7bcc6d5e825- Large Scale Image Recognition using CNN's https://arxiv.org/pdf/1409.1556.pdf- Transfer Learning http://cs231n.github.io/transfer-learning/- CNN Tricks http://lamda.nju.edu.cn/weixs/project/CNNTricks/CNNTricks.html- http://ruder.io/optimizing-gradient-descent/index.html- A resource if you care to read about other general improvements.https://machinelearningmastery.com/improve-deep-learning-performance/It's good that you implemented separate data loaders for training, validation, and test datasets. Nice work! You may be able to calculate means dynamically with something like np.mean(train_set.train_data, axis=(0,1,2))/255: https://discuss.pytorch.org/t/normalization-in-the-mnist-example/457/12Good job discussing the architecture of your CNN.Some suggestions for improvement:- You can use batch normalisation to further improve the performance of your model. BatchNorm avoids "internal covariate shift", as it minimises the effect of the weights and parameters in successive forward and backward passes on the initial data normalisation done to make the data comparable across features, as explained here - https://keras.io/layers/normalization/ - https://www.quora.com/Why-does-batch-normalization-help- This stackexchange question has some ideas on how to improve your architecture for better accuracy. https://datascience.stackexchange.com/questions/20104/how-to-improve-my-test-accuracy-using-cnn-in-tensorflow Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**.
Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. Download the datasetexecute only once ###Code # Jupyter executable commands for downloading and extracting datasets to the home directory !wget -qq https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip !unzip -qq dogImages.zip !rm dogImages.zip !wget -qq https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip !unzip -qq lfw.zip !rm lfw.zip !rm -r __MACOSX import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' 
% len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. 
__Answer:__ (You can print out your results and/or write your percentages in this cell)
###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. count_human_face = 0 count_dog_face = 0 for human in human_files_short: if face_detector(human): count_human_face += 1 for dog in dog_files_short: if face_detector(dog): count_dog_face += 1 accuracy_human_face = (count_human_face/100)*100 accuracy_dog_face = (count_dog_face/100)*100 print(accuracy_human_face) print(accuracy_dog_face)
###Output 98.0 17.0
###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed.
###Output _____no_output_____
###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: # To avoid out-of-memory GPU errors torch.cuda.empty_cache() VGG16 = VGG16.cuda()
###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:05<00:00, 103376741.50it/s]
###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.
###Code VGG16
###Output _____no_output_____
###Markdown (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code def image_to_tensor(img_path): ''' As per the PyTorch documentation: All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224.
The images have to be loaded in to a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225]. You can use the following transform to normalize: ''' img = Image.open(img_path).convert('RGB') transformations = transforms.Compose([transforms.Resize(size=(244, 244)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) image_tensor = transformations(img)[:3,:,:].unsqueeze(0) return image_tensor from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image image_tensor = image_to_tensor(img_path) # Move tensor to GPU if available if use_cuda: # To avoid out-of-memory GPU errors torch.cuda.empty_cache() image_tensor = image_tensor.cuda() ## Inference # Turn on evaluation mode VGG16.eval() output = VGG16(image_tensor) #return index # predicted class index # convert output probabilities to predicted class _, preds_tensor = torch.max(output, 1) pred = np.squeeze(preds_tensor.numpy()) if not use_cuda else np.squeeze(preds_tensor.cpu().numpy()) return int(pred) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. index = VGG16_predict(img_path) if index>=151 and index <=268: return True return False ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. 
count_human_dog = 0 # humans in which a dog was detected count_dog_dog = 0 # dog images in which a dog was detected for human in human_files_short: if dog_detector(human): count_human_dog += 1 for dog in dog_files_short: if dog_detector(dog): count_dog_dog += 1 accuracy_human_dog = (count_human_dog/100)*100 accuracy_dog_dog = (count_dog_dog/100)*100 print(accuracy_human_dog) print(accuracy_dog_dog)
###Output 0.0 100.0
###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed.
###Output _____no_output_____
###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets
from torchvision.transforms import transforms
from torch.utils.data import DataLoader

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes

# Define transformations for the training set: randomly crop and flip the
# images, then apply mean and std normalization
## Might need to iterate on this transformation
train_transforms = transforms.Compose([transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                      ])
test_transforms = transforms.Compose([transforms.Resize(size=(224, 224)),
                                      transforms.ToTensor(),
                                      transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                     ])
val_transforms = transforms.Compose([transforms.Resize(256),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor(),
                                     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                    ])

# Load the training, validation, and test sets
data_dir = 'dogImages'
train_set = datasets.ImageFolder(os.path.join(data_dir, 'train'), transform=train_transforms)
val_set = datasets.ImageFolder(os.path.join(data_dir, 'valid'), transform=val_transforms)
test_set = datasets.ImageFolder(os.path.join(data_dir, 'test'), transform=test_transforms)

# Create a loader for the train, test and validation sets
train_loader = DataLoader(train_set, batch_size=128, shuffle=True, num_workers=0)
test_loader = DataLoader(test_set, batch_size=128, shuffle=True, num_workers=0)
val_loader = DataLoader(val_set, batch_size=128, shuffle=True, num_workers=0)

###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**:
- I used RandomResizedCrop for the training data, which does both resizing and cropping in one step. I picked an input size of 224x224, the standard dimension that models such as VGG-16 are designed for.
- I augmented the training data with RandomHorizontalFlip. I did not apply any augmentation to the validation or test sets, since they are used to evaluate model performance and should reflect the original images.

(IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

num_classes = 133

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # the stride-2 convolutions halve the spatial size; the stride-1 conv keeps it
        self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        # pool
        self.pool = nn.MaxPool2d(2, 2)
        # fully-connected (224 -> 112 -> 56 -> 28 -> 14 -> 14 -> 7, so 7*7*128 features)
        self.fc1 = nn.Linear(7*7*128, 500)
        self.fc2 = nn.Linear(500, num_classes)
        # drop-out
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        ## Define forward behavior
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        x = F.relu(self.conv3(x))
        x = self.pool(x)
        # flatten
        x = x.view(-1, 7*7*128)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line.
#-#-#

# instantiate the CNN
model_scratch = Net()
print(model_scratch)

# move tensors to GPU if CUDA is available
if use_cuda:
    # To avoid out-of-memory GPU errors
    torch.cuda.empty_cache()
    model_scratch.cuda()

###Output
Net(
  (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
  (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
  (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (fc1): Linear(in_features=6272, out_features=500, bias=True)
  (fc2): Linear(in_features=500, out_features=133, bias=True)
  (dropout): Dropout(p=0.3)
)
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__
- Initially I planned a 5-layer-deep CNN, hoping to achieve higher test accuracy, but I ran into CUDA out-of-memory errors, so I reduced it to a 3-layer conv network.
- I applied a kernel size of 3 with stride 2 in the first two Conv2d layers; each of these downsizes the input image by a factor of 2.
- The 3rd conv layer has kernel size 3 and the default stride of (1, 1), which does not reduce the image size.
- After the final max pooling with stride 2, the image is downsized by a total factor of 32 (224 -> 7) and the depth is 128.
- I applied a dropout of 0.3 to prevent overfitting.
- The final fully connected layer has an output dimension equal to the number of dog breed classes, since we are building a classifier.

(IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.Adam(model_scratch.parameters(), lr=0.001)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
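As a side note on the serialization link above: besides saving only the weights, a checkpoint can bundle the optimizer state and epoch counter so training can be resumed later. The sketch below is illustrative only; the helper names and the `'checkpoint_scratch.pt'` filename are assumptions, and nothing else in this notebook depends on it.

###Code
# Illustrative sketch: saving/restoring a fuller training checkpoint.
import torch

def save_checkpoint(model, optimizer, epoch, valid_loss_min, path='checkpoint_scratch.pt'):
    """Bundle model weights, optimizer state, and bookkeeping into one file."""
    torch.save({'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'valid_loss_min': valid_loss_min}, path)

def load_checkpoint(model, optimizer, path='checkpoint_scratch.pt'):
    """Restore model and optimizer state in place; returns (epoch, valid_loss_min)."""
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return checkpoint['epoch'], checkpoint['valid_loss_min']

###Output
_____no_output_____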
###Code
# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            # clear gradients
            optimizer.zero_grad()
            # forward pass
            output = model(data)
            # calculate loss
            loss = criterion(output, target)
            # backward pass
            loss.backward()
            # perform optimization
            optimizer.step()
            # update training loss
            train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, target)
            valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model...'.format(valid_loss_min, valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

# bundle the loaders into a dictionary
loaders_scratch = {'train': train_loader, 'valid': val_loader, 'test': test_loader}

# train the model
model_scratch = train(10, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))

###Output
Epoch: 1 Training Loss: 4.856330 Validation Loss: 4.743987
Validation loss decreased (inf --> 4.743987). Saving model...
Epoch: 2 Training Loss: 4.684622 Validation Loss: 4.484132
Validation loss decreased (4.743987 --> 4.484132). Saving model...
Epoch: 3 Training Loss: 4.532074 Validation Loss: 4.330174
Validation loss decreased (4.484132 --> 4.330174). Saving model...
Epoch: 4 Training Loss: 4.404692 Validation Loss: 4.252783
Validation loss decreased (4.330174 --> 4.252783). Saving model...
Epoch: 5 Training Loss: 4.308007 Validation Loss: 4.139692
Validation loss decreased (4.252783 --> 4.139692). Saving model...
Epoch: 6 Training Loss: 4.219197 Validation Loss: 4.050989
Validation loss decreased (4.139692 --> 4.050989). Saving model...
Epoch: 7 Training Loss: 4.143009 Validation Loss: 4.025209
Validation loss decreased (4.050989 --> 4.025209). Saving model...
Epoch: 8 Training Loss: 4.119780 Validation Loss: 3.939097
Validation loss decreased (4.025209 --> 3.939097). Saving model...
Epoch: 9 Training Loss: 4.031329 Validation Loss: 3.845957
Validation loss decreased (3.939097 --> 3.845957). Saving model...
Epoch: 10 Training Loss: 3.941018 Validation Loss: 3.779047
Validation loss decreased (3.845957 --> 3.779047). Saving model...
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code
torch.cuda.empty_cache()

def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)

###Output
Test Loss: 3.935060

Test Accuracy: 12% (101/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
# download VGG16 pretrained model
model_transfer = models.vgg16(pretrained=True)

# Freeze parameters of the model to avoid backpropagation through the feature extractor
for param in model_transfer.parameters():
    param.requires_grad = False

# get the number of dog classes from train_set
number_of_dog_classes = len(train_set.classes)

# Define dog breed classifier part of model_transfer
classifier = nn.Sequential(nn.Linear(25088, 4096),
                           nn.ReLU(),
                           nn.Dropout(0.5),
                           nn.Linear(4096, 512),
                           nn.ReLU(),
                           nn.Dropout(0.5),
                           nn.Linear(512, number_of_dog_classes))

# Replace the original classifier with the dog breed classifier from above
model_transfer.classifier = classifier

if use_cuda:
    model_transfer = model_transfer.cuda()

###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem.
__Answer:__
- I used VGG-16 as the transfer learning model and froze its convolutional feature extractor, so only the new classifier is trained.
- I pulled out the original fully-connected classifier and replaced it with a new one whose final layer outputs the number of dog breed classes.
- This architecture is suitable because VGG-16 was pre-trained on ImageNet, which already includes many dog categories, so its learned features transfer well to dog breed classification.

(IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.Adam(model_transfer.classifier.parameters(), lr=0.001)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    valid_loss_min = np.Inf
    print(f"Batch Size: {loaders['train'].batch_size}\n")

    for epoch in range(1, n_epochs+1):
        train_loss = 0.0
        valid_loss = 0.0

        # train the model
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            if (batch_idx + 1) % 5 == 0:
                print(f'Epoch:{epoch}/{n_epochs} \tBatch:{batch_idx + 1}')
                print(f'Train Loss: {train_loss}\n')

        # validate the model
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            with torch.no_grad():
                output = model(data)
                loss = criterion(output, target)
            valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        # save the model if validation loss has decreased
        if valid_loss < valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model...'.format(valid_loss_min, valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model

# define loaders_transfer
loaders_transfer = {'train': train_loader, 'valid': val_loader, 'test': test_loader}

model_transfer = train(10, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

###Output
Batch Size: 128

Epoch:1/10 Batch:5
Train Loss: 6.4378981590271
Epoch:1/10 Batch:10
Train Loss: 5.667230606079102
Epoch:1/10 Batch:15
Train Loss: 5.392285346984863
Epoch:1/10 Batch:20
Train Loss: 5.2346954345703125
Epoch:1/10 Batch:25
Train Loss: 5.1252288818359375
Epoch:1/10 Batch:30
Train Loss: 5.0407514572143555
Epoch:1/10 Batch:35
Train Loss: 4.97988748550415
Epoch:1/10 Batch:40
Train Loss: 4.921975135803223
Epoch:1/10 Batch:45
Train Loss: 4.860908031463623
Epoch:1/10 Batch:50
Train Loss: 4.799076080322266
Epoch: 1 Training Loss: 4.767495 Validation Loss: 3.207707
Validation loss decreased (inf --> 3.207707). Saving model...
Epoch:2/10 Batch:5 Train Loss: 4.01640510559082 Epoch:2/10 Batch:10 Train Loss: 3.9062700271606445 Epoch:2/10 Batch:15 Train Loss: 3.8314292430877686 Epoch:2/10 Batch:20 Train Loss: 3.7651517391204834 Epoch:2/10 Batch:25 Train Loss: 3.71815824508667 Epoch:2/10 Batch:30 Train Loss: 3.661747694015503 Epoch:2/10 Batch:35 Train Loss: 3.588341474533081 Epoch:2/10 Batch:40 Train Loss: 3.510035514831543 Epoch:2/10 Batch:45 Train Loss: 3.47878098487854 Epoch:2/10 Batch:50 Train Loss: 3.4271485805511475 Epoch: 2 Training Loss: 3.398296 Validation Loss: 1.679442 Validation loss decreased (3.207707 --> 1.679442). Saving model... Epoch:3/10 Batch:5 Train Loss: 2.9035353660583496 Epoch:3/10 Batch:10 Train Loss: 2.92600154876709 Epoch:3/10 Batch:15 Train Loss: 2.8960957527160645 Epoch:3/10 Batch:20 Train Loss: 2.8886892795562744 Epoch:3/10 Batch:25 Train Loss: 2.8496391773223877 Epoch:3/10 Batch:30 Train Loss: 2.82936692237854 Epoch:3/10 Batch:35 Train Loss: 2.808506488800049 Epoch:3/10 Batch:40 Train Loss: 2.7778565883636475 Epoch:3/10 Batch:45 Train Loss: 2.7583467960357666 Epoch:3/10 Batch:50 Train Loss: 2.7392752170562744 Epoch: 3 Training Loss: 2.728635 Validation Loss: 1.229546 Validation loss decreased (1.679442 --> 1.229546). Saving model... Epoch:4/10 Batch:5 Train Loss: 2.4825148582458496 Epoch:4/10 Batch:10 Train Loss: 2.455249547958374 Epoch:4/10 Batch:15 Train Loss: 2.480926513671875 Epoch:4/10 Batch:20 Train Loss: 2.4689807891845703 Epoch:4/10 Batch:25 Train Loss: 2.4228689670562744 Epoch:4/10 Batch:30 Train Loss: 2.408151149749756 Epoch:4/10 Batch:35 Train Loss: 2.403019905090332 Epoch:4/10 Batch:40 Train Loss: 2.387916088104248 Epoch:4/10 Batch:45 Train Loss: 2.373253583908081 Epoch:4/10 Batch:50 Train Loss: 2.379511594772339 Epoch: 4 Training Loss: 2.375812 Validation Loss: 1.122839 Validation loss decreased (1.229546 --> 1.122839). Saving model... Epoch:5/10 Batch:5 Train Loss: 2.3035311698913574 Epoch:5/10 Batch:10 Train Loss: 2.314666986465454 Epoch:5/10 Batch:15 Train Loss: 2.2898948192596436 Epoch:5/10 Batch:20 Train Loss: 2.2940449714660645 Epoch:5/10 Batch:25 Train Loss: 2.2898523807525635 Epoch:5/10 Batch:30 Train Loss: 2.2877683639526367 Epoch:5/10 Batch:35 Train Loss: 2.289156675338745 Epoch:5/10 Batch:40 Train Loss: 2.2727770805358887 Epoch:5/10 Batch:45 Train Loss: 2.2666525840759277 Epoch:5/10 Batch:50 Train Loss: 2.251713752746582 Epoch: 5 Training Loss: 2.249902 Validation Loss: 0.977056 Validation loss decreased (1.122839 --> 0.977056). Saving model... Epoch:6/10 Batch:5 Train Loss: 2.156177282333374 Epoch:6/10 Batch:10 Train Loss: 2.1688358783721924 Epoch:6/10 Batch:15 Train Loss: 2.1753125190734863 Epoch:6/10 Batch:20 Train Loss: 2.15897536277771 Epoch:6/10 Batch:25 Train Loss: 2.139754056930542 Epoch:6/10 Batch:30 Train Loss: 2.1455888748168945 Epoch:6/10 Batch:35 Train Loss: 2.1432101726531982 Epoch:6/10 Batch:40 Train Loss: 2.1500537395477295 Epoch:6/10 Batch:45 Train Loss: 2.154005527496338 Epoch:6/10 Batch:50 Train Loss: 2.1412394046783447 Epoch: 6 Training Loss: 2.133628 Validation Loss: 0.936038 Validation loss decreased (0.977056 --> 0.936038). Saving model... 
Epoch:7/10 Batch:5 Train Loss: 2.0965027809143066 Epoch:7/10 Batch:10 Train Loss: 2.0663373470306396 Epoch:7/10 Batch:15 Train Loss: 2.0804991722106934 Epoch:7/10 Batch:20 Train Loss: 2.0565736293792725 Epoch:7/10 Batch:25 Train Loss: 2.057375431060791 Epoch:7/10 Batch:30 Train Loss: 2.070815324783325 Epoch:7/10 Batch:35 Train Loss: 2.0484964847564697 Epoch:7/10 Batch:40 Train Loss: 2.039828300476074 Epoch:7/10 Batch:45 Train Loss: 2.037508249282837 Epoch:7/10 Batch:50 Train Loss: 2.02662992477417 Epoch: 7 Training Loss: 2.030303 Validation Loss: 0.861054 Validation loss decreased (0.936038 --> 0.861054). Saving model... Epoch:8/10 Batch:5 Train Loss: 2.034153938293457 Epoch:8/10 Batch:10 Train Loss: 2.0203287601470947 Epoch:8/10 Batch:15 Train Loss: 2.0088558197021484 Epoch:8/10 Batch:20 Train Loss: 2.009859085083008 Epoch:8/10 Batch:25 Train Loss: 1.9836528301239014 Epoch:8/10 Batch:30 Train Loss: 1.9963550567626953 Epoch:8/10 Batch:35 Train Loss: 1.9746006727218628 Epoch:8/10 Batch:40 Train Loss: 1.9785840511322021 Epoch:8/10 Batch:45 Train Loss: 1.9806398153305054 Epoch:8/10 Batch:50 Train Loss: 1.9770692586898804 Epoch: 8 Training Loss: 1.979156 Validation Loss: 0.805731 Validation loss decreased (0.861054 --> 0.805731). Saving model... Epoch:9/10 Batch:5 Train Loss: 1.969130039215088 Epoch:9/10 Batch:10 Train Loss: 1.8918523788452148 Epoch:9/10 Batch:15 Train Loss: 1.854266881942749 Epoch:9/10 Batch:20 Train Loss: 1.8882747888565063 Epoch:9/10 Batch:25 Train Loss: 1.9258450269699097 Epoch:9/10 Batch:30 Train Loss: 1.9229233264923096 Epoch:9/10 Batch:35 Train Loss: 1.9190722703933716 Epoch:9/10 Batch:40 Train Loss: 1.9021841287612915 Epoch:9/10 Batch:45 Train Loss: 1.9008020162582397 Epoch:9/10 Batch:50 Train Loss: 1.8950743675231934 Epoch: 9 Training Loss: 1.888657 Validation Loss: 0.759022 Validation loss decreased (0.805731 --> 0.759022). Saving model... Epoch:10/10 Batch:5 Train Loss: 1.8089685440063477 Epoch:10/10 Batch:10 Train Loss: 1.8886243104934692 Epoch:10/10 Batch:15 Train Loss: 1.9097522497177124 Epoch:10/10 Batch:20 Train Loss: 1.8854939937591553 Epoch:10/10 Batch:25 Train Loss: 1.889748454093933 Epoch:10/10 Batch:30 Train Loss: 1.9073421955108643 Epoch:10/10 Batch:35 Train Loss: 1.8921124935150146 Epoch:10/10 Batch:40 Train Loss: 1.9065356254577637 Epoch:10/10 Batch:45 Train Loss: 1.9073125123977661 Epoch:10/10 Batch:50 Train Loss: 1.920322299003601 Epoch: 10 Training Loss: 1.920028 Validation Loss: 0.721950 Validation loss decreased (0.759022 --> 0.721950). Saving model... ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 0.883034 Test Accuracy: 73% (617/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes]
class_names[:10]
len(class_names)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))

def image_input_tensor(img_path):
    '''
    As per Pytorch documentation: All pre-trained models expect input images normalized in the same way,
    i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224.
    The images have to be loaded in to a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406]
    and std = [0.229, 0.224, 0.225]. You can use the following transform to normalize:
    '''
    img = Image.open(img_path).convert('RGB')
    prediction_transform = transforms.Compose([transforms.Resize(size=(224, 224)),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                    std=[0.229, 0.224, 0.225])])
    image_tensor = prediction_transform(img)[:3,:,:].unsqueeze(0)
    return image_tensor

def predict_breed_transfer(model, class_names, img_path):
    # load the image and return the predicted breed
    img_tensor = image_input_tensor(img_path)
    if use_cuda:
        img_tensor = img_tensor.cuda()
    # use the model that was passed in (not the global model_transfer)
    model.eval()
    # Get predicted category for image
    with torch.no_grad():
        output = model(img_tensor)
        prediction = torch.argmax(output).item()
    # Turn evaluation mode back off
    model.train()
    # Use prediction to get dog breed
    breed = class_names[prediction]
    return breed

###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    img = Image.open(img_path)
    plt.imshow(img)
    plt.show()
    if dog_detector(img_path):
        prediction = predict_breed_transfer(model_transfer, class_names, img_path)
        print("Dog detected!\nIt is a breed of type {0}".format(prediction))
    elif face_detector(img_path):
        prediction = predict_breed_transfer(model_transfer, class_names, img_path)
        print("Hello, human!\nYou look like a {0}".format(prediction))
    else:
        print("Can't detect anything..")

###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ?
Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)
- The output looks good with the data tested; perhaps more test data would reveal some faults. In general, I would try the approaches below to improve model performance:
1. Hyperparameter tuning - play with variables like learning rate, dropout, random weight initialization, batch size, and different types of optimizers
2. Run different combinations of hyperparameters across different model types (VGG-16 vs ResNet50 vs DenseNet121, etc.) and see which model is better
3. Try an ensemble of models
4. Look into bias-variance tradeoff curves and see if we are overfitting or underfitting. Based on that, we can take other approaches like data augmentation or using more training data.

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)

###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling.
The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline

# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces in image
faces = face_cascade.detectMultiScale(gray)

# print number of faces detected in the image
print('Number of faces detected:', len(faces))

# get bounding box for each detected face
for (x,y,w,h) in faces:
    # add bounding box to color image
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()

###Output
Number of faces detected: 1
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.

###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
    img = cv2.imread(img_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    return len(faces) > 0

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function.
- What percentage of the first 100 images in `human_files` have a detected human face?
- What percentage of the first 100 images in `dog_files` have a detected human face?

Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`.

__Answer:__ (You can print out your results and/or write your percentages in this cell) 99.0 percent of the human files and 9.0 percent of the dog files have a detected face.

###Code
from tqdm import tqdm

human_files_short = human_files[:100]
dog_files_short = dog_files[:100]

#-#-# Do NOT modify the code above this line. #-#-#

## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
n_human_files_has_face = 0 for f in human_files_short: if face_detector(f): n_human_files_has_face += 1 n_dog_files_has_face = 0 for f in dog_files_short: if face_detector(f): n_dog_files_has_face += 1 percent_human_detected_face = n_human_files_has_face / len(human_files_short) * 100.0 percent_dog_detected_face = n_dog_files_has_face / len(dog_files_short) * 100.0 print(f"{percent_human_detected_face} percentage of human files have a detected face.") print(f"{percent_dog_detected_face} percentage of dog files have a detected face.") ###Output 99.0 percentage of human files have a detected face. 9.0 percentage of dog files have a detected face. ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms from torch.autograd import Variable # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path) # Read bytes and store as an img.
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    img = transform(img)

    # PyTorch pretrained models expect the Tensor dims to be (num input imgs, num color channels, height, width).
    # Currently however, we have (num color channels, height, width); let's fix this by inserting a new axis.
    img = img.unsqueeze(0) # Insert the new axis at index 0 i.e. in front of the other axes/dims.

    # Now that we have preprocessed our img, we need to convert it into a
    # Variable; PyTorch models expect inputs to be Variables. A PyTorch Variable is a
    # wrapper around a PyTorch Tensor.
    img = Variable(img)
    # only move the input to the GPU when one is available
    if use_cuda:
        img = img.cuda()
    prediction = VGG16(img)
    prediction = prediction.data.cpu().numpy().argmax()
    return prediction # predicted class index

img_path = "dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg"
pred = VGG16_predict(img_path)
print(pred)

###Output
252
###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    pred = VGG16_predict(img_path)
    return pred >= 151 and pred <= 268

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__ 2.0 percent of the images in `human_files_short` and 98.0 percent of the images in `dog_files_short` have a detected dog.

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
n_human_detected_dog = 0
for f in human_files_short:
    if dog_detector(f):
        n_human_detected_dog += 1

n_dog_detected_dog = 0
for f in dog_files_short:
    if dog_detector(f):
        n_dog_detected_dog += 1

percent_human_detected_dog = n_human_detected_dog / len(human_files_short) * 100.
percent_dog_detected_dog = n_dog_detected_dog / len(dog_files_short) * 100.
print(f"{percent_human_detected_dog} percentage of images in human_files_short have a detected dog.")
print(f"{percent_dog_detected_dog} percentage of images in dog_files_short have a detected dog.")

###Output
2.0 percentage of images in human_files_short have a detected dog.
98.0 percentage of images in dog_files_short have a detected dog.
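###Markdown
Since the same percentage computation appears for both the face and dog detectors in this notebook, it can be factored into a small helper. This is just an illustrative refactor (the `detection_rate` name is an assumption); the rest of the notebook does not depend on it.

###Code
def detection_rate(detector, files):
    """Return the percentage of files for which `detector` returns True."""
    hits = sum(1 for f in files if detector(f))
    return 100.0 * hits / len(files)

# Example usage, equivalent to the loops above:
# print(detection_rate(dog_detector, human_files_short))
# print(detection_rate(dog_detector, dog_files_short))

###Output
_____no_output_____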
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable

# Set PIL to be tolerant of image files that are truncated.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# define the pre-trained Inception-v3 model and move it to the GPU if available
Inception_v3 = models.inception_v3(pretrained=True)
Inception_v3.eval()
if use_cuda:
    Inception_v3 = Inception_v3.cuda()

def Inception_v3_predict(img_path):
    '''
    Use pre-trained Inception-v3 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to Inception-v3 model's prediction
    '''
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path) # Read bytes and store as an img.
    # Inception-v3 expects 299x299 inputs (unlike the 224x224 used for VGG-16)
    transform = transforms.Compose([transforms.Resize(342),
                                    transforms.CenterCrop(299),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    img = transform(img)

    # PyTorch pretrained models expect the Tensor dims to be (num input imgs, num color channels, height, width).
    # Currently however, we have (num color channels, height, width); let's fix this by inserting a new axis.
    img = img.unsqueeze(0) # Insert the new axis at index 0 i.e. in front of the other axes/dims.

    # Convert the preprocessed img into a Variable, as PyTorch models expect.
    img = Variable(img)
    if use_cuda:
        img = img.cuda()
    prediction = Inception_v3(img)
    prediction = prediction.data.cpu().numpy().argmax()
    return prediction # predicted class index

### returns "True" if a dog is detected in the image stored at img_path
def dog_detector_inception(img_path):
    ## TODO: Complete the function.
    pred = Inception_v3_predict(img_path)
    return pred >= 151 and pred <= 268

### Test the performance of the dog_detector_inception function
### on the images in human_files_short and dog_files_short.
n_human_detected_dog = 0
for f in human_files_short:
    if dog_detector_inception(f):
        n_human_detected_dog += 1

n_dog_detected_dog = 0
for f in dog_files_short:
    if dog_detector_inception(f):
        n_dog_detected_dog += 1

percent_human_detected_dog = n_human_detected_dog / len(human_files_short) * 100.
percent_dog_detected_dog = n_dog_detected_dog / len(dog_files_short) * 100.
print(f"{percent_human_detected_dog} percentage of images in human_files_short have a detected dog")
print(f"{percent_dog_detected_dog} percentage of images in dog_files_short have a detected dog")

###Output
3.0 percentage of images in human_files_short have a detected dog
97.0 percentage of images in dog_files_short have a detected dog
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds.
You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
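Once the loaders in the next cell are defined, it can be useful to pull a single batch and confirm tensor shapes and label range; the sketch below is illustrative only (the helper name is an assumption).

###Code
# Illustrative sanity check for any of the data loaders defined below.
def inspect_one_batch(loader):
    """Fetch one batch and report image tensor shape and label range."""
    images, labels = next(iter(loader))
    print('image batch shape:', images.shape)  # expect [batch_size, 3, 224, 224]
    print('label range:', labels.min().item(), 'to', labels.max().item())

# Example usage (after the loaders below exist):
# inspect_one_batch(loaders_scratch['train'])

###Output
_____no_output_____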
###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
train_data_transform = transforms.Compose([transforms.RandomResizedCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                std=[0.229, 0.224, 0.225])])

data_transform = transforms.Compose([transforms.Resize(256),
                                     transforms.CenterCrop(224),
                                     transforms.ToTensor(),
                                     transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                          std=[0.229, 0.224, 0.225])])

# define training and test data directories
data_dir = 'dogImages/'
train_dir = os.path.join(data_dir, 'train/')
valid_dir = os.path.join(data_dir, 'valid/')
test_dir = os.path.join(data_dir, 'test/')

train_data = datasets.ImageFolder(train_dir, transform=train_data_transform)
valid_data = datasets.ImageFolder(valid_dir, transform=data_transform)
test_data = datasets.ImageFolder(test_dir, transform=data_transform)

# print out some data stats
print('Num training images: ', len(train_data))
print('Num validation images: ', len(valid_data))
print('Num test images: ', len(test_data))

# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20

# prepare data loaders (combine dataset and sampler)
loaders_scratch = {}
loaders_scratch['train'] = torch.utils.data.DataLoader(train_data,
                                                       batch_size=batch_size,
                                                       num_workers=num_workers,
                                                       shuffle=True,
                                                       )
loaders_scratch['valid'] = torch.utils.data.DataLoader(valid_data,
                                                       batch_size=batch_size,
                                                       num_workers=num_workers,
                                                       shuffle=False,
                                                       )
loaders_scratch['test'] = torch.utils.data.DataLoader(test_data,
                                                      batch_size=batch_size,
                                                      num_workers=num_workers,
                                                      shuffle=False,
                                                      )

###Output
Num training images: 6680
Num validation images: 835
Num test images: 836
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**:
* For training images, my code randomly resizes and crops them to a size of 224, randomly applies a horizontal flip, and then normalizes the data. For validation and testing images, my code resizes them to a size of 256, center crops them to a size of 224, and then normalizes the data. The size for the input tensor is 224x224. I picked this size based on the success of dog detectors that use networks pre-trained on ImageNet images.
* I used the horizontal flip to augment the training dataset.

(IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
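A quick way to sanity-check layer sizes in an architecture like the one below is the standard output-size formula for convolutions and pooling, out = floor((in + 2*padding - kernel) / stride) + 1. The helper below is illustrative only, but its arithmetic matches the architecture in the next cell:

###Code
# Illustrative helper for the conv/pool output-size formula.
def conv_out_size(size, kernel, stride=1, padding=0):
    """Spatial output size: floor((size + 2*padding - kernel) / stride) + 1."""
    return (size + 2 * padding - kernel) // stride + 1

# A 224x224 input through five (conv k=3, s=1, p=1) + (max pool k=2, s=2) stages:
size = 224
for _ in range(5):
    size = conv_out_size(size, kernel=3, stride=1, padding=1)  # conv preserves size
    size = conv_out_size(size, kernel=2, stride=2)             # pool halves it
print(size)  # 7, matching the 128 * 7 * 7 flattened features used below

###Output
_____no_output_____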
###Code
import torch.nn as nn
import torch.nn.functional as F

num_classes = 133 # total classes of dog breeds

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        # convolutional layer (sees 224x224x3 image tensor)
        self.conv1 = nn.Conv2d(3, 8, 3, padding=1)
        # convolutional layer (sees 112x112x8 tensor)
        self.conv2 = nn.Conv2d(8, 16, 3, padding=1)
        # convolutional layer (sees 56x56x16 tensor)
        self.conv3 = nn.Conv2d(16, 32, 3, padding=1)
        # convolutional layer (sees 28x28x32 tensor)
        self.conv4 = nn.Conv2d(32, 64, 3, padding=1)
        # convolutional layer (sees 14x14x64 tensor)
        self.conv5 = nn.Conv2d(64, 128, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        # linear layer (128 * 7 * 7 -> 1024)
        self.fc1 = nn.Linear(128 * 7 * 7, 1024)
        # linear layer (1024 -> 133)
        self.fc2 = nn.Linear(1024, num_classes)
        # dropout layer (p=0.25)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        ## Define forward behavior
        # add sequence of convolutional and max pooling layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        x = self.pool(F.relu(self.conv5(x)))
        # flatten image input
        x = x.view(-1, 128 * 7 * 7)
        # add dropout layer
        x = self.dropout(x)
        # add 1st hidden layer, with relu activation function, and add dropout layer
        x = self.dropout(F.relu(self.fc1(x)))
        # add 2nd hidden layer (output layer of raw class scores; CrossEntropyLoss applies softmax)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ For feature mapping, I added a sequence of convolutional and max pooling layers, using the relu activation function for all 5 convolutional layers. Each conv layer has a kernel size of 3 with stride 1 and padding 1, which preserves the spatial size; each max pooling layer with stride 2 then halves it. At the end of the feature mapping layers, the network produces 128-channel feature maps with a spatial size of 7x7. For classification, I added two hidden layers. The first is a linear layer with a relu activation function, followed by a dropout layer to reduce overfitting. The second is a linear layer whose number of output nodes matches the number of dog breed classes. The node with the maximum response corresponds to the predicted dog breed class.

(IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.05)

###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
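The training loop below tracks losses with the incremental update `avg = avg + (1 / (batch_idx + 1)) * (loss - avg)`, which is algebraically the running mean of the batch losses seen so far. A tiny illustrative check of that identity (the loss values are made up):

###Code
# Verify that the incremental update used in the training loop equals the plain mean.
losses = [4.0, 3.5, 3.0, 2.5]  # made-up batch losses

running = 0.0
for batch_idx, loss in enumerate(losses):
    running = running + (1 / (batch_idx + 1)) * (loss - running)

print(running)                    # 3.25
print(sum(losses) / len(losses))  # 3.25, the same value

###Output
_____no_output_____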
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile import time ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): start = time.time() # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update training loss train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # update average validation loss valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: valid_loss_min = valid_loss torch.save(model.state_dict(), save_path) print(f'Epoch {epoch}: Training took {time.time() - start} seconds') # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.886100 Validation Loss: 4.871809 Epoch 1: Training took 69.82323026657104 seconds Epoch: 2 Training Loss: 4.864949 Validation Loss: 4.843792 Epoch 2: Training took 69.83498883247375 seconds Epoch: 3 Training Loss: 4.812358 Validation Loss: 4.778625 Epoch 3: Training took 70.46817922592163 seconds Epoch: 4 Training Loss: 4.745510 Validation Loss: 4.664632 Epoch 4: Training took 69.74657034873962 seconds Epoch: 5 Training Loss: 4.647446 Validation Loss: 4.459115 Epoch 5: Training took 70.0101387500763 seconds Epoch: 6 Training Loss: 4.596620 Validation Loss: 4.404420 Epoch 6: Training took 69.68021154403687 seconds Epoch: 7 Training Loss: 4.534386 Validation Loss: 4.376098 Epoch 7: Training took 69.53632307052612 seconds Epoch: 8 Training Loss: 4.484714 Validation Loss: 4.260687 Epoch 8: Training took 70.21100878715515 seconds Epoch: 9 Training Loss: 4.446212 Validation Loss: 4.251204 Epoch 9: Training took 
69.49314856529236 seconds Epoch: 10 Training Loss: 4.384246 Validation Loss: 4.259652 Epoch 10: Training took 70.58685946464539 seconds Epoch: 11 Training Loss: 4.333156 Validation Loss: 4.202802 Epoch 11: Training took 69.54722166061401 seconds Epoch: 12 Training Loss: 4.283099 Validation Loss: 4.087390 Epoch 12: Training took 69.63668084144592 seconds Epoch: 13 Training Loss: 4.235709 Validation Loss: 3.954558 Epoch 13: Training took 70.06266236305237 seconds Epoch: 14 Training Loss: 4.168249 Validation Loss: 3.937389 Epoch 14: Training took 70.02547454833984 seconds Epoch: 15 Training Loss: 4.140210 Validation Loss: 3.964548 Epoch 15: Training took 70.43982934951782 seconds Epoch: 16 Training Loss: 4.074340 Validation Loss: 3.930811 Epoch 16: Training took 69.96099758148193 seconds Epoch: 17 Training Loss: 4.059928 Validation Loss: 3.937668 Epoch 17: Training took 69.73089671134949 seconds Epoch: 18 Training Loss: 3.983751 Validation Loss: 3.878890 Epoch 18: Training took 70.11740326881409 seconds Epoch: 19 Training Loss: 3.939219 Validation Loss: 3.777211 Epoch 19: Training took 69.77718043327332 seconds Epoch: 20 Training Loss: 3.901518 Validation Loss: 3.681802 Epoch 20: Training took 69.8636302947998 seconds Epoch: 21 Training Loss: 3.834341 Validation Loss: 3.654799 Epoch 21: Training took 70.22560214996338 seconds Epoch: 22 Training Loss: 3.804265 Validation Loss: 3.594644 Epoch 22: Training took 69.84251022338867 seconds Epoch: 23 Training Loss: 3.768740 Validation Loss: 3.563106 Epoch 23: Training took 70.34745907783508 seconds Epoch: 24 Training Loss: 3.728969 Validation Loss: 3.553688 Epoch 24: Training took 69.56226396560669 seconds Epoch: 25 Training Loss: 3.674473 Validation Loss: 3.593755 Epoch 25: Training took 69.71685719490051 seconds Epoch: 26 Training Loss: 3.640650 Validation Loss: 3.433748 Epoch 26: Training took 70.43414783477783 seconds Epoch: 27 Training Loss: 3.592997 Validation Loss: 3.464573 Epoch 27: Training took 69.88874888420105 seconds Epoch: 28 Training Loss: 3.550922 Validation Loss: 3.408271 Epoch 28: Training took 70.03864979743958 seconds Epoch: 29 Training Loss: 3.536067 Validation Loss: 3.506213 Epoch 29: Training took 69.55676126480103 seconds Epoch: 30 Training Loss: 3.493138 Validation Loss: 3.366031 Epoch 30: Training took 69.77691698074341 seconds Epoch: 31 Training Loss: 3.435407 Validation Loss: 3.355156 Epoch 31: Training took 70.06503438949585 seconds Epoch: 32 Training Loss: 3.403545 Validation Loss: 3.356795 Epoch 32: Training took 69.74691534042358 seconds Epoch: 33 Training Loss: 3.344033 Validation Loss: 3.291141 Epoch 33: Training took 70.44655513763428 seconds Epoch: 34 Training Loss: 3.305647 Validation Loss: 3.205185 Epoch 34: Training took 70.08437633514404 seconds Epoch: 35 Training Loss: 3.297650 Validation Loss: 3.114449 Epoch 35: Training took 69.70031571388245 seconds Epoch: 36 Training Loss: 3.224699 Validation Loss: 3.134883 Epoch 36: Training took 70.16658687591553 seconds Epoch: 37 Training Loss: 3.220128 Validation Loss: 3.150230 Epoch 37: Training took 69.77804708480835 seconds Epoch: 38 Training Loss: 3.189481 Validation Loss: 3.176592 Epoch 38: Training took 69.83911728858948 seconds Epoch: 39 Training Loss: 3.173848 Validation Loss: 3.099140 Epoch 39: Training took 70.45622420310974 seconds Epoch: 40 Training Loss: 3.127751 Validation Loss: 3.348422 Epoch 40: Training took 69.90028285980225 seconds Epoch: 41 Training Loss: 3.122090 Validation Loss: 3.095971 Epoch 41: Training took 70.4527096748352 seconds Epoch: 42 
Training Loss: 3.105838 Validation Loss: 3.070101 Epoch 42: Training took 69.76174473762512 seconds Epoch: 43 Training Loss: 3.043892 Validation Loss: 3.093065 Epoch 43: Training took 69.82153701782227 seconds Epoch: 44 Training Loss: 3.050709 Validation Loss: 3.117655 Epoch 44: Training took 70.27519726753235 seconds Epoch: 45 Training Loss: 3.005664 Validation Loss: 3.070455 Epoch 45: Training took 69.71746063232422 seconds Epoch: 46 Training Loss: 2.988633 Validation Loss: 2.954870 Epoch 46: Training took 70.49746632575989 seconds Epoch: 47 Training Loss: 2.930274 Validation Loss: 2.942135 Epoch 47: Training took 70.0390567779541 seconds Epoch: 48 Training Loss: 2.933194 Validation Loss: 3.098191 Epoch 48: Training took 69.75210309028625 seconds Epoch: 49 Training Loss: 2.923639 Validation Loss: 3.050370 Epoch 49: Training took 70.1332802772522 seconds Epoch: 50 Training Loss: 2.894266 Validation Loss: 2.973337 Epoch 50: Training took 69.69635462760925 seconds Epoch: 51 Training Loss: 2.869646 Validation Loss: 2.916261 Epoch 51: Training took 70.38264322280884 seconds Epoch: 52 Training Loss: 2.847177 Validation Loss: 2.980206 Epoch 52: Training took 69.79083609580994 seconds Epoch: 53 Training Loss: 2.831323 Validation Loss: 2.880478 Epoch 53: Training took 69.86313319206238 seconds Epoch: 54 Training Loss: 2.815160 Validation Loss: 2.865181 Epoch 54: Training took 70.31022524833679 seconds Epoch: 55 Training Loss: 2.830531 Validation Loss: 2.928753 Epoch 55: Training took 69.66116762161255 seconds Epoch: 56 Training Loss: 2.799364 Validation Loss: 2.970462 Epoch 56: Training took 69.74475359916687 seconds Epoch: 57 Training Loss: 2.768929 Validation Loss: 2.963128 Epoch 57: Training took 70.04380655288696 seconds Epoch: 58 Training Loss: 2.799653 Validation Loss: 2.960480 Epoch 58: Training took 69.80249261856079 seconds Epoch: 59 Training Loss: 2.742073 Validation Loss: 2.935303 Epoch 59: Training took 70.12720012664795 seconds Epoch: 60 Training Loss: 2.732217 Validation Loss: 2.950641 Epoch 60: Training took 69.86221075057983 seconds Epoch: 61 Training Loss: 2.725677 Validation Loss: 3.083100 Epoch 61: Training took 69.62667536735535 seconds Epoch: 62 Training Loss: 2.667353 Validation Loss: 3.048120 Epoch 62: Training took 70.26820516586304 seconds Epoch: 63 Training Loss: 2.672403 Validation Loss: 2.959510 Epoch 63: Training took 69.90475630760193 seconds Epoch: 64 Training Loss: 2.637814 Validation Loss: 2.878934 Epoch 64: Training took 71.04594612121582 seconds Epoch: 65 Training Loss: 2.672304 Validation Loss: 3.027067 Epoch 65: Training took 69.77048921585083 seconds Epoch: 66 Training Loss: 2.625887 Validation Loss: 2.828470 Epoch 66: Training took 69.98621892929077 seconds Epoch: 67 Training Loss: 2.634559 Validation Loss: 2.892123 Epoch 67: Training took 70.60684847831726 seconds Epoch: 68 Training Loss: 2.603867 Validation Loss: 2.821200 Epoch 68: Training took 69.99006271362305 seconds Epoch: 69 Training Loss: 2.566658 Validation Loss: 2.928398 Epoch 69: Training took 70.38301348686218 seconds Epoch: 70 Training Loss: 2.574715 Validation Loss: 2.908247 Epoch 70: Training took 69.91914582252502 seconds Epoch: 71 Training Loss: 2.577531 Validation Loss: 2.906311 Epoch 71: Training took 69.91890668869019 seconds Epoch: 72 Training Loss: 2.605210 Validation Loss: 2.864525 Epoch 72: Training took 70.30046081542969 seconds Epoch: 73 Training Loss: 2.562033 Validation Loss: 3.058542 Epoch 73: Training took 69.82422137260437 seconds Epoch: 74 Training Loss: 2.528908 Validation 
Loss: 2.940002 Epoch 74: Training took 69.78545498847961 seconds ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
    model.eval()
    for batch_idx, (data, target) in enumerate(loaders['test']):
        # move to GPU
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update average test loss
        test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
        # convert output probabilities to predicted class
        pred = output.data.max(1, keepdim=True)[1]
        # compare predictions to true label
        correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
        total += data.size(0)

    print('Test Loss: {:.6f}\n'.format(test_loss))
    print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
        100. * correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
Test Loss: 2.759033
Test Accuracy: 31% (265/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code
loaders_transfer = loaders_scratch.copy()
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.resnet50(pretrained=True)

# Freeze training for all layers
for param in model_transfer.parameters():
    param.requires_grad = False

num_classes = 133 # total classes of dog breeds
# replace the last linear layer so that it maps n_inputs -> 133 dog breed classes
## new layers automatically have requires_grad = True
model_transfer.fc = nn.Linear(model_transfer.fc.in_features, num_classes)

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ The steps I took are: 1) I created a pre-trained network from ResNet-50. 2) I froze the parameters of all layers. 3) I replaced the last fully-connected linear layer so that it maps n_inputs to the number of dog breed classes. This new layer automatically has requires_grad = True, and only this layer will be re-trained.
The architecture is suitable for the current problem since this is a small dataset and the images are similar to the dog images used to train the ResNet-50 pre-trained model. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code
import torch.optim as optim

criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.fc.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code
# train the model
model_transfer = train(20, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
Epoch: 1 Training Loss: 4.838822 Validation Loss: 4.655106 Epoch 1: Training took 72.55707931518555 seconds
Epoch: 2 Training Loss: 4.617789 Validation Loss: 4.393342 Epoch 2: Training took 73.00530982017517 seconds
Epoch: 3 Training Loss: 4.425086 Validation Loss: 4.156457 Epoch 3: Training took 73.22258496284485 seconds
Epoch: 4 Training Loss: 4.233319 Validation Loss: 3.937538 Epoch 4: Training took 72.90615200996399 seconds
Epoch: 5 Training Loss: 4.057913 Validation Loss: 3.728640 Epoch 5: Training took 73.04576826095581 seconds
Epoch: 6 Training Loss: 3.889202 Validation Loss: 3.490250 Epoch 6: Training took 72.67145943641663 seconds
Epoch: 7 Training Loss: 3.725507 Validation Loss: 3.301069 Epoch 7: Training took 72.78318643569946 seconds
Epoch: 8 Training Loss: 3.580069 Validation Loss: 3.121111 Epoch 8: Training took 73.59100413322449 seconds
Epoch: 9 Training Loss: 3.424739 Validation Loss: 2.963184 Epoch 9: Training took 72.83080673217773 seconds
Epoch: 10 Training Loss: 3.303581 Validation Loss: 2.793798 Epoch 10: Training took 72.95533895492554 seconds
Epoch: 11 Training Loss: 3.171590 Validation Loss: 2.675051 Epoch 11: Training took 72.75558471679688 seconds
Epoch: 12 Training Loss: 3.053910 Validation Loss: 2.512288 Epoch 12: Training took 72.61163544654846 seconds
Epoch: 13 Training Loss: 2.948796 Validation Loss: 2.409395 Epoch 13: Training took 72.95003414154053 seconds
Epoch: 14 Training Loss: 2.839710 Validation Loss: 2.257499 Epoch 14: Training took 72.68898844718933 seconds
Epoch: 15 Training Loss: 2.746297 Validation Loss: 2.164205 Epoch 15: Training took 73.17858219146729 seconds
Epoch: 16 Training Loss: 2.657723 Validation Loss: 2.076901 Epoch 16: Training took 72.92312693595886 seconds
Epoch: 17 Training Loss: 2.566670 Validation Loss: 2.002176 Epoch 17: Training took 72.80235028266907 seconds
Epoch: 18 Training Loss: 2.501037 Validation Loss: 1.900825 Epoch 18: Training took 72.98363161087036 seconds
Epoch: 19 Training Loss: 2.435660 Validation Loss: 1.796759 Epoch 19: Training took 72.77762508392334 seconds
Epoch: 20 Training Loss: 2.355902 Validation Loss: 1.754497 Epoch 20: Training took 73.27141332626343 seconds
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images.
Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 1.729850
Test Accuracy: 74% (625/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
from torch.autograd import Variable  # needed for the Variable wrapper used below

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    img = Image.open(img_path) # Read bytes and store as an img.
    img = data_transform(img)
    # PyTorch pretrained models expect the Tensor dims to be (num input imgs, num color channels, height, width).
    # Currently however, we have (num color channels, height, width); let's fix this by inserting a new axis.
    img = img.unsqueeze(0) # Insert the new axis at index 0 i.e. in front of the other axes/dims.
    # Now that we have preprocessed our img, we need to convert it into a
    # Variable; PyTorch models expect inputs to be Variables. A PyTorch Variable is a
    # wrapper around a PyTorch Tensor.
    img = Variable(img).cuda()
    prediction = model_transfer(img)
    prediction = prediction.data.cpu().numpy().argmax()
    return class_names[prediction]
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    img = Image.open(img_path)
    plt.imshow(img)
    plt.show()
    if dog_detector(img_path):
        breed = predict_breed_transfer(img_path)
        print(f"dog is detected in {img_path}. The predicted breed is {breed}.")
        return breed
    elif face_detector(img_path):
        breed = predict_breed_transfer(img_path)
        print(f"human is detected in {img_path}. The resembling dog breed is {breed}.")
        return breed
    else:
        raise Exception(f"{img_path} is neither detected as a face nor a dog.")
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer.
Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) 1) Increase the number of epochs so that the network converges better. 2) Change the network structure, for example, use a deeper network. 3) Change the learning rate of the optimizer. ###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
import glob
files = glob.glob('./myimages/*.jpg')
for file in files:
    run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode. The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!).
![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code # from google.colab import drive # drive.mount('/content/drive') # # accessing files in Gdrive # # with open('/content/drive/My Drive/Colab Notebooks/foo.txt', 'w') as f: # # f.write('Hello Google Drive!') # # !cat /content/drive/My\ Drive/foo.txt # import zipfile # with zipfile.ZipFile('/content/drive/My Drive/Colab Notebooks/data_multcloud/dogImages.zip', 'r') as zip_ref: # zip_ref.extractall('/content/data') # with zipfile.ZipFile('/content/drive/My Drive/Colab Notebooks/data_multcloud/lfw.zip', 'r') as zip_ref: # zip_ref.extractall('/content/data') import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/content/data/lfw/*/*")) dog_files = np.array(glob("/content/data/dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
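(As an aside before that demo: `detectMultiScale` also accepts tuning parameters that trade false positives against missed detections. A minimal, self-contained sketch follows; the specific values here are illustrative, not part of the original notebook.)

###Code
import cv2

# the same cascade file used in the demo below
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
gray = cv2.cvtColor(cv2.imread(human_files[0]), cv2.COLOR_BGR2GRAY)

# scaleFactor controls the image-pyramid step; minNeighbors raises the bar for
# accepting a detection; minSize drops tiny spurious boxes
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1,
                                      minNeighbors=5, minSize=(30, 30))
print('Faces found with tuned parameters:', len(faces))
###Output
_____no_output_____
###Markdown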
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
detected_human1 = 0.0
for img_path in tqdm(human_files_short):
    detected_human1 += 1 if face_detector(img_path) else 0

detected_human2 = 0.0
for img_path in tqdm(dog_files_short):
    detected_human2 += 1 if face_detector(img_path) else 0

print("\n%age of human_files that have a detected human face: {}".\
      format(detected_human1/len(human_files_short)))
print("%age of dog_files that have a detected human face: {}".\
      format(detected_human2/len(dog_files_short)))
###Output
100%|██████████| 100/100 [00:04<00:00, 20.25it/s]
100%|██████████| 100/100 [00:25<00:00, 6.25it/s]
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code
import torch

# check if CUDA is available
use_cuda = torch.cuda.is_available()
print("use_cuda ",use_cuda)
if use_cuda:
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()
###Output
_____no_output_____
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive. Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code
from PIL import Image
import torchvision.transforms as transforms

transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
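    ## Note: the model expects a 4-D batch tensor (N, C, H, W), which is why a
    ## batch dimension is added below with unsqueeze_(0) before calling VGG16.
    ## Also worth knowing: `transform` above uses RandomResizedCrop, so repeated
    ## predictions on the same image can differ; transforms.Resize(256) followed
    ## by transforms.CenterCrop(224) is a common deterministic alternative.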
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image
    img = Image.open(img_path)
    # print("img shape:")
    img = transform(img)
    if use_cuda:
        img = img.cuda()
    scores = VGG16(img.unsqueeze_(0))
    maxvalues, maxindices = scores.max(1)
    # print("maxvalues, maxindices: ",maxvalues, maxindices)
    return maxindices[0] # predicted class index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive). Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.
    pred = VGG16_predict(img_path)
    # print("pred:",pred)
    return (pred >= 151 and pred <= 268) # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
detected_dog1 = 0.0
for img_path in tqdm(human_files_short):
    detected_dog1 += 1 if dog_detector(img_path) else 0

detected_dog2 = 0.0
for img_path in tqdm(dog_files_short):
    detected_dog2 += 1 if dog_detector(img_path) else 0

print("\n%age of human_files that have a VGG detected dog face: {}".\
      format(detected_dog1/len(human_files_short)))
print("%age of dog_files that have a VGG detected dog face: {}".\
      format(detected_dog2/len(dog_files_short)))
###Output
100%|██████████| 100/100 [00:00<00:00, 117.39it/s]
100%|██████████| 100/100 [00:01<00:00, 74.48it/s]
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%.
In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy. We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. (Image pair removed: Brittany | Welsh Springer Spaniel.) It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). (Image pair removed: Curly-Coated Retriever | American Water Spaniel.) Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. (Image trio removed: Yellow Labrador | Chocolate Labrador | Black Labrador.) We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code
import os
from PIL import Image
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
data_dir = '/content/data/dogImages/'
train_dir = os.path.join(data_dir, 'train/')
valid_dir = os.path.join(data_dir, 'valid/')
test_dir = os.path.join(data_dir, 'test/')

data_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

batch_size = 20
num_workers = 0

loaders_scratch = {}
loaders_scratch['train'] = DataLoader(ImageFolder(train_dir, transform=data_transform),
                                      batch_size=batch_size, num_workers=num_workers, shuffle=True)
# batch_iterator = iter(loaders_scratch['train'])
# image, target = next(batch_iterator)
# print((image, target))
loaders_scratch['valid'] = DataLoader(ImageFolder(valid_dir, transform=data_transform),
                                      batch_size=batch_size, num_workers=num_workers, shuffle=True)
loaders_scratch['test'] = DataLoader(ImageFolder(test_dir, transform=data_transform),
                                     batch_size=batch_size, num_workers=num_workers, shuffle=True)
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why? - Did you decide to augment the dataset?
If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: I chose to crop the images to 224x224 because that is the input size expected by VGG16, and then I converted each image to a PyTorch tensor because that is the datatype needed by the training code. Finally, I normalize each image tensor to keep pixel values in a small, consistent range, which helps keep the loss well behaved during training. I decided not to augment the dataset further since it achieves the desired (>10%) accuracy without augmentation. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code
import torch.nn as nn
import torch.nn.functional as F

# the dataset has 133 dog breed classes (the ImageNet dog categories, dictionary keys 151-268, are a separate set)
# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv4 = nn.Conv2d(256, 256, 3, padding=1)
        self.pool = nn.MaxPool2d(2, stride=2)
        self.dropout = nn.Dropout(p=0.2)
        self.fc1 = nn.Linear(in_features=14*14*256, out_features=550, bias=True)
        self.fc2 = nn.Linear(in_features=550, out_features=133, bias=True)

    def forward(self, x):
        ## Define forward behavior
        x = F.relu(self.conv1(x)) # input 224*224*3, output 224*224*64
        x = self.pool(x)          # output 112*112*64
        x = F.relu(self.conv2(x)) # output 112*112*128
        x = self.pool(x)          # output 56*56*128
        x = F.relu(self.conv3(x)) # output 56*56*256
        x = self.pool(x)          # output 28*28*256
        x = F.relu(self.conv4(x)) # output 28*28*256
        x = self.pool(x)          # output 14*14*256
        x = x.view(-1, 14*14*256)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)           # raw logits: no ReLU here, since CrossEntropyLoss applies log-softmax itself
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ The idea here is to capture the correlations within the high-level features in the images of dogs. I started by looking at the architecture of VGG16. Realizing that its 13 conv layers and 3 dense layers contain a lot of parameters to train, I decided to reduce the number of parameters. Believing that 4 stages of convolutions should be enough to reach the desired level of features, I reduced the number of layers to 4 conv layers and 2 dense layers. This change will lower the accuracy compared to VGG16, but it will definitely improve training speed. The latter is preferred here because the target accuracy is anything >10%. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
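A quick aside on why the network above returns raw logits: `nn.CrossEntropyLoss` already combines `log_softmax` with `nn.NLLLoss`, so no activation should follow the final linear layer. A minimal, data-independent sketch verifying this identity:

###Code
import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(4, 133)           # a fake batch of raw class scores
targets = torch.randint(0, 133, (4,))  # fake ground-truth breed indices

ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))  # True: CrossEntropyLoss == log_softmax + NLLLoss
###Output
_____no_output_____
###Markdown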
###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.02) # CUDA_LAUNCH_BLOCKING=1 from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() outputs = model(data) # print("(data,outputs, target) ",(data.shape,outputs.shape, target.shape)) loss = criterion(outputs, target) loss.backward() optimizer.step() train_loss += ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss outputs = model(data) loss = criterion(outputs, target) valid_loss += ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(50, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 4.887174 Validation Loss: 4.878657 Epoch: 2 Training Loss: 4.869370 Validation Loss: 4.848549 Epoch: 3 Training Loss: 4.824730 Validation Loss: 4.797719 Epoch: 4 Training Loss: 4.746803 Validation Loss: 4.692183 Epoch: 5 Training Loss: 4.675761 Validation Loss: 4.670107 Epoch: 6 Training Loss: 4.628850 Validation Loss: 4.630784 Epoch: 7 Training Loss: 4.580145 Validation Loss: 4.524014 Epoch: 8 Training Loss: 4.512561 Validation Loss: 4.513649 Epoch: 9 Training Loss: 4.470942 Validation Loss: 4.435304 Epoch: 10 Training Loss: 4.397995 Validation Loss: 4.473947 Epoch: 11 Training Loss: 4.363097 Validation Loss: 4.415814 Epoch: 12 Training Loss: 4.288850 Validation Loss: 4.359576 Epoch: 13 Training Loss: 4.243631 Validation Loss: 4.291308 Epoch: 14 Training Loss: 4.185096 Validation Loss: 4.280042 Epoch: 15 Training Loss: 4.121061 Validation Loss: 4.210492 Epoch: 16 Training Loss: 4.049886 Validation Loss: 4.179674 Epoch: 17 Training Loss: 3.999778 Validation Loss: 4.131071 Epoch: 18 Training Loss: 3.933181 Validation Loss: 4.166828 Epoch: 19 Training Loss: 3.888325 Validation Loss: 4.160649 Epoch: 20 Training Loss: 3.834166 Validation Loss: 4.042470 Epoch: 21 Training Loss: 3.777239 Validation Loss: 4.070001 Epoch: 22 Training Loss: 3.717226 Validation Loss: 
4.005573 Epoch: 23 Training Loss: 3.655286 Validation Loss: 4.084600 Epoch: 24 Training Loss: 3.628946 Validation Loss: 3.984413 Epoch: 25 Training Loss: 3.557290 Validation Loss: 3.964394 Epoch: 26 Training Loss: 3.518858 Validation Loss: 4.011021 Epoch: 27 Training Loss: 3.456288 Validation Loss: 3.960361 Epoch: 28 Training Loss: 3.398754 Validation Loss: 3.920190 Epoch: 29 Training Loss: 3.319044 Validation Loss: 3.990447 Epoch: 30 Training Loss: 3.268733 Validation Loss: 3.901505 Epoch: 31 Training Loss: 3.224926 Validation Loss: 3.914077 Epoch: 32 Training Loss: 3.133870 Validation Loss: 3.880004 Epoch: 33 Training Loss: 3.096562 Validation Loss: 4.026234 Epoch: 34 Training Loss: 2.990672 Validation Loss: 3.917436 Epoch: 35 Training Loss: 2.928252 Validation Loss: 3.980245 Epoch: 36 Training Loss: 2.897443 Validation Loss: 3.993963 Epoch: 37 Training Loss: 2.845973 Validation Loss: 3.943289 Epoch: 38 Training Loss: 2.777972 Validation Loss: 3.869338 Epoch: 39 Training Loss: 2.714061 Validation Loss: 3.962418 Epoch: 40 Training Loss: 2.647923 Validation Loss: 3.955304 Epoch: 41 Training Loss: 2.607114 Validation Loss: 4.001591 Epoch: 42 Training Loss: 2.582994 Validation Loss: 4.071705 Epoch: 43 Training Loss: 2.480551 Validation Loss: 4.014930 Epoch: 44 Training Loss: 2.454018 Validation Loss: 3.959421 Epoch: 45 Training Loss: 2.418326 Validation Loss: 4.021719 Epoch: 46 Training Loss: 2.357122 Validation Loss: 3.846569 Epoch: 47 Training Loss: 2.294032 Validation Loss: 4.164516 Epoch: 48 Training Loss: 2.263764 Validation Loss: 4.081131 Epoch: 49 Training Loss: 2.180786 Validation Loss: 4.279676 Epoch: 50 Training Loss: 2.183109 Validation Loss: 4.019083 ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) model.train() # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.736501 Test Accuracy: 17% (148/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. 
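One detail worth flagging before reusing the scratch loaders: they apply `RandomResizedCrop` to the validation and test splits as well, which makes every evaluation pass slightly different. A deterministic evaluation pipeline (a common alternative, not what this notebook used for its reported numbers) would look like:

###Code
import torchvision.transforms as transforms

# deterministic preprocessing for validation/test (sketch); training can keep
# random augmentation, but evaluation is usually done on a fixed center crop
eval_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
###Output
_____no_output_____
###Markdown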
(IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code
## TODO: Specify data loaders
import os
from PIL import Image
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
data_dir = '/content/data/dogImages/'
train_dir = os.path.join(data_dir, 'train/')
valid_dir = os.path.join(data_dir, 'valid/')
test_dir = os.path.join(data_dir, 'test/')

data_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

batch_size = 20
num_workers = 0

loaders_transfer = {}
loaders_transfer['train'] = DataLoader(ImageFolder(train_dir, transform=data_transform),
                                       batch_size=batch_size, num_workers=num_workers, shuffle=True)
# batch_iterator = iter(loaders_scratch['train'])
# image, target = next(batch_iterator)
# print((image, target))
loaders_transfer['valid'] = DataLoader(ImageFolder(valid_dir, transform=data_transform),
                                       batch_size=batch_size, num_workers=num_workers, shuffle=True)
loaders_transfer['test'] = DataLoader(ImageFolder(test_dir, transform=data_transform),
                                      batch_size=batch_size, num_workers=num_workers, shuffle=True)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.vgg16(pretrained=True)
# model_transfer.load_state_dict(VGG16.state_dict()) # copy weights and stuff
n_inputs = model_transfer.classifier[6].in_features
last_layer = nn.Linear(n_inputs, 133)
model_transfer.classifier[6] = last_layer
if use_cuda:
    model_transfer = model_transfer.cuda()

# Freeze training for all "features" layers
for param in model_transfer.features.parameters():
    param.requires_grad = False
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ The dog breed dataset is relatively small, and its images are similar to the data VGG was trained on. Therefore, according to the table shown in the lessons, I should follow the strategy of modifying and retraining only a fully connected layer at the end of the VGG16 network. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
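Before wiring up the optimizer, one quick sanity check is to list which parameters will actually receive gradient updates (a small sketch using the `model_transfer` defined above; since the `features` layers were frozen, only the classifier block should appear):

###Code
# names of parameters the optimizer can update (a sanity check, not required)
trainable = [name for name, p in model_transfer.named_parameters() if p.requires_grad]
print(trainable)  # expected: only 'classifier.*' entries, e.g. 'classifier.6.weight'
###Output
_____no_output_____
###Markdown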
###Code
import torch.optim as optim

criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code
n_epochs = 12 # found enough for > 70% accuracy

# train the model
model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy (uncomment the line below)
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 0.832985
Test Accuracy: 77% (644/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    img = Image.open(img_path)
    img = data_transform(img)
    if use_cuda:
        img = img.cuda() # move the input tensor to the GPU
    scores = model_transfer(img.unsqueeze_(0))
    maxvalues, maxindices = scores.max(1)
    return class_names[maxindices[0]] # predicted class index
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if face_detector(img_path):
        print("Human face. Closest resembling predicted dog-breed - {}".format(predict_breed_transfer(img_path)))
    elif dog_detector(img_path):
        print("Dog face. Predicted breed - {}".format(predict_breed_transfer(img_path)))
    else:
        print("Neither human face nor dog in input")
    return
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin!
What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) The output is better than my expectations. Only 1 out of 4 dog predictions was wrong for the algorithm. Three ways to improve performance are: * Increase the number of breeds and images in the training data for model_transfer * Augment the dataset before training the network (transforms.RandomHorizontalFlip(), transforms.ColorJitter, transforms.RandomRotation) * Increase the number of dense layers at the end ###Code
# load filenames for human and dog images
sample_files = np.array(glob("/content/samples/*"))

for file in sample_files:
    img = cv2.imread(file)
    plt.imshow(img)
    plt.show()
    run_app(file)

## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
# for file in np.hstack((human_files[:3], dog_files[:3])):
#     img = cv2.imread(file)
#     plt.imshow(img)
#     plt.show()
#     run_app(file)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode. The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements.
If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
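(A brief aside before that demo: `detectMultiScale` also accepts tuning arguments that trade detection speed against thoroughness. The sketch below is purely illustrative — the image path is a placeholder — and is not part of the project template.)
###Code
# illustrative sketch: tuning the Haar cascade's sensitivity ('my_image.jpg' is a hypothetical path)
import cv2

cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
gray = cv2.cvtColor(cv2.imread('my_image.jpg'), cv2.COLOR_BGR2GRAY)

# scaleFactor: how much the image is shrunk between detection scales (closer to 1 = slower, more thorough)
# minNeighbors: overlapping candidate boxes required to keep a detection (larger = fewer false positives)
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
print('Number of faces detected:', len(faces))
###Output _____no_output_____ ###Markdown With that noted, the demo follows.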
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ According to my calculation below, 99.0% of humans' faces were identified, whereas 18.0% of dogs' faces were identified as human faces. ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
def percent_detected(files,detector): count = 0 for f in files: if detector(f): count += 1 return count / len(files) human_rate = percent_detected(human_files_short,face_detector) dog_rate = percent_detected(dog_files_short,face_detector) print(100*human_rate,"% of humans were identified as such.") print(100*dog_rate,"% of dogs were misidentified as humans.") ###Output 99.0 % of humans were identified as such. 18.0 % of dogs were misidentified as humans. ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. # I skipped this step ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /Users/aaronplahn/.cache/torch/hub/checkpoints/vgg16-397923af.pth ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms from torchvision import datasets import torch.nn.functional as F # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]) # note: pretrained torchvision models usually also expect ImageNet normalization (transforms.Normalize); the results below were produced without it ## Load and pre-process an image from the given img_path img = Image.open(img_path) img = transform(img) batch_img = torch.unsqueeze(img, 0) if use_cuda: batch_img = batch_img.cuda() # the model sits on the GPU when CUDA is available, so the input must too ## Return the *index* of the predicted class for that image VGG16.eval() # eval() is equivalent to train(False); one call suffices output = VGG16(batch_img) return torch.topk(output,1).indices.item() test_img_path = ('./dogImages/train/058.Dandie_dinmont_terrier/Dandie_dinmont_terrier_04110.jpg') VGG16_predict(test_img_path) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): start_dog_classes = 151 end_dog_classes = 268 prediction = VGG16_predict(img_path) return prediction >= start_dog_classes and prediction <= end_dog_classes is_a_dog = dog_detector(test_img_path) print('Test image was identified as a dog? ',is_a_dog) ###Output Test image was identified as a dog? True ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog?- What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ - A dog was detected in none of the images in `human_files_short`.- A dog was detected in 99.0% of the images in `dog_files_short`. ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. human_rate = percent_detected(human_files_short,dog_detector) dog_rate = percent_detected(dog_files_short,dog_detector) print(100*human_rate,"% of humans were misidentified as dogs.") print(100*dog_rate,"% of dogs were identified as dogs.") ###Output 0.0 % of humans were misidentified as dogs. 99.0 % of dogs were identified as dogs. ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. # I skipped this for now ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. 
You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! ###Code import os from torchvision import datasets ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN def forward(self, x): ## Define forward behavior return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. 
__Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = None ### TODO: select optimizer optimizer_scratch = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model (complete and uncomment the line below) # model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither pass # TODO: implement using face_detector, dog_detector, and predict_breed_transfer ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. 
Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset # use %d and % to add some text print('There are %d total human images.' 
% len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 #import OpenCV import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() # load color (BGR) image img = cv2.imread(human_files[2]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. 
###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. right = 0 wrong = 0 for img in human_files_short: human_chk = face_detector(img) if human_chk == True: right += 1 for img in dog_files_short: dog_chk = face_detector(img) if dog_chk == True: wrong += 1 print(right, '% of Human face detected in human_files') print(wrong, '% of Human of faces detected in dog_files') ###Output 98 % of Human face detected in human_files 17 % of Human of faces detected in dog_files ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|██████████| 553433881/553433881 [00:06<00:00, 87021544.60it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. 
(IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): #open image img = Image.open(img_path) #convert img to VGG16 format using Crop and ToTensor #note: a random crop makes predictions nondeterministic; Resize + CenterCrop (plus ImageNet normalization) is the usual choice at inference time transform_pipeline = transforms.Compose([transforms.RandomResizedCrop(250), transforms.ToTensor()]) #apply transform_pipeline func to image imgTensor = transform_pipeline(img) imgTensor = imgTensor.unsqueeze(0) #change dimension if torch.cuda.is_available(): imgTensor = imgTensor.cuda() pred = VGG16(imgTensor) #copy tensor to host memory if torch.cuda.is_available(): pred = pred.cpu() index = pred.data.numpy().argmax() #find class with best probability. return index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): index = VGG16_predict(img_path) return (151 <= index and index <= 268) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ A dog was detected in 2% of the human images and in 74% of the dog images (see the output below). ###Code human_files_pred_dog = np.average([dog_detector(img) for img in human_files_short]) dog_files_correct = np.average([dog_detector(img) for img in dog_files_short]) print('Avg Human predicted as Dog : {}'.format(human_files_pred_dog)) print('Avg Dog predicted correct : {}'.format(dog_files_correct)) ###Output Avg Human predicted as Dog : 0.02 Avg Dog predicted correct : 0.74 ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. 
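# (An illustrative, hedged sketch of how this optional task could look -- not run in this notebook:
#  the same index-range dog detector built on ResNet-50 instead of VGG-16. ImageNet dog classes
#  still occupy indices 151-268, so only the backbone changes.)
resnet50 = models.resnet50(pretrained=True)
resnet50.eval()
if use_cuda:
    resnet50 = resnet50.cuda()

def resnet50_dog_detector(img_path):
    # deterministic Resize/CenterCrop preprocessing; only the model differs from dog_detector above
    tfm = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()])
    x = tfm(Image.open(img_path).convert('RGB')).unsqueeze(0)
    if use_cuda:
        x = x.cuda()
    idx = resnet50(x).max(1)[1].item()  # index of the highest-scoring ImageNet class
    return 151 <= idx <= 268

# e.g. np.average([resnet50_dog_detector(img) for img in dog_files_short]) would report its hit rate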
### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
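As an aside on that last point: the loaders below use only `RandomResizedCrop`, but if one wanted the augmentation suggested above, a training-only pipeline along the following lines would be a reasonable starting point. This is a hedged sketch, not the pipeline actually used in this notebook.
###Code
# sketch of an augmented training pipeline (not used by the loaders below)
import torchvision.transforms as transforms

augmented_train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),   # random scale/aspect-ratio crop for variety
    transforms.RandomHorizontalFlip(),   # dogs look the same mirrored
    transforms.RandomRotation(10),       # small rotations only, to keep the dog upright
    transforms.ToTensor(),
])
# validation/test would instead keep a deterministic Resize(256) + CenterCrop(224)
###Output _____no_output_____ ###Markdown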
###Code import os from torchvision import datasets from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes # convert image size and format for use in the network transform_pipeline = transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor()]) # datapath with transformation train_data = datasets.ImageFolder('/data/dog_images/train', transform=transform_pipeline) valid_data = datasets.ImageFolder('/data/dog_images/valid', transform=transform_pipeline) test_data = datasets.ImageFolder('/data/dog_images/test', transform=transform_pipeline) # set batch_size and the number of worker subprocesses batch_size = 10 num_workers = 0 # define data loader train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=False) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False) # define loaders_scratch loaders_scratch = { 'train': train_loader, 'valid': valid_loader, 'test': test_loader } ## visualize dataset with one batch import matplotlib.pyplot as plt %matplotlib inline # function to unnormalize and show an image def imshow(img): img = img / 2 + 0.5 # unnormalize (the pipeline above does not normalize, so this only brightens the display) plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() # change image to numpy # plot image of one batch fig = plt.figure(figsize=(25, 4)) for idx in np.arange(10): ax = fig.add_subplot(2, 10, idx+1, xticks=[], yticks=[]) imshow(images[idx]) ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:* I used **RandomResizedCrop** with the size parameter set to 224, since this size works well with VGG16* I didn't change the dataset, since I wanted to train the model first with the given data. After that, I could augment the data and compare the results. (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. 
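(A quick shape check for the template that follows, assuming the 224 x 224 crops produced by the loaders above: each 2 x 2 max-pool halves the spatial size, so 224 -> 112 -> 56 -> 28 after three conv/pool stages, and a final stack of 128 feature maps flattens to 128 * 28 * 28 = 100352 inputs for the first fully connected layer.)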
###Code import torch.nn as nn import torch.nn.functional as F # there are 133 dog classes in dataset dog_classes = 133 # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Convolutional layer self.conv1 = nn.Conv2d(3, 32, 3, padding = 1) self.norm2d1 = nn.BatchNorm2d(32) # using BatchNorm2d to accelerate Deep Network training by reducing internal covariate shift self.conv2 = nn.Conv2d(32, 64, 3, padding = 1) self.conv3 = nn.Conv2d(64, 128, 3, padding = 1) ## Max pooling layer self.pool = nn.MaxPool2d(2, 2) ## Linear layer self.fc1 = nn.Linear(128 * 28 * 28, 500) #size_linear_layer = 500 self.fc2 = nn.Linear(500, dog_classes) ## Dropout layer (defined here but not applied in forward below) self.dropout = nn.Dropout(0.25) def forward(self, x): ## Define forward behavior x = self.pool(F.relu(self.norm2d1(self.conv1(x)))) x = self.pool(F.relu(self.conv2(x))) x = self.pool(F.relu(self.conv3(x))) #print(x.shape) #tried torch.Size([20, 64, 30, 30]) -> fail ## Flatten the image x = x.view(-1, 128 * 28 * 28) ## 1st hidden layer with relu x = F.relu(self.fc1(x)) ## final fully connected layer producing raw class scores for CrossEntropyLoss x = self.fc2(x) return x #-#-# You do NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() print(model_scratch) # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output Net( (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (norm2d1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False) (fc1): Linear(in_features=100352, out_features=500, bias=True) (fc2): Linear(in_features=500, out_features=133, bias=True) (dropout): Dropout(p=0.25) ) ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. 
###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr = 0.01) if use_cuda: criterion_scratch = criterion_scratch.cuda() ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() optimizer.zero_grad() ## forward pass output = model(data) ## calculate the loss loss = criterion(output, target) ## backard pass loss.backward() ## optimizer optimizer.step() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: torch.save(model.state_dict(), save_path) print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...' .format(valid_loss_min, valid_loss)) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(20, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch: 1 Training Loss: 3.715722 Validation Loss: 4.033320 Validation loss decreased (inf --> 4.033320). Saving model ... Epoch: 2 Training Loss: 3.627105 Validation Loss: 4.058779 Epoch: 3 Training Loss: 3.545101 Validation Loss: 4.100022 Epoch: 4 Training Loss: 3.520368 Validation Loss: 4.050234 Epoch: 5 Training Loss: 3.469303 Validation Loss: 4.032757 Validation loss decreased (4.033320 --> 4.032757). Saving model ... Epoch: 6 Training Loss: 3.446717 Validation Loss: 3.986248 Validation loss decreased (4.032757 --> 3.986248). Saving model ... Epoch: 7 Training Loss: 3.421498 Validation Loss: 4.006217 Epoch: 8 Training Loss: 3.381927 Validation Loss: 3.978454 Validation loss decreased (3.986248 --> 3.978454). Saving model ... Epoch: 9 Training Loss: 3.361323 Validation Loss: 3.928846 Validation loss decreased (3.978454 --> 3.928846). Saving model ... 
Epoch: 10 Training Loss: 3.305882 Validation Loss: 4.005451 Epoch: 11 Training Loss: 3.293236 Validation Loss: 3.948993 Epoch: 12 Training Loss: 3.237764 Validation Loss: 3.976714 Epoch: 13 Training Loss: 3.179145 Validation Loss: 4.015687 Epoch: 14 Training Loss: 3.190542 Validation Loss: 3.996957 Epoch: 15 Training Loss: 3.086165 Validation Loss: 4.024975 Epoch: 16 Training Loss: 3.102103 Validation Loss: 4.002965 Epoch: 17 Training Loss: 3.083860 Validation Loss: 4.006400 Epoch: 18 Training Loss: 3.043343 Validation Loss: 4.009737 Epoch: 19 Training Loss: 3.015012 Validation Loss: 3.977869 Epoch: 20 Training Loss: 2.998595 Validation Loss: 3.884691 Validation loss decreased (3.928846 --> 3.884691). Saving model ... ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 3.975350 Test Accuracy: 13% (111/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders loaders_transfer = loaders_scratch.copy() ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. 
###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.resnet50(pretrained=True) # freeze the pretrained weights (requires_grad = False) since we reuse the trained backbone for param in model_transfer.parameters(): param.requires_grad = False # replace the final layer to match the 133 dog classes in this dataset model_transfer.fc = nn.Linear(2048, 133, bias=True) fc_parameters = model_transfer.fc.parameters() # set requires_grad = True so gradients are calculated for the new layer for param in fc_parameters: param.requires_grad = True if use_cuda: model_transfer = model_transfer.cuda() ###Output Downloading: "https://download.pytorch.org/models/resnet50-19c8e357.pth" to /root/.torch/models/resnet50-19c8e357.pth 100%|██████████| 102502400/102502400 [00:06<00:00, 14728146.32it/s] ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ I selected ResNet50, which won the 2015 ILSVRC competition. The model was designed to address the **degradation problem** in very deep networks: it adopts **residual learning (mapping)**, the idea that a network trains more effectively when it learns the residual of a mapping rather than the mapping itself. ResNet50 consists of 50 layers, including bottleneck blocks that encode feature maps with 1 * 1 convolutions and then apply 3 * 3 and 1 * 1 convolutions to decode them. Except for the first 7 * 7 convolutional layer, all layers of ResNet50 use kernels no larger than 3 * 3. Pooling is rarely used; instead, convolutions with stride 2 reduce the size of the feature maps. When the feature map is downsampled to half its size, the shortcut connection is adjusted to match the new dimensions, a method also called a **projection shortcut**. (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.fc.parameters(), lr = 0.001) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. 
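Before training, a quick sanity check can confirm that the freezing above worked and only the replaced `fc` layer will receive gradients; this is a small illustrative sketch, not part of the original notebook.
###Code
# sketch: count trainable vs. total parameters of the transfer model
trainable = sum(p.numel() for p in model_transfer.parameters() if p.requires_grad)
total = sum(p.numel() for p in model_transfer.parameters())
print('trainable parameters: {} of {}'.format(trainable, total))
# expected for the new fc layer alone: 2048 * 133 weights + 133 biases = 272517
###Output _____no_output_____ ###Markdown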
###Code
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            ## forward pass
            output = model(data)
            ## calculate the loss
            loss = criterion(output, target)
            ## backward pass
            loss.backward()
            ## update the parameters
            optimizer.step()
            ## record the average training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## update the average validation loss
            output = model(data)
            loss = criterion(output, target)
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            torch.save(model.state_dict(), save_path)
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'
                  .format(valid_loss_min, valid_loss))
            valid_loss_min = valid_loss

    # return trained model
    return model

# train the model
model_transfer = train(20, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

###Output
Epoch: 1 Training Loss: 4.771116 Validation Loss: 4.529074
Validation loss decreased (inf --> 4.529074). Saving model ...
Epoch: 2 Training Loss: 4.452314 Validation Loss: 4.166428
Validation loss decreased (4.529074 --> 4.166428). Saving model ...
Epoch: 3 Training Loss: 4.172248 Validation Loss: 3.886310
Validation loss decreased (4.166428 --> 3.886310). Saving model ...
Epoch: 4 Training Loss: 3.907504 Validation Loss: 3.528552
Validation loss decreased (3.886310 --> 3.528552). Saving model ...
Epoch: 5 Training Loss: 3.663867 Validation Loss: 3.289950
Validation loss decreased (3.528552 --> 3.289950). Saving model ...
Epoch: 6 Training Loss: 3.439801 Validation Loss: 3.065610
Validation loss decreased (3.289950 --> 3.065610). Saving model ...
Epoch: 7 Training Loss: 3.248495 Validation Loss: 2.844229
Validation loss decreased (3.065610 --> 2.844229). Saving model ...
Epoch: 8 Training Loss: 3.084198 Validation Loss: 2.614779
Validation loss decreased (2.844229 --> 2.614779). Saving model ...
Epoch: 9 Training Loss: 2.919839 Validation Loss: 2.488657
Validation loss decreased (2.614779 --> 2.488657). Saving model ...
Epoch: 10 Training Loss: 2.771184 Validation Loss: 2.354540
Validation loss decreased (2.488657 --> 2.354540). Saving model ...
Epoch: 11 Training Loss: 2.658674 Validation Loss: 2.169121
Validation loss decreased (2.354540 --> 2.169121). Saving model ...
Epoch: 12 Training Loss: 2.517921 Validation Loss: 2.116398
Validation loss decreased (2.169121 --> 2.116398). Saving model ...
Epoch: 13 Training Loss: 2.416801 Validation Loss: 1.996476
Validation loss decreased (2.116398 --> 1.996476). Saving model ...
Epoch: 14 Training Loss: 2.345943 Validation Loss: 1.917447 Validation loss decreased (1.996476 --> 1.917447). Saving model ... Epoch: 15 Training Loss: 2.228709 Validation Loss: 1.766818 Validation loss decreased (1.917447 --> 1.766818). Saving model ... Epoch: 16 Training Loss: 2.170197 Validation Loss: 1.724935 Validation loss decreased (1.766818 --> 1.724935). Saving model ... Epoch: 17 Training Loss: 2.109937 Validation Loss: 1.683023 Validation loss decreased (1.724935 --> 1.683023). Saving model ... Epoch: 18 Training Loss: 2.031710 Validation Loss: 1.581914 Validation loss decreased (1.683023 --> 1.581914). Saving model ... Epoch: 19 Training Loss: 1.988067 Validation Loss: 1.553694 Validation loss decreased (1.581914 --> 1.553694). Saving model ... Epoch: 20 Training Loss: 1.939320 Validation Loss: 1.515497 Validation loss decreased (1.553694 --> 1.515497). Saving model ... ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 1.569371 Test Accuracy: 68% (570/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code from PIL import Image import torchvision.transforms as transforms ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] data_transfer = loaders_transfer.copy() class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].dataset.classes] def predict_breed_transfer(img_path): global model_transfer global transform_pipeline # load the image and return the predicted breed image = Image.open(img_path).convert('RGB') image = transform_pipeline(image)[:3,:,:].unsqueeze(0) if use_cuda: model_transfer = model_transfer.cuda() image = image.cuda() model_transfer.eval() idx = torch.argmax(model_transfer(image)) return class_names[idx] print(os.listdir('/data/dog_images/test')[0:10]) for img_file in os.listdir('/data/dog_images/test/103.Mastiff'): img_path = os.path.join('/data/dog_images/test/103.Mastiff', img_file) prediction = predict_breed_transfer(img_path) print("image: {0}, \t prediction breed: {1}".format(img_path, prediction)) ###Output image: /data/dog_images/test/103.Mastiff/Mastiff_06873.jpg, prediction breed: Mastiff image: /data/dog_images/test/103.Mastiff/Mastiff_06815.jpg, prediction breed: Mastiff image: /data/dog_images/test/103.Mastiff/Mastiff_06878.jpg, prediction breed: Mastiff image: /data/dog_images/test/103.Mastiff/Mastiff_06825.jpg, prediction breed: Mastiff image: /data/dog_images/test/103.Mastiff/Mastiff_06836.jpg, prediction breed: Mastiff image: /data/dog_images/test/103.Mastiff/Mastiff_06847.jpg, prediction breed: Mastiff image: /data/dog_images/test/103.Mastiff/Mastiff_06827.jpg, prediction breed: Cane corso ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. 
Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    if face_detector(img_path) > 0:
        breed = predict_breed_transfer(img_path)
        print('Wow! This Human looks like a: ' + breed)
    elif dog_detector(img_path):
        breed = predict_breed_transfer(img_path)
        print('This Dog looks like a: ' + breed)
    else:
        print('Error: neither a human nor a dog was detected in ' + img_path)

###Output
_____no_output_____

###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__
* If I studied more pre-trained networks, I could try more transfer-learning variants and compare the results to increase performance.
* More epochs might help the network train longer and increase performance.
* If I applied image augmentation, the model could identify a human or dog face appearing at different angles, scales, or positions.

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:10], dog_files[:10])):
    run_app(file)

###Output
Wow! This Human looks like a: Dachshund
Wow! This Human looks like a: Dachshund
Wow! This Human looks like a: American water spaniel
Wow! This Human looks like a: Bull terrier
Wow! This Human looks like a: German pinscher
Wow! This Human looks like a: Basenji
Wow! This Human looks like a: Dogue de bordeaux
Wow! This Human looks like a: Basenji
Wow! This Human looks like a: Basenji
Wow! This Human looks like a: Basenji

###Markdown
Convolutional Neural Networks
Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document.
Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. 
###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output _____no_output_____ ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. ###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output _____no_output_____ ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. 
You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
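    # A hedged sketch of one possible implementation (illustrative only, mirroring
    # the pre-processing idea described above): resize the image, convert it to a
    # tensor, add a batch dimension, and take the argmax of the VGG-16 logits.
    #
    #   img = Image.open(img_path).convert('RGB')
    #   preprocess = transforms.Compose([transforms.Resize((224, 224)),
    #                                    transforms.ToTensor()])
    #   batch = preprocess(img).unsqueeze(0)
    #   if use_cuda:
    #       batch = batch.cuda()
    #   return torch.max(VGG16(batch), 1)[1].item()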
    ## Load and pre-process an image from the given img_path
    ## Return the *index* of the predicted class for that image

    return None # predicted class index

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Write a Dog Detector
While looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).

Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).

###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
    ## TODO: Complete the function.

    return None # true/false

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Assess the Dog Detector
__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function.
- What percentage of the images in `human_files_short` have a detected dog?
- What percentage of the images in `dog_files_short` have a detected dog?

__Answer:__

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.

###Output
_____no_output_____

###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany | Welsh Springer Spaniel
- | -

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever | American Water Spaniel
- | -

Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.
Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.

Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes

###Output
_____no_output_____

###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**:

(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN

    def forward(self, x):
        ## Define forward behavior
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output
_____no_output_____

###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = None

### TODO: select optimizer
optimizer_scratch = None

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
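One possible shape for the save-on-improvement TODO in the cell below (a sketch, assuming `valid_loss_min` tracks the best validation loss seen so far):

    if valid_loss < valid_loss_min:
        torch.save(model.state_dict(), save_path)
        valid_loss_min = valid_loss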
###Code # the following import is required for training to be robust to truncated images from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased # return trained model return model # train the model model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). 
If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = None optimizer_transfer = None ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = # train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. 
Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.

def run_app(img_path):
    ## handle cases for a human face, dog, and neither

###Output
_____no_output_____

###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.

__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ (Three possible points for improvement)

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.

## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
    run_app(file)

###Output
_____no_output_____

###Markdown
Convolutional Neural Networks
Project: Write an Algorithm for a Dog Identification App
---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.

>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.

The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.

--- Why We're Here
In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app.
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
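# Detection rate on each 100-image sample, expressed as a percentage: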
human_performance = sum([face_detector(f) for f in human_files_short])/len(human_files_short)*100
dog_performance = sum([face_detector(f) for f in dog_files_short])/len(dog_files_short)*100
print(f"Human face detected in {human_performance}% of human files and {dog_performance}% of dog files")

###Output
Human face detected in 98.0% of human files and 17.0% of dog files

###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

###Output
_____no_output_____

###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.

Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).

###Code
import torch
import torchvision.models as models

# define VGG16 model
VGG16 = models.vgg16(pretrained=True)

# check if CUDA is available
use_cuda = torch.cuda.is_available()

# move model to GPU if CUDA is available
if use_cuda:
    VGG16 = VGG16.cuda()

###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth
100%|██████████| 553433881/553433881 [00:06<00:00, 87877537.14it/s]

###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image.

(IMPLEMENTATION) Making Predictions with a Pre-trained Model
In the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.

Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).

###Code
from PIL import Image
import torchvision.transforms as transforms

def VGG16_predict(img_path):
    '''
    Use pre-trained VGG-16 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to VGG-16 model's prediction
    '''
    ## TODO: Complete the function.
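    # Note: torchvision's ImageNet-pretrained models are typically fed 224x224
    # inputs normalized with mean [0.485, 0.456, 0.406] and std [0.229, 0.224, 0.225];
    # the implementation below resizes to 244x244 without normalization, which is
    # still adequate for coarse dog detection (see the rates reported in Question 2).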
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image image = Image.open(img_path).convert('RGB') in_transform = transforms.Compose([ transforms.Resize(size=(244, 244)), transforms.ToTensor()]) image = in_transform(image)[:3,:,:].unsqueeze(0) if use_cuda: image = image.cuda() ret = VGG16(image) return torch.max(ret,1)[1].item() # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. i = VGG16_predict(img_path) return True if (i>=151)&(i<=268) else False # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. human_performance = sum([dog_detector(f) for f in human_files_short])/len(human_files_short)*100 dog_performance = sum([dog_detector(f) for f in dog_files_short])/len(dog_files_short)*100 print(f"Dog detected in {human_performance}% of human files and {dog_performance}% of dog files") ###Output Dog detected in 0.0% of human files and 97.0% of dog files ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. 
To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany | Welsh Springer Spaniel
- | -

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever | American Water Spaniel
- | -

Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.

Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.

Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!

###Code
import os
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
num_workers = 0
batch_size = 20
mean = [0.4766, 0.4527, 0.3926]
std = [0.2275, 0.2224, 0.2210]

transforms = {'train': transforms.Compose([
                  transforms.RandomResizedCrop(224),
                  transforms.RandomHorizontalFlip(),
                  transforms.RandomRotation(10),
                  transforms.ToTensor(),
                  transforms.Normalize(mean, std)
              ]),
              'test': transforms.Compose([
                  transforms.Resize((224, 224)),
                  transforms.ToTensor(),
                  transforms.Normalize(mean, std)
              ]),
              'val': transforms.Compose([
                  transforms.Resize(256),
                  transforms.CenterCrop(224),
                  transforms.ToTensor(),
                  transforms.Normalize(mean, std)
              ])}

train_data = datasets.ImageFolder('/data/dog_images/train', transform=transforms['train'])
test_data = datasets.ImageFolder('/data/dog_images/test', transform=transforms['test'])
val_data = datasets.ImageFolder('/data/dog_images/valid', transform=transforms['val'])

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
# the validation loader draws from the validation split
valid_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)

loaders_scratch = {
    'train': train_loader,
    'valid': valid_loader,
    'test': test_loader
}

###Output
_____no_output_____

###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)?
What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**: The code above preprocesses the training data by applying both resizing and augmenting transforms, while the test and validation data undergo resizing only. Randomly applying horizontal flips and rotations usually helps to avoid overfitting. All images end up at 224x224; the validation set is resized to 256 and then center-cropped to 224.

(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.

###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        ## Define layers of a CNN
        self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        # max pooling layer
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(7*7*128, 500)
        self.fc2 = nn.Linear(500, 133)
        self.dropout = nn.Dropout(.3)

    def forward(self, x):
        ## Define forward behavior
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = x.view(-1, 7*7*128)
        x = self.dropout(x)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

###Output
_____no_output_____

###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__ The net defined above is composed of three convolutional layers. The first two, each with a kernel size of 3 and a stride of 2, halve the spatial size of the input image (224x224), and each is followed by a max-pooling layer with a stride of 2 that halves it again. The last convolutional layer, with a kernel size of 3 and a stride of 1, does not downsize further; the max-pooling layer that follows it halves the size once more, yielding a 7x7 feature map with a depth of 128. Dropout of 0.3 is applied before the first and second fully connected layers. The latter concludes the convolutional neural network by producing the final output predicting the class of dog breed.

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr = 0.05)

###Output
_____no_output_____

###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) if batch_idx % 100 == 0: print('Epoch %d, Batch %d loss: %.6f' % (epoch, batch_idx + 1, train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: torch.save(model.state_dict(), save_path) print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) valid_loss_min = valid_loss # return trained model return model # train the model # model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch, # criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. 
* correct / total, correct, total))

# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
Test Loss: 3.463608

Test Accuracy: 17% (148/836)
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)
You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set.

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.

###Code
## TODO: Specify data loaders
loaders_transfer = loaders_scratch.copy()
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model Architecture
Use transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.

###Code
import torchvision.models as models
import torch.nn as nn

## TODO: Specify model architecture
model_transfer = models.resnet50(pretrained=True)

for param in model_transfer.parameters():
    param.requires_grad = False

model_transfer.fc = nn.Linear(2048, 133, bias=True)

fc_parameters = model_transfer.fc.parameters()
for param in fc_parameters:
    param.requires_grad = True

if use_cuda:
    model_transfer = model_transfer.cuda()
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.  Describe why you think the architecture is suitable for the current problem.

__Answer:__ The code above makes use of ResNet-50, a 50-layer-deep convolutional neural network pretrained on images from the ImageNet dataset. This network makes use of residual modules to achieve good performance even at that depth. By reusing the pre-trained weights of ResNet-50, it is possible to transfer the features ResNet has already learnt and thus obtain a more accurate dog breed classifier, relatively quickly, than one developed from scratch. Because ResNet-50 has been trained on a large and varied dataset, its early convolutional layers, which generally extract low-level features, can be kept as they are, while only the later layers need to be trained to identify features specific to this task, in this case dog breeds. To do so, the last fully connected layer of ResNet-50 has been replaced with a new one whose output has 133 classes, corresponding to the number of dog breeds in the dataset, and only this layer is trained.

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
optimizer_transfer = optim.SGD(model_transfer.fc.parameters(), lr=0.001)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below.
[Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = train(20, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output Epoch 1, Batch 1 loss: 5.133555 Epoch 1, Batch 101 loss: 4.905063 Epoch 1, Batch 201 loss: 4.875894 Epoch 1, Batch 301 loss: 4.841925 Epoch: 1 Training Loss: 4.830521 Validation Loss: 4.693387 Validation loss decreased (inf --> 4.693387). Saving model ... Epoch 2, Batch 1 loss: 4.774242 Epoch 2, Batch 101 loss: 4.684658 Epoch 2, Batch 201 loss: 4.665756 Epoch 2, Batch 301 loss: 4.640277 Epoch: 2 Training Loss: 4.631534 Validation Loss: 4.501965 Validation loss decreased (4.693387 --> 4.501965). Saving model ... Epoch 3, Batch 1 loss: 4.621310 Epoch 3, Batch 101 loss: 4.517296 Epoch 3, Batch 201 loss: 4.491434 Epoch 3, Batch 301 loss: 4.469803 Epoch: 3 Training Loss: 4.459975 Validation Loss: 4.301536 Validation loss decreased (4.501965 --> 4.301536). Saving model ... Epoch 4, Batch 1 loss: 4.277418 Epoch 4, Batch 101 loss: 4.350959 Epoch 4, Batch 201 loss: 4.326886 Epoch 4, Batch 301 loss: 4.306923 Epoch: 4 Training Loss: 4.299499 Validation Loss: 4.118926 Validation loss decreased (4.301536 --> 4.118926). Saving model ... Epoch 5, Batch 1 loss: 4.236974 Epoch 5, Batch 101 loss: 4.175257 Epoch 5, Batch 201 loss: 4.156154 Epoch 5, Batch 301 loss: 4.144238 Epoch: 5 Training Loss: 4.138127 Validation Loss: 3.962235 Validation loss decreased (4.118926 --> 3.962235). Saving model ... Epoch 6, Batch 1 loss: 3.985020 Epoch 6, Batch 201 loss: 4.016633 Epoch 6, Batch 301 loss: 3.996942 Epoch: 6 Training Loss: 3.989564 Validation Loss: 3.801088 Validation loss decreased (3.962235 --> 3.801088). Saving model ... Epoch 7, Batch 1 loss: 3.880208 Epoch 7, Batch 101 loss: 3.873881 Epoch 7, Batch 201 loss: 3.862861 Epoch 7, Batch 301 loss: 3.834643 Epoch: 7 Training Loss: 3.832261 Validation Loss: 3.631874 Validation loss decreased (3.801088 --> 3.631874). Saving model ... Epoch 8, Batch 1 loss: 3.578899 Epoch 8, Batch 101 loss: 3.736552 Epoch 8, Batch 201 loss: 3.725370 Epoch 8, Batch 301 loss: 3.714131 Epoch: 8 Training Loss: 3.709746 Validation Loss: 3.476309 Validation loss decreased (3.631874 --> 3.476309). Saving model ... Epoch 9, Batch 1 loss: 3.572732 Epoch 9, Batch 101 loss: 3.573869 Epoch 9, Batch 201 loss: 3.566036 Epoch 9, Batch 301 loss: 3.564022 Epoch: 9 Training Loss: 3.560654 Validation Loss: 3.331898 Validation loss decreased (3.476309 --> 3.331898). Saving model ... Epoch 10, Batch 1 loss: 3.201677 Epoch 10, Batch 101 loss: 3.500328 Epoch 10, Batch 201 loss: 3.459558 Epoch 10, Batch 301 loss: 3.455949 Epoch: 10 Training Loss: 3.445706 Validation Loss: 3.204941 Validation loss decreased (3.331898 --> 3.204941). Saving model ... Epoch 11, Batch 1 loss: 3.675229 Epoch 11, Batch 101 loss: 3.365632 Epoch 11, Batch 201 loss: 3.364230 Epoch 11, Batch 301 loss: 3.348577 Epoch: 11 Training Loss: 3.345729 Validation Loss: 3.119454 Validation loss decreased (3.204941 --> 3.119454). Saving model ... Epoch 12, Batch 1 loss: 3.550156 Epoch 12, Batch 101 loss: 3.217167 Epoch 12, Batch 201 loss: 3.248455 Epoch 12, Batch 301 loss: 3.236417 Epoch: 12 Training Loss: 3.232540 Validation Loss: 2.971380 Validation loss decreased (3.119454 --> 2.971380). Saving model ... 
Epoch 13, Batch 1 loss: 3.118215 Epoch 13, Batch 101 loss: 3.130287 Epoch 13, Batch 201 loss: 3.136394 Epoch 13, Batch 301 loss: 3.120378 Epoch: 13 Training Loss: 3.116156 Validation Loss: 2.866872 Validation loss decreased (2.971380 --> 2.866872). Saving model ... Epoch 14, Batch 1 loss: 3.020964 Epoch 14, Batch 101 loss: 3.059361 Epoch 14, Batch 201 loss: 3.037999 Epoch 14, Batch 301 loss: 3.028959 Epoch: 14 Training Loss: 3.025121 Validation Loss: 2.770550 Validation loss decreased (2.866872 --> 2.770550). Saving model ... Epoch 15, Batch 1 loss: 2.907888 Epoch 15, Batch 101 loss: 2.953243 Epoch 15, Batch 201 loss: 2.939885 Epoch 15, Batch 301 loss: 2.929307 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output Test Loss: 2.502897 Test Accuracy: 65% (547/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. import torchvision.transforms as transforms # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed image = Image.open(img_path).convert('RGB') prediction_transform = transforms.Compose([transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) image = prediction_transform(image)[:3,:,:].unsqueeze(0) model = model_transfer.cpu() model.eval() idx = torch.argmax(model(image)) return class_names[idx] ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code from PIL import Image ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither img = Image.open(img_path) plt.imshow(img) plt.show() if dog_detector(img_path) is True: p = predict_breed_transfer(img_path) print(f"Hello, dog! \n You look like a {p} \n") elif face_detector(img_path) > 0: p = predict_breed_transfer(img_path) print(f"Hello, human! \n You look like a {p} \n") else: print("Error! Neither a human face or dog has been detected. 
\n") ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)The model seems to be working quite well with close-up images but struggle with far away shots. This might suggest more image augmentation is needed to achive better results. Also other point of improvemente could be:1. Add more hidden layers2. Increase dataset size 3. More hyper-parameter tunings ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____ ###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. 
At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:* Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dogImages`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("lfw/*/*")) dog_files = np.array(glob("dogImages/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
def human_detector(image_files, detector):
    '''
    run the given face detector over the image files and return the per-image results
    '''
    image_results = []
    for image_file in tqdm(image_files):
        image_results.append(detector(image_file))
    # a single conversion is enough (the result was previously converted twice)
    return np.array(image_results)

# human
human_results = human_detector(human_files_short, face_detector)

# dog
dog_results = human_detector(dog_files_short, face_detector)

# accuracy
human_accuracy = human_results.sum() / len(human_results) * 100
dog_accuracy = dog_results.sum() / len(dog_results) * 100
###Output
_____no_output_____
###Markdown
Answer

###Code
print('human: {0:.1f}%'.format(human_accuracy))
print('dog  : {0:.1f}%'.format(dog_accuracy))
###Output
human: 97.0%
dog  : 6.0%
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Test performance of another face detection algorithm.
### Feel free to use as many code cells as needed.

# returns a face detector closed over the given cascade classifier
def face_detector2(face_cascade):
    # returns "True" if face is detected in image stored at img_path
    def face_detector(img_path):
        img = cv2.imread(img_path)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray)
        return len(faces) > 0
    return face_detector
###Output
_____no_output_____
###Markdown
Using another haarcascade

###Code
# detect human and dog using another haarcascade
# haarcascade_frontalface_alt2
face_cascade_alt2 = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt2.xml')

# frontalface_alt2
face_detector_alt2 = face_detector2(face_cascade_alt2)

# human
human_results_alt2 = human_detector(human_files_short, face_detector_alt2)

# dog
dog_results_alt2 = human_detector(dog_files_short, face_detector_alt2)

# accuracy
human_accuracy_alt2 = human_results_alt2.sum() / len(human_results_alt2) * 100
dog_accuracy_alt2 = dog_results_alt2.sum() / len(dog_results_alt2) * 100

print('human: {0:.1f}%'.format(human_accuracy_alt2))
print('dog  : {0:.1f}%'.format(dog_accuracy_alt2))
###Output
human: 98.0%
dog  : 12.0%
###Markdown
frontalface_alt AND frontalface_alt2

###Code
# accuracy
human_accuracy_and = (human_results & human_results_alt2).sum() / len(human_results) * 100
dog_accuracy_and = (dog_results & dog_results_alt2).sum() / len(dog_results) * 100

print('human: {0:.1f}%'.format(human_accuracy_and))
print('dog  : {0:.1f}%'.format(dog_accuracy_and))
###Output
human: 97.0%
dog  : 5.0%
###Markdown
--- Step 2: Detect Dogs
In this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images.

Obtain Pre-trained VGG-16 Model
The code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
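Once downloaded, a model like this maps a preprocessed image to 1000 class scores. As a small added illustration (a sketch, not part of the original template; the random tensor merely stands in for a properly normalized image), `softmax` and `topk` turn those scores into the most likely category indices:

###Code
import torch
import torchvision.models as models

# illustrative sketch: inspect the five most likely ImageNet classes for one input
vgg = models.vgg16(pretrained=True).eval()
x = torch.randn(1, 3, 224, 224)            # stand-in for a preprocessed 224x224 image
with torch.no_grad():
    probs = torch.softmax(vgg(x), dim=1)   # raw scores -> probabilities over 1000 classes
top_p, top_idx = probs.topk(5, dim=1)      # five highest-probability categories
print(top_idx[0].tolist())                 # indices into the 1000 ImageNet classes
###Output
_____no_output_____
###Markdown
The next cell loads the model once for the rest of this step and moves it to the GPU when one is available.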
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____ ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms # Set PIL to be tolerant of image files that are truncated. from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image VGG16.eval() normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize ]) # load image img = transform(Image.open(img_path)) if use_cuda: img = img.cuda() img = img.view(1, img.shape[0], img.shape[1], img.shape[2]) # add batch dimension pred = VGG16(img) return int(pred.argmax()) # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. # prediction pred = VGG16_predict(img_path) # detect dog or not is_dog = False if pred >= 151 and pred <= 268: is_dog = True return is_dog # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? 
__Answer:__ A dog was detected in 0.0% of the images in `human_files_short` and in 100.0% of the images in `dog_files_short` (results printed below).

###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
def dog_detect_accuracy(detector, images):
    '''
    detect dogs and calculate the detection percentage
    '''
    predicted = []
    for img_path in tqdm(images):
        predicted.append(detector(img_path))
    predicted = np.array(predicted)
    accuracy = predicted.sum() / predicted.size * 100
    return accuracy

human_accuracy_vgg = dog_detect_accuracy(dog_detector, human_files_short)
dog_accuracy_vgg = dog_detect_accuracy(dog_detector, dog_files_short)
###Output
100%|██████████| 100/100 [00:03<00:00, 30.53it/s]
###Markdown
Answer

###Code
print('human: {0:.1f}%'.format(human_accuracy_vgg))
print('dog  : {0:.1f}%'.format(dog_accuracy_vgg))
###Output
human: 0.0%
dog  : 100.0%
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.

###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
###Output
_____no_output_____
###Markdown
Use ResNet-50

###Code
# define ResNet50 model
ResNet50 = models.resnet50(pretrained=True)

# move model to GPU if CUDA is available
if use_cuda:
    ResNet50 = ResNet50.cuda()

def ResNet_predict(img_path):
    '''
    Use pre-trained ResNet-50 model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to ResNet-50 model's prediction
    '''
    ResNet50.eval()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])

    # load image
    img = transform(Image.open(img_path))
    if use_cuda:
        img = img.cuda()
    img = img.view(1, img.shape[0], img.shape[1], img.shape[2])  # add batch dimension

    pred = ResNet50(img)
    return int(pred.argmax())  # predicted class index

### returns "True" if a dog is detected in the image stored at img_path
def dog_detector_resnet(img_path):
    # prediction
    pred = ResNet_predict(img_path)

    # detect dog or not
    is_dog = False
    if pred >= 151 and pred <= 268:
        is_dog = True

    return is_dog  # true/false

human_accuracy_resnet = dog_detect_accuracy(dog_detector_resnet, human_files_short)
dog_accuracy_resnet = dog_detect_accuracy(dog_detector_resnet, dog_files_short)

# ResNet-50
print('human: {0:.1f}%'.format(human_accuracy_resnet))
print('dog  : {0:.1f}%'.format(dog_accuracy_resnet))
###Output
human: 0.0%
dog  : 100.0%
###Markdown
Use AlexNet

###Code
# define AlexNet model
AlexNet = models.alexnet(pretrained=True)

# move model to GPU if CUDA is available
if use_cuda:
    AlexNet = AlexNet.cuda()

def AlexNet_predict(img_path):
    '''
    Use pre-trained AlexNet model to obtain index corresponding to
    predicted ImageNet class for image at specified path

    Args:
        img_path: path to an image

    Returns:
        Index corresponding to AlexNet model's prediction
    '''
    AlexNet.eval()

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])

    # load image
    img = transform(Image.open(img_path))
    if use_cuda:
        img = img.cuda()
    img = img.view(1, img.shape[0], img.shape[1], img.shape[2])  # add batch dimension

    pred = AlexNet(img)
    return int(pred.argmax())  # predicted class index

### returns "True" if a dog is detected in the image stored at img_path
def dog_detector_alexnet(img_path):
    # prediction
    pred = AlexNet_predict(img_path)

    # detect dog or not
    is_dog = False
    if pred >= 151 and pred <= 268:
        is_dog = True

    return is_dog  # true/false

human_accuracy_alexnet = dog_detect_accuracy(dog_detector_alexnet, human_files_short)
dog_accuracy_alexnet = dog_detect_accuracy(dog_detector_alexnet, dog_files_short)

# AlexNet
print('human: {0:.1f}%'.format(human_accuracy_alexnet))
print('dog  : {0:.1f}%'.format(dog_accuracy_alexnet))
###Output
human: 1.0%
dog  : 100.0%
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)
Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.

We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel.

Brittany | Welsh Springer Spaniel
- | -

It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels).

Curly-Coated Retriever | American Water Spaniel
- | -

Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed.

Yellow Labrador | Chocolate Labrador | Black Labrador
- | - | -

We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%.

Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun!

(IMPLEMENTATION) Specify Data Loaders for the Dog Dataset
Use the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
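The next cell estimates a per-channel mean and standard deviation over the training images; `transforms.Normalize` then applies (x - mean) / std channel-wise. The tiny sketch below is an added illustration with made-up statistics, not part of the original template:

###Code
import torch
import torchvision.transforms as transforms

# illustrative sketch: Normalize applies (x - mean) / std to each channel
norm = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25])
x = torch.full((3, 2, 2), 0.75)   # toy 3-channel "image" filled with 0.75
print(norm(x))                    # every value becomes (0.75 - 0.5) / 0.25 = 1.0
###Output
_____no_output_____
###Markdown
With statistics estimated from the data itself, the normalized channels end up roughly zero-mean and unit-variance, which generally helps training.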
###Code
import os
from torchvision import datasets

### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes

# calculate mean and std
transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
])
dataset = datasets.ImageFolder('./dogImages/train/', transform=transform)

means = []
stds = []
for img, label in tqdm(dataset):
    means.append([torch.mean(img[0]), torch.mean(img[1]), torch.mean(img[2])])
    stds.append([torch.std(img[0]), torch.std(img[1]), torch.std(img[2])])

means = np.array(means)
stds = np.array(stds)

mean = [means[:, 0].mean(), means[:, 1].mean(), means[:, 2].mean()]
std = [stds[:, 0].mean(), stds[:, 1].mean(), stds[:, 2].mean()]

# mean and std
print('mean : ', mean)
print('std  : ', std)

import numpy as np
import os
from tqdm import tqdm
from torchvision import datasets
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

use_cuda = torch.cuda.is_available()

mean = [0.48640698, 0.45601872, 0.39183483]
std = [0.23047441, 0.2256413, 0.22351243]

# normalize
normalize = transforms.Normalize(mean=mean, std=std)

# train data
transform_train = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(degrees=10),
    transforms.ToTensor(),
    normalize,
])
train_data = datasets.ImageFolder('./dogImages/train/', transform=transform_train)

# validation data
transform_valid = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
valid_data = datasets.ImageFolder('./dogImages/valid/', transform=transform_valid)

# test data
transform_test = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
test_data = datasets.ImageFolder('./dogImages/test/', transform=transform_test)
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data.
- How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?
- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not?

**Answer**: For the training data, images are resized to 256 and center-cropped to 224x224, then randomly flipped horizontally and randomly rotated by up to 10 degrees before being converted to tensors and normalized with the per-channel mean and standard deviation estimated above. 224x224 matches the input size that the pretrained ImageNet models used in this notebook expect. The test and validation data are only resized, center-cropped, and normalized; because they are used for evaluation, they are not augmented.

(IMPLEMENTATION) Model Architecture
Create a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F

# define the CNN architecture
class Net(nn.Module):
    ### TODO: choose an architecture, and complete the class
    def __init__(self):
        super(Net, self).__init__()
        # convolutional layer (3x224x224 -> 16x224x224)
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, padding=1)
        # pooling layer (16x224x224 -> 16x112x112)
        self.max1 = nn.MaxPool2d(kernel_size=2, stride=2)
        # batch normalization
        self.bn1 = nn.BatchNorm2d(16)

        # convolutional layer (16x112x112 -> 32x112x112)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
        # pooling layer (32x112x112 -> 32x56x56)
        self.max2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # batch normalization
        self.bn2 = nn.BatchNorm2d(32)

        # convolutional layer (32x56x56 -> 64x56x56)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1)
        # pooling layer (64x56x56 -> 64x28x28)
        self.max3 = nn.MaxPool2d(kernel_size=2, stride=2)
        # batch normalization
        self.bn3 = nn.BatchNorm2d(64)

        # linear layer (64x28x28 -> 2048)
        self.fc1 = nn.Linear(64 * 28 * 28, 2048)
        # linear layer (2048 -> 133)
        self.fc2 = nn.Linear(2048, 133)
        # dropout layer (p=0.25)
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        # add sequence of convolutional and max pooling layers
        # convolution 1
        x = F.relu(self.conv1(x))
        x = self.max1(x)
        x = self.bn1(x)

        # convolution 2
        x = F.relu(self.conv2(x))
        x = self.max2(x)
        x = self.bn2(x)

        # convolution 3
        x = F.relu(self.conv3(x))
        x = self.max3(x)
        x = self.bn3(x)

        # linear
        x = x.view(-1, 64 * 28 * 28)  # flatten image input
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)

        return x

#-#-# You do NOT have to modify the code below this line. #-#-#

# instantiate the CNN
model_scratch = Net()

# move tensors to GPU if CUDA is available
if use_cuda:
    model_scratch.cuda()

def weights_init_uniform(m):
    '''
    initialize Linear and Conv2d weights with Xavier uniform
    '''
    classname = m.__class__.__name__
    if classname.find('Linear') != -1 or classname.find('Conv2d') != -1:
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)

# weight initialization
model_scratch = model_scratch.apply(weights_init_uniform)
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step.

__Answer:__ My network consists of 3 convolutional layers followed by 2 linear layers. The channel depth grows 3 -> 16 -> 32 -> 64 while the max-pooling layers shrink the height/width 224 -> 112 -> 56 -> 28, and the final linear layer outputs the 133 breed classes. To reduce overfitting, batch normalization is applied after every convolutional layer and dropout is applied between the two linear layers.

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.

###Code
import torch.optim as optim

### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss()

### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
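Since the answer above relies on batch normalization and dropout, note that both layers behave differently in training and evaluation mode, which is why the training loop below switches between `model.train()` and `model.eval()`. The following added sketch (illustrative only, not part of the original template) makes the difference visible for dropout:

###Code
import torch
import torch.nn as nn

# illustrative sketch: dropout is stochastic in train mode, a no-op in eval mode
layer = nn.Dropout(0.25)
x = torch.ones(1, 8)

layer.train()
print(layer(x))  # about a quarter of the activations zeroed, the rest scaled by 1/0.75

layer.eval()
print(layer(x))  # unchanged: dropout does nothing at evaluation time
###Output
_____no_output_____
###Markdown
Batch normalization similarly switches from batch statistics to running statistics in eval mode, so forgetting the mode switch typically hurts validation and test scores.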
###Code
# batch size
batch_size = 16

# number of subprocesses to use for data loading
num_workers = 0

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)

loaders_scratch = {'train': train_loader, 'valid': valid_loader, 'test': test_loader}

# the following import is required for training to be robust to truncated images
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """returns trained model"""
    # initialize tracker for minimum validation loss
    valid_loss_min = np.Inf
    not_updated_count = 0  # number of epochs in a row without improvement

    for epoch in range(1, n_epochs+1):
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in tqdm(enumerate(loaders['train'])):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            ## find the loss and update the model parameters accordingly
            ## record the average training loss, using something like
            ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
            # clear the gradients
            optimizer.zero_grad()
            # forward pass
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # backward pass
            loss.backward()
            # perform a single optimization step
            optimizer.step()
            # update training loss
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        for batch_idx, (data, target) in enumerate(loaders['valid']):
            # move to GPU
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss (bug fix: the loss was previously never
            # recomputed here, so the stale training loss was being averaged)
            loss = criterion(output, target)
            # update the average validation loss
            valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        ## TODO: save the model if validation loss has decreased
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}).  Saving model ...'.format(
                valid_loss_min,
                valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss
            not_updated_count = 0
        else:
            not_updated_count += 1

        # early stopping: the validation loss has not decreased for several epochs
        if not_updated_count > 5:
            print('Training stopped')
            break

    # return trained model
    return model

# train the model
model_scratch = train(100, loaders_scratch, model_scratch, optimizer_scratch,
                      criterion_scratch, use_cuda, 'model_scratch.pt')

# load the model that got the best validation accuracy
model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
418it [01:09,  6.00it/s]
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.

###Code
def test(loaders, model, criterion, use_cuda):
    # monitor test loss and accuracy
    test_loss = 0.
    correct = 0.
    total = 0.
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output Test Loss: 6.513329 Test Accuracy: 13% (111/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders # train data transform_train = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomRotation(degrees=10), transforms.ToTensor(), normalize, ]) train_data = datasets.ImageFolder('./dogImages/train/', transform=transform_train) # validation data transform_valid = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ]) valid_data = datasets.ImageFolder('./dogImages/valid/', transform=transform_valid) # test data transform_test = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ]) test_data = datasets.ImageFolder('./dogImages/test/', transform=transform_test) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture model_transfer = models.alexnet(pretrained=True) # print out the model structure print(model_transfer) # Freeze training for all "features" layers for param in model_transfer.parameters(): param.requires_grad = False # replace last linear layer input output 1000 -> 133 n_inputs = model_transfer.classifier[6].in_features last_layer = nn.Linear(n_inputs, 133) model_transfer.classifier[6] = last_layer # print out the new model structure print(model_transfer) if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. 
__Answer:__ I tried three types of network for Step 2: VGG-16, ResNet-50, and AlexNet. They gave similarly good results in dog detection, but AlexNet is exceptionally fast, so I chose AlexNet. As a first step, I froze the weights of all the layers. Then I replaced the final linear layer with one that outputs 133 classes, and re-trained only that final layer.

(IMPLEMENTATION) Specify Loss Function and Optimizer
Use the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.

###Code
criterion_transfer = nn.CrossEntropyLoss()
# optimize only the parameters that still require gradients (here, the new final layer)
optimizer_transfer = optim.SGD(filter(lambda p: p.requires_grad, model_transfer.parameters()), lr=0.01)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the Model
Train and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.

###Code
batch_size = 16
num_workers = 0

train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers, shuffle=False)

loaders_transfer = {'train': train_loader, 'valid': valid_loader, 'test': test_loader}

# train the model
model_transfer = train(20, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')

# load the model that got the best validation accuracy
model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
418it [01:10,  5.93it/s]
###Markdown
(IMPLEMENTATION) Test the Model
Try out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.

###Code
# load the model that got the best validation accuracy
model_transfer.load_state_dict(torch.load('model_transfer.pt', map_location=torch.device('cpu')))

test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
Test Loss: 1.146129

Test Accuracy: 69% (583/836)
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the Model
Write a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.

###Code
from PIL import Image

### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.

# list of class names by index, i.e.
a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in train_data.classes]

def predict_breed_transfer(img_path):
    # load the image and return the predicted breed
    data = Image.open(img_path)
    data = transform_test(data)
    data = data.view(1, 3, 224, 224)

    # move to GPU
    if use_cuda:
        data = data.cuda()

    # predict
    model_transfer.eval()
    output = model_transfer(data)
    output = F.softmax(output, dim=1)

    # convert output probabilities to predicted class
    _, preds_tensor = torch.max(output, 1)
    probability = output.max()
    probability = probability.cpu().detach().numpy()
    preds = np.squeeze(preds_tensor.numpy()) if not use_cuda else np.squeeze(preds_tensor.cpu().numpy())

    return class_names[preds], probability

# American water spaniel
img_path = './images/American_water_spaniel_00648.jpg'
name, prob = predict_breed_transfer(img_path)
print('Expected : American water spaniel\nPredicted: {0}, {1:.3f}'.format(name, prob))

# Brittany
img_path = './images/Brittany_02625.jpg'
name, prob = predict_breed_transfer(img_path)
print('Expected : Brittany\nPredicted: {0}, {1:.3f}'.format(name, prob))

# Curly-coated_retriever
img_path = './images/Curly-coated_retriever_03896.jpg'
name, prob = predict_breed_transfer(img_path)
print('Expected : Curly-coated_retriever\nPredicted: {0}, {1:.3f}'.format(name, prob))
###Output
Expected : Curly-coated_retriever
Predicted: Curly-coated retriever, 0.919
###Markdown
--- Step 5: Write your Algorithm
Write an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,
- if a __dog__ is detected in the image, return the predicted breed.
- if a __human__ is detected in the image, return the resembling dog breed.
- if __neither__ is detected in the image, provide output that indicates an error.

You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!

![Sample Human Output](images/sample_human_output.png)

(IMPLEMENTATION) Write your Algorithm

###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
    ## handle cases for a human face, dog, and neither
    title_message = None
    detail_message = None

    # detect human or others
    if face_detector(img_path):
        title_message = 'Hello Human!'

    # detect dog
    name, prob = predict_breed_transfer(img_path)
    # if the breed probability is greater than 0.25, it seems to be a dog
    if prob > 0.25:
        title_message = 'Hello Dog!'

    if title_message is None:
        title_message = 'Hello Someone!'

    detail_message = 'You look like a "{0}" (score={1:.2f})'.format(name, prob)

    # display
    print(title_message)
    im = Image.open(img_path)
    plt.imshow(im)
    plt.show()
    print(detail_message)
    print('======================================')
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your Algorithm
In this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog?

(IMPLEMENTATION) Test Your Algorithm on Sample Images!
Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images.
__Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm.

__Answer:__ (Three possible points for improvement)
1. Dog classification accuracy: my dog classifier's accuracy is only 69% so far. It could likely be improved somewhat by using another network and tuning hyper-parameters. It also sometimes mistakes cats and cartoon characters for dogs.
2. Dog classification probability: some dogs receive a low classification probability. I should use more variation in the dog images, for example by gathering more dog images or applying additional augmentation.
3. Human/dog detection: I first use OpenCV to decide whether an image contains a human, then use deep learning to classify the dog breed; if the image is not human and the dog-class probability is low, I label it as "other". It would be better to build a human/dog/other classifier first.

###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
import glob

for img_path in glob.glob('./test_images/*.jpg'):
    run_app(img_path)
###Output
Hello Dog!
###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App

---
In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!

> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.

In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.

>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.

The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.

--- Why We're Here
In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling.
The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code ## all imports import os import numpy as np from glob import glob import cv2 import matplotlib.pyplot as plt %matplotlib inline from PIL import Image, ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True import torch from torch import nn, optim import torch.nn.functional as F from torchvision import datasets import torchvision.models as models import torchvision.transforms as transforms from collections import OrderedDict # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. ###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ 98.00% human detected in human files, and 17.00% human detected in dog files ###Code human_files_short = human_files[:100] dog_files_short = dog_files[:100] # TODO: Test the performance of the face_detector algorithm # on the images in human_files_short and dog_files_short. 
num_h = 0 num_d = 0 for human in human_files_short: if face_detector(human): num_h += 1 for dog in dog_files_short: if face_detector(dog): num_d += 1 print( " {:.2f}% human detected in human files, and {:.2f}% human detected in dog files ".format( num_h/len(human_files_short)*100, num_d/len(dog_files_short)*100 ) ) ###Output 98.00% human detected in human files, and 17.00% human detected in dog files ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of another face detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). ###Code # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output _____no_output_____
""" image = tensor.to("cpu").clone().detach() image = image.numpy().squeeze() image = image.transpose(1,2,0) image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406)) image = image.clip(0, 1) return image def im_display(image): fig, ax1 = plt.subplots(figsize=(10, 5)) # content and style ims side-by-side ax1.imshow(im_convert(image)) def im_load(img_path, max_size): image = Image.open(img_path).convert('RGB') if max(image.size) > max_size: size = max_size else: size = max(image.size) in_transform = transforms.Compose([ transforms.Resize(size), transforms.CenterCrop(size), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) image = in_transform(image).unsqueeze(0) return image # init model model_VGG16 = VGG16 criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model_VGG16.parameters(), lr = 0.001) def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. ## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image image_in = im_load(img_path, 224) output = model_VGG16(image_in) return torch.argmax(output) # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. pred = VGG16_predict(img_path).numpy() return (pred >= 151 and pred <= 268) # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ 1% dog detected in human_files_short, and 100% dog detected in dog_files_short ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. 
num_h = 0 num_d = 0 for human in human_files_short: if dog_detector(human): num_h += 1 for dog in dog_files_short: if dog_detector(dog): num_d += 1 print( " {:.2f}% dog detected in human files, and {:.2f}% dog detected in dog files".format( num_h/len(human_files_short)*100, num_d/len(dog_files_short)*100 ) ) ###Output 0.00% dog detected in human files, and 100.00% dog detected in dog files ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.html#inception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.html#id3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that Labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imbalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes # data augmentation image_size = 128 train_transforms = transforms.Compose([ transforms.Resize(image_size), transforms.CenterCrop(image_size), transforms.RandomRotation(30), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) transform = transforms.Compose([ transforms.Resize(image_size), transforms.CenterCrop(image_size), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # load image data dataset_train = datasets.ImageFolder('/data/dog_images/train', transform=train_transforms) dataset_valid = datasets.ImageFolder('/data/dog_images/valid', transform=transform) dataset_test = datasets.ImageFolder('/data/dog_images/test', transform=transform) # data loaders batch_size = 10 dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size, shuffle=True) dataloader_valid = torch.utils.data.DataLoader(dataset_valid, batch_size=batch_size, shuffle=True) dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size, shuffle=True) loaders_scratch = {'train': dataloader_train, 'valid': dataloader_valid, 'test': dataloader_test} ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**:- All datasets are center cropped to 128x128. This size is large enough to capture the important features while keeping the input compact- The training dataset is augmented via randomized rotation, cropping and flipping. Samples of the augmented results were checked to make sure the randomness does not cause loss of features in the images (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. ###Code # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN # image input: 128x128x3 # 4 convolution layers self.conv1 = nn.Conv2d(3, 8, 3, padding=1, stride=1) self.conv2 = nn.Conv2d(8, 64, 3, padding=1, stride=1) self.conv3 = nn.Conv2d(64, 256, 3, padding=1, stride=1) self.conv4 = nn.Conv2d(256, 512, 3, padding=1, stride=1) # down-sampling by factor of 2 or 4: 128 ->32->8->4->2 self.maxpool2 = nn.MaxPool2d(2, 2) self.maxpool4 = nn.MaxPool2d(4, 4) # 3 linear layers self.fc1 = nn.Linear(2**2*512, 512) self.fc2 = nn.Linear(512, 1024) self.fc3 = nn.Linear(1024, 133) # total # of classes = 133 according to https://knowledge.udacity.com/questions/60541 def forward(self, x): ## Define forward behavior # Convolutions network x = self.conv1(x) x = F.relu(x) x = self.maxpool4(x) x = self.conv2(x) x = F.relu(x) x = self.maxpool4(x) x = self.conv3(x) x = F.relu(x) x = self.maxpool2(x) x = self.conv4(x) x = F.relu(x) x = self.maxpool2(x) # flatten input before Linear layer x = x.view(x.size(0), -1) # Fully connected network; pass training=self.training so dropout is disabled in eval mode x = self.fc1(x) x = F.relu(x) x = F.dropout(x, p=0.2, training=self.training) x = self.fc2(x) x = F.relu(x) x = F.dropout(x, p=0.2, training=self.training) x = self.fc3(x) return x #-#-# You do NOT have to modify the code below this line.
#-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ __Convolution Network__- __The input depth of the convolution layer - equals 3 corresponding to image RGB format__ - conv1 input channel size can only be 3- __The input size is 128x128 given the data augmentation configuration, therefore:__ - Kernel size of 3x3 is chosen - To retain the output size of each convolution layer, a padding of 1 and a stride of 1 are selected - To down-sample the spatial dimensions between convolution layers, maxpool is used with factors of 2 and 4 - The output size of the convolution layers can be calculated from (W_in - F + 2P)/S + 1 and the division factors of the maxpool layers - With 4 convolution layers and 4 maxpool layers, the final convolution output size is 512x2x2 (features of the images are extracted into depth) __Linear Network__- __The output size of the linear layer (fc3) - equals 133 classes__ - fc3 output channel size can only be 133- __The linear network has 3 layers with input of 2048 (512x2x2), and output of 133__ - A standard 3-layer structure is selected to interpret sufficient non-linear features from the convolution layers - Between each layer there is a dropout with 20% probability to prevent overfitting - The ReLU function is used for more stable gradient descent (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. ###Code ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr=0.1) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like # reset gradients optimizer.zero_grad() output = model(data) loss = criterion_scratch(output, target) loss.backward() optimizer_scratch.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion_scratch(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(20, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') ###Output Epoch: 1 Training Loss: 3.492725 Validation Loss: 3.830873 Validation loss decreased (inf --> 3.830873). Saving model ... Epoch: 2 Training Loss: 3.435636 Validation Loss: 3.947403 Epoch: 3 Training Loss: 3.390325 Validation Loss: 3.692616 Validation loss decreased (3.830873 --> 3.692616). Saving model ... Epoch: 4 Training Loss: 3.312861 Validation Loss: 3.664873 Validation loss decreased (3.692616 --> 3.664873). Saving model ... Epoch: 5 Training Loss: 3.290631 Validation Loss: 3.741529 Epoch: 6 Training Loss: 3.245202 Validation Loss: 3.702006 Epoch: 7 Training Loss: 3.226642 Validation Loss: 3.660977 Validation loss decreased (3.664873 --> 3.660977). Saving model ... Epoch: 8 Training Loss: 3.209920 Validation Loss: 3.707680 Epoch: 9 Training Loss: 3.211429 Validation Loss: 3.663074 Epoch: 10 Training Loss: 3.212334 Validation Loss: 3.922034 Epoch: 11 Training Loss: 3.223460 Validation Loss: 3.930158 Epoch: 12 Training Loss: 3.216934 Validation Loss: 3.774217 Epoch: 13 Training Loss: 3.228151 Validation Loss: 4.054370 Epoch: 14 Training Loss: 3.377777 Validation Loss: 4.032840 Epoch: 15 Training Loss: 3.314188 Validation Loss: 3.844962 Epoch: 16 Training Loss: 3.375702 Validation Loss: 4.140683 Epoch: 17 Training Loss: 3.424538 Validation Loss: 3.909958 Epoch: 18 Training Loss: 3.499899 Validation Loss: 3.926710 Epoch: 19 Training Loss: 3.551004 Validation Loss: 3.907351 Epoch: 20 Training Loss: 3.578668 Validation Loss: 4.058715 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. 
Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code # load the model that got the best validation accuracy model_scratch.load_state_dict(torch.load('model_scratch.pt')) def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('batch #:', batch_idx, '| correct: ', correct, end='\r') # if batch_idx > 10: break print('') print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output batch #: 83 | correct: 128.0 Test Loss: 3.703900 Test Accuracy: 15% (128/836) ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. ###Code ## TODO: Specify data loaders # data augmentation image_size = 128 train_transforms = transforms.Compose([ transforms.Resize(image_size), transforms.CenterCrop(image_size), transforms.RandomRotation(30), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) transform = transforms.Compose([ transforms.Resize(image_size), transforms.CenterCrop(image_size), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # load image data dataset_train = datasets.ImageFolder('/data/dog_images/train', transform=train_transforms) dataset_valid = datasets.ImageFolder('/data/dog_images/valid', transform=transform) dataset_test = datasets.ImageFolder('/data/dog_images/test', transform=transform) # data loaders batch_size = 10 dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=batch_size, shuffle=True) dataloader_valid = torch.utils.data.DataLoader(dataset_valid, batch_size=batch_size, shuffle=True) dataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=batch_size, shuffle=True) loaders_transfer = {'train': dataloader_train, 'valid': dataloader_valid, 'test': dataloader_test} ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. 
Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code ## TODO: Specify model architecture model_transfer = VGG16 for param in model_transfer.parameters(): param.requires_grad = False # note: layer names must be unique; a duplicated 'relu' key in an OrderedDict silently drops a layer classifier_transfer = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(8192, 512)), ('relu1', nn.ReLU()), ('dropout', nn.Dropout(0.2)), ('fc2', nn.Linear(512, 1024)), ('relu2', nn.ReLU()), ('fc3', nn.Linear(1024, 133)) ])) model_transfer.classifier = classifier_transfer # check if CUDA is available use_cuda = torch.cuda.is_available() if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ __Architecture__- The network utilizes the VGG16 architecture, given that VGG16 has been pre-trained on the large ImageNet dataset, which includes many dog breeds.- The classifier is replaced with a customized 3-layer linear network.__Steps__- Freeze the pre-trained CNN parameters to transfer the feature detection capability from VGG16- The classifier is defined with a standard 3 layers, since this setup showed good backpropagation stability in the prior fully customized design- Classifier input channel size is 8192 to comply with the VGG16 CNN output- Classifier output channel size is 133 for all dog breed classes- ReLU activation is used with a 20% dropout rate (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.html#loss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code criterion_transfer = nn.CrossEntropyLoss() optimizer_transfer = optim.SGD(model_transfer.classifier.parameters(), lr = 0.001) # load the model that got the best validation accuracy model_transfer.load_state_dict(torch.load('model_transfer.pt')) def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like # reset gradients optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## update the average validation loss output = model(data) loss = criterion(output, target) valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss <= valid_loss_min: print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model n_epochs = 20 model_transfer = train(n_epochs, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt') ###Output Epoch: 1 Training Loss: 0.676972 Validation Loss: 0.802606 Validation loss decreased (inf --> 0.802606). Saving model ... Epoch: 2 Training Loss: 0.642278 Validation Loss: 0.765266 Validation loss decreased (0.802606 --> 0.765266). Saving model ... Epoch: 3 Training Loss: 0.624442 Validation Loss: 0.778717 Epoch: 4 Training Loss: 0.626051 Validation Loss: 0.775374 Epoch: 5 Training Loss: 0.615363 Validation Loss: 0.787588 Epoch: 6 Training Loss: 0.581492 Validation Loss: 0.791289 Epoch: 7 Training Loss: 0.574814 Validation Loss: 0.799098 Epoch: 8 Training Loss: 0.536953 Validation Loss: 0.771792 Epoch: 9 Training Loss: 0.542160 Validation Loss: 0.754562 Validation loss decreased (0.765266 --> 0.754562). Saving model ... 
Epoch: 10 Training Loss: 0.519733 Validation Loss: 0.772971 Epoch: 11 Training Loss: 0.500314 Validation Loss: 0.781639 Epoch: 12 Training Loss: 0.499814 Validation Loss: 0.756972 Epoch: 13 Training Loss: 0.468367 Validation Loss: 0.782448 Epoch: 14 Training Loss: 0.458559 Validation Loss: 0.787003 Epoch: 15 Training Loss: 0.454069 Validation Loss: 0.757224 Epoch: 16 Training Loss: 0.446779 Validation Loss: 0.778518 Epoch: 17 Training Loss: 0.428292 Validation Loss: 0.774010 Epoch: 18 Training Loss: 0.430693 Validation Loss: 0.769264 Epoch: 19 Training Loss: 0.406600 Validation Loss: 0.755385 Epoch: 20 Training Loss: 0.409898 Validation Loss: 0.754279 Validation loss decreased (0.754562 --> 0.754279). Saving model ... ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code # load the model that got the best validation accuracy model_transfer.load_state_dict(torch.load('model_transfer.pt')) def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('batch #:', batch_idx, '| correct: ', correct, end='\r') print('') print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output batch #: 83 | correct: 652.0 Test Loss: 0.791529 Test Accuracy: 77% (652/836) ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in loaders_transfer['train'].dataset.classes] # load the model that got the best validation accuracy model_transfer.load_state_dict( torch.load('model_transfer.pt', map_location='cpu')) def predict_breed_transfer(img_path): # load the image and return the predicted breed image = im_load(img_path, 128) output = model_transfer(image) pred = output.data.max(1, keepdim=True)[1] return class_names[pred] ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. 
Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `dog_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code def im_convert(tensor): """ Display a tensor as an image. """ image = tensor.to("cpu").clone().detach() image = image.numpy().squeeze() image = image.transpose(1,2,0) image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406)) image = image.clip(0, 1) return image def im_load(img_path, max_size): image = Image.open(img_path).convert('RGB') if max(image.size) > max_size: size = max_size else: size = max(image.size) in_transform = transforms.Compose([ transforms.Resize(size), transforms.CenterCrop(size), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) image = in_transform(image).unsqueeze(0) return image def im_display(image): fig, ax1 = plt.subplots(figsize=(10, 5)) # content and style ims side-by-side ax1.imshow(im_convert(image)) plt.show() def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 def dog_detector(img_path): pred = VGG16(im_load(img_path, 224)) pred = torch.argmax(pred).numpy() return (pred >= 151 and pred <= 268) # true/false ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither if dog_detector(img_path): breed = predict_breed_transfer(img_path) print('Hello dog!') im_display(im_load(img_path, 128)) print('You look like a... ', breed) elif face_detector(img_path): breed = predict_breed_transfer(img_path) print('Hello human!') im_display(im_load(img_path, 128)) print('You look like a... ', breed) else: print('Error: Neither human nor dog detected!') print(end='\n') ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm on at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ 1. The output is much better than expected2. Improvements: - Higher resolution for the input image transform - More classifier layers could be used - More nodes in each layer could be used3. Limitations: - These improvements, however, might also consume more computational power to train ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed.
## suggested code, below rand = np.random.randint(0, 5000-3) numofpic = 3 for file in np.hstack((human_files[rand:rand+numofpic], dog_files[rand:rand+numofpic])): run_app(file) ###Output Hello human!
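###Markdown One possible refinement of `run_app`, related to the improvement points in the answer above (my own sketch, not part of the original submission): report the top-3 candidate breeds with their softmax probabilities instead of a single label. It assumes the `model_transfer`, `im_load`, `class_names` and `use_cuda` objects defined in the cells above; the helper name and the example image path are hypothetical. ###Code
import torch
import torch.nn.functional as F

def predict_topk_breeds(img_path, k=3):
    # hypothetical helper, not part of the original notebook
    image = im_load(img_path, 128)
    if use_cuda:
        image = image.cuda()
    model_transfer.eval()
    with torch.no_grad():
        probs = F.softmax(model_transfer(image), dim=1)
    top_probs, top_idxs = probs.topk(k, dim=1)
    # move results back to the CPU and pair each index with its breed name
    return [(class_names[i], p.item())
            for i, p in zip(top_idxs.squeeze(0).cpu(), top_probs.squeeze(0).cpu())]

# example usage (image path is illustrative only):
# for breed, p in predict_topk_breeds('./test_images/my_dog.jpg'):
#     print(f'{breed}: {p:.3f}')
###Output _____no_output_____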
_notebooks/2021-01-15-error-handling.ipynb
###Markdown Error handling- hide: false- toc: false- comments: true- categories: [python] ###Code %config InlineBackend.figure_format = 'retina' %load_ext autoreload %autoreload 2 ###Output _____no_output_____ ###Markdown A brief reminder of different ways to handle errors in Python. Print message only ###Code def divide(a, b): try: return a / b except ZeroDivisionError as e: print(e) divide(4, 0) ###Output division by zero ###Markdown Raise original error (traceback and message) ###Code def divide(a, b): try: return a / b except ZeroDivisionError as e: raise e divide(4, 0) ###Output _____no_output_____ ###Markdown Raise different error type to be clearer that invalid value was supplied ###Code def divide(a, b): try: return a / b except ZeroDivisionError as e: raise ValueError('invalid inputs; division by zero is undefined') divide(4, 0) ###Output _____no_output_____
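###Markdown A related pattern worth adding (my addition, hedged): when raising a different error type, Python's exception chaining via `raise ... from e` keeps the original `ZeroDivisionError` attached as `__cause__`, so the traceback shows both errors instead of discarding the original one. ###Code
def divide(a, b):
    try:
        return a / b
    except ZeroDivisionError as e:
        # chain the original exception so its traceback is preserved
        raise ValueError('invalid inputs; division by zero is undefined') from e

divide(4, 0)
###Output _____no_output_____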
src/jupyter_notebooks/quantify_saver_vs_spender.ipynb
###Markdown Quantify the number of savers and spenders ###Code import pandas as pd import numpy as np from datetime import datetime import re from tqdm import tqdm import os df_money = pd.read_csv('../../data/asking_money_sample.csv') df_no_money = pd.read_csv('../../data/asking_no_money_sample.csv') df_some_money = pd.read_csv('../../data/asking_some_money_sample.csv') df = pd.concat([df_money, df_some_money, df_no_money], ignore_index=False) def convert_datetime(x): if not isinstance(x, str): return x # Ignore time zone info if 'T' in x and 'Z' in x: x = x.split('T')[0] try: temp = datetime.strptime(x, '%Y-%m-%d %H:%M:%S') except: try: temp = datetime.strptime(x, '%Y-%m-%d') except: try: temp = datetime.strptime(x, '%m/%d/%Y') except: try: temp = datetime.strptime(x, '%m/%d/%Y %H:%M:%S') except: temp = datetime.strptime(x, '%m/%d/%Y %H:%M') return temp # return 0 for neither saver or spender, 1 for saver, and 2 for spender def analyze_oc_date(url): path = '../data_mining/files/20190719_OpenCollective CSV' url_name = url.split('#')[0].rstrip() url_name = url_name.split('/')[-1] url_name = url_name.split(' ')[0] directory = os.fsencode(path) for file in os.listdir(directory): # Open csv file file = file.decode("utf-8") name = file.split('--')[0] if name == url_name.lower(): df_oc = pd.read_csv(path+'/'+file) df_oc['Transaction Date'] = df_oc['Transaction Date'].apply(convert_datetime) total_earning = df_oc[(df_oc['Transaction Amount'] > 0)& (df_oc['Transaction Date'] <= '2019-05-23')]['Transaction Amount'].sum() total_expense = df_oc[(df_oc['Transaction Amount'] < 0)& (df_oc['Transaction Date'] <= '2019-05-23')]['Transaction Amount'].sum()*-1 if total_expense < 0.25*total_earning: return 1 elif total_expense > 0.75*total_earning: return 2 else: return 0 return 0 dicts = df.to_dict('records') results = {'saver':0, 'spender':0, 'neither':0} for row in dicts: if not pd.isna(row['opencollective_url']): res = analyze_oc_date(row['opencollective_url']) if res == 1: print('saver:', row['slug']) results['saver'] += 1 elif res == 2: print('spender:', row['slug']) results['spender'] += 1 else: results['neither'] += 1 results ###Output _____no_output_____
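###Markdown As a possible refactor (my own sketch, not part of the original analysis): the 25%/75% spend-ratio rule is buried inside the file-scanning loop in `analyze_oc_date`. Pulling it into a small pure function makes the thresholds easy to sanity-check without any OpenCollective CSVs on disk; the function name is mine. ###Code
def classify_spend_ratio(total_earning, total_expense, low=0.25, high=0.75):
    # same return codes as analyze_oc_date: 1 = saver, 2 = spender, 0 = neither
    if total_expense < low * total_earning:
        return 1
    elif total_expense > high * total_earning:
        return 2
    return 0

# quick sanity checks on synthetic totals
assert classify_spend_ratio(1000, 100) == 1   # spent 10% -> saver
assert classify_spend_ratio(1000, 900) == 2   # spent 90% -> spender
assert classify_spend_ratio(1000, 500) == 0   # spent 50% -> neither
###Output _____no_output_____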
010_finite_diff.ipynb
###Markdown Finite difference utilsSome simple utilities (and tests) for finite differences. ###Code import numpy as np import matplotlib.pyplot as plt from scipy.special import factorial # define functions for building a stencil def _get_dx_factors(num_points): """ returns the factors of dx for each sampled point. E.g., f(x - dx) + f(x) + f(x + dx) would return -1, 0, 1 """ assert num_points % 2 == 1, 'must use an odd number of points!' forward = np.arange(1, num_points // 2 + 1) backward = - forward[::-1] return np.concatenate([backward, [0], forward]) #export def get_stencil(derivative: int, num_points: int, dx: float = 1.0) -> np.ndarray: """ Create a symmetric stencil for a defined derivative with a certain number of points. """ dx_factors = _get_dx_factors(num_points) power = np.power.outer(dx_factors, np.arange(len(dx_factors))).T fact = np.atleast_2d(factorial(np.abs(dx_factors))) kernel = power / fact y = np.zeros(len(kernel)) y[derivative] = factorial(derivative) / (dx ** derivative) out = np.linalg.inv(kernel) @ y return out #export def apply_stencil(array, stencil): """Apply the stencil via convolution (the stencil is reversed because convolution flips its kernel).""" out = np.convolve(array, stencil[::-1], mode='same') return out ###Output _____no_output_____ ###Markdown Test the stencil for known inputs ###Code assert np.allclose(get_stencil(1, 3), [-0.5, 0, 0.5]) assert np.allclose(get_stencil(2, 3), [1, -2, 1]) ###Output _____no_output_____ ###Markdown Stencil TestsTest using numerical derivatives of sin for the first and second derivative.$$f = \sin(ax + b)$$$$f' = a \cos(ax + b)$$$$f'' = -a^2 \sin(ax + b)$$ ###Code # x = np.linspace(0, 401, num=400) x = np.arange(0, 80) a = np.pi/16 b = 0 f = np.sin(a * x + b) fp = a * np.cos(a * x + b) fpp = -a**2 * np.sin(a * x + b) # calculate dx dx = np.mean(x[1:] - x[:-1]) ###Output _____no_output_____ ###Markdown Approximate first derivative ###Code first_stencil = get_stencil(1, 3, dx=dx) fp_estimated = apply_stencil(f, first_stencil) plt.plot(fp, label='analytical') plt.plot(fp_estimated, label='3-point estimate') plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Approximate second derivative ###Code second_stencil = get_stencil(2, 3, dx=dx) fpp_estimated = apply_stencil(f, second_stencil) plt.plot(fpp, label='analytical') plt.plot(fpp_estimated, label='3-point estimate') plt.legend() ###Output _____no_output_____
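###Markdown `get_stencil` is not limited to three points. As a quick, hedged accuracy check (reusing `x`, `f`, `fp` and `dx` from the cells above), a symmetric 5-point stencil should approximate the first derivative more closely than the 3-point one, at least away from the array edges where `np.convolve`'s zero padding distorts the result. ###Code
stencil3 = get_stencil(1, 3, dx=dx)
stencil5 = get_stencil(1, 5, dx=dx)

err3 = np.abs(apply_stencil(f, stencil3) - fp)
err5 = np.abs(apply_stencil(f, stencil5) - fp)

# compare interior errors only; the boundary points are contaminated by padding
print('3-point max interior error:', err3[3:-3].max())
print('5-point max interior error:', err5[3:-3].max())
###Output _____no_output_____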
B - A Closer Look at Word Embeddings.ipynb
###Markdown B - A Closer Look at Word EmbeddingsWe have very briefly covered how word embeddings (also known as word vectors) are used in the tutorials. In this appendix we'll have a closer look at these embeddings and find some (hopefully) interesting results.Embeddings transform a one-hot encoded vector (a vector that is 0 in all elements except one, which is 1) into a much smaller dimension vector of real numbers. The one-hot encoded vector is also known as a *sparse vector*, whilst the real valued vector is known as a *dense vector*. The key concept in these word embeddings is that words that appear in similar _contexts_ appear nearby in the vector space, i.e. the Euclidean distance between these two word vectors is small. By context here, we mean the surrounding words. For example, in the sentences "I purchased some items at the shop" and "I purchased some items at the store" the words 'shop' and 'store' appear in the same context and thus should be close together in vector space.You may have also heard about *word2vec*. *word2vec* is an algorithm (actually a bunch of algorithms) that calculates word vectors from a corpus. In this appendix we use *GloVe* vectors, *GloVe* being another algorithm to calculate word vectors. If you want to know how *word2vec* works, check out a two part series [here](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) and [here](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/), and if you want to find out more about *GloVe*, check the website [here](https://nlp.stanford.edu/projects/glove/).In PyTorch, we use word vectors with the `nn.Embedding` layer, which takes a _**[sentence length, batch size]**_ tensor and transforms it into a _**[sentence length, batch size, embedding dimensions]**_ tensor.In tutorial 2 onwards, we also used pre-trained word embeddings (specifically the GloVe vectors) provided in TorchText. These embeddings have been trained on a gigantic corpus. We can use these pre-trained vectors within any of our models, with the idea that as they have already learned the context of each word, they give us a better starting point for our word vectors. This usually leads to faster training time and/or improved accuracy.In this appendix we won't be training any models; instead we'll be looking at the word embeddings and finding a few interesting things about them.A lot of the code from the first half of this appendix is taken from [here](https://github.com/spro/practical-pytorch/blob/master/glove-word-vectors/glove-word-vectors.ipynb). For more information about word embeddings, go [here](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/). Loading the GloVe vectorsFirst, we'll load the GloVe vectors. The `name` field specifies what the vectors have been trained on, here the `6B` means a corpus of 6 billion words. The `dim` argument specifies the dimensionality of the word vectors. GloVe vectors are available in 50, 100, 200 and 300 dimensions. There are also `42B` and `840B` GloVe vectors, however they are only available at 300 dimensions. ###Code import torchtext.vocab glove = torchtext.vocab.GloVe(name='6B', dim=100) print(f'There are {len(glove.itos)} words in the vocabulary') ###Output There are 400000 words in the vocabulary ###Markdown As shown above, there are 400,000 unique words in the GloVe vocabulary. These are the most common words found in the corpus the vectors were trained on.
**In this set of GloVe vectors, every single word is lower-case only.**`glove.vectors` is the actual tensor containing the values of the embeddings. Each row corresponds to a word, and each word vector has 100 columns. ###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown We can see what the word associated with each row is by checking the `itos` (int to string) list. Below implies that row 0 is the vector associated with the word 'the', row 1 for ',' (comma), etc. ###Code glove.itos[:10] ###Output _____no_output_____ ###Markdown We can also use the `stoi` (string to int) dictionary, in which we input a word and receive the associated integer/index. If you try to get the index of a word that is not in the vocabulary, you receive an error. ###Code glove.stoi['the'] ###Output _____no_output_____ ###Markdown We can get the vector of a word by first getting the integer associated with it and then indexing into the word embedding tensor with that index. ###Code glove.vectors[glove.stoi['the']].shape ###Output _____no_output_____ ###Markdown We'll be doing this a lot, so we'll create a function that takes in word embeddings and a word and returns the associated vector. It'll also throw an error if the word doesn't exist in the vocabulary. ###Code def get_vector(embeddings, word): assert word in embeddings.stoi, f'*{word}* is not in the vocab!' return embeddings.vectors[embeddings.stoi[word]] ###Output _____no_output_____ ###Markdown As before, we use a word to get the associated vector. ###Code get_vector(glove, 'the').shape ###Output _____no_output_____ ###Markdown Similar ContextsNow let's start looking at the contexts of different words. If we want to find the words similar to a certain input word, we first find the vector of this input word, then we scan through our vocabulary finding any vectors similar to this input word vector.The function below returns the closest 10 words to an input word vector: ###Code import torch def closest_words(embeddings, vector, n=10): distances = [(w, torch.dist(vector, get_vector(embeddings, w)).item()) for w in embeddings.itos] return sorted(distances, key = lambda w: w[1])[:n] ###Output _____no_output_____ ###Markdown Let's try it out with 'korea'. The closest word is the word 'korea' itself (not very interesting), however all of the words are related in some way. Pyongyang is the capital of North Korea, DPRK is the official name of North Korea, etc.Interestingly, we also get 'Japan' and 'China'. These are countries, however they are also countries that are geographically near Korea. ###Code closest_words(glove, get_vector(glove, 'korea')) ###Output _____no_output_____ ###Markdown Looking at another country, India, we also get nearby countries: Thailand, Malaysia and Sri Lanka (as two separate words). Australia is relatively close to India, but Thailand and Malaysia are closer; could it be due to India and Australia appearing in the context of cricket matches together? ###Code closest_words(glove, get_vector(glove, 'india')) ###Output _____no_output_____ ###Markdown We'll also create another function that will nicely print out the tuples returned by our `closest_words` function. ###Code def print_tuples(tuples): for w, d in tuples: print(f'({d:02.04f}) {w}') ###Output _____no_output_____ ###Markdown A final word to look at, 'sports'. As we can see, the closest words are mostly sports themselves.
###Code print_tuples(closest_words(glove, get_vector(glove, 'sports'))) ###Output (0.0000) sports (3.5875) sport (4.4590) soccer (4.6508) basketball (4.6561) baseball (4.8028) sporting (4.8763) football (4.9624) professional (4.9824) entertainment (5.0975) media ###Markdown AnalogiesAnother property of word embeddings is that they can be operated on just as any standard vector and give interesting results.We'll show an example of this first, and then explain it: ###Code def analogy(embeddings, word1, word2, word3, n=5): candidate_words = closest_words(embeddings, get_vector(embeddings, word2) - get_vector(embeddings, word1) + get_vector(embeddings, word3), n+3) candidate_words = [x for x in candidate_words if x[0] not in [word1, word2, word3]][:n] print(f'{word1} is to {word2} as {word3} is to...') return candidate_words print_tuples(analogy(glove, 'man', 'king', 'woman')) ###Output man is to king as woman is to... (4.0811) queen (4.6429) monarch (4.9055) throne (4.9216) elizabeth (4.9811) prince ###Markdown This is the canonical example which shows off this property of word embeddings. So why does it work? Why does the vector of 'woman' added to the vector of 'king' minus the vector of 'man' give us 'queen'?If we think about it, the vector calculated from 'king' minus 'man' gives us a "royalty vector". This is the vector associated with traveling from a man to his royal counterpart, a king. If we add this "royalty vector" to 'woman', this should travel to her royal equivalent, which is queen!We can do this with other analogies too. For example, this gets an "acting career vector": ###Code print_tuples(analogy(glove, 'man', 'actor', 'woman')) ###Output man is to actor as woman is to... (2.8133) actress (5.0039) comedian (5.1399) actresses (5.2773) starred (5.3085) screenwriter ###Markdown For a "baby animal vector": ###Code print_tuples(analogy(glove, 'cat', 'kitten', 'dog')) ###Output cat is to kitten as dog is to... (3.8146) puppy (4.2944) rottweiler (4.5888) puppies (4.6086) pooch (4.6520) pug ###Markdown A "capital city vector": ###Code print_tuples(analogy(glove, 'france', 'paris', 'england')) ###Output france is to paris as england is to... (4.1426) london (4.4938) melbourne (4.7087) sydney (4.7630) perth (4.7952) birmingham ###Markdown A "musician's genre vector": ###Code print_tuples(analogy(glove, 'elvis', 'rock', 'eminem')) ###Output elvis is to rock as eminem is to... (5.6597) rap (6.2057) rappers (6.2161) rapper (6.2444) punk (6.2690) hop ###Markdown And an "ingredient vector": ###Code print_tuples(analogy(glove, 'beer', 'barley', 'wine')) ###Output beer is to barley as wine is to... (5.6021) grape (5.6760) beans (5.8174) grapes (5.9035) lentils (5.9454) figs ###Markdown Correcting Spelling MistakesVery recently, someone found out that you can actually use word embeddings to correct spelling mistakes! We'll put their findings into code and briefly explain them, but to read more about this, check out the [original thread](http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411) and the associated [write-up](https://blog.usejournal.com/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26).First, we need to load up the much larger vocabulary GloVe vectors; this is because the spelling mistakes do not appear in the smaller vocabulary. **Note**: these vectors are very large (~2GB), so watch out if you have a limited internet connection.
###Code glove = torchtext.vocab.GloVe(name='840B', dim=300) ###Output _____no_output_____ ###Markdown Checking the vocabulary size of these embeddings, we can see we now have over 2 million unique words in our vocabulary! ###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown As the vectors were trained with a much larger vocabulary on a larger corpus of text, the words that appear are a little different. Notice how the words 'north', 'south', 'pyongyang' and 'dprk' no longer appear in the most closest words to 'korea'. ###Code print_tuples(closest_words(glove, get_vector(glove, 'korea'))) ###Output (0.0000) korea (3.9857) taiwan (4.4022) korean (4.9016) asia (4.9593) japan (5.0721) seoul (5.4058) thailand (5.6025) singapore (5.7010) russia (5.7240) hong ###Markdown Our first step to correcting spelling mistakes is looking at the vector for a misspelling of the word 'reliable'. ###Code print_tuples(closest_words(glove, get_vector(glove, 'relieable'))) ###Output (0.0000) relieable (5.0366) relyable (5.2610) realible (5.4719) realiable (5.5402) relable (5.5917) relaible (5.6412) reliabe (5.8802) relaiable (5.9593) stabel (5.9981) consitant ###Markdown Notice how the correct spelling of "reliable" does not appear in the top 10 closest words. Surely the misspellings of a word should appear next to the correct spelling of the word as they appear in the same context, right? The hypothesis is that misspellings of a certain word are all shifted away from the correct spelling. This is because articles of text that contain spelling mistakes are usually written in an informal manner (such as tweets/blog posts), thus spelling errors will appear together as they appear in context of informal articles.Similar to how we created analogies before, we can create a "correct spelling" vector. This time, instead of using a single example to create our vector, we'll use the average of multiple examples. This will hopefully give better accuracy!We first create a vector for the correct spelling, 'reliable', then calculate the difference between the "reliable vector" and each of the 8 misspellings of 'reliable'. ###Code reliable_vector = get_vector(glove, 'reliable') reliable_misspellings = ['relieable', 'relyable', 'realible', 'realiable', 'relable', 'relaible', 'reliabe', 'relaiable'] diff_reliable = [(reliable_vector - get_vector(glove, s)).unsqueeze(0) for s in reliable_misspellings] ###Output _____no_output_____ ###Markdown We take the average of these 8 'difference from reliable' vectors to get our "misspelling vector". 
###Markdown Similar to how we created analogies before, we can create a "correct spelling" vector. This time, instead of using a single example to create our vector, we'll use the average of multiple examples, which should give better accuracy.

We first create a vector for the correct spelling, 'reliable', then calculate the difference between the "reliable vector" and each of the 8 misspellings of 'reliable'. As we are going to concatenate these 8 difference tensors together, we unsqueeze a "batch" dimension onto each of them. ###Code reliable_vector = get_vector(glove, 'reliable')

reliable_misspellings = ['relieable', 'relyable', 'realible', 'realiable', 
                         'relable', 'relaible', 'reliabe', 'relaiable']

#difference between the correct spelling and each misspelling, with a
#"batch" dimension added so the tensors can be concatenated
diff_reliable = [(reliable_vector - get_vector(glove, s)).unsqueeze(0) 
                 for s in reliable_misspellings] ###Output _____no_output_____ ###Markdown We take the average of these 8 'difference from reliable' vectors to get our "misspelling vector". ###Code misspelling_vector = torch.cat(diff_reliable, dim=0).mean(dim=0) ###Output _____no_output_____ ###Markdown We can now correct other spelling mistakes using this "misspelling vector" by finding the closest words to the sum of a misspelled word's vector and the "misspelling vector".

For a misspelling of "because": ###Code print_tuples(closest_words(glove, get_vector(glove, 'becuase') + misspelling_vector)) ###Output (6.1090) because (6.4250) even (6.4358) fact (6.4914) sure (6.5094) though (6.5601) obviously (6.5682) reason (6.5856) if (6.6099) but (6.6415) why ###Markdown For a misspelling of "definitely": ###Code print_tuples(closest_words(glove, get_vector(glove, 'defintiely') + misspelling_vector)) ###Output (5.4070) definitely (5.5643) certainly (5.7192) sure (5.8152) well (5.8588) always (5.8812) also (5.9557) simply (5.9667) consider (5.9821) probably (5.9948) definately ###Markdown For a misspelling of "consistent": ###Code print_tuples(closest_words(glove, get_vector(glove, 'consistant') + misspelling_vector)) ###Output (5.9641) consistent (6.3674) reliable (7.0195) consistant (7.0299) consistently (7.1605) accurate (7.2737) fairly (7.3037) good (7.3520) reasonable (7.3801) dependable (7.4027) ensure ###Markdown For a misspelling of "package": ###Code print_tuples(closest_words(glove, get_vector(glove, 'pakage') + misspelling_vector)) ###Output (6.6117) package (6.9315) packages (7.0195) pakage (7.0911) comes (7.1241) provide (7.1469) offer (7.1861) reliable (7.2431) well (7.2434) choice (7.2453) offering
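###Markdown To make this recipe reusable, we can wrap the shift-then-search steps in a small helper. This wrapper is our own sketch (the name `correct_spelling` does not come from the original write-up); it simply packages what we did by hand above. ###Code def correct_spelling(embeddings, word, misspelling_vector, n = 10):
    
    #shift the misspelled word's vector by the average "misspelling vector",
    #then look up the closest words to the shifted vector
    shifted = get_vector(embeddings, word) + misspelling_vector
    
    return closest_words(embeddings, shifted, n)

print_tuples(correct_spelling(glove, 'becuase', misspelling_vector)) ###Output _____no_output_____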
We can use these pre-trained vectors within any of our models, with the idea that as they have already learned the context of each word they will give us a better starting point for our word vectors. This usually leads to faster training time and/or improved accuracy.In this appendix we won't be training any models, instead we'll be looking at the word embeddings and finding a few interesting things about them.A lot of the code from the first half of this appendix is taken from [here](https://github.com/spro/practical-pytorch/blob/master/glove-word-vectors/glove-word-vectors.ipynb). For more information about word embeddings, go [here](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/). Loading the GloVe vectorsFirst, we'll load the GloVe vectors. The `name` field specifies what the vectors have been trained on, here the `6B` means a corpus of 6 billion words. The `dim` argument specifies the dimensionality of the word vectors. GloVe vectors are available in 50, 100, 200 and 300 dimensions. There is also a `42B` and `840B` glove vectors, however they are only available at 300 dimensions. ###Code import torchtext.vocab glove = torchtext.vocab.GloVe(name = '6B', dim = 100) print(f'There are {len(glove.itos)} words in the vocabulary') ###Output There are 400000 words in the vocabulary ###Markdown As shown above, there are 400,000 unique words in the GloVe vocabulary. These are the most common words found in the corpus the vectors were trained on. **In these set of GloVe vectors, every single word is lower-case only.**`glove.vectors` is the actual tensor containing the values of the embeddings. ###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown We can see what word is associated with each row by checking the `itos` (int to string) list. Below implies that row 0 is the vector associated with the word 'the', row 1 for ',' (comma), row 2 for '.' (period), etc. ###Code glove.itos[:10] ###Output _____no_output_____ ###Markdown We can also use the `stoi` (string to int) dictionary, in which we input a word and receive the associated integer/index. If you try get the index of a word that is not in the vocabulary, you receive an error. ###Code glove.stoi['the'] ###Output _____no_output_____ ###Markdown We can get the vector of a word by first getting the integer associated with it and then indexing into the word embedding tensor with that index. ###Code glove.vectors[glove.stoi['the']].shape ###Output _____no_output_____ ###Markdown We'll be doing this a lot, so we'll create a function that takes in word embeddings and a word then returns the associated vector. It'll also throw an error if the word doesn't exist in the vocabulary. ###Code def get_vector(embeddings, word): assert word in embeddings.stoi, f'*{word}* is not in the vocab!' return embeddings.vectors[embeddings.stoi[word]] ###Output _____no_output_____ ###Markdown As before, we use a word to get the associated vector. ###Code get_vector(glove, 'the').shape ###Output _____no_output_____ ###Markdown Similar ContextsNow to start looking at the context of different words. If we want to find the words similar to a certain input word, we first find the vector of this input word, then we scan through our vocabulary calculating the distance between the vector of each word and our input word vector. 
We then sort these from closest to furthest away.The function below returns the closest 10 words to an input word vector: ###Code import torch def closest_words(embeddings, vector, n = 10): distances = [(word, torch.dist(vector, get_vector(embeddings, word)).item()) for word in embeddings.itos] return sorted(distances, key = lambda w: w[1])[:n] ###Output _____no_output_____ ###Markdown Let's try it out with 'korea'. The closest word is the word 'korea' itself (not very interesting), however all of the words are related in some way. Pyongyang is the capital of North Korea, DPRK is the official name of North Korea, etc.Interestingly, we also get 'Japan' and 'China', implies that Korea, Japan and China are frequently talked about together in similar contexts. This makes sense as they are geographically situated near each other. ###Code word_vector = get_vector(glove, 'korea') closest_words(glove, word_vector) ###Output _____no_output_____ ###Markdown Looking at another country, India, we also get nearby countries: Thailand, Malaysia and Sri Lanka (as two separate words). Australia is relatively close to India (geographically), but Thailand and Malaysia are closer. So why is Australia closer to India in vector space? This is most probably due to India and Australia appearing in the context of [cricket](https://en.wikipedia.org/wiki/Cricket) matches together. ###Code word_vector = get_vector(glove, 'india') closest_words(glove, word_vector) ###Output _____no_output_____ ###Markdown We'll also create another function that will nicely print out the tuples returned by our `closest_words` function. ###Code def print_tuples(tuples): for w, d in tuples: print(f'({d:02.04f}) {w}') ###Output _____no_output_____ ###Markdown A final word to look at, 'sports'. As we can see, the closest words are most of the sports themselves. ###Code word_vector = get_vector(glove, 'sports') print_tuples(closest_words(glove, word_vector)) ###Output (0.0000) sports (3.5875) sport (4.4590) soccer (4.6508) basketball (4.6561) baseball (4.8028) sporting (4.8763) football (4.9624) professional (4.9824) entertainment (5.0975) media ###Markdown AnalogiesAnother property of word embeddings is that they can be operated on just as any standard vector and give interesting results.We'll show an example of this first, and then explain it: ###Code def analogy(embeddings, word1, word2, word3, n=5): #get vectors for each word word1_vector = get_vector(embeddings, word1) word2_vector = get_vector(embeddings, word2) word3_vector = get_vector(embeddings, word3) #calculate analogy vector analogy_vector = word2_vector - word1_vector + word3_vector #find closest words to analogy vector candidate_words = closest_words(embeddings, analogy_vector, n+3) #filter out words already in analogy candidate_words = [(word, dist) for (word, dist) in candidate_words if word not in [word1, word2, word3]][:n] print(f'{word1} is to {word2} as {word3} is to...') return candidate_words print_tuples(analogy(glove, 'man', 'king', 'woman')) ###Output man is to king as woman is to... (4.0811) queen (4.6429) monarch (4.9055) throne (4.9216) elizabeth (4.9811) prince ###Markdown This is the canonical example which shows off this property of word embeddings. So why does it work? Why does the vector of 'woman' added to the vector of 'king' minus the vector of 'man' give us 'queen'?If we think about it, the vector calculated from 'king' minus 'man' gives us a "royalty vector". This is the vector associated with traveling from a man to his royal counterpart, a king. 
If we add this "royality vector" to 'woman', this should travel to her royal equivalent, which is a queen!We can do this with other analogies too. For example, this gets an "acting career vector": ###Code print_tuples(analogy(glove, 'man', 'actor', 'woman')) ###Output man is to actor as woman is to... (2.8133) actress (5.0039) comedian (5.1399) actresses (5.2773) starred (5.3085) screenwriter ###Markdown For a "baby animal vector": ###Code print_tuples(analogy(glove, 'cat', 'kitten', 'dog')) ###Output cat is to kitten as dog is to... (3.8146) puppy (4.2944) rottweiler (4.5888) puppies (4.6086) pooch (4.6520) pug ###Markdown A "capital city vector": ###Code print_tuples(analogy(glove, 'france', 'paris', 'england')) ###Output france is to paris as england is to... (4.1426) london (4.4938) melbourne (4.7087) sydney (4.7630) perth (4.7952) birmingham ###Markdown A "musician's genre vector": ###Code print_tuples(analogy(glove, 'elvis', 'rock', 'eminem')) ###Output elvis is to rock as eminem is to... (5.6597) rap (6.2057) rappers (6.2161) rapper (6.2444) punk (6.2690) hop ###Markdown And an "ingredient vector": ###Code print_tuples(analogy(glove, 'beer', 'barley', 'wine')) ###Output beer is to barley as wine is to... (5.6021) grape (5.6760) beans (5.8174) grapes (5.9035) lentils (5.9454) figs ###Markdown Correcting Spelling MistakesAnother interesting property of word embeddings is that they can actually be used to correct spelling mistakes! We'll put their findings into code and briefly explain them, but to read more about this, check out the [original thread](http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411) and the associated [write-up](https://blog.usejournal.com/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26).First, we need to load up the much larger vocabulary GloVe vectors, this is due to the spelling mistakes not appearing in the smaller vocabulary. **Note**: these vectors are very large (~2GB), so watch out if you have a limited internet connection. ###Code glove = torchtext.vocab.GloVe(name = '840B', dim = 300) ###Output _____no_output_____ ###Markdown Checking the vocabulary size of these embeddings, we can see we now have over 2 million unique words in our vocabulary! ###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown As the vectors were trained with a much larger vocabulary on a larger corpus of text, the words that appear are a little different. Notice how the words 'north', 'south', 'pyongyang' and 'dprk' no longer appear in the most closest words to 'korea'. ###Code word_vector = get_vector(glove, 'korea') print_tuples(closest_words(glove, word_vector)) ###Output (0.0000) korea (3.9857) taiwan (4.4022) korean (4.9016) asia (4.9593) japan (5.0721) seoul (5.4058) thailand (5.6025) singapore (5.7010) russia (5.7240) hong ###Markdown Our first step to correcting spelling mistakes is looking at the vector for a misspelling of the word 'reliable'. ###Code word_vector = get_vector(glove, 'relieable') print_tuples(closest_words(glove, word_vector)) ###Output (0.0000) relieable (5.0366) relyable (5.2610) realible (5.4719) realiable (5.5402) relable (5.5917) relaible (5.6412) reliabe (5.8802) relaiable (5.9593) stabel (5.9981) consitant ###Markdown Notice how the correct spelling, "reliable", does not appear in the top 10 closest words. Surely the misspellings of a word should appear next to the correct spelling of the word as they appear in the same context, right? 
The hypothesis is that misspellings of words are all equally shifted away from their correct spelling. This is because articles of text that contain spelling mistakes are usually written in an informal manner where correct spelling doesn't matter as much (such as tweets/blog posts), thus spelling errors will appear together as they appear in context of informal articles.Similar to how we created analogies before, we can create a "correct spelling" vector. This time, instead of using a single example to create our vector, we'll use the average of multiple examples. This will hopefully give better accuracy!We first create a vector for the correct spelling, 'reliable', then calculate the difference between the "reliable vector" and each of the 8 misspellings of 'reliable'. As we are going to concatenate these 8 misspelling tensors together we need to unsqueeze a "batch" dimension to them. ###Code reliable_vector = get_vector(glove, 'reliable') reliable_misspellings = ['relieable', 'relyable', 'realible', 'realiable', 'relable', 'relaible', 'reliabe', 'relaiable'] diff_reliable = [(reliable_vector - get_vector(glove, s)).unsqueeze(0) for s in reliable_misspellings] ###Output _____no_output_____ ###Markdown We take the average of these 8 'difference from reliable' vectors to get our "misspelling vector". ###Code misspelling_vector = torch.cat(diff_reliable, dim = 0).mean(dim = 0) ###Output _____no_output_____ ###Markdown We can now correct other spelling mistakes using this "misspelling vector" by finding the closest words to the sum of the vector of a misspelled word and the "misspelling vector".For a misspelling of "because": ###Code word_vector = get_vector(glove, 'becuase') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (6.1090) because (6.4250) even (6.4358) fact (6.4914) sure (6.5094) though (6.5601) obviously (6.5682) reason (6.5856) if (6.6099) but (6.6415) why ###Markdown For a misspelling of "definitely": ###Code word_vector = get_vector(glove, 'defintiely') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (5.4070) definitely (5.5643) certainly (5.7192) sure (5.8152) well (5.8588) always (5.8812) also (5.9557) simply (5.9667) consider (5.9821) probably (5.9948) definately ###Markdown For a misspelling of "consistent": ###Code word_vector = get_vector(glove, 'consistant') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (5.9641) consistent (6.3674) reliable (7.0195) consistant (7.0299) consistently (7.1605) accurate (7.2737) fairly (7.3037) good (7.3520) reasonable (7.3801) dependable (7.4027) ensure ###Markdown For a misspelling of "package": ###Code word_vector = get_vector(glove, 'pakage') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (6.6117) package (6.9315) packages (7.0195) pakage (7.0911) comes (7.1241) provide (7.1469) offer (7.1861) reliable (7.2431) well (7.2434) choice (7.2453) offering ###Markdown B - A Closer Look at Word EmbeddingsWe have very briefly covered how word embeddings (also known as word vectors) are used in the tutorials. In this appendix we'll have a closer look at these embeddings and find some (hopefully) interesting results.Embeddings transform a one-hot encoded vector (a vector that is 0 in elements except one, which is 1) into a much smaller dimension vector of real numbers. The one-hot encoded vector is also known as a *sparse vector*, whilst the real valued vector is known as a *dense vector*. 
The key concept in these word embeddings is that words that appear in similar _contexts_ appear nearby in the vector space, i.e. the Euclidean distance between these two word vectors is small. By context here, we mean the surrounding words. For example in the sentences "I purchased some items at the shop" and "I purchased some items at the store" the words 'shop' and 'store' appear in the same context and thus should be close together in vector space.You may have also heard about *word2vec*. *word2vec* is an algorithm (actually a bunch of algorithms) that calculates word vectors from a corpus. In this appendix we use *GloVe* vectors, *GloVe* being another algorithm to calculate word vectors. If you want to know how *word2vec* works, check out a two part series [here](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) and [here](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/), and if you want to find out more about *GloVe*, check the website [here](https://nlp.stanford.edu/projects/glove/).In PyTorch, we use word vectors with the `nn.Embedding` layer, which takes a _**[sentence length, batch size]**_ tensor and transforms it into a _**[sentence length, batch size, embedding dimensions]**_ tensor.In tutorial 2 onwards, we also used pre-trained word embeddings (specifically the GloVe vectors) provided by TorchText. These embeddings have been trained on a gigantic corpus. We can use these pre-trained vectors within any of our models, with the idea that as they have already learned the context of each word they will give us a better starting point for our word vectors. This usually leads to faster training time and/or improved accuracy.In this appendix we won't be training any models, instead we'll be looking at the word embeddings and finding a few interesting things about them.A lot of the code from the first half of this appendix is taken from [here](https://github.com/spro/practical-pytorch/blob/master/glove-word-vectors/glove-word-vectors.ipynb). For more information about word embeddings, go [here](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/). Loading the GloVe vectorsFirst, we'll load the GloVe vectors. The `name` field specifies what the vectors have been trained on, here the `6B` means a corpus of 6 billion words. The `dim` argument specifies the dimensionality of the word vectors. GloVe vectors are available in 50, 100, 200 and 300 dimensions. There is also a `42B` and `840B` glove vectors, however they are only available at 300 dimensions. ###Code import torchtext.vocab glove = torchtext.vocab.GloVe(name='6B', dim=100) print(f'There are {len(glove.itos)} words in the vocabulary') ###Output There are 400000 words in the vocabulary ###Markdown As shown above, there are 400,000 unique words in the GloVe vocabulary. These are the most common words found in the corpus the vectors were trained on. **In these set of GloVe vectors, every single word is lower-case only.**`glove.vectors` is the actual tensor containing the values of the embeddings. ###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown We can see what word is associated with each row by checking the `itos` (int to string) list. Below implies that row 0 is the vector associated with the word 'the', row 1 for ',' (comma), row 2 for '.' (period), etc. ###Code glove.itos[:10] ###Output _____no_output_____ ###Markdown We can also use the `stoi` (string to int) dictionary, in which we input a word and receive the associated integer/index. 
If you try get the index of a word that is not in the vocabulary, you receive an error. ###Code glove.stoi['the'] ###Output _____no_output_____ ###Markdown We can get the vector of a word by first getting the integer associated with it and then indexing into the word embedding tensor with that index. ###Code glove.vectors[glove.stoi['the']].shape ###Output _____no_output_____ ###Markdown We'll be doing this a lot, so we'll create a function that takes in word embeddings and a word and returns the associated vector. It'll also throw an error if the word doesn't exist in the vocabulary. ###Code def get_vector(embeddings, word): assert word in embeddings.stoi, f'*{word}* is not in the vocab!' return embeddings.vectors[embeddings.stoi[word]] ###Output _____no_output_____ ###Markdown As before, we use a word to get the associated vector. ###Code get_vector(glove, 'the').shape ###Output _____no_output_____ ###Markdown Similar ContextsNow to start looking at the context of different words. If we want to find the words similar to a certain input word, we first find the vector of this input word, then we scan through our vocabulary finding any vectors similar to this input word vector.The function below returns the closest 10 words to an input word vector: ###Code import torch def closest_words(embeddings, vector, n=10): distances = [(w, torch.dist(vector, get_vector(embeddings, w)).item()) for w in embeddings.itos] return sorted(distances, key = lambda w: w[1])[:n] ###Output _____no_output_____ ###Markdown Let's try it out with 'korea'. The closest word is the word 'korea' itself (not very interesting), however all of the words are related in some way. Pyongyang is the capital of North Korea, DPRK is the official name of North Korea, etc.Interestingly, we also get 'Japan' and 'China', implies that Korea, Japan and China are frequently talked about together in similar contexts. This makes sense as they are geographically situated near each other. ###Code closest_words(glove, get_vector(glove, 'korea')) ###Output _____no_output_____ ###Markdown Looking at another country, India, we also get nearby countries: Thailand, Malaysia and Sri Lanka (as two separate words). Australia is relatively close to India (geographically), but Thailand and Malaysia are closer. So why is Australia closer to India in vector space? This is most probably due to India and Australia appearing in the context of [cricket](https://en.wikipedia.org/wiki/Cricket) matches together. ###Code closest_words(glove, get_vector(glove, 'india')) ###Output _____no_output_____ ###Markdown We'll also create another function that will nicely print out the tuples returned by our `closest_words` function. ###Code def print_tuples(tuples): for w, d in tuples: print(f'({d:02.04f}) {w}') ###Output _____no_output_____ ###Markdown A final word to look at, 'sports'. As we can see, the closest words are most of the sports themselves. 
###Code print_tuples(closest_words(glove, get_vector(glove, 'sports'))) ###Output (0.0000) sports (3.5875) sport (4.4590) soccer (4.6508) basketball (4.6561) baseball (4.8028) sporting (4.8763) football (4.9624) professional (4.9824) entertainment (5.0975) media ###Markdown AnalogiesAnother property of word embeddings is that they can be operated on just as any standard vector and give interesting results.We'll show an example of this first, and then explain it: ###Code def analogy(embeddings, word1, word2, word3, n=5): candidate_words = closest_words(embeddings, get_vector(embeddings, word2) - get_vector(embeddings, word1) + get_vector(embeddings, word3), n+3) candidate_words = [x for x in candidate_words if x[0] not in [word1, word2, word3]][:n] print(f'{word1} is to {word2} as {word3} is to...') return candidate_words print_tuples(analogy(glove, 'man', 'king', 'woman')) ###Output man is to king as woman is to... (4.0811) queen (4.6429) monarch (4.9055) throne (4.9216) elizabeth (4.9811) prince ###Markdown This is the canonical example which shows off this property of word embeddings. So why does it work? Why does the vector of 'woman' added to the vector of 'king' minus the vector of 'man' give us 'queen'?If we think about it, the vector calculated from 'king' minus 'man' gives us a "royalty vector". This is the vector associated with traveling from a man to his royal counterpart, a king. If we add this "royality vector" to 'woman', this should travel to her royal equivalent, which is a queen!We can do this with other analogies too. For example, this gets an "acting career vector": ###Code print_tuples(analogy(glove, 'man', 'actor', 'woman')) ###Output man is to actor as woman is to... (2.8133) actress (5.0039) comedian (5.1399) actresses (5.2773) starred (5.3085) screenwriter ###Markdown For a "baby animal vector": ###Code print_tuples(analogy(glove, 'cat', 'kitten', 'dog')) ###Output cat is to kitten as dog is to... (3.8146) puppy (4.2944) rottweiler (4.5888) puppies (4.6086) pooch (4.6520) pug ###Markdown A "capital city vector": ###Code print_tuples(analogy(glove, 'france', 'paris', 'england')) ###Output france is to paris as england is to... (4.1426) london (4.4938) melbourne (4.7087) sydney (4.7630) perth (4.7952) birmingham ###Markdown A "musician's genre vector": ###Code print_tuples(analogy(glove, 'elvis', 'rock', 'eminem')) ###Output elvis is to rock as eminem is to... (5.6597) rap (6.2057) rappers (6.2161) rapper (6.2444) punk (6.2690) hop ###Markdown And an "ingredient vector": ###Code print_tuples(analogy(glove, 'beer', 'barley', 'wine')) ###Output beer is to barley as wine is to... (5.6021) grape (5.6760) beans (5.8174) grapes (5.9035) lentils (5.9454) figs ###Markdown Correcting Spelling MistakesVery recently, someone has found out that you can actually use word embeddings to correct spelling mistakes! We'll put their findings into code and briefly explain them, but to read more about this, check out the [original thread](http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411) and the associated [write-up](https://blog.usejournal.com/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26).First, we need to load up the much larger vocabulary GloVe vectors, this is due to the spelling mistakes not appearing in the smaller vocabulary. **Note**: these vectors are very large (~2GB), so watch out if you have a limited internet connection. 
###Code glove = torchtext.vocab.GloVe(name='840B', dim=300) ###Output _____no_output_____ ###Markdown Checking the vocabulary size of these embeddings, we can see we now have over 2 million unique words in our vocabulary! ###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown As the vectors were trained with a much larger vocabulary on a larger corpus of text, the words that appear are a little different. Notice how the words 'north', 'south', 'pyongyang' and 'dprk' no longer appear in the most closest words to 'korea'. ###Code print_tuples(closest_words(glove, get_vector(glove, 'korea'))) ###Output (0.0000) korea (3.9857) taiwan (4.4022) korean (4.9016) asia (4.9593) japan (5.0721) seoul (5.4058) thailand (5.6025) singapore (5.7010) russia (5.7240) hong ###Markdown Our first step to correcting spelling mistakes is looking at the vector for a misspelling of the word 'reliable'. ###Code print_tuples(closest_words(glove, get_vector(glove, 'relieable'))) ###Output (0.0000) relieable (5.0366) relyable (5.2610) realible (5.4719) realiable (5.5402) relable (5.5917) relaible (5.6412) reliabe (5.8802) relaiable (5.9593) stabel (5.9981) consitant ###Markdown Notice how the correct spelling of "reliable" does not appear in the top 10 closest words. Surely the misspellings of a word should appear next to the correct spelling of the word as they appear in the same context, right? The hypothesis is that misspellings of a certain word are all shifted away from the correct spelling. This is because articles of text that contain spelling mistakes are usually written in an informal manner (such as tweets/blog posts), thus spelling errors will appear together as they appear in context of informal articles.Similar to how we created analogies before, we can create a "correct spelling" vector. This time, instead of using a single example to create our vector, we'll use the average of multiple examples. This will hopefully give better accuracy!We first create a vector for the correct spelling, 'reliable', then calculate the difference between the "reliable vector" and each of the 8 misspellings of 'reliable'. ###Code reliable_vector = get_vector(glove, 'reliable') reliable_misspellings = ['relieable', 'relyable', 'realible', 'realiable', 'relable', 'relaible', 'reliabe', 'relaiable'] diff_reliable = [(reliable_vector - get_vector(glove, s)).unsqueeze(0) for s in reliable_misspellings] ###Output _____no_output_____ ###Markdown We take the average of these 8 'difference from reliable' vectors to get our "misspelling vector". 
###Code misspelling_vector = torch.cat(diff_reliable, dim=0).mean(dim=0) ###Output _____no_output_____ ###Markdown We can now correct other spelling mistakes using this "misspelling vector" by finding the closest words to the sum of the vector of a misspelled word and the "misspelling vector".For a misspelling of "because": ###Code print_tuples(closest_words(glove, get_vector(glove, 'becuase') + misspelling_vector)) ###Output (6.1090) because (6.4250) even (6.4358) fact (6.4914) sure (6.5094) though (6.5601) obviously (6.5682) reason (6.5856) if (6.6099) but (6.6415) why ###Markdown For a misspelling of "definitely": ###Code print_tuples(closest_words(glove, get_vector(glove, 'defintiely') + misspelling_vector)) ###Output (5.4070) definitely (5.5643) certainly (5.7192) sure (5.8152) well (5.8588) always (5.8812) also (5.9557) simply (5.9667) consider (5.9821) probably (5.9948) definately ###Markdown For a misspelling of "consistent": ###Code print_tuples(closest_words(glove, get_vector(glove, 'consistant') + misspelling_vector)) ###Output (5.9641) consistent (6.3674) reliable (7.0195) consistant (7.0299) consistently (7.1605) accurate (7.2737) fairly (7.3037) good (7.3520) reasonable (7.3801) dependable (7.4027) ensure ###Markdown For a misspelling of "package": ###Code print_tuples(closest_words(glove, get_vector(glove, 'pakage') + misspelling_vector)) ###Output (6.6117) package (6.9315) packages (7.0195) pakage (7.0911) comes (7.1241) provide (7.1469) offer (7.1861) reliable (7.2431) well (7.2434) choice (7.2453) offering ###Markdown B - A Closer Look at Word EmbeddingsWe have very briefly covered how word embeddings (also known as word vectors) are used in the tutorials. In this appendix we'll have a closer look at these embeddings and find some (hopefully) interesting results.Embeddings transform a one-hot encoded vector (a vector that is 0 in elements except one, which is 1) into a much smaller dimension vector of real numbers. The one-hot encoded vector is also known as a *sparse vector*, whilst the real valued vector is known as a *dense vector*. The key concept in these word embeddings is that words that appear in similar _contexts_ appear nearby in the vector space, i.e. the Euclidean distance between these two word vectors is small. By context here, we mean the surrounding words. For example in the sentences "I purchased some items at the shop" and "I purchased some items at the store" the words 'shop' and 'store' appear in the same context and thus should be close together in vector space.You may have also heard about *word2vec*. *word2vec* is an algorithm (actually a bunch of algorithms) that calculates word vectors from a corpus. In this appendix we use *GloVe* vectors, *GloVe* being another algorithm to calculate word vectors. If you want to know how *word2vec* works, check out a two part series [here](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) and [here](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/), and if you want to find out more about *GloVe*, check the website [here](https://nlp.stanford.edu/projects/glove/).In PyTorch, we use word vectors with the `nn.Embedding` layer, which takes a _**[sentence length, batch size]**_ tensor and transforms it into a _**[sentence length, batch size, embedding dimensions]**_ tensor.In tutorial 2 onwards, we also used pre-trained word embeddings (specifically the GloVe vectors) provided by TorchText. These embeddings have been trained on a gigantic corpus. 
We can use these pre-trained vectors within any of our models, with the idea that as they have already learned the context of each word they will give us a better starting point for our word vectors. This usually leads to faster training time and/or improved accuracy.In this appendix we won't be training any models, instead we'll be looking at the word embeddings and finding a few interesting things about them.A lot of the code from the first half of this appendix is taken from [here](https://github.com/spro/practical-pytorch/blob/master/glove-word-vectors/glove-word-vectors.ipynb). For more information about word embeddings, go [here](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/). Loading the GloVe vectorsFirst, we'll load the GloVe vectors. The `name` field specifies what the vectors have been trained on, here the `6B` means a corpus of 6 billion words. The `dim` argument specifies the dimensionality of the word vectors. GloVe vectors are available in 50, 100, 200 and 300 dimensions. There is also a `42B` and `840B` glove vectors, however they are only available at 300 dimensions. ###Code import torchtext.vocab glove = torchtext.vocab.GloVe(name = '6B', dim = 100) print(f'There are {len(glove.itos)} words in the vocabulary') ###Output There are 400000 words in the vocabulary ###Markdown As shown above, there are 400,000 unique words in the GloVe vocabulary. These are the most common words found in the corpus the vectors were trained on. **In these set of GloVe vectors, every single word is lower-case only.**`glove.vectors` is the actual tensor containing the values of the embeddings. ###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown We can see what word is associated with each row by checking the `itos` (int to string) list. Below implies that row 0 is the vector associated with the word 'the', row 1 for ',' (comma), row 2 for '.' (period), etc. ###Code glove.itos[:10] ###Output _____no_output_____ ###Markdown We can also use the `stoi` (string to int) dictionary, in which we input a word and receive the associated integer/index. If you try get the index of a word that is not in the vocabulary, you receive an error. ###Code glove.stoi['the'] ###Output _____no_output_____ ###Markdown We can get the vector of a word by first getting the integer associated with it and then indexing into the word embedding tensor with that index. ###Code glove.vectors[glove.stoi['the']].shape ###Output _____no_output_____ ###Markdown We'll be doing this a lot, so we'll create a function that takes in word embeddings and a word then returns the associated vector. It'll also throw an error if the word doesn't exist in the vocabulary. ###Code def get_vector(embeddings, word): assert word in embeddings.stoi, f'*{word}* is not in the vocab!' return embeddings.vectors[embeddings.stoi[word]] ###Output _____no_output_____ ###Markdown As before, we use a word to get the associated vector. ###Code get_vector(glove, 'the').shape ###Output _____no_output_____ ###Markdown Similar ContextsNow to start looking at the context of different words. If we want to find the words similar to a certain input word, we first find the vector of this input word, then we scan through our vocabulary calculating the distance between the vector of each word and our input word vector. 
We then sort these from closest to furthest away.The function below returns the closest 10 words to an input word vector: ###Code import torch def closest_words(embeddings, vector, n = 10): distances = [(word, torch.dist(vector, get_vector(embeddings, word)).item()) for word in embeddings.itos] return sorted(distances, key = lambda w: w[1])[:n] ###Output _____no_output_____ ###Markdown Let's try it out with 'korea'. The closest word is the word 'korea' itself (not very interesting), however all of the words are related in some way. Pyongyang is the capital of North Korea, DPRK is the official name of North Korea, etc.Interestingly, we also get 'Japan' and 'China', implies that Korea, Japan and China are frequently talked about together in similar contexts. This makes sense as they are geographically situated near each other. ###Code word_vector = get_vector(glove, 'korea') closest_words(glove, word_vector) ###Output _____no_output_____ ###Markdown Looking at another country, India, we also get nearby countries: Thailand, Malaysia and Sri Lanka (as two separate words). Australia is relatively close to India (geographically), but Thailand and Malaysia are closer. So why is Australia closer to India in vector space? This is most probably due to India and Australia appearing in the context of [cricket](https://en.wikipedia.org/wiki/Cricket) matches together. ###Code word_vector = get_vector(glove, 'india') closest_words(glove, word_vector) ###Output _____no_output_____ ###Markdown We'll also create another function that will nicely print out the tuples returned by our `closest_words` function. ###Code def print_tuples(tuples): for w, d in tuples: print(f'({d:02.04f}) {w}') ###Output _____no_output_____ ###Markdown A final word to look at, 'sports'. As we can see, the closest words are most of the sports themselves. ###Code word_vector = get_vector(glove, 'sports') print_tuples(closest_words(glove, word_vector)) ###Output (0.0000) sports (3.5875) sport (4.4590) soccer (4.6508) basketball (4.6561) baseball (4.8028) sporting (4.8763) football (4.9624) professional (4.9824) entertainment (5.0975) media ###Markdown AnalogiesAnother property of word embeddings is that they can be operated on just as any standard vector and give interesting results.We'll show an example of this first, and then explain it: ###Code def analogy(embeddings, word1, word2, word3, n=5): #get vectors for each word word1_vector = get_vector(embeddings, word1) word2_vector = get_vector(embeddings, word2) word3_vector = get_vector(embeddings, word3) #calculate analogy vector analogy_vector = word2_vector - word1_vector + word3_vector #find closest words to analogy vector candidate_words = closest_words(embeddings, analogy_vector, n+3) #filter out words already in analogy candidate_words = [(word, dist) for (word, dist) in candidate_words if word not in [word1, word2, word3]][:n] print(f'{word1} is to {word2} as {word3} is to...') return candidate_words print_tuples(analogy(glove, 'man', 'king', 'woman')) ###Output man is to king as woman is to... (4.0811) queen (4.6429) monarch (4.9055) throne (4.9216) elizabeth (4.9811) prince ###Markdown This is the canonical example which shows off this property of word embeddings. So why does it work? Why does the vector of 'woman' added to the vector of 'king' minus the vector of 'man' give us 'queen'?If we think about it, the vector calculated from 'king' minus 'man' gives us a "royalty vector". This is the vector associated with traveling from a man to his royal counterpart, a king. 
If we add this "royality vector" to 'woman', this should travel to her royal equivalent, which is a queen!We can do this with other analogies too. For example, this gets an "acting career vector": ###Code print_tuples(analogy(glove, 'man', 'actor', 'woman')) ###Output man is to actor as woman is to... (2.8133) actress (5.0039) comedian (5.1399) actresses (5.2773) starred (5.3085) screenwriter ###Markdown For a "baby animal vector": ###Code print_tuples(analogy(glove, 'cat', 'kitten', 'dog')) ###Output cat is to kitten as dog is to... (3.8146) puppy (4.2944) rottweiler (4.5888) puppies (4.6086) pooch (4.6520) pug ###Markdown A "capital city vector": ###Code print_tuples(analogy(glove, 'france', 'paris', 'england')) ###Output france is to paris as england is to... (4.1426) london (4.4938) melbourne (4.7087) sydney (4.7630) perth (4.7952) birmingham ###Markdown A "musician's genre vector": ###Code print_tuples(analogy(glove, 'elvis', 'rock', 'eminem')) ###Output elvis is to rock as eminem is to... (5.6597) rap (6.2057) rappers (6.2161) rapper (6.2444) punk (6.2690) hop ###Markdown And an "ingredient vector": ###Code print_tuples(analogy(glove, 'beer', 'barley', 'wine')) ###Output beer is to barley as wine is to... (5.6021) grape (5.6760) beans (5.8174) grapes (5.9035) lentils (5.9454) figs ###Markdown Correcting Spelling MistakesAnother interesting property of word embeddings is that they can actually be used to correct spelling mistakes! We'll put their findings into code and briefly explain them, but to read more about this, check out the [original thread](http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411) and the associated [write-up](https://blog.usejournal.com/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26).First, we need to load up the much larger vocabulary GloVe vectors, this is due to the spelling mistakes not appearing in the smaller vocabulary. **Note**: these vectors are very large (~2GB), so watch out if you have a limited internet connection. ###Code glove = torchtext.vocab.GloVe(name = '840B', dim = 300) ###Output _____no_output_____ ###Markdown Checking the vocabulary size of these embeddings, we can see we now have over 2 million unique words in our vocabulary! ###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown As the vectors were trained with a much larger vocabulary on a larger corpus of text, the words that appear are a little different. Notice how the words 'north', 'south', 'pyongyang' and 'dprk' no longer appear in the most closest words to 'korea'. ###Code word_vector = get_vector(glove, 'korea') print_tuples(closest_words(glove, word_vector)) ###Output (0.0000) korea (3.9857) taiwan (4.4022) korean (4.9016) asia (4.9593) japan (5.0721) seoul (5.4058) thailand (5.6025) singapore (5.7010) russia (5.7240) hong ###Markdown Our first step to correcting spelling mistakes is looking at the vector for a misspelling of the word 'reliable'. ###Code word_vector = get_vector(glove, 'relieable') print_tuples(closest_words(glove, word_vector)) ###Output (0.0000) relieable (5.0366) relyable (5.2610) realible (5.4719) realiable (5.5402) relable (5.5917) relaible (5.6412) reliabe (5.8802) relaiable (5.9593) stabel (5.9981) consitant ###Markdown Notice how the correct spelling, "reliable", does not appear in the top 10 closest words. Surely the misspellings of a word should appear next to the correct spelling of the word as they appear in the same context, right? 
The hypothesis is that misspellings of words are all equally shifted away from their correct spelling. This is because articles of text that contain spelling mistakes are usually written in an informal manner where correct spelling doesn't matter as much (such as tweets/blog posts), thus spelling errors will appear together as they appear in context of informal articles.Similar to how we created analogies before, we can create a "correct spelling" vector. This time, instead of using a single example to create our vector, we'll use the average of multiple examples. This will hopefully give better accuracy!We first create a vector for the correct spelling, 'reliable', then calculate the difference between the "reliable vector" and each of the 8 misspellings of 'reliable'. As we are going to concatenate these 8 misspelling tensors together we need to unsqueeze a "batch" dimension to them. ###Code reliable_vector = get_vector(glove, 'reliable') reliable_misspellings = ['relieable', 'relyable', 'realible', 'realiable', 'relable', 'relaible', 'reliabe', 'relaiable'] diff_reliable = [(reliable_vector - get_vector(glove, s)).unsqueeze(0) for s in reliable_misspellings] ###Output _____no_output_____ ###Markdown We take the average of these 8 'difference from reliable' vectors to get our "misspelling vector". ###Code misspelling_vector = torch.cat(diff_reliable, dim = 0).mean(dim = 0) ###Output _____no_output_____ ###Markdown We can now correct other spelling mistakes using this "misspelling vector" by finding the closest words to the sum of the vector of a misspelled word and the "misspelling vector".For a misspelling of "because": ###Code word_vector = get_vector(glove, 'becuase') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (6.1090) because (6.4250) even (6.4358) fact (6.4914) sure (6.5094) though (6.5601) obviously (6.5682) reason (6.5856) if (6.6099) but (6.6415) why ###Markdown For a misspelling of "definitely": ###Code word_vector = get_vector(glove, 'defintiely') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (5.4070) definitely (5.5643) certainly (5.7192) sure (5.8152) well (5.8588) always (5.8812) also (5.9557) simply (5.9667) consider (5.9821) probably (5.9948) definately ###Markdown For a misspelling of "consistent": ###Code word_vector = get_vector(glove, 'consistant') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (5.9641) consistent (6.3674) reliable (7.0195) consistant (7.0299) consistently (7.1605) accurate (7.2737) fairly (7.3037) good (7.3520) reasonable (7.3801) dependable (7.4027) ensure ###Markdown For a misspelling of "package": ###Code word_vector = get_vector(glove, 'pakage') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (6.6117) package (6.9315) packages (7.0195) pakage (7.0911) comes (7.1241) provide (7.1469) offer (7.1861) reliable (7.2431) well (7.2434) choice (7.2453) offering ###Markdown B - A Closer Look at Word EmbeddingsWe have very briefly covered how word embeddings (also known as word vectors) are used in the tutorials. In this appendix we'll have a closer look at these embeddings and find some (hopefully) interesting results.Embeddings transform a one-hot encoded vector (a vector that is 0 in elements except one, which is 1) into a much smaller dimension vector of real numbers. The one-hot encoded vector is also known as a *sparse vector*, whilst the real valued vector is known as a *dense vector*. 
The key concept in these word embeddings is that words that appear in similar _contexts_ appear nearby in the vector space, i.e. the Euclidean distance between these two word vectors is small. By context here, we mean the surrounding words. For example in the sentences "I purchased some items at the shop" and "I purchased some items at the store" the words 'shop' and 'store' appear in the same context and thus should be close together in vector space.You may have also heard about *word2vec*. *word2vec* is an algorithm (actually a bunch of algorithms) that calculates word vectors from a corpus. In this appendix we use *GloVe* vectors, *GloVe* being another algorithm to calculate word vectors. If you want to know how *word2vec* works, check out a two part series [here](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) and [here](http://mccormickml.com/2017/01/11/word2vec-tutorial-part-2-negative-sampling/), and if you want to find out more about *GloVe*, check the website [here](https://nlp.stanford.edu/projects/glove/).In PyTorch, we use word vectors with the `nn.Embedding` layer, which takes a _**[sentence length, batch size]**_ tensor and transforms it into a _**[sentence length, batch size, embedding dimensions]**_ tensor.In tutorial 2 onwards, we also used pre-trained word embeddings (specifically the GloVe vectors) provided by TorchText. These embeddings have been trained on a gigantic corpus. We can use these pre-trained vectors within any of our models, with the idea that as they have already learned the context of each word they will give us a better starting point for our word vectors. This usually leads to faster training time and/or improved accuracy.In this appendix we won't be training any models, instead we'll be looking at the word embeddings and finding a few interesting things about them.A lot of the code from the first half of this appendix is taken from [here](https://github.com/spro/practical-pytorch/blob/master/glove-word-vectors/glove-word-vectors.ipynb). For more information about word embeddings, go [here](https://monkeylearn.com/blog/word-embeddings-transform-text-numbers/). Loading the GloVe vectorsFirst, we'll load the GloVe vectors. The `name` field specifies what the vectors have been trained on, here the `6B` means a corpus of 6 billion words. The `dim` argument specifies the dimensionality of the word vectors. GloVe vectors are available in 50, 100, 200 and 300 dimensions. There is also a `42B` and `840B` glove vectors, however they are only available at 300 dimensions. ###Code import torchtext.vocab glove = torchtext.vocab.GloVe(name = '6B', dim = 100) print(f'There are {len(glove.itos)} words in the vocabulary') ###Output There are 400000 words in the vocabulary ###Markdown As shown above, there are 400,000 unique words in the GloVe vocabulary. These are the most common words found in the corpus the vectors were trained on. **In these set of GloVe vectors, every single word is lower-case only.**`glove.vectors` is the actual tensor containing the values of the embeddings. ###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown We can see what word is associated with each row by checking the `itos` (int to string) list. Below implies that row 0 is the vector associated with the word 'the', row 1 for ',' (comma), row 2 for '.' (period), etc. 
###Code glove.itos[:10] ###Output _____no_output_____ ###Markdown We can also use the `stoi` (string to int) dictionary, in which we input a word and receive the associated integer/index. If you try get the index of a word that is not in the vocabulary, you receive an error. ###Code glove.stoi['the'] ###Output _____no_output_____ ###Markdown We can get the vector of a word by first getting the integer associated with it and then indexing into the word embedding tensor with that index. ###Code glove.vectors[glove.stoi['the']].shape ###Output _____no_output_____ ###Markdown We'll be doing this a lot, so we'll create a function that takes in word embeddings and a word then returns the associated vector. It'll also throw an error if the word doesn't exist in the vocabulary. ###Code def get_vector(embeddings, word): assert word in embeddings.stoi, f'*{word}* is not in the vocab!' return embeddings.vectors[embeddings.stoi[word]] ###Output _____no_output_____ ###Markdown As before, we use a word to get the associated vector. ###Code get_vector(glove, 'the').shape ###Output _____no_output_____ ###Markdown Similar ContextsNow to start looking at the context of different words. If we want to find the words similar to a certain input word, we first find the vector of this input word, then we scan through our vocabulary calculating the distance between the vector of each word and our input word vector. We then sort these from closest to furthest away.The function below returns the closest 10 words to an input word vector: ###Code import torch def closest_words(embeddings, vector, n = 10): distances = [(word, torch.dist(vector, get_vector(embeddings, word)).item()) for word in embeddings.itos] return sorted(distances, key = lambda w: w[1])[:n] ###Output _____no_output_____ ###Markdown Let's try it out with 'korea'. The closest word is the word 'korea' itself (not very interesting), however all of the words are related in some way. Pyongyang is the capital of North Korea, DPRK is the official name of North Korea, etc.Interestingly, we also get 'Japan' and 'China', implies that Korea, Japan and China are frequently talked about together in similar contexts. This makes sense as they are geographically situated near each other. ###Code word_vector = get_vector(glove, 'korea') closest_words(glove, word_vector) ###Output _____no_output_____ ###Markdown Looking at another country, India, we also get nearby countries: Thailand, Malaysia and Sri Lanka (as two separate words). Australia is relatively close to India (geographically), but Thailand and Malaysia are closer. So why is Australia closer to India in vector space? This is most probably due to India and Australia appearing in the context of [cricket](https://en.wikipedia.org/wiki/Cricket) matches together. ###Code word_vector = get_vector(glove, 'india') closest_words(glove, word_vector) ###Output _____no_output_____ ###Markdown We'll also create another function that will nicely print out the tuples returned by our `closest_words` function. ###Code def print_tuples(tuples): for w, d in tuples: print(f'({d:02.04f}) {w}') ###Output _____no_output_____ ###Markdown A final word to look at, 'sports'. As we can see, the closest words are most of the sports themselves. 
###Code word_vector = get_vector(glove, 'sports') print_tuples(closest_words(glove, word_vector)) ###Output (0.0000) sports (3.5875) sport (4.4590) soccer (4.6508) basketball (4.6561) baseball (4.8028) sporting (4.8763) football (4.9624) professional (4.9824) entertainment (5.0975) media ###Markdown AnalogiesAnother property of word embeddings is that they can be operated on just as any standard vector and give interesting results.We'll show an example of this first, and then explain it: ###Code def analogy(embeddings, word1, word2, word3, n=5): #get vectors for each word word1_vector = get_vector(embeddings, word1) word2_vector = get_vector(embeddings, word2) word3_vector = get_vector(embeddings, word3) #calculate analogy vector analogy_vector = word2_vector - word1_vector + word3_vector #find closest words to analogy vector candidate_words = closest_words(embeddings, analogy_vector, n+3) #filter out words already in analogy candidate_words = [(word, dist) for (word, dist) in candidate_words if word not in [word1, word2, word3]][:n] print(f'{word1} is to {word2} as {word3} is to...') return candidate_words print_tuples(analogy(glove, 'man', 'king', 'woman')) ###Output man is to king as woman is to... (4.0811) queen (4.6429) monarch (4.9055) throne (4.9216) elizabeth (4.9811) prince ###Markdown This is the canonical example which shows off this property of word embeddings. So why does it work? Why does the vector of 'woman' added to the vector of 'king' minus the vector of 'man' give us 'queen'?If we think about it, the vector calculated from 'king' minus 'man' gives us a "royalty vector". This is the vector associated with traveling from a man to his royal counterpart, a king. If we add this "royality vector" to 'woman', this should travel to her royal equivalent, which is a queen!We can do this with other analogies too. For example, this gets an "acting career vector": ###Code print_tuples(analogy(glove, 'man', 'actor', 'woman')) ###Output man is to actor as woman is to... (2.8133) actress (5.0039) comedian (5.1399) actresses (5.2773) starred (5.3085) screenwriter ###Markdown For a "baby animal vector": ###Code print_tuples(analogy(glove, 'cat', 'kitten', 'dog')) ###Output cat is to kitten as dog is to... (3.8146) puppy (4.2944) rottweiler (4.5888) puppies (4.6086) pooch (4.6520) pug ###Markdown A "capital city vector": ###Code print_tuples(analogy(glove, 'france', 'paris', 'england')) ###Output france is to paris as england is to... (4.1426) london (4.4938) melbourne (4.7087) sydney (4.7630) perth (4.7952) birmingham ###Markdown A "musician's genre vector": ###Code print_tuples(analogy(glove, 'elvis', 'rock', 'eminem')) ###Output elvis is to rock as eminem is to... (5.6597) rap (6.2057) rappers (6.2161) rapper (6.2444) punk (6.2690) hop ###Markdown And an "ingredient vector": ###Code print_tuples(analogy(glove, 'beer', 'barley', 'wine')) ###Output beer is to barley as wine is to... (5.6021) grape (5.6760) beans (5.8174) grapes (5.9035) lentils (5.9454) figs ###Markdown Correcting Spelling MistakesAnother interesting property of word embeddings is that they can actually be used to correct spelling mistakes! 
We'll put their findings into code and briefly explain them, but to read more about this, check out the [original thread](http://forums.fast.ai/t/nlp-any-libraries-dictionaries-out-there-for-fixing-common-spelling-errors/16411) and the associated [write-up](https://blog.usejournal.com/a-simple-spell-checker-built-from-word-vectors-9f28452b6f26). First, we need to load up the much larger vocabulary GloVe vectors; this is because the spelling mistakes do not appear in the smaller vocabulary. **Note**: these vectors are very large (~2GB), so watch out if you have a limited internet connection.
###Code glove = torchtext.vocab.GloVe(name = '840B', dim = 300) ###Output _____no_output_____ ###Markdown
Checking the vocabulary size of these embeddings, we can see we now have over 2 million unique words in our vocabulary!
###Code glove.vectors.shape ###Output _____no_output_____ ###Markdown
As the vectors were trained with a much larger vocabulary on a larger corpus of text, the words that appear are a little different. Notice how the words 'north', 'south', 'pyongyang' and 'dprk' no longer appear in the closest words to 'korea'.
###Code
word_vector = get_vector(glove, 'korea')
print_tuples(closest_words(glove, word_vector))
###Output
(0.0000) korea
(3.9857) taiwan
(4.4022) korean
(4.9016) asia
(4.9593) japan
(5.0721) seoul
(5.4058) thailand
(5.6025) singapore
(5.7010) russia
(5.7240) hong
###Markdown
Our first step to correcting spelling mistakes is looking at the vector for a misspelling of the word 'reliable'.
###Code
word_vector = get_vector(glove, 'relieable')
print_tuples(closest_words(glove, word_vector))
###Output
(0.0000) relieable
(5.0366) relyable
(5.2610) realible
(5.4719) realiable
(5.5402) relable
(5.5917) relaible
(5.6412) reliabe
(5.8802) relaiable
(5.9593) stabel
(5.9981) consitant
###Markdown
Notice how the correct spelling, "reliable", does not appear in the top 10 closest words. Surely the misspellings of a word should appear next to the correct spelling of the word, as they appear in the same context, right? The hypothesis is that misspellings of a word are all equally shifted away from its correct spelling. This is because articles of text that contain spelling mistakes are usually written in an informal manner where correct spelling doesn't matter as much (such as tweets/blog posts), so spelling errors appear together, in the context of informal articles. Similar to how we created analogies before, we can create a "correct spelling" vector. This time, instead of using a single example to create our vector, we'll use the average of multiple examples. This will hopefully give better accuracy! We first create a vector for the correct spelling, 'reliable', then calculate the difference between the "reliable vector" and each of the 8 misspellings of 'reliable'. As we are going to concatenate these 8 misspelling tensors together, we need to unsqueeze a "batch" dimension onto them.
###Code
reliable_vector = get_vector(glove, 'reliable')

reliable_misspellings = ['relieable', 'relyable', 'realible', 'realiable', 'relable', 'relaible', 'reliabe', 'relaiable']

diff_reliable = [(reliable_vector - get_vector(glove, s)).unsqueeze(0) for s in reliable_misspellings]
###Output _____no_output_____ ###Markdown
We take the average of these 8 'difference from reliable' vectors to get our "misspelling vector".
###Code misspelling_vector = torch.cat(diff_reliable, dim = 0).mean(dim = 0) ###Output _____no_output_____ ###Markdown We can now correct other spelling mistakes using this "misspelling vector" by finding the closest words to the sum of the vector of a misspelled word and the "misspelling vector".For a misspelling of "because": ###Code word_vector = get_vector(glove, 'becuase') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (6.1090) because (6.4250) even (6.4358) fact (6.4914) sure (6.5094) though (6.5601) obviously (6.5682) reason (6.5856) if (6.6099) but (6.6415) why ###Markdown For a misspelling of "definitely": ###Code word_vector = get_vector(glove, 'defintiely') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (5.4070) definitely (5.5643) certainly (5.7192) sure (5.8152) well (5.8588) always (5.8812) also (5.9557) simply (5.9667) consider (5.9821) probably (5.9948) definately ###Markdown For a misspelling of "consistent": ###Code word_vector = get_vector(glove, 'consistant') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (5.9641) consistent (6.3674) reliable (7.0195) consistant (7.0299) consistently (7.1605) accurate (7.2737) fairly (7.3037) good (7.3520) reasonable (7.3801) dependable (7.4027) ensure ###Markdown For a misspelling of "package": ###Code word_vector = get_vector(glove, 'pakage') print_tuples(closest_words(glove, word_vector + misspelling_vector)) ###Output (6.6117) package (6.9315) packages (7.0195) pakage (7.0911) comes (7.1241) provide (7.1469) offer (7.1861) reliable (7.2431) well (7.2434) choice (7.2453) offering
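###Markdown
To make the trick easier to reuse, here is a small helper — our own sketch, not part of the original write-up — that bundles the lookup and the shift by the misspelling vector. It assumes `get_vector`, `closest_words`, `print_tuples`, `glove` and `misspelling_vector` from the cells above.
###Code
def correct_spelling(embeddings, misspelled_word, misspelling_vector, n = 10):
    # Shift the misspelled word's vector by the average "misspelling vector"
    # and return the words closest to the corrected position.
    word_vector = get_vector(embeddings, misspelled_word)
    return closest_words(embeddings, word_vector + misspelling_vector, n)

print_tuples(correct_spelling(glove, 'pakage', misspelling_vector))
###Output _____no_output_____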
Audio Visual/Problems/ImageClassificationANN.ipynb
###Markdown Image Classification using ANN
This Code Template is for the Image Classification task using an Artificial Neural Network (ANN).
Required packages
###Code
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import os
import random
from keras.layers.core import Dense
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn import metrics
###Output _____no_output_____ ###Markdown
Initialization
Path of the main folder containing the image dataset files
###Code
#load the dataset
data = ''
###Output _____no_output_____ ###Markdown
List of target categories which are required for model training and predictions.
###Code target = [] ###Output _____no_output_____ ###Markdown
Data Fetching
The OS module in Python provides functions for interacting with the operating system. This module provides a portable way of using operating system-dependent functionality. OpenCV-Python is a library of Python bindings designed to solve computer vision problems. We will use the "**os.path**" module to interact with the file system and the "**cv2.imread()**" method to load an image from the specified file path.
###Code
# Loading the image dataset and plotting some images from all target categories
def plot_image(data, target):
    count = 0
    for category in target:
        path=os.path.join(data, category)
        flg = 0
        ar = []
        # storing image paths in an array
        for img in os.listdir(path):
            ar.append(os.path.join(path,img))
            flg+= 1
            if flg==2:
                break
        # plotting the images in dataset
        plt.figure(figsize=(5,5))
        for i in range(2):
            d1 = ar[i]
            img_array = cv2.imread(d1,cv2.IMREAD_COLOR)
            img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB)
            ax = plt.subplot(1,2,i+1)
            plt.imshow(img_array)
            plt.title(category)
            plt.axis("off")
        count+= 1
        if count == len(target):
            break

plot_image(data, target)
###Output _____no_output_____ ###Markdown
Data Preprocessing
Since we are working with image data, we need to preprocess it because the machine learning model cannot work directly with images, so the images are converted into a format that our machine learning algorithm can understand. Image data is represented as a matrix, where the depth is the number of channels. An RGB image has three channels (red, green, and blue), and we create a function that first resizes all the images so that the model receives inputs of the same size and then stores them as arrays of RGB values. The different categories of images are also read and labeled (as 0, 1, 2, ...). We finally append the images and their labelled category into a single list that acts as our final preprocessed dataset.
###Code
def create_training_data(data,target):
    training_data=[]
    img_size = 50
    for category in target:
        path=os.path.join(data, category)
        class_num=target.index(category)
        for img in os.listdir(path):
            try:
                img_array=cv2.imread(os.path.join(path,img))
                new_array=cv2.resize(img_array,(img_size,img_size))
                training_data.append([new_array,class_num])
            except Exception as e:
                pass
    return training_data
###Output _____no_output_____ ###Markdown
Calling the preprocessing function on our actual image data
###Code
training_data = create_training_data(data,target)
df = pd.DataFrame(training_data, columns=['image', 'label'])
df.head()
###Output _____no_output_____ ###Markdown
Feature Selection
After preprocessing our data into a single array, we now need to provide the input and the target features for our model.
Also, the ANN's input layer (a `Dense` layer) expects a 2-D array of shape (samples, features), so we convert our features into numpy arrays and then flatten each image into a single feature vector in X.
###Code
lenofimage = len(training_data)

X, y = [], []
for categories, label in training_data:
    X.append(categories)
    y.append(label)

# Flattening our training feature
X = np.array(X).reshape(lenofimage,-1)
y = np.array(y)
###Output _____no_output_____ ###Markdown
Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
###Code X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=123) ###Output _____no_output_____ ###Markdown
Model
Artificial Neural Network (ANN)
An Artificial Neural Network consists of an input layer, one or more hidden layers, and an output layer. If an ANN has no hidden layer, it is called a single-layer perceptron; in contrast, an MLP (Multi-Layer Perceptron) is an ANN with multiple hidden layers (more than 1).
###Code
model = Sequential()
model.add(Dense(1024, input_shape=(X_train.shape[1],), activation="relu")) # first hidden layer has 1024 nodes
model.add(Dense(1, activation="sigmoid"))
model.compile(loss='binary_crossentropy',metrics=['accuracy'],optimizer='adam') # compile initializes the model with weights
model.fit(X_train,y_train,epochs=10) # actually train the model
model.summary()
###Output
Model: "sequential_12"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_23 (Dense) (None, 1024) 7681024
_________________________________________________________________
dense_24 (Dense) (None, 1) 1025
=================================================================
Total params: 7,682,049
Trainable params: 7,682,049
Non-trainable params: 0
_________________________________________________________________
###Markdown
Model Accuracy
We evaluate the model on the held-out test set. Since the output layer is a single sigmoid unit, `predict` returns probabilities for the positive class; rounding them gives the predicted class labels.
###Code
y_pred=model.predict(X_test)
y_pred
yhat=np.round(y_pred)
yhat
###Output _____no_output_____ ###Markdown
Confusion Matrix
A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where the true labels are known.
###Code confusion_matrix(y_test,yhat) ###Output _____no_output_____ ###Markdown
Classification Report
A classification report is used to measure the quality of predictions from a classification algorithm: how many predictions are correct and how many are not.
* **where**:
  - Precision: the accuracy of positive predictions.
  - Recall: the fraction of actual positives that were correctly identified.
  - f1-score: the harmonic mean of precision and recall.
  - support: the number of actual occurrences of the class in the specified dataset.
###Code print(classification_report(y_test,yhat)) ###Output
precision recall f1-score support
0 0.64 0.69 0.67 39
1 0.68 0.63 0.66 41
accuracy 0.66 80
macro avg 0.66 0.66 0.66 80
weighted avg 0.66 0.66 0.66 80
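###Markdown
As a cross-check — our own sketch, not part of the original template — the positive-class precision, recall and f1-score in the report can be derived directly from the 2x2 confusion matrix:
###Code
# Unpack the confusion matrix and recompute the positive-class metrics by hand.
tn, fp, fn, tp = confusion_matrix(y_test, yhat).ravel()
precision = tp / float(tp + fp)
recall = tp / float(tp + fn)
f1 = 2 * precision * recall / (precision + recall)
print('precision %.2f, recall %.2f, f1 %.2f' % (precision, recall, f1))
###Output _____no_output_____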
cn/.ipynb_checkpoints/sicp-3-16-checkpoint.ipynb
###Markdown SICP Exercise (3.16)
Solution summary: counting pairs

SICP exercise 3.16 uses a buggy pair-counting function to teach exactly how car and cdr work. When completing SICP exercises, we should always keep the real goal of the problem in mind. In this one, the goal is not to count pairs at all, but to understand how pairs work, what car and cdr do, and the complications that set-car! and set-cdr! can cause, such as cyclic links. Before looking at the problem in detail, let's briefly review the concept of a pair: a pair is the (a . b)-shaped object created by cons. A list may look like (1 2 3), but in essence it is (1 . (2 . (3 . ()))). The problem says that Ben Bitdiddle decided to write a function to count pairs, with the following code:
###Code
(define (count-pairs x)
  (if (not (pair? x))
      0
      (+ (count-pairs (car x))
         (count-pairs (cdr x))
         1)))
###Output _____no_output_____ ###Markdown
At first glance the logic looks fine: we can treat a list as a tree, count how many elements the left subtree has and how many the right subtree has, and add the current element to get the total number of elements in the whole list. This code works for ordinary lists; let's test it:
###Code (count-pairs '(1 2 3)) ###Output _____no_output_____ ###Markdown
Note that it also works on a list of the following form. Although we see 3 elements, there are in fact 4 pairs inside: the list (1 2 (3)) is in essence (1 . (2 . ((3 . ()) . ())))
###Code (count-pairs '(1 2 (3))) ###Output _____no_output_____ ###Markdown
However, if some pairs in the list point to the same object, things go wrong. Look at the following result:
###Code
(define leaf (cons 1 2))
(define to-same-leaf (cons leaf leaf))
(define test-sample (cons to-same-leaf 3))
(count-pairs test-sample)
###Output _____no_output_____ ###Markdown
Why does it return 4? Because both the car and the cdr of to-same-leaf point to leaf, so Ben's code counts the pair leaf twice. By the same logic, we can make it count 5 elements with the code below:
###Code
(define leaf (cons 1 2))
(define to-same-leaf (cons leaf leaf))
(define test-sample (cons to-same-leaf leaf))
(count-pairs test-sample)
###Output _____no_output_____ ###Markdown
The problem also asks us to find an example that yields 7. Once we understand what is wrong with Ben's function, we know we should point to the same objects as often as possible. It can be done like this:
###Code
(define leaf (cons 1 2))
(define to-same-leaf (cons leaf leaf))
(define test-sample (cons to-same-leaf to-same-leaf))
(count-pairs test-sample)
###Output _____no_output_____
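###Markdown
For illustration — our own addition, not part of the original write-up — here is the standard fix sketched in Python: count each pair exactly once by remembering which pairs have already been visited. Modeling a pair as a two-element Python list is an assumption of this sketch, not the Scheme representation.
###Code
def count_pairs(x, seen=None):
    # Count every distinct pair once, even when several car/cdr fields
    # point to the same pair object (object identity tracked with id()).
    if seen is None:
        seen = set()
    if not isinstance(x, list) or id(x) in seen:
        return 0
    seen.add(id(x))
    return count_pairs(x[0], seen) + count_pairs(x[1], seen) + 1

leaf = [1, 2]
to_same_leaf = [leaf, leaf]
print(count_pairs([to_same_leaf, to_same_leaf]))  # 3, not 7
###Output _____no_output_____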
Housing-Prices/Data-Exploration.ipynb
###Markdown This notebook explores the housing prices data to see what the data looks like, in order to support the later Machine Learning and prediction work.
###Code
# Dependencies
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

%matplotlib inline

# Loading training data
housing_train_data = pd.read_csv('data/train.csv')
housing_train_data.head()
###Output _____no_output_____ ###Markdown
Examining the Data Structure
###Code
# Viewing all columns to view possible features
housing_train_data.columns

# Dropping ID - not a valuable feature
housing_train_data = housing_train_data.drop('Id', axis=1)
housing_train_data.head()

# Viewing the number of rows and columns in the data
housing_train_data.shape

# Viewing the datatype and non-null count of each column
housing_train_data.info()

# List of numerical attributes
housing_train_data.select_dtypes(exclude=['object']).columns

# Look at statistical summary of the numerical attributes
housing_train_data.select_dtypes(exclude=['object']).describe().round(decimals=2).transpose()

# List of categorical attributes
housing_train_data.select_dtypes(include=['object']).columns

# Look at summary of the categorical attributes
housing_train_data.select_dtypes(include=['object']).describe().round(decimals=2).transpose()

# Look at statistical summary of the target variable (SalePrice)
housing_train_data['SalePrice'].describe().round(decimals=2)
###Output _____no_output_____ ###Markdown
Data Distribution
###Code
# Look at the distribution of the target variable (SalePrice)
# Source link for sns.distplot(): https://seaborn.pydata.org/tutorial/distributions.html
sns.distplot(housing_train_data['SalePrice']);

# Make the distribution more normal with np.log()
sns.distplot(np.log(housing_train_data['SalePrice']))
plt.title('Distribution of Log-transformed SalePrice')
plt.xlabel('log(SalePrice)')
plt.show()

# Look at the distribution of the other features (not including target variable) and view what to normalize if needed
num_attributes = housing_train_data.select_dtypes(exclude='object').drop(['SalePrice'], axis=1).copy()
# num_attributes

fig = plt.figure(figsize=(12,18))
for i in range(len(num_attributes.columns)):
    fig.add_subplot(9,4,i+1)
    #sns.distplot(num_attributes.iloc[:,i].dropna())
    sns.distplot(num_attributes.iloc[:,i].dropna(),hist = False)
plt.tight_layout()
plt.show()

# Look for any outliers in the other features (not including target variable)
# These Boxplots are for the visualization aspect.
Note: best to use percentile numbers from the summary statistics
fig = plt.figure(figsize=(10, 15))
for i in range(len(num_attributes.columns)):
    fig.add_subplot(9, 4, i+1)
    sns.boxplot(y=num_attributes.iloc[:,i])
plt.tight_layout()
plt.show()
###Output _____no_output_____ ###Markdown
Data Correlation
Numerical Attributes
###Code
# Correlation of numerical attributes
corr = housing_train_data.corr()

# Basic heatmap
sns.heatmap(corr,cmap="YlGnBu")

# Format heatmap for better readability
# Source link for seaborn heatmap customization: https://seaborn.pydata.org/generated/seaborn.heatmap.html
# Using mask to plot only part of a matrix to remove duplicate information
f, ax = plt.subplots(figsize=(15, 15))
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, mask=mask, cmap="YlGnBu", square=True, ax=ax, vmin = -1.0, vmax = 1.0, linewidths=.5)

# View the correlation numbers of all the attributes to the target variable (SalePrice)
# Note to remember: 1 indicates positive correlation, -1 negative correlation and 0 means no relationship
corr['SalePrice'].sort_values(ascending=False)

# View distribution for certain attributes (SalePrice, OverallQual, GrLivArea, 1stFlrSF, YearBuilt, PoolArea)
col = ['SalePrice', 'OverallQual', 'GrLivArea', '1stFlrSF', 'YearBuilt', 'PoolArea']
sns.pairplot(housing_train_data[col])
###Output _____no_output_____ ###Markdown
Categorical Attributes
###Code
# List of categorical attributes
cat_attributes = housing_train_data.select_dtypes(include=['object']).columns
cat_attributes

# num_attributes = housing_train_data.select_dtypes(exclude='object').drop(['SalePrice'], axis=1).copy()
# # num_attributes

# Comparing SalePrice and HouseStyle
f, ax = plt.subplots(figsize=(9,5))
sns.boxplot(y=housing_train_data['SalePrice'], x=housing_train_data['HouseStyle'])
plt.xticks(rotation=40)
plt.show()

# Counting the number of houses of each House Style
housing_train_data['HouseStyle'].value_counts()

# Viewing if style has changed over the years
plt.scatter(housing_train_data['YearBuilt'],housing_train_data['HouseStyle'])

# Comparing SalePrice and CentralAir
f, ax = plt.subplots(figsize=(9,5))
sns.boxplot(y=housing_train_data['SalePrice'], x=housing_train_data['CentralAir'])
plt.xticks(rotation=40)
plt.show()

# Comparing SalePrice and Utilities
f, ax = plt.subplots(figsize=(9,5))
sns.boxplot(y=housing_train_data['SalePrice'], x=housing_train_data['Utilities'])
plt.xticks(rotation=40)
plt.show()

# Comparing SalePrice and BldgType
f, ax = plt.subplots(figsize=(9,5))
sns.boxplot(y=housing_train_data['SalePrice'], x=housing_train_data['BldgType'])
plt.xticks(rotation=40)
plt.show()

# Counting the number of houses of each Building Type
housing_train_data['BldgType'].value_counts()

# Viewing if building type has changed over the years
plt.scatter(housing_train_data['YearBuilt'],housing_train_data['BldgType'])

# Comparing SalePrice and Exterior1st
f, ax = plt.subplots(figsize=(9,5))
sns.boxplot(y=housing_train_data['SalePrice'], x=housing_train_data['Exterior1st'])
plt.xticks(rotation=40)
plt.show()

# Counting the number of houses of each Exterior Style
housing_train_data['Exterior1st'].value_counts()

# Viewing if exterior style has changed over the years
plt.scatter(housing_train_data['YearBuilt'],housing_train_data['Exterior1st'])
###Output _____no_output_____
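###Markdown
As a follow-up sketch — our own addition, not in the original notebook — we can shortlist the numerical features whose absolute correlation with SalePrice exceeds a threshold, as a starting point for feature selection. The 0.5 cutoff is an assumption, not something the notebook prescribes.
###Code
# Shortlist features strongly correlated with the target; reuses `corr` from above.
strong = corr['SalePrice'].drop('SalePrice')
strong = strong[strong.abs() > 0.5].sort_values(ascending=False)
print(strong)
###Output _____no_output_____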
machine_learning_2/practice/lab2/lab2.ipynb
###Markdown Submitters:* Arthur Bražinskas* Minh Ngo Lab 2: Inference in Graphical Models Machine Learning 2, 2016* The lab exercises should be made in groups of two people.* The deadline is Sunday, April 24, 23:59.* Assignment should be sent to t.s.cohen at uva dot nl (Taco Cohen). The subject line of your email should be "[ML2_2016] lab_lastname1\_lastname2". * Put your and your teammate's names in the body of the email* Attach the .IPYNB (IPython Notebook) file containing your code and answers. Naming of the file follows the same rule as the subject line.Notes on implementation:* You should write your code and answers in an IPython Notebook: http://ipython.org/notebook.html. If you have problems, please contact us.* Among the first lines of your notebook should be "%pylab inline". This imports all required modules, and your plots will appear inline.* NOTE: test your code and make sure we can run your notebook / scripts! IntroductionIn this assignment, we will implement the sum-product and max-sum algorithms for factor graphs over discrete variables. The relevant theory is covered in chapter 8 of Bishop's PRML book, in particular section 8.4. Read this chapter carefuly before continuing!We will first implement sum-product and max-sum and apply it to a simple poly-tree structured factor graph for medical diagnosis. Then, we will implement a loopy version of the algorithms and use it for image denoising.For this assignment we recommended you stick to numpy ndarrays (constructed with np.array, np.zeros, np.ones, etc.) as opposed to numpy matrices, because arrays can store n-dimensional arrays whereas matrices only work for 2d arrays. We need n-dimensional arrays in order to store conditional distributions with more than 1 conditioning variable. If you want to perform matrix multiplication on arrays, use the np.dot function; all infix operators including *, +, -, work element-wise on arrays. Part 1: The sum-product algorithmWe will implement a datastructure to store a factor graph and to facilitate computations on this graph. Recall that a factor graph consists of two types of nodes, factors and variables. Below you will find some classes for these node types to get you started. Carefully inspect this code and make sure you understand what it does; you will have to build on it later. ###Code %pylab inline np.set_printoptions(precision=4) class Node(object): """ Base-class for Nodes in a factor graph. Only instantiate sub-classes of Node. """ def __init__(self, name): # A name for this Node, for printing purposes self.name = name # Neighbours in the graph, identified with their index in this list. # i.e. self.neighbours contains neighbour 0 through len(self.neighbours) - 1. self.neighbours = [] # Reset the node-state (not the graph topology) self.reset() def reset(self): # Incoming messages; a dictionary mapping neighbours to messages. # That is, it maps Node -> np.ndarray. self.in_msgs = {} # A set of neighbours for which this node has pending messages. # We use a python set object so we don't have to worry about duplicates. self.pending = set([]) def add_neighbour(self, nb): self.neighbours.append(nb) def send_sp_msg(self, other): # To be implemented in subclass. raise Exception('Method send_sp_msg not implemented in base-class Node') def send_ms_msg(self, other): # To be implemented in subclass. 
raise Exception('Method send_ms_msg not implemented in base-class Node') def receive_msg(self, other, msg): # Store the incomming message, replacing previous messages from the same node self.in_msgs[other] = msg # TODO: add pending messages # self.pending.update(...) def __str__(self): # This is printed when using 'print node_instance' return self.name class Variable(Node): def __init__(self, name, num_states): """ Variable node constructor. Args: name: a name string for this node. Used for printing. num_states: the number of states this variable can take. Allowable states run from 0 through (num_states - 1). For example, for a binary variable num_states=2, and the allowable states are 0, 1. """ self.num_states = num_states # Call the base-class constructor super(Variable, self).__init__(name) def set_observed(self, observed_state): """ Set this variable to an observed state. Args: observed_state: an integer value in [0, self.num_states - 1]. """ # Observed state is represented as a 1-of-N variable # Could be 0.0 for sum-product, but log(0.0) = -inf so a tiny value is preferable for max-sum self.observed_state[:] = 0.000001 self.observed_state[observed_state] = 1.0 def set_latent(self): """ Erase an observed state for this variable and consider it latent again. """ # No state is preferred, so set all entries of observed_state to 1.0 # Using this representation we need not differentiate between observed and latent # variables when sending messages. self.observed_state[:] = 1.0 def reset(self): super(Variable, self).reset() self.observed_state = np.ones(self.num_states) def marginal(self, Z=None): """ Compute the marginal distribution of this Variable. It is assumed that message passing has completed when this function is called. Args: Z: an optional normalization constant can be passed in. If None is passed, Z is computed. Returns: marginal, Z. The first is a numpy array containing the normalized marginal distribution. Z is either equal to the input Z, or computed in this function (if Z=None was passed). """ # TODO: compute marginal return None, Z def send_sp_msg(self, other): # TODO: implement Variable -> Factor message for sum-product pass def send_ms_msg(self, other): # TODO: implement Variable -> Factor message for max-sum pass class Factor(Node): def __init__(self, name, f, neighbours): """ Factor node constructor. Args: name: a name string for this node. Used for printing f: a numpy.ndarray with N axes, where N is the number of neighbours. That is, the axes of f correspond to variables, and the index along that axes corresponds to a value of that variable. Each axis of the array should have as many entries as the corresponding neighbour variable has states. neighbours: a list of neighbouring Variables. Bi-directional connections are created. 
""" # Call the base-class constructor super(Factor, self).__init__(name) assert len(neighbours) == f.ndim, 'Factor function f should accept as many arguments as this Factor node has neighbours' for nb_ind in range(len(neighbours)): nb = neighbours[nb_ind] assert f.shape[nb_ind] == nb.num_states, 'The range of the factor function f is invalid for input %i %s' % (nb_ind, nb.name) self.add_neighbour(nb) nb.add_neighbour(self) self.f = f def send_sp_msg(self, other): # TODO: implement Factor -> Variable message for sum-product pass def send_ms_msg(self, other): # TODO: implement Factor -> Variable message for max-sum pass # returns all the messages from neighbours except the other def get_neighbour_messages(self,other): mes = [] for ne in self.neighbours: if ne==other: continue if ne not in self.in_msgs: raise Exception('Some messages are not still received') mes.append(self.in_msgs[ne]) return mes Node.get_neighbour_messages = get_neighbour_messages # a procedure that sends messages def send_msg_proc(self, other, mes): # print "mes "+self.name+"-->"+str(other.name)+" : "+ str(mes) other.receive_msg(self,mes) self.pending.remove(other) Node.send_msg_proc = send_msg_proc ###Output _____no_output_____ ###Markdown 1.1 Instantiate network (10 points)Convert the directed graphical model ("Bayesian Network") shown below to a factor graph. Instantiate this graph by creating Variable and Factor instances and linking them according to the graph structure. To instantiate the factor graph, first create the Variable nodes and then create Factor nodes, passing a list of neighbour Variables to each Factor.Use the following prior and conditional probabilities.$$p(\verb+Influenza+) = 0.05 \\\\p(\verb+Smokes+) = 0.2 \\\\$$$$p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 1) = 0.3 \\\\p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 0) = 0.001 \\\\p(\verb+Fever+ = 1| \verb+Influenza+ = 1) = 0.9 \\\\p(\verb+Fever+ = 1| \verb+Influenza+ = 0) = 0.05 \\\\p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 1) = 0.99 \\\\p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 0) = 0.9 \\\\p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 1) = 0.7 \\\\p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 0) = 0.0001 \\\\p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 1) = 0.8 \\\\p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 0) = 0.07 \\\\p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 1) = 0.6 \\\\p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 0) = 0.001 \\\\$$ ###Code from IPython.core.display import Image Image(filename='bn.png') ###Output _____no_output_____ ###Markdown We have transformed the bayesian network to a factor graph as following:![Imgur](http://i.imgur.com/cG2gNen.png)where:$$f_1 = p(ST|I)\\\\f_2 = p(F|I)\\\\f_3 = p(B|I,S)\\\\f_4 = p(C|B)\\\\f_5 = p(W|B)\\\\f_6 = p(S)\\\\f_7 = p(I)$$ ###Code # Variables def init_variables(): I = Variable('Influenza', 2) S = Variable('Smokes', 2) ST = Variable('SoreThroat', 2) F = Variable('Fever', 2) B = Variable('Bronchitits', 2) C = Variable('Coughing', 2) W = Variable('Wheezing', 2) return I, S, ST, F, B, C, W ###Output _____no_output_____ ###Markdown $$p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 1) = 0.3 \\\\p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 0) = 0.001$$ ###Code # Factor nodes def init_f1(I, ST): # Order: I, ST f1_weights = np.empty((2, 2)) f1_weights[1, 1] = 0.3 f1_weights[0, 1] = 0.001 f1_weights[:, 0] = 1 - f1_weights[:, 1] f1 = Factor('f1', f1_weights, [I, ST]) return f1 ###Output _____no_output_____ ###Markdown $$p(\verb+Fever+ 
= 1| \verb+Influenza+ = 1) = 0.9 \\\\p(\verb+Fever+ = 1| \verb+Influenza+ = 0) = 0.05$$ ###Code def init_f2(I, F): # Order: I, F f2_weights = np.empty((2, 2)) f2_weights[1, 1] = 0.9 f2_weights[0, 1] = 0.05 f2_weights[:, 0] = 1 - f2_weights[:, 1] f2 = Factor('f2', f2_weights, [I, F]) return f2 ###Output _____no_output_____ ###Markdown $$p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 1) = 0.99 \\\\p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 0) = 0.9 \\\\p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 1) = 0.7 \\\\p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 0) = 0.0001$$ ###Code def init_f3(I, S, B): # Order: I, S, B f3_weights = np.empty((2, 2, 2)) f3_weights[1, 1, 1] = 0.99 f3_weights[1, 0, 1] = 0.9 f3_weights[0, 1, 1] = 0.7 f3_weights[0, 0, 1] = 0.0001 f3_weights[:, :, 0] = 1 - f3_weights[:, :, 1] f3 = Factor('f3', f3_weights, [I, S, B]) return f3 ###Output _____no_output_____ ###Markdown $$p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 1) = 0.8 \\\\p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 0) = 0.07$$ ###Code def init_f4(B, C): # Order: B, C f4_weights = np.empty((2, 2)) f4_weights[1, 1] = 0.8 f4_weights[0, 1] = 0.07 f4_weights[:, 0] = 1 - f4_weights[:, 1] f4 = Factor('f4', f4_weights, [B, C]) return f4 ###Output _____no_output_____ ###Markdown $$p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 1) = 0.6 \\\\p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 0) = 0.001$$ ###Code def init_f5(B, W): # Order: B, W f5_weights = np.empty((2, 2)) f5_weights[1, 1] = 0.6 f5_weights[0, 1] = 0.001 f5_weights[:, 0] = 1 - f5_weights[:, 1] f5 = Factor('f5', f5_weights, [B, W]) return f5 ###Output _____no_output_____ ###Markdown $$p(\verb+Smokes+) = 0.2$$ ###Code def init_f6(S): f6_weights = np.array([0.8, 0.2]) f6 = Factor('f6', f6_weights, [S]) return f6 ###Output _____no_output_____ ###Markdown $$p(\verb+Influenza+) = 0.05$$ ###Code def init_f7(I): f7_weights = np.array([0.95, 0.05]) f7 = Factor('f7', f7_weights, [I]) return f7 ###Output _____no_output_____ ###Markdown Score 10/10 1.2 Factor to variable messages (20 points)Write a method `send_sp_msg(self, other)` for the Factor class, that checks if all the information required to pass a message to Variable `other` is present, computes the message and sends it to `other`. "Sending" here simply means calling the `receive_msg` function of the receiving node (we will implement this later). The message itself should be represented as a numpy array (np.array) whose length is equal to the number of states of the variable.An elegant and efficient solution can be obtained using the n-way outer product of vectors. This product takes n vectors $\mathbf{x}^{(1)}, \ldots, \mathbf{x}^{(n)}$ and computes a $n$-dimensional tensor (ndarray) whose element $i_0,i_1,...,i_n$ is given by $\prod_j \mathbf{x}^{(j)}_{i_j}$. In python, this is realized as `np.multiply.reduce(np.ix_(*vectors))` for a python list `vectors` of 1D numpy arrays. Try to figure out how this statement works -- it contains some useful functional programming techniques. Another function that you may find useful in computing the message is `np.tensordot`. 
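###Markdown
As a side note — our own illustration, not part of the assignment — the `np.ix_` idiom works by reshaping each 1-D message so that the k-th vector lies along the k-th axis; multiplying the reshaped vectors then broadcasts into the full outer-product table. A minimal sketch with two binary messages (using `functools.reduce` to make the n-vector product explicit):
###Code
import numpy as np
from functools import reduce

m1 = np.array([0.9, 0.1])
m2 = np.array([0.2, 0.8])

# np.ix_ returns views shaped (2, 1) and (1, 2): vector k varies along axis k.
ix = np.ix_(m1, m2)
# Broadcasting the element-wise product gives the 2x2 table whose (i, j)
# entry is m1[i] * m2[j].
outer = reduce(np.multiply, ix)
print(outer)             # [[ 0.18  0.72] [ 0.02  0.08]]
print(np.outer(m1, m2))  # identical for the two-vector case
###Output _____no_output_____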
###Code
def factor_send_sp_msg(self, other):
    assert isinstance(other,Variable)
    if other not in self.neighbours:
        raise Exception('The specified node is not a neighbour')
    factor_indexes = range(len(self.neighbours))
    factor_indexes.remove(self.neighbours.index(other))
    message_indexes = range(len(factor_indexes))
    # computing messages
    mes = self.get_neighbour_messages(other)
    mes = np.tensordot(self.f,np.multiply.reduce(np.ix_(*mes)),axes=(factor_indexes,message_indexes))
    # sending the message
    self.send_msg_proc(other,mes)

Factor.send_sp_msg=factor_send_sp_msg
###Output _____no_output_____ ###Markdown
Score 20/20 1.3 Variable to factor messages (10 points)
Write a method `send_sp_msg(self, other)` for the Variable class that checks if all the information required to pass a message to the Factor `other` is present, computes the message and sends it to that factor.
###Code
def variable_send_sp_msg(self, other):
    assert isinstance(other, Factor)
    # checking if the target factor node has the current node in its neighbours
    assert self in other.neighbours
    # computing the message
    mes = np.prod(self.get_neighbour_messages(other),axis=0) * self.observed_state
    self.send_msg_proc(other,mes)

Variable.send_sp_msg = variable_send_sp_msg
###Output _____no_output_____ ###Markdown
Score 10/10 1.4 Compute marginal (10 points)
Later in this assignment, we will implement message passing schemes to do inference. Once the message passing has completed, we will want to compute local marginals for each variable. Write the method `marginal` for the Variable class, that computes a marginal distribution over that node.
###Code
def variable_marginal(self, Z=None):
    """
    Compute the marginal distribution of this Variable.
    It is assumed that message passing has completed when this function is called.
    Args:
        Z: an optional normalization constant can be passed in. If None is passed, Z is computed.
    Returns: marginal, Z. The first is a numpy array containing the normalized marginal distribution.
        Z is either equal to the input Z, or computed in this function (if Z=None was passed).
    """
    mar = np.prod(self.in_msgs.values(),axis=0) * self.observed_state
    if Z==None:Z=sum(mar)
    return mar/Z, Z

Variable.marginal = variable_marginal
###Output _____no_output_____ ###Markdown
Score 10/10 1.5 Receiving messages (10 points)
In order to implement the loopy and non-loopy message passing algorithms, we need some way to determine which nodes are ready to send messages to which neighbours. To do this in a way that works for both loopy and non-loopy algorithms, we make use of the concept of "pending messages", which is explained in Bishop (8.4.7): "we will say that a (variable or factor) node a has a message pending on its link to a node b if node a has received any message on any of its other links since the last time it send (sic) a message to b. Thus, when a node receives a message on one of its links, this creates pending messages on all of its other links." Keep in mind that for the non-loopy algorithm, nodes may not have received any messages on some or all of their links. Therefore, before we say node a has a pending message for node b, we must check that node a has received all messages needed to compute the message that is to be sent to b. Modify the function `receive_msg`, so that it updates the self.pending variable as described above. The member self.pending is a set that is to be filled with Nodes to which self has pending messages. Modify the `send_msg` functions to remove pending messages as they are sent.
###Code
def node_receive_msg(self, other, msg):
    # Store the incoming message, replacing previous messages from the same node
    self.in_msgs[other] = msg
    # print '%s receives message from %s: %s' % (self, other, msg)
    for neighbour in set(self.neighbours) - {other}:
        if neighbour in self.in_msgs:
            # If received all messages from neighbours
            if len(self.in_msgs) == len(self.neighbours):
                self.pending.update([neighbour])
        elif len(self.in_msgs) == len(self.neighbours) - 1:
            # If all incoming messages received
            self.pending.update([neighbour])

Node.receive_msg = node_receive_msg
###Output _____no_output_____ ###Markdown
Score 10/10 1.6 Inference Engine (10 points)
Write a function `sum_product(node_list)` that runs the sum-product message passing algorithm on a tree-structured factor graph with given nodes. The input parameter `node_list` is a list of all Node instances in the graph, which is assumed to be ordered correctly. That is, the list starts with a leaf node, which can always send a message. Subsequent nodes in `node_list` should be capable of sending a message when the pending messages of preceding nodes in the list have been sent. The sum-product algorithm then proceeds by passing over the list from beginning to end, sending all pending messages at the nodes it encounters. Then, in reverse order, the algorithm traverses the list again and again sends all pending messages at each node as it is encountered. For this to work, you must initialize pending messages for all the leaf nodes, e.g. `influenza_prior.pending.add(influenza)`, where `influenza_prior` is a Factor node corresponding to the prior, `influenza` is a Variable node and the only connection of `influenza_prior` goes to `influenza`.
###Code
def apply_algorithm(node_list, func):
    for node in node_list:
        for other in list(node.pending):
            func(node, other)

def sum_product(node, other):
    node.send_sp_msg(other)

def configure_experiment():
    variables = init_variables()
    I, S, ST, F, B, C, W = variables
    f1 = init_f1(I, ST)
    f2 = init_f2(I, F)
    f3 = init_f3(I, S, B)
    f4 = init_f4(B, C)
    f5 = init_f5(B, W)
    f6 = init_f6(S)
    f7 = init_f7(I)
    f6.pending.update([S])
    f7.pending.update([I])
    ST.pending.update([f1])
    F.pending.update([f2])
    C.pending.update([f4])
    W.pending.update([f5])
    return (I, S, ST, F, B, C, W), (f1, f2, f3, f4, f5, f6, f7)

def print_marginals(variables):
    for variable in variables:
        marginal, Z = variable.marginal(None)
        print variable, marginal

variables, factors = configure_experiment()
I, S, ST, F, B, C, W = variables
f1, f2, f3, f4, f5, f6, f7 = factors
node_list = [f6, f7, W, C, F, f4, f5, S, f2, B, f3, I, f1, ST]
print '-----Forward pass-----'
apply_algorithm(node_list, sum_product)
ST.pending.update([f1])
print '-----Backward pass-----'
apply_algorithm(reversed(node_list), sum_product)
print '-----Marginals-----'
print_marginals(variables)
###Output
-----Forward pass-----
-----Backward pass-----
-----Marginals-----
Influenza [ 0.95 0.05]
Smokes [ 0.8 0.2]
SoreThroat [ 0.984 0.0159]
Fever [ 0.9075 0.0925]
Bronchitits [ 0.821 0.179]
Coughing [ 0.7993 0.2007]
Wheezing [ 0.8918 0.1082]
###Markdown
Score 10/10 1.7 Observed variables and probabilistic queries (15 points)
We will now use the inference engine to answer probabilistic queries. That is, we will set certain variables to observed values, and obtain the marginals over latent variables. We have already provided functions `set_observed` and `set_latent` that manage a member of Variable called `observed_state`.
Modify the `Variable.send_msg` and `Variable.marginal` routines that you wrote before, to use `observed_state` so as to get the required marginals when some nodes are observed. ###Code variables, factors = configure_experiment() I, S, ST, F, B, C, W = variables f1, f2, f3, f4, f5, f6, f7 = factors B.set_observed(1) node_list = [f6, f7, W, C, F, f4, f5, S, f2, B, f3, I, f1, ST] print 'Forward pass' apply_algorithm(node_list, sum_product) ST.pending.update([f1]) print 'Backward pass' apply_algorithm(reversed(node_list), sum_product) print_marginals(variables) ###Output Forward pass Backward pass Influenza [ 0.7435 0.2565] Smokes [ 0.2016 0.7984] SoreThroat [ 0.9223 0.0777] Fever [ 0.732 0.268] Bronchitits [ 4.5873e-06 1.0000e+00] Coughing [ 0.2 0.8] Wheezing [ 0.4 0.6] ###Markdown Score 15/15 1.8 Sum-product and MAP states (5 points)A maximum a posteriori state (MAP-state) is an assignment of all latent variables that maximizes the probability of latent variables given observed variables:$$\mathbf{x}_{\verb+MAP+} = \arg\max _{\mathbf{x}} p(\mathbf{x} | \mathbf{y})$$Could we use the sum-product algorithm to obtain a MAP state? If yes, how? If no, why not? **Answer**: It's possible to compute MAP by factorizing $p(x|y)$ into components and considering all possible combinations of its components, for example:$$p(x|y) = p(x_1)p(y_1|x_1)p(x_3|x_1)p(x_2|y_1)$$in the case of a bayesian network $y_1 \rightarrow x_2$, $x_1 \rightarrow x_3$, $y_1 \rightarrow x_1$, and $y_1$ is observed. By considering all possibilites of states $p(x_1)$, $p(y_1|x_1)$, etc and computing them using sum-product algorithm, we can compute MAP but it's not efficient, that's why we need max-sum.Technically, we could compute conditional probabilities using sum-product by using set_observed() methods, which we used in the previous task. The main inefficiency is caused by the fact that we will be forced to recompute messages once the combination of initially observed states changes. Score 0/5We cannot use the sum-product algorithm to compute MAP states, because SP computes marginals. The maximum of each marginal will not give the global MAP state. We cannot use SP to compute the MAP, even in an inefficient algorithm. Part 2: The max-sum algorithmNext, we implement the max-sum algorithm as described in section 8.4.5 of Bishop. 2.1 Factor to variable messages (10 points)Implement the function `Factor.send_ms_msg` that sends Factor -> Variable messages for the max-sum algorithm. It is analogous to the `Factor.send_sp_msg` function you implemented before. ###Code def factor_send_ms_msg(self, other): assert isinstance(other,Variable) if not other in self.neighbours: raise Exception('The specified node is not a neighbour') factor_indexes = range(len(self.neighbours)) factor_indexes.remove(self.neighbours.index(other)) mes = self.get_neighbour_messages(other) # computing messages mes = np.expand_dims(np.add.reduce(np.ix_(*mes)), self.neighbours.index(other)) mes = np.apply_over_axes(np.amax, np.log(self.f)+mes,factor_indexes).squeeze() # sending the message self.send_msg_proc(other,mes) Factor.send_ms_msg = factor_send_ms_msg ###Output _____no_output_____ ###Markdown Score 10/10 2.2 Variable to factor messages (10 points)Implement the `Variable.send_ms_msg` function that sends Variable -> Factor messages for the max-sum algorithm. 
###Code def variable_send_ms_msg(self, other): assert isinstance(other, Factor) # checking if the target factor node has the current node in its neighbours assert self in other.neighbours # computing the message mes = np.sum(self.get_neighbour_messages(other),axis=0)+np.log(self.observed_state) self.send_msg_proc(other, mes) Variable.send_ms_msg = variable_send_ms_msg ###Output _____no_output_____ ###Markdown Score 10/10 2.3 Find a MAP state (10 points)Using the same message passing schedule we used for sum-product, implement the max-sum algorithm. For simplicity, we will ignore issues relating to non-unique maxima. So there is no need to implement backtracking; the MAP state is obtained by a per-node maximization (eq. 8.98 in Bishop). Make sure your algorithm works with both latent and observed variables. ###Code def map_state(self): # Returns 0 state or 1 return np.argmax(np.add.reduce(self.in_msgs.values()) + np.log(self.observed_state)) Variable.map_state = map_state def max_sum(node, other): node.send_ms_msg(other) def print_map_states(variables): for variable in variables: map_state = variable.map_state() print variable, map_state variables, factors = configure_experiment() I, S, ST, F, B, C, W = variables f1, f2, f3, f4, f5, f6, f7 = factors B.set_observed(1) node_list = [f6, f7, W, C, F, f4, f5, S, f2, B, f3, I, f1, ST] print 'Forward pass' apply_algorithm(node_list, max_sum) ST.pending.update([f1]) print 'Backward pass' apply_algorithm(reversed(node_list), max_sum) print_map_states(variables) ###Output Forward pass Backward pass Influenza 0 Smokes 0 SoreThroat 0 Fever 0 Bronchitits 1 Coughing 0 Wheezing 0 ###Markdown Score 10/10 Part 3: Image Denoising and Loopy BPNext, we will use a loopy version of max-sum to perform denoising on a binary image. The model itself is discussed in Bishop 8.3.3, but we will use loopy max-sum instead of Iterative Conditional Modes as Bishop does.The following code creates some toy data. `im` is a quite large binary image, `test_im` is a smaller synthetic binary image. Noisy versions are also provided. ###Code from pylab import imread, gray # Load the image and binarize im = np.mean(imread('dalmatian1.png'), axis=2) > 0.5 imshow(im) gray() # Add some noise noise = np.random.rand(*im.shape) > 0.9 noise_im = np.logical_xor(noise, im) figure() imshow(noise_im) test_im = np.zeros((10,10)) #test_im[5:8, 3:8] = 1.0 #test_im[5,5] = 1.0 figure() imshow(test_im) # Add some noise noise = np.random.rand(*test_im.shape) > 0.9 noise_test_im = np.logical_xor(noise, test_im) figure() imshow(noise_test_im) show() ###Output _____no_output_____ ###Markdown 3.1 Construct factor graph (10 points)Convert the Markov Random Field (Bishop, fig. 8.31) to a factor graph and instantiate it. 
###Code
def create_factor_graph(img):
    from itertools import product
    Y = np.empty(img.shape, dtype='object')
    X = np.empty(img.shape, dtype='object')
    fYX = np.empty(img.shape, dtype='object')
    fXR = np.empty((img.shape[0] - 1, img.shape[1] - 1), dtype='object')
    fXB = np.empty((img.shape[0] - 1, img.shape[1] - 1), dtype='object')
    init_prob = np.array([[0.8, 0.2], [0.2, 0.8]])

    for y, x in product(range(img.shape[0]), range(img.shape[1])):
        Y[y, x] = Variable('y(%d,%d)' % (x, y), 2)
        Y[y, x].set_observed(img[y, x])
        X[y, x] = Variable('x(%d,%d)' % (x, y), 2)
        fYX[y, x] = Factor('fXY(%d,%d)' % (x, y), init_prob, [Y[y, x], X[y, x]])
        Y[y, x].pending.update([fYX[y, x]])

    one_msg = np.ones(2)
    for y, x in product(range(img.shape[0] - 1), range(img.shape[1] - 1)):
        fXR[y, x] = Factor('fXR(%d,%d)' % (x, y), init_prob, [X[y, x], X[y, x + 1]])
        fXB[y, x] = Factor('fXB(%d,%d)' % (x, y), init_prob, [X[y, x], X[y + 1, x]])
        # Flooding schedule, simultaneously passing a message across every link in both directions
        # Bishop 8.4.7
        X[y, x].in_msgs[fXR[y, x]] = one_msg
        X[y, x].in_msgs[fXB[y, x]] = one_msg
        X[y, x + 1].in_msgs[fXR[y, x]] = one_msg
        X[y + 1, x].in_msgs[fXB[y, x]] = one_msg

    return Y, X, fYX, fXR, fXB
###Output _____no_output_____ ###Markdown
Score 10/10 3.2 Loopy max-sum (10 points)
Implement the loopy max-sum algorithm, by passing messages from randomly chosen nodes iteratively until no more pending messages are created or a maximum number of iterations is reached. Think of a good way to initialize the messages in the graph.
###Code
def denoise(img, niter=10):
    from itertools import product
    Y, X, fYX, fXR, fXB = create_factor_graph(img)
    for i in range(niter):
        fXX = np.hstack((fXR.flatten(), fXB.flatten()))
        np.random.shuffle(fXX)
        # Preordered: first the observed variables, then the factors between observed
        # variables and their corresponding latent variables, then all latent variables,
        # and finally the factors between latents in random order.
        node_list = np.hstack((Y.flatten(), fYX.flatten(), X.flatten(), fXX)).tolist()
        apply_algorithm(node_list, max_sum)
    result = np.zeros_like(img)
    for y, x in product(range(img.shape[0]), range(img.shape[1])):
        result[y, x] = X[y, x].map_state()
    return result

imshow(denoise(noise_test_im))
show()

imshow(denoise(noise_im, niter=10))
show()
###Output _____no_output_____
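###Markdown
As a quick sanity check — our own addition, not part of the graded lab — since the clean image `im` is still available, we can measure the fraction of pixels the loopy max-sum denoiser restores correctly and compare it against the noisy input.
###Code
# Pixel accuracy before and after denoising; assumes `im`, `noise_im`
# and `denoise` from the cells above.
denoised = denoise(noise_im, niter=10)
print('noisy accuracy: %.4f' % np.mean(noise_im == im))
print('denoised accuracy: %.4f' % np.mean(denoised == im))
###Output _____no_output_____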
Taller_3_redes_neuronales.ipynb
###Markdown INF-395/477 Artificial Neural Networks I-2021 Assignment 3 - Neural Networks and Deep Learning
**Topics**
* Recurrent Neural Networks.
* Generative Models.
* Autoencoders.
* Special Models.
****
**Formalities**
* Teams of 3 people.
* Submission format: Jupyter notebook plus a link to a YouTube video (or another agreed platform), all via Aula.
* Each person must be able to present the work done by their team and answer questions about it. On the day of the oral defense, 4-5 presenters will be selected at random from the class.
* The assignment has 2 parts: a research question, where the adopted methodology is evaluated, and a competition, where the result and the ranking obtained carry more weight.
* Code submission deadline: Friday, August 6, 2021, 23:59:00 hrs. (New versions can be uploaded until Saturday, August 10, 23:59:00 hrs.).
* Oral defense: Friday, August 6, 2021, during class hours.
* Video submission deadline: Saturday, August 7, 23:59:00 hrs.
**Deliverables**
* The code used must be delivered as a Jupyter/IPython notebook so that the presented results can be **reproduced**. The notebook must be organized so that the cells corresponding to each part of the assignment, and the most important parts of the work, can be identified.
* A **15 to 20 minute** video must be prepared explaining how the workshop was approached. For the research question, the following must be explained very well: (1) the methodology used (datasets considered, data pre-processing, architectures to evaluate, parameters to vary, parameters to keep constant, metrics with which the result will be evaluated, etc.), (2) the results obtained and (3) the conclusions of the experience. For the challenge, the adopted solution strategy must be briefly narrated, focusing on what the authors believe will make it the winning solution.
****
Workshop 2 — Sansanos Overfitted
* Felipe Cisternas, 201873022-K
* Lucas Galindo, 201873004-1

Contribution, Lucas Galindo: visualization and preparation of the challenge data, building the challenge model, in charge of the predictions and submissions to Kaggle and Binnario.

Contribution, Felipe Cisternas: search for the datasets for the research question, development of the convolutional and recurrent models, and writing up the conclusions of the investigation.

Binnario School Report
1. Would you recommend taking part in this type of challenge to other classmates?: Yes; taking part in a challenge, whether on Kaggle, Binnario or another platform, is always highly recommended, above all to learn by experimenting directly with building predictive models rather than staying purely theoretical.
2. Would you take part in an open competition of this kind (machine learning)?: Yes; in the end, what matters is being able to share and learn from each other, and in this kind of competition you learn many things to apply to future projects.
3. What did I like most about the Binnario School experience?: What we liked most was that the platform is very intuitive and easy to use.
4.
What did I like least about the Binnario School experience?: One of the things we liked least was having to use one account per team, with no way to create or merge teams so that each member could use their own account. The challenge information section is somewhat "overwhelming": there is too much space between paragraphs and everything is separated by lines of different thickness and text of different sizes, which does not make it very readable in our opinion.
5. What I would improve about the Binnario platform: what would improve the platform would be being able to browse and search among the different open challenges and datasets uploaded to the site, so as to participate and interact. The platform needs a visual overhaul in our opinion; the color scheme they chose is not bad, but the elements do not always combine well.
6. Additional comments: Good platform, it has a great future ahead of it — and please add the option to create teams.

Imports
###Code
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Flatten, Dropout, LSTM, Bidirectional, GRU, SimpleRNN, Masking
from sklearn.model_selection import train_test_split
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras.optimizers import RMSprop
import matplotlib.pyplot as plt
from keras.callbacks import History,EarlyStopping
from collections import deque
from keras import backend as K
from random import random
from datetime import datetime, timedelta
###Output _____no_output_____ ###Markdown
**Research Question (60%)**
Refute or prove one of the following hypotheses by providing experimental evidence for or against it:
> **5. Recurrent Networks**: It makes no sense to use a convolutional network for sequence learning; its error will always be higher than that of a recurrent network.
Dataset 1: IMDB
Dataset from Keras
###Code
max_features = 10000
max_len = 500

print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

x_train_rnn = x_train.reshape(len(x_train)//25,25,500)
x_test_rnn = x_test.reshape(len(x_test)//25,25,500)
print(x_train_rnn.shape)
print(x_test_rnn.shape)
###Output
(1000, 25, 500)
(1000, 25, 500)
###Markdown
CNN model
###Code
model = Sequential()
model.add(layers.Embedding(max_features, 128, input_length=max_len))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1))
model.summary()

model.compile(optimizer=RMSprop(learning_rate=1e-4),
              loss='binary_crossentropy',
              metrics=['acc'])
history = model.fit(x_train, y_train,
                    epochs=300,
                    batch_size=128,
                    validation_split=0.2,
                    callbacks=[History(), EarlyStopping(patience=20, monitor="val_loss",restore_best_weights=True)])

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss Error')
plt.ylabel('Binary Cross Entropy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
###Output
Epoch 1/300
157/157 [==============================] - 8s 33ms/step - loss: 1.4872 - acc: 0.5019 - val_loss: 0.7126 - val_acc: 0.5062
Epoch 2/300
157/157 [==============================] - 5s 30ms/step - loss: 0.7332 - acc: 0.5128 - val_loss: 0.6871 - val_acc: 0.5458
Epoch 3/300
157/157 [==============================] - 5s 32ms/step - loss: 0.7119 - acc: 0.5332 - val_loss: 0.6761 - val_acc: 0.6186
Epoch 4/300
157/157 [==============================] - 5s 32ms/step - loss: 0.6859 - acc: 0.5666 - val_loss: 0.6561 - val_acc: 0.7022
Epoch 5/300
157/157 [==============================] - 5s 31ms/step - loss: 0.6559 - acc: 0.6112 - val_loss: 0.6165 - val_acc: 0.7546
Epoch 6/300
157/157 [==============================] - 5s 32ms/step - loss: 0.6016 - acc: 0.6762 - val_loss: 0.5646 - val_acc: 0.7560
Epoch 7/300
157/157 [==============================] - 5s 31ms/step - loss: 0.5402 - acc: 0.7362 - val_loss: 0.4920 - val_acc: 0.8120
Epoch 8/300
157/157 [==============================] - 5s 31ms/step - loss: 0.4873 - acc: 0.7783 - val_loss: 0.4443 - val_acc: 0.8244
Epoch 9/300
157/157 [==============================] - 5s 30ms/step - loss: 0.4524 - acc: 0.8062 - val_loss: 0.4236 - val_acc: 0.8382
Epoch 10/300
157/157 [==============================] - 5s 30ms/step - loss: 0.4296 - acc: 0.8267 - val_loss: 0.4361 - val_acc: 0.8380
Epoch 11/300
157/157 [==============================] - 5s 30ms/step - loss: 0.4101 - acc: 0.8429 - val_loss: 0.4270 - val_acc: 0.8500
Epoch 12/300
157/157 [==============================] - 5s 30ms/step - loss: 0.3847 - acc: 0.8569 - val_loss: 0.4351 - val_acc: 0.8540
Epoch 13/300
157/157 [==============================] - 5s 30ms/step - loss: 0.3648 - acc: 0.8731 - val_loss: 0.4497 - val_acc: 0.8562
Epoch 14/300
157/157 [==============================] - 5s 30ms/step - loss: 0.3430 - acc: 0.8831 - val_loss: 0.4519 - val_acc: 0.8596
Epoch 15/300
157/157 [==============================] - 5s 30ms/step - loss: 0.3562 - acc: 0.8907 -
val_loss: 0.4599 - val_acc: 0.8602
Epoch 16/300
157/157 [==============================] - 5s 31ms/step - loss: 0.3316 - acc: 0.8965 - val_loss: 0.4720 - val_acc: 0.8610
Epoch 17/300
157/157 [==============================] - 5s 32ms/step - loss: 0.3186 - acc: 0.9017 - val_loss: 0.4840 - val_acc: 0.8606
Epoch 18/300
157/157 [==============================] - 5s 31ms/step - loss: 0.3065 - acc: 0.9094 - val_loss: 0.5126 - val_acc: 0.8614
Epoch 19/300
157/157 [==============================] - 5s 31ms/step - loss: 0.2850 - acc: 0.9158 - val_loss: 0.5156 - val_acc: 0.8626
Epoch 20/300
157/157 [==============================] - 5s 32ms/step - loss: 0.2823 - acc: 0.9207 - val_loss: 0.5613 - val_acc: 0.8626
Epoch 21/300
157/157 [==============================] - 5s 31ms/step - loss: 0.2694 - acc: 0.9258 - val_loss: 0.5514 - val_acc: 0.8636
Epoch 22/300
157/157 [==============================] - 5s 31ms/step - loss: 0.2616 - acc: 0.9303 - val_loss: 0.5569 - val_acc: 0.8642
Epoch 23/300
157/157 [==============================] - 5s 31ms/step - loss: 0.2472 - acc: 0.9340 - val_loss: 0.5854 - val_acc: 0.8626
Epoch 24/300
157/157 [==============================] - 5s 31ms/step - loss: 0.2426 - acc: 0.9380 - val_loss: 0.6102 - val_acc: 0.8636
Epoch 25/300
157/157 [==============================] - 5s 32ms/step - loss: 0.2314 - acc: 0.9423 - val_loss: 0.6675 - val_acc: 0.8654
Epoch 26/300
157/157 [==============================] - 5s 31ms/step - loss: 0.2325 - acc: 0.9464 - val_loss: 0.6610 - val_acc: 0.8658
Epoch 27/300
157/157 [==============================] - 5s 31ms/step - loss: 0.2105 - acc: 0.9490 - val_loss: 0.6874 - val_acc: 0.8670
Epoch 28/300
157/157 [==============================] - 5s 31ms/step - loss: 0.2110 - acc: 0.9534 - val_loss: 0.6783 - val_acc: 0.8654
Epoch 29/300
157/157 [==============================] - 5s 30ms/step - loss: 0.2063 - acc: 0.9528 - val_loss: 0.7079 - val_acc: 0.8660
###Markdown
RNN model
###Code
timesteps = x_train_rnn.shape[1]
features = x_train_rnn.shape[2]
inputs_features_1 = tf.keras.layers.Input(shape=(timesteps,features))
x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(100))(inputs_features_1)
x = tf.keras.layers.Dense(20, activation='relu')(x)
x = tf.keras.layers.Dense(20, activation='relu')(x)
x = tf.keras.layers.Dense(20, activation='relu')(x)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.Dense(1)(x)
model2 = tf.keras.Model(inputs=inputs_features_1, outputs=x)
model2.summary()

model2.compile(optimizer=RMSprop(learning_rate=1e-4),
               loss='binary_crossentropy',
               metrics=['acc'])
history = model2.fit(x_train_rnn, y_train,
                     epochs=300,
                     batch_size=4,
                     validation_split=0.2,
                     callbacks=[History(), EarlyStopping(patience=20, monitor="val_loss",restore_best_weights=True)])

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss Error')
plt.ylabel('Binary Cross Entropy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
###Output
Epoch 1/300
200/200 [==============================] - 6s 12ms/step - loss: 5.8962 - acc: 0.5013 - val_loss: 6.3116 - val_acc: 0.5350
Epoch 2/300
200/200 [==============================] - 2s 8ms/step - loss: 5.0051 - acc: 0.5025 - val_loss: 5.4955 - val_acc: 0.5350
Epoch 3/300
200/200 [==============================] - 2s 8ms/step - loss: 4.1263 - acc: 0.5113 - val_loss: 4.5690 - val_acc: 0.5350
Epoch 4/300
200/200 [==============================] - 2s 8ms/step - loss: 3.4458 - acc: 0.5200 - val_loss: 3.1527 - val_acc: 0.5350
Epoch 5/300
200/200
[==============================] - 2s 8ms/step - loss: 2.5455 - acc: 0.5350 - val_loss: 2.0649 - val_acc: 0.5300 Epoch 6/300 200/200 [==============================] - 2s 8ms/step - loss: 2.2299 - acc: 0.5412 - val_loss: 1.6991 - val_acc: 0.5250 Epoch 7/300 200/200 [==============================] - 2s 8ms/step - loss: 1.9772 - acc: 0.5525 - val_loss: 1.3968 - val_acc: 0.4800 Epoch 8/300 200/200 [==============================] - 2s 8ms/step - loss: 1.7110 - acc: 0.5500 - val_loss: 1.1567 - val_acc: 0.4850 Epoch 9/300 200/200 [==============================] - 2s 8ms/step - loss: 1.4332 - acc: 0.5738 - val_loss: 1.1937 - val_acc: 0.4650 Epoch 10/300 200/200 [==============================] - 2s 8ms/step - loss: 1.1661 - acc: 0.5938 - val_loss: 1.0572 - val_acc: 0.4950 Epoch 11/300 200/200 [==============================] - 2s 8ms/step - loss: 1.3097 - acc: 0.5863 - val_loss: 1.0600 - val_acc: 0.4700 Epoch 12/300 200/200 [==============================] - 2s 8ms/step - loss: 1.2116 - acc: 0.6225 - val_loss: 1.0606 - val_acc: 0.5000 Epoch 13/300 200/200 [==============================] - 2s 8ms/step - loss: 1.0807 - acc: 0.6000 - val_loss: 1.0464 - val_acc: 0.4850 Epoch 14/300 200/200 [==============================] - 2s 8ms/step - loss: 0.9566 - acc: 0.6263 - val_loss: 0.9262 - val_acc: 0.4800 Epoch 15/300 200/200 [==============================] - 2s 8ms/step - loss: 0.8910 - acc: 0.6450 - val_loss: 1.0036 - val_acc: 0.4700 Epoch 16/300 200/200 [==============================] - 2s 8ms/step - loss: 0.9606 - acc: 0.6488 - val_loss: 0.9545 - val_acc: 0.4550 Epoch 17/300 200/200 [==============================] - 2s 8ms/step - loss: 0.9384 - acc: 0.6488 - val_loss: 0.9694 - val_acc: 0.4900 Epoch 18/300 200/200 [==============================] - 2s 9ms/step - loss: 0.8462 - acc: 0.6888 - val_loss: 0.9504 - val_acc: 0.5250 Epoch 19/300 200/200 [==============================] - 2s 9ms/step - loss: 0.7997 - acc: 0.6975 - val_loss: 1.0340 - val_acc: 0.5050 Epoch 20/300 200/200 [==============================] - 2s 9ms/step - loss: 0.7887 - acc: 0.7212 - val_loss: 1.0764 - val_acc: 0.5150 Epoch 21/300 200/200 [==============================] - 2s 9ms/step - loss: 0.7383 - acc: 0.7088 - val_loss: 1.2949 - val_acc: 0.5200 Epoch 22/300 200/200 [==============================] - 2s 9ms/step - loss: 0.7738 - acc: 0.7275 - val_loss: 1.1788 - val_acc: 0.5200 Epoch 23/300 200/200 [==============================] - 2s 9ms/step - loss: 0.7570 - acc: 0.7275 - val_loss: 1.3691 - val_acc: 0.4650 Epoch 24/300 200/200 [==============================] - 2s 9ms/step - loss: 0.7095 - acc: 0.7462 - val_loss: 1.4685 - val_acc: 0.4900 Epoch 25/300 200/200 [==============================] - 2s 9ms/step - loss: 0.6196 - acc: 0.7738 - val_loss: 1.9993 - val_acc: 0.5000 Epoch 26/300 200/200 [==============================] - 2s 9ms/step - loss: 0.6923 - acc: 0.7563 - val_loss: 1.9096 - val_acc: 0.5000 Epoch 27/300 200/200 [==============================] - 2s 9ms/step - loss: 0.6933 - acc: 0.7738 - val_loss: 2.0304 - val_acc: 0.4550 Epoch 28/300 200/200 [==============================] - 2s 9ms/step - loss: 0.7510 - acc: 0.7850 - val_loss: 2.3491 - val_acc: 0.4200 Epoch 29/300 200/200 [==============================] - 2s 9ms/step - loss: 0.6399 - acc: 0.8100 - val_loss: 2.2776 - val_acc: 0.4650 Epoch 30/300 200/200 [==============================] - 2s 9ms/step - loss: 0.6039 - acc: 0.8087 - val_loss: 2.4489 - val_acc: 0.4650 Epoch 31/300 200/200 [==============================] - 2s 9ms/step - loss: 0.6334 - acc: 
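###Markdown On this dataset the validation accuracies suggest the CNN (about 0.86) clearly beats the grouped-review RNN (about 0.5, i.e. chance level). A quick held-out check is sketched below (not part of the original run); it assumes `model`, `x_test` and `y_test` from the cells above are still in scope. The RNN line stays commented out because the notebook never shows how each group of 25 reviews gets a single label, so `y_test_rnn` is a hypothetical placeholder. ###Code
# Sketch: score the trained CNN on the held-out test reviews
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print('CNN test loss: %.4f - test acc: %.4f' % (test_loss, test_acc))

# Hypothetical: would need y_test_rnn, one label per group of 25 reviews
# model2.evaluate(x_test_rnn, y_test_rnn, verbose=0)
###Output _____no_output_____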
###Markdown Dataset 2: House Price Prediction
Data from https://www.kaggle.com/htagholdings/property-sales ###Code
df = pd.read_csv('/content/ma_lga_12345.csv', names=['saledate','MA','tipo','bedrooms'], header=0)
df
df = df.dropna()  # keep the cleaned frame (assigning it to an unused df2 would leave the NaN rows in df)
df['saledate'] = pd.to_datetime(df['saledate'], infer_datetime_format=True)
df['year'] = df['saledate'].apply(lambda d: d.year)
df['month'] = df['saledate'].apply(lambda d: d.month)
df['day'] = df['saledate'].apply(lambda d: d.day)
tipos = pd.get_dummies(df[['tipo']])  # one-hot encode the property type (house/unit)
df[['house','unit']] = tipos
del df['saledate']
del df['tipo']
df
y_train2 = df["MA"]
y_train2 = y_train2.values[:-2]
x_train2 = df[['year', 'month', 'day', 'bedrooms', 'house', 'unit']].copy()  # .copy() avoids SettingWithCopy warnings below
for name in x_train2.columns:
    scaler = MinMaxScaler()
    scaler.fit(x_train2[[name]])
    x_train2[name] = scaler.transform(x_train2[[name]])
x_train2 = x_train2.values[0:-2]
x_train2
print(x_train2.shape)
print(y_train2.shape)
# Group 15 consecutive rows into one window, giving (n_windows, 15, n_features);
# the target fed to fit() must then supply one value per window
x_train2_rnn = x_train2.reshape(len(x_train2)//15, 15, x_train2.shape[1])
print(x_train2_rnn.shape)
###Output (23, 15, 6)
###Markdown CNN Model ###Code
input_shape = x_train2_rnn.shape
inputs = tf.keras.layers.Input(shape=input_shape[1:])
cnn2 = tf.keras.layers.Conv1D(32, 3, activation='relu')(inputs)
#cnn2 = tf.keras.layers.MaxPooling1D(5)(cnn2)
cnn2 = tf.keras.layers.Conv1D(32, 3, activation='relu')(cnn2)
cnn2 = tf.keras.layers.GlobalMaxPooling1D()(cnn2)
cnn2 = tf.keras.layers.Dropout(0.5)(cnn2)
cnn2 = tf.keras.layers.Dense(1)(cnn2)
model_cnn_2 = tf.keras.Model(inputs=inputs, outputs=cnn2)
model_cnn_2.summary()

model_cnn_2.compile(optimizer='adam', loss='mean_squared_error')
history = model_cnn_2.fit(x_train2_rnn, y_train2,
                          epochs=300,
                          batch_size=1,
                          validation_split=0.1,
                          callbacks=[History(),
                                     EarlyStopping(patience=20, monitor="val_loss", restore_best_weights=True)])

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss Error')
plt.ylabel('Regression')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
###Output Epoch 1/300 20/20 [==============================] - 1s 8ms/step - loss: 205608681472.0000 - val_loss: 211666944000.0000
Epoch 2/300 20/20 [==============================] - 0s 3ms/step - loss: 205607419904.0000 - val_loss: 211665764352.0000
Epoch 3/300 20/20 [==============================] - 0s 3ms/step - loss: 205605535744.0000 - val_loss: 211663585280.0000
...
Epoch 270/300 20/20 [==============================] - 0s 3ms/step - loss: 5376762880.0000 - val_loss: 2359248896.0000
...
Epoch 289/300 20/20 [==============================] - 0s 3ms/step - loss: 18374307840.0000 - val_loss: 2862449408.0000
Epoch 290/300 20/20 [==============================] - 0s 3ms/step - loss: 9302770688.0000 - val_loss: 2758647296.0000
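###Markdown The losses look enormous only because the target `MA` is a raw price (on the order of 10^5 to 10^6) and the loss is squared error, so a validation loss around 2.4e9 corresponds to an RMSE of roughly 49,000 in price units. A small sketch (not in the original run) to read the history that way, assuming `history` from the fit above is still in scope: ###Code
import numpy as np

# Sketch: convert the best validation MSE into an RMSE in the units of MA
best_val_mse = min(history.history['val_loss'])
print('best val RMSE: %.0f (same units as MA)' % np.sqrt(best_val_mse))
###Output _____no_output_____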
###Markdown RNN Model ###Code
timesteps2 = x_train2_rnn.shape[1]
features2 = x_train2_rnn.shape[2]

inputs_features_2 = tf.keras.layers.Input(shape=(timesteps2, features2))
x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(50))(inputs_features_2)
x = tf.keras.layers.Dense(20, activation='relu')(x)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.Dense(1, activation='linear')(x)
model_rnn2 = tf.keras.Model(inputs=inputs_features_2, outputs=x)
model_rnn2.summary()

model_rnn2.compile(optimizer='adam', loss='mean_squared_error')
history = model_rnn2.fit(x_train2_rnn, y_train2,
                         epochs=300,
                         batch_size=1,
                         validation_split=0.1,
                         callbacks=[History(),
                                    EarlyStopping(patience=20, monitor="val_loss", restore_best_weights=True)])

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss Error')
plt.ylabel('Regression')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
###Output Epoch 1/300 20/20 [==============================] - 3s 37ms/step - loss: 205609189376.0000 - val_loss: 211667566592.0000
Epoch 2/300 20/20 [==============================] - 0s 6ms/step - loss: 205608681472.0000 - val_loss: 211666862080.0000
Epoch 3/300 20/20 [==============================] - 0s 6ms/step - loss: 205606993920.0000 - val_loss: 211664060416.0000
...
Epoch 206/300 20/20 [==============================] - 0s 5ms/step - loss: 195910516736.0000 - val_loss: 202520870912.0000
Epoch 207/300 20/20 [==============================] - 0s 6ms/step - loss: 196129226752.0000 - val_loss: 202442326016.0000
Epoch 208/300 20/20 [==============================] - 0s 6ms/step - loss: 198465421312.0000
- val_loss: 202373382144.0000 Epoch 209/300 20/20 [==============================] - 0s 6ms/step - loss: 195670736896.0000 - val_loss: 202297311232.0000 Epoch 210/300 20/20 [==============================] - 0s 6ms/step - loss: 196473716736.0000 - val_loss: 202219782144.0000 Epoch 211/300 20/20 [==============================] - 0s 5ms/step - loss: 197698633728.0000 - val_loss: 202147217408.0000 Epoch 212/300 20/20 [==============================] - 0s 6ms/step - loss: 196686118912.0000 - val_loss: 202072801280.0000 Epoch 213/300 20/20 [==============================] - 0s 6ms/step - loss: 197840207872.0000 - val_loss: 202005086208.0000 Epoch 214/300 20/20 [==============================] - 0s 6ms/step - loss: 195639050240.0000 - val_loss: 201929572352.0000 Epoch 215/300 20/20 [==============================] - 0s 6ms/step - loss: 197434621952.0000 - val_loss: 201855549440.0000 Epoch 216/300 20/20 [==============================] - 0s 7ms/step - loss: 194800156672.0000 - val_loss: 201780674560.0000 Epoch 217/300 20/20 [==============================] - 0s 6ms/step - loss: 196191748096.0000 - val_loss: 201699147776.0000 Epoch 218/300 20/20 [==============================] - 0s 6ms/step - loss: 197557010432.0000 - val_loss: 201626697728.0000 Epoch 219/300 20/20 [==============================] - 0s 6ms/step - loss: 195717529600.0000 - val_loss: 201552691200.0000 Epoch 220/300 20/20 [==============================] - 0s 6ms/step - loss: 194525315072.0000 - val_loss: 201467576320.0000 Epoch 221/300 20/20 [==============================] - 0s 6ms/step - loss: 195368943616.0000 - val_loss: 201383755776.0000 Epoch 222/300 20/20 [==============================] - 0s 6ms/step - loss: 194591588352.0000 - val_loss: 201296510976.0000 Epoch 223/300 20/20 [==============================] - 0s 6ms/step - loss: 196075520000.0000 - val_loss: 201215492096.0000 Epoch 224/300 20/20 [==============================] - 0s 6ms/step - loss: 194235465728.0000 - val_loss: 201133654016.0000 Epoch 225/300 20/20 [==============================] - 0s 6ms/step - loss: 194325266432.0000 - val_loss: 201045753856.0000 Epoch 226/300 20/20 [==============================] - 0s 6ms/step - loss: 194816868352.0000 - val_loss: 200959213568.0000 Epoch 227/300 20/20 [==============================] - 0s 6ms/step - loss: 193856962560.0000 - val_loss: 200871460864.0000 Epoch 228/300 20/20 [==============================] - 0s 6ms/step - loss: 194655813632.0000 - val_loss: 200783511552.0000 Epoch 229/300 20/20 [==============================] - 0s 6ms/step - loss: 193817919488.0000 - val_loss: 200696578048.0000 Epoch 230/300 20/20 [==============================] - 0s 5ms/step - loss: 196155392000.0000 - val_loss: 200618426368.0000 Epoch 231/300 20/20 [==============================] - 0s 6ms/step - loss: 192730873856.0000 - val_loss: 200533458944.0000 Epoch 232/300 20/20 [==============================] - 0s 6ms/step - loss: 194286632960.0000 - val_loss: 200441004032.0000 Epoch 233/300 20/20 [==============================] - 0s 6ms/step - loss: 196248633344.0000 - val_loss: 200364670976.0000 Epoch 234/300 20/20 [==============================] - 0s 6ms/step - loss: 194982232064.0000 - val_loss: 200287797248.0000 Epoch 235/300 20/20 [==============================] - 0s 6ms/step - loss: 197532532736.0000 - val_loss: 200222457856.0000 Epoch 236/300 20/20 [==============================] - 0s 6ms/step - loss: 195046080512.0000 - val_loss: 200149368832.0000 Epoch 237/300 20/20 [==============================] - 0s 6ms/step - loss: 
195003498496.0000 - val_loss: 200071118848.0000 Epoch 238/300 20/20 [==============================] - 0s 6ms/step - loss: 194340978688.0000 - val_loss: 199991214080.0000 Epoch 239/300 20/20 [==============================] - 0s 6ms/step - loss: 193858027520.0000 - val_loss: 199903854592.0000 Epoch 240/300 20/20 [==============================] - 0s 6ms/step - loss: 194225471488.0000 - val_loss: 199821557760.0000 Epoch 241/300 20/20 [==============================] - 0s 5ms/step - loss: 193106542592.0000 - val_loss: 199733952512.0000 Epoch 242/300 20/20 [==============================] - 0s 6ms/step - loss: 193289289728.0000 - val_loss: 199641399296.0000 Epoch 243/300 20/20 [==============================] - 0s 6ms/step - loss: 192609861632.0000 - val_loss: 199549566976.0000 Epoch 244/300 20/20 [==============================] - 0s 6ms/step - loss: 194853126144.0000 - val_loss: 199466926080.0000 Epoch 245/300 20/20 [==============================] - 0s 6ms/step - loss: 195366092800.0000 - val_loss: 199393001472.0000 Epoch 246/300 20/20 [==============================] - 0s 5ms/step - loss: 191666683904.0000 - val_loss: 199303348224.0000 Epoch 247/300 20/20 [==============================] - 0s 6ms/step - loss: 193242578944.0000 - val_loss: 199208910848.0000 Epoch 248/300 20/20 [==============================] - 0s 6ms/step - loss: 193133871104.0000 - val_loss: 199124107264.0000 Epoch 249/300 20/20 [==============================] - 0s 5ms/step - loss: 192370802688.0000 - val_loss: 199037337600.0000 Epoch 250/300 20/20 [==============================] - 0s 6ms/step - loss: 193571454976.0000 - val_loss: 198949470208.0000 Epoch 251/300 20/20 [==============================] - 0s 5ms/step - loss: 194704261120.0000 - val_loss: 198867632128.0000 Epoch 252/300 20/20 [==============================] - 0s 6ms/step - loss: 193436663808.0000 - val_loss: 198788071424.0000 Epoch 253/300 20/20 [==============================] - 0s 5ms/step - loss: 192176521216.0000 - val_loss: 198696615936.0000 Epoch 254/300 20/20 [==============================] - 0s 6ms/step - loss: 192661913600.0000 - val_loss: 198607847424.0000 Epoch 255/300 20/20 [==============================] - 0s 6ms/step - loss: 191849873408.0000 - val_loss: 198516097024.0000 Epoch 256/300 20/20 [==============================] - 0s 5ms/step - loss: 191971721216.0000 - val_loss: 198422282240.0000 Epoch 257/300 20/20 [==============================] - 0s 6ms/step - loss: 192345554944.0000 - val_loss: 198333382656.0000 Epoch 258/300 20/20 [==============================] - 0s 6ms/step - loss: 191386910720.0000 - val_loss: 198237667328.0000 Epoch 259/300 20/20 [==============================] - 0s 6ms/step - loss: 194582446080.0000 - val_loss: 198162006016.0000 Epoch 260/300 20/20 [==============================] - 0s 6ms/step - loss: 191834603520.0000 - val_loss: 198073368576.0000 Epoch 261/300 20/20 [==============================] - 0s 6ms/step - loss: 193069580288.0000 - val_loss: 197985959936.0000 Epoch 262/300 20/20 [==============================] - 0s 6ms/step - loss: 190795497472.0000 - val_loss: 197891014656.0000 Epoch 263/300 20/20 [==============================] - 0s 6ms/step - loss: 192873742336.0000 - val_loss: 197805867008.0000 Epoch 264/300 20/20 [==============================] - 0s 6ms/step - loss: 189695082496.0000 - val_loss: 197710659584.0000 Epoch 265/300 20/20 [==============================] - 0s 6ms/step - loss: 192307101696.0000 - val_loss: 197616402432.0000 Epoch 266/300 20/20 [==============================] - 0s 
7ms/step - loss: 193346650112.0000 - val_loss: 197532319744.0000 Epoch 267/300 20/20 [==============================] - 0s 6ms/step - loss: 190554030080.0000 - val_loss: 197439258624.0000 Epoch 268/300 20/20 [==============================] - 0s 6ms/step - loss: 192334823424.0000 - val_loss: 197351522304.0000 Epoch 269/300 20/20 [==============================] - 0s 6ms/step - loss: 190972428288.0000 - val_loss: 197257068544.0000 Epoch 270/300 20/20 [==============================] - 0s 6ms/step - loss: 191833866240.0000 - val_loss: 197168873472.0000 Epoch 271/300 20/20 [==============================] - 0s 6ms/step - loss: 190571593728.0000 - val_loss: 197072257024.0000 Epoch 272/300 20/20 [==============================] - 0s 6ms/step - loss: 191287099392.0000 - val_loss: 196980817920.0000 Epoch 273/300 20/20 [==============================] - 0s 6ms/step - loss: 189644701696.0000 - val_loss: 196885872640.0000 Epoch 274/300 20/20 [==============================] - 0s 6ms/step - loss: 189868703744.0000 - val_loss: 196785799168.0000 Epoch 275/300 20/20 [==============================] - 0s 6ms/step - loss: 191204917248.0000 - val_loss: 196689395712.0000 Epoch 276/300 20/20 [==============================] - 0s 6ms/step - loss: 190128390144.0000 - val_loss: 196598366208.0000 Epoch 277/300 20/20 [==============================] - 0s 6ms/step - loss: 190244192256.0000 - val_loss: 196501913600.0000 Epoch 278/300 20/20 [==============================] - 0s 6ms/step - loss: 190359011328.0000 - val_loss: 196409819136.0000 Epoch 279/300 20/20 [==============================] - 0s 6ms/step - loss: 189802905600.0000 - val_loss: 196313350144.0000 Epoch 280/300 20/20 [==============================] - 0s 6ms/step - loss: 192521322496.0000 - val_loss: 196220502016.0000 Epoch 281/300 20/20 [==============================] - 0s 7ms/step - loss: 190662066176.0000 - val_loss: 196136615936.0000 Epoch 282/300 20/20 [==============================] - 0s 6ms/step - loss: 192347996160.0000 - val_loss: 196050092032.0000 Epoch 283/300 20/20 [==============================] - 0s 6ms/step - loss: 190986682368.0000 - val_loss: 195962994688.0000 Epoch 284/300 20/20 [==============================] - 0s 6ms/step - loss: 190412455936.0000 - val_loss: 195873488896.0000 Epoch 285/300 20/20 [==============================] - 0s 6ms/step - loss: 190332010496.0000 - val_loss: 195780984832.0000 Epoch 286/300 20/20 [==============================] - 0s 6ms/step - loss: 190250139648.0000 - val_loss: 195688873984.0000 Epoch 287/300 20/20 [==============================] - 0s 6ms/step - loss: 191144738816.0000 - val_loss: 195599122432.0000 Epoch 288/300 20/20 [==============================] - 0s 5ms/step - loss: 187408842752.0000 - val_loss: 195495460864.0000 Epoch 289/300 20/20 [==============================] - 0s 6ms/step - loss: 188153036800.0000 - val_loss: 195394977792.0000 Epoch 290/300 20/20 [==============================] - 0s 6ms/step - loss: 191667961856.0000 - val_loss: 195303784448.0000 Epoch 291/300 20/20 [==============================] - 0s 6ms/step - loss: 190223843328.0000 - val_loss: 195217670144.0000 Epoch 292/300 20/20 [==============================] - 0s 5ms/step - loss: 188031369216.0000 - val_loss: 195116875776.0000 Epoch 293/300 20/20 [==============================] - 0s 5ms/step - loss: 189280550912.0000 - val_loss: 195019145216.0000 Epoch 294/300 20/20 [==============================] - 0s 6ms/step - loss: 188113879040.0000 - val_loss: 194917695488.0000 Epoch 295/300 20/20 
20/20 [==============================] - 0s 5ms/step - loss: 188448931840.0000 - val_loss: 194719645696.0000
Epoch 297/300
20/20 [==============================] - 0s 6ms/step - loss: 189411917824.0000 - val_loss: 194625781760.0000
Epoch 298/300
20/20 [==============================] - 0s 6ms/step - loss: 189570449408.0000 - val_loss: 194532032512.0000
Epoch 299/300
20/20 [==============================] - 0s 6ms/step - loss: 187364016128.0000 - val_loss: 194428665856.0000
Epoch 300/300
20/20 [==============================] - 0s 6ms/step - loss: 191374426112.0000 - val_loss: 194342813696.0000
###Markdown
Dataset 3: Stock Prediction
Data from https://www.kaggle.com/szrlee/stock-time-series-20050101-to-20171231, specifically the first .csv listed there, "AABA".
###Code
df_stock = pd.read_csv('/content/AABA_2006-01-01_to_2018-01-01.csv')
df_stock

df_stock = df_stock.dropna()
df_stock['Date'] = pd.to_datetime(df_stock['Date'], infer_datetime_format=True)
df_stock['year'] = df_stock['Date'].apply(lambda d: d.year)
df_stock['month'] = df_stock['Date'].apply(lambda d: d.month)
df_stock['day'] = df_stock['Date'].apply(lambda d: d.day)
del df_stock['Date']
del df_stock['Name']
df_stock

y_train3 = df_stock["Close"]
y_train3 = y_train3.values[:-19]  # drop the last 19 rows so the remaining 3,000 split evenly into 15-step windows

x_train3 = df_stock[['Open', 'High', 'Low', 'Volume', 'year', 'month', 'day']]
for name in x_train3.columns:  # min-max scale each feature column to [0, 1]
    scaler = MinMaxScaler()
    scaler.fit(x_train3[[name]])
    x_train3[name] = scaler.transform(x_train3[[name]])
x_train3 = x_train3.values[:-19]

print(x_train3.shape)
print(y_train3.shape)
x_train3

x_train3_rnn = x_train3.reshape(len(x_train3)//15, 15, x_train3.shape[1])
print(x_train3_rnn.shape)
###Output
(200, 15, 7)
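###Markdown
A side note on the scaling loop above: scikit-learn's MinMaxScaler already scales every column independently, so fitting one scaler per column is equivalent to a single fit_transform over the whole frame. A minimal sketch on toy data (not from the original notebook; the per-column loop is only needed if you want to keep a separate scaler object per feature, e.g. for inverting individual columns later):
###Code
# Equivalent one-call column-wise min-max scaling (sketch on toy data)
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

demo = pd.DataFrame({'Open': [1.0, 2.0, 3.0], 'Volume': [10.0, 50.0, 100.0]})
scaled = pd.DataFrame(MinMaxScaler().fit_transform(demo), columns=demo.columns)
print(scaled)  # each column independently mapped to [0, 1]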
###Markdown
CNN model
###Code
input_shape3 = x_train3_rnn.shape
inputs3 = tf.keras.layers.Input(shape=input_shape3[1:])
cnn3 = tf.keras.layers.Conv1D(32, 3, activation='relu')(inputs3)
#cnn3 = tf.keras.layers.MaxPooling1D(5)(cnn3)
cnn3 = tf.keras.layers.Conv1D(32, 3, activation='relu')(cnn3)
cnn3 = tf.keras.layers.GlobalMaxPooling1D()(cnn3)
cnn3 = tf.keras.layers.Dropout(0.5)(cnn3)
cnn3 = tf.keras.layers.Dense(1)(cnn3)

model_cnn_3 = tf.keras.Model(inputs=inputs3, outputs=cnn3)
model_cnn_3.summary()

model_cnn_3.compile(optimizer='adam', loss='mean_squared_error')
history = model_cnn_3.fit(x_train3_rnn, y_train3, epochs=300, batch_size=1, validation_split=0.1,
                          callbacks=[History(), EarlyStopping(patience=20, monitor="val_loss", restore_best_weights=True)])

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss Error')
plt.ylabel('Regression')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
###Output
Epoch 1/300
180/180 [==============================] - 1s 3ms/step - loss: 634.3073 - val_loss: 389.2445
Epoch 2/300
180/180 [==============================] - 0s 2ms/step - loss: 148.0689 - val_loss: 257.8294
...
(epochs 3-105 omitted: training loss falls from ~145 to ~55 while val_loss bottoms out at 0.6818 at epoch 86 and then fluctuates; early stopping fires after 20 epochs without improvement)
...
Epoch 106/300
180/180 [==============================] - 0s 2ms/step - loss: 53.2677 - val_loss: 5.8403
###Markdown
RNN model
###Code
timesteps3 = x_train3_rnn.shape[1]
features3 = x_train3_rnn.shape[2]

inputs_features_3 = tf.keras.layers.Input(shape=(timesteps3, features3))
x3 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(50))(inputs_features_3)
x3 = tf.keras.layers.Dense(20, activation='relu')(x3)
x3 = tf.keras.layers.Dropout(0.5)(x3)
x3 = tf.keras.layers.Dense(1, activation='linear')(x3)

model_rnn3 = tf.keras.Model(inputs=inputs_features_3, outputs=x3)
model_rnn3.summary()

model_rnn3.compile(optimizer='adam', loss='mean_squared_error')
history = model_rnn3.fit(x_train3_rnn, y_train3, epochs=300, batch_size=1, validation_split=0.1,
                         callbacks=[History(), EarlyStopping(patience=20, monitor="val_loss", restore_best_weights=True)])

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss Error')
plt.ylabel('Regression')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
###Output
Epoch 1/300
180/180 [==============================] - 4s 9ms/step - loss: 504.2174 - val_loss: 0.3088
Epoch 2/300
180/180 [==============================] - 1s 5ms/step - loss: 178.0520 - val_loss: 5.0849
Epoch 3/300
180/180 [==============================] - 1s 5ms/step - loss: 130.9646 - val_loss: 2.9198
...
(epochs 4-19 omitted: loss stays between ~120 and ~180 while val_loss never improves on the epoch-1 value of 0.3088; early stopping then restores the epoch-1 weights)
...
Epoch 20/300
180/180 [==============================] - 1s 5ms/step - loss: 148.2097 - val_loss: 12.0920
Epoch 21/300
180/180 [==============================] - 1s 5ms/step - loss: 138.0366 - val_loss: 0.3518
###Markdown
Conclusions: as the loss plots show, for the first, the second and the third dataset alike the convolutional model works better than the recurrent one. This surprised us, since it is fairly counter-intuitive: one would expect a recurrent model to always beat a 1D convolutional model on sequences, but that was not the case. Perhaps on a dataset consisting purely of temporal sequences, such as the one in the Kaggle challenge, the recurrent model would give better results; but on these datasets (sentiment classification, house-price prediction and stock-value prediction) the individual data points weigh more than the values that precede them in the sequence.
For this reason we refute the research question "There is no point in using a convolutional network for sequence learning; its error will always be higher than that of a recurrent network": it does make sense to use a convolutional network for sequence learning, since there are plenty of cases in which it gives better results than a recurrent network.
Kaggle Challenge: Wind-speed forecasting
In this section we work with national data. The objective of this challenge is to forecast the average wind speed at 20 meters above ground at different time instants. The dataset contains the mean wind speed at 20 meters of height for each given time instant. The training set contains 123,365 records of the mean wind speed, taken every 10 minutes from December 19, 2012 at 00:10 until April 24, 2015 at 23:50.
The test set consists of 2,000 records at time instants selected between April 26 and September 24, 2015.
The effectiveness of the model is determined by comparing the predictions for the test set against the true values via RMSE.
The Kaggle competition is:
https://www.kaggle.com/c/prediccin-velocidad-del-viento/
The data needed to start working can be found in that competition, and also at:
http://www.inf.utfsm.cl/~cvalle/Windspeed.rar
You can also download the data directly to the machine where you are running the code with the following commands
###Code
!wget http://www.inf.utfsm.cl/~cvalle/Windspeed.rar
!unrar e /content/Windspeed.rar
###Output
URL transformed to HTTPS due to an HSTS policy
--2021-08-12 22:54:04--  https://www.inf.utfsm.cl/~cvalle/Windspeed.rar
Resolving www.inf.utfsm.cl (www.inf.utfsm.cl)... 200.1.19.11, 2800:270:c::11
Connecting to www.inf.utfsm.cl (www.inf.utfsm.cl)|200.1.19.11|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 428435 (418K) [application/x-rar-compressed]
Saving to: ‘Windspeed.rar.1’

Windspeed.rar.1     100%[===================>] 418.39K   894KB/s    in 0.5s

2021-08-12 22:54:05 (894 KB/s) - ‘Windspeed.rar.1’ saved [428435/428435]

UNRAR 5.50 freeware      Copyright (c) 1993-2017 Alexander Roshal

Extracting from /content/Windspeed.rar

Would you like to replace the existing file windspeed-training.csv
2730380 bytes, modified on 2021-07-15 20:48
with a new one
2730380 bytes, modified on 2021-07-15 20:48

[Y]es, [N]o, [A]ll, n[E]ver, [R]ename, [Q]uit User break
User break
###Markdown
Predictions
Each team's predictions for the test set must be uploaded to the competition page on Kaggle. If the models are trained with extra information, or with information derived from the original training set, that data must be attached so that the code delivered in the assignment notebook can be reproduced.
The score obtained in this part will depend on the ranking achieved in the competition (50%) and on the model's score (50%). To assign the score-based points, the RMSE of your model's predictions will be compared against the same metric for a very basic network.
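###Markdown
For reference, the metric written out (this is exactly what the `rmse` helper defined in the next cell computes with Keras backend ops): $$\mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\left(y_i - \hat{y}_i\right)^2}$$ where $y_i$ is the true mean wind speed and $\hat{y}_i$ the model's prediction.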
###Code
def rmse(y_pred, y_true):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

data_train = pd.read_csv('/content/windspeed-training.csv')
data_test = pd.read_csv('/content/windspeed-testing.csv', names=["date"])

data_train.rename(columns={"'Fecha'": "date", "'Velocidad de viento en 20.0 metros [mean]'": 'speed'}, inplace=True)
data_train['date'] = pd.to_datetime(data_train["date"], format="%Y-%m-%d %H:%M")
data_test['date'] = pd.to_datetime(data_test["date"], format="%d-%m-%Y %H:%M")

data_train['year'] = data_train['date'].apply(lambda d: d.year)
data_train['month'] = data_train['date'].apply(lambda d: d.month)
data_train['day'] = data_train['date'].apply(lambda d: d.day)
data_train['hour'] = data_train['date'].apply(lambda d: d.hour)
data_train['minute'] = data_train['date'].apply(lambda d: d.minute)

data_test['year'] = data_test['date'].apply(lambda d: d.year)
data_test['month'] = data_test['date'].apply(lambda d: d.month)
data_test['day'] = data_test['date'].apply(lambda d: d.day)
data_test['hour'] = data_test['date'].apply(lambda d: d.hour)
data_test['minute'] = data_test['date'].apply(lambda d: d.minute)

data_train.head()
data_test.head()

# Normalize the data
data_train['month'] = (data_train['month'] - 1) / 11
data_train['day'] = (data_train['day'] - 1) / 30
data_train['hour'] = data_train['hour'] / 23
data_train['minute'] = data_train['minute'] / 59

data_test['month'] = (data_test['month'] - 1) / 11
data_test['day'] = (data_test['day'] - 1) / 30
data_test['hour'] = data_test['hour'] / 23
data_test['minute'] = data_test['minute'] / 59

data_train.head()
data_test.head()

# Visualize the data
_data = data_train.groupby(['hour', 'minute'])['speed'].mean()
_data.plot.bar(title='Mean wind speed by time of day', figsize=(20, 10))

months = ['January', 'February', 'March', 'April', 'May', 'June',
          'July', 'August', 'September', 'October', 'November', 'December']
fig, axs = plt.subplots(3, 4, figsize=(30, 15))
for i in range(12):
    # months were normalized above to (m - 1) / 11, so compare against the
    # normalized value (comparing against i + 1 would match nothing)
    _data = data_train[data_train['month'] == i / 11].groupby('hour')['speed'].mean()
    _title = 'Mean wind speed in {0}'.format(months[i])
    _data.plot.bar(title=_title, ax=axs[i % 3, i // 3])

# Define some constants for the network and the training
TIMESTEPS = 72  # monitor half a day (72 ten-minute steps)
FEATURE_LIST = ['month', 'day', 'hour', 'minute']
FEATURES = len(FEATURE_LIST)

# Prepare the training-set data
seq = deque([[-1 for _ in range(FEATURES)] for _ in range(TIMESTEPS)], maxlen=TIMESTEPS)
y_train = data_train[['speed']].values
_x_train = data_train[FEATURE_LIST].values
x_train = []
for i in range(len(data_train)):
    # drop a share of the records so the sequences contain temporal gaps,
    # similar to the test set
    if random() < 0.3:
        seq.append(_x_train[i])
    else:
        seq.append([-1 for _ in range(FEATURES)])
    x_train.append([i for i in seq])
x_train = np.array(x_train)

# Keep 5% of the data for validation
val_mask = x_train.shape[0] // 20
x_val = x_train[-val_mask:]
y_val = y_train[-val_mask:]
x_train = x_train[:-val_mask]
y_train = y_train[:-val_mask]

print(x_train.shape)
print(y_train.shape)
print(x_val.shape)
print(y_val.shape)

# Prepare the test-set data
data_test['next_date'] = data_test['date'].shift(-1)
data_test['time_diff'] = data_test['next_date'] - data_test['date']
data_test.head()

x_test = []
_x_test = data_test[FEATURE_LIST].values
seq = deque([[-1 for _ in range(FEATURES)] for _ in range(TIMESTEPS)], maxlen=TIMESTEPS)
for i in range(data_test.shape[0]):
    seq.append(_x_test[i])
    x_test.append([i for i in seq])
    dt = data_test['time_diff'][i]
    while dt > timedelta(minutes=10):
        dt -= timedelta(minutes=10)
        seq.append([-1 for _ in range(FEATURES)])
x_test = np.array(x_test)
x_test.shape

# Define the model
model = Sequential()
model.add(Masking(mask_value=-1, input_shape=(TIMESTEPS, FEATURES)))
model.add(GRU(64, return_sequences=True))
model.add(Dropout(0.2))
model.add(GRU(64, return_sequences=True))
model.add(Dropout(0.2))
model.add(GRU(64, return_sequences=True))
model.add(Dropout(0.2))
model.add(GRU(64))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='linear'))
model.summary()

keras.utils.plot_model(model=model, show_shapes=True)

opt = keras.optimizers.Adam(learning_rate=1e-3, decay=1e-5)
model.compile(optimizer=opt, loss=rmse, metrics=['accuracy'])

callbacks = [History()]
history = model.fit(x=x_train, y=y_train, batch_size=x_train.shape[0] // 10, epochs=60,
                    validation_data=(x_val, y_val), callbacks=callbacks)

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Training')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()

# Run the prediction
y_pred = model.predict(x_test)
y_pred

f = open("submit.csv", "w")
f.write("'Fecha','Velocidad de viento en 20.0 metros [mean]'\n")
for i in range(data_test.shape[0]):
    datestr = data_test['date'][i].strftime("%d-%m-%Y %H:%M")
    f.write(f"{datestr},{y_pred[i][0]}\n")
f.close()
###Output
_____no_output_____
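###Markdown
Before writing the submission it is worth sanity-checking the hold-out error; since the model is compiled with the rmse helper as its loss, evaluate returns it directly (a small sketch, not in the original notebook):
###Code
# Validation RMSE of the trained GRU model (sketch; uses the 5% hold-out built above)
val_rmse, val_acc = model.evaluate(x_val, y_val, verbose=0)
print(f"validation RMSE: {val_rmse:.3f}")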
house_prices/day93-regression-trees.ipynb
###Markdown
Data
- [Kaggle House Prices for Advanced Regression Techniques](https://www.kaggle.com/c/house-prices-advanced-regression-techniques) dataset
- The goal is to predict sale prices
###Code
# Load data
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

input_dir = "./data/"

# Load my data
train_file = os.path.join(input_dir, "train.csv")
test_file = os.path.join(input_dir, "test.csv")
train = pd.read_csv(train_file)
test = pd.read_csv(test_file)

X = train.iloc[:, :-1]
y = train.iloc[:, -1]

X.head()
y.head()
###Output
_____no_output_____
###Markdown
Map features
Mapping categorical variables to continuous values (Day24)
###Code
# Map categorical variables with one-hot encoding
X_transformed = pd.get_dummies(X)
print(X_transformed.shape)

# Drop NAs (because the regression tree can't handle NAs)
X_transformed.dropna(axis=1, how='any', inplace=True)
print(X_transformed.shape)

# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X_transformed, y, test_size=0.2)
print("feature:", X_train.shape[1])
print("num train samples:", X_train.shape[0])
print("num valid samples:", X_test.shape[0])
###Output
feature: 286
num train samples: 1168
num valid samples: 292
###Markdown
Regression tree
###Code
from sklearn import tree

clf = tree.DecisionTreeRegressor()
clf = clf.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Prediction accuracy for the exact answer:
###Code
import sklearn.metrics
# accuracy_score is a classification metric and raises an error on continuous
# predictions, so compute the exact-match rate directly instead
np.mean(clf.predict(X_test) == y_test)
###Output
_____no_output_____
###Markdown
Prediction accuracy allowing some margin of error:
###Code
def evaluation(margin=5000):
    def _eval(m):
        y_hat = clf.predict(X_test)
        answer_within_range = (y_test + m > y_hat) & (y_hat > y_test - m)
        return sum(answer_within_range) / len(answer_within_range)
    if type(margin) in [list, range]:
        return [_eval(m) for m in margin]
    else:
        return _eval(margin)  # was _eval(m): 'm' is undefined in this branch

margins = range(0, 25000, 1000)
plt.plot(margins, evaluation(margins), 'ro')
plt.ylabel('Accuracy', fontsize=16)
plt.xlabel('Margin of error ($)', fontsize=16)
plt.suptitle('Prediction accuracy for House Prices', fontsize=16)
plt.title('(One-hot, drop NA, Regression tree)', fontsize=10)
###Output
_____no_output_____
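###Markdown
Since this is a regression problem, the margin curve above pairs naturally with standard regression metrics; a quick sketch (not in the original notebook) using the same split:
###Code
# MAE and RMSE of the regression tree on the held-out 20% split (sketch)
from sklearn.metrics import mean_absolute_error, mean_squared_error

y_hat = clf.predict(X_test)
print("MAE :", mean_absolute_error(y_test, y_hat))
print("RMSE:", np.sqrt(mean_squared_error(y_test, y_hat)))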
Dacon/ch4/study_Dacon_ch04_2021.05.26.wed.ipynb
###Markdown
- Wednesday, May 26, 2021
- Youngwoo Global Learning
- K-digital core working-level talent training program
- Data-driven AI system engineer training course, 3rd cohort
- Team 4: Kang Hee-chan, Kim Ga-yeon, Park Se-eun, Lee Seung-eun, Lee Won-seok

Dacon
- Like Kaggle (which is run by Google), Dacon provides data and hosts discussions (hackathons) and competitions.
  - Hackathon: a dataset is provided and discussed in an open format.
  - Competition: participants tackle problems on many different kinds of data from fields such as finance, sports, weather and earth science, each solving the given problem in their own way.
  - Each competition fixes an evaluation metric that determines the ranking, and winners receive prize money and perks depending on the result.

4. Store credit-card sales prediction

Contents
1. Problem definition
2. Data preprocessing
3. Exploratory data analysis
4. Model building and validation
5. Methods for improving performance
6. Summary

4.1. Problem definition

4.1.1. Competition introduction
- __Problem__: given credit-card sales data covering about 2 years and 8 months, from June 1, 2016 to February 28, 2019, predict each store's total sales over the 3 months from March 1, 2019 to May 31, 2019.
- __Caution__: the 3-month window itself. March, April and May fall in spring; students entering a new semester introduce several variables, weather such as yellow dust can have an effect, and May, Family Month in Korea, is generally a peak season for the typical store.
- __Core task__: predict the total March-May sales per store.
- Background
  - The fintech company FUNDA wants to predict sales over a loan's repayment period so it can offer financing to good stores that have low credit scores or no collateral. Dacon's 1st FUNDA loan-store total-sales prediction competition was similar.
  - Traditional finance evaluates small-business loans mainly on credit scores or collateral; predicting sales over the repayment period lets lenders offer financing to good stores with low scores or no collateral. That competition asked for a model predicting the total sales over the repayment period; the predicted sales set the loan limit, and the more accurate the model, the lower the interest rate and the higher the limit it can support.

Entering formulas in Markdown (LaTeX)
- https://iot-lab.tistory.com/150
- https://en.wikipedia.org/wiki/Help:Displaying_a_formula#Formatting_using_TeX

4.1.2. Evaluation metric

Data
- funda_train.csv: 458 MB, 6,556,613 rows x 9 columns; the sales of 1,967 stores are to be predicted
- submission.csv: the predicted 3-month total sales for the 1,967 stores

Evaluation metric: __MAE__ (Mean Absolute Error)
- $\mathrm{MAE} = \frac{1}{n}\sum_{i=1}^{n}\lvert y_{i} - y_{i}' \rvert$
- A loss function that takes the absolute difference between the true and predicted values and averages it.
- It reflects the size of the loss directly, which is why it was chosen as the metric for this sales-prediction competition.
- Example:
  - Suppose the actual sales of store 1 and store 2 are 1,000,000 won and 800,000 won, and the model predicts 950,000 won and 700,000 won. The absolute differences are 50,000 won and 100,000 won, whose mean is 75,000 won; that 75,000 won is the MAE. (A quick code check of this example follows below.)
- More on __MAE__:
  - Because it averages the absolute distance between predictions and true values, it makes the overall fit of the model easy to read, and thanks to the absolute value it is not swayed much by outliers or erroneous values. [Notes on MSE and MAE](https://wiserloner.tistory.com/1041)
  - Its drawback is that the absolute value discards the direction of the error, so it does not tell whether a given prediction was too high or too low. [Difference between MSE and MAE](https://m.blog.naver.com/heygun/221516529668)
  - MAE is used as a regression metric.

| |Code evaluation items|
|-----|:-----|
|1|How the given data is cleaned|
|2|How the variables used for inference are built and selected|
|3|How the model is selected and regularized|
|4|How it is optimized|
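###Markdown
A quick numeric check of the MAE example above (a sketch; the figures are the two stores from the example, in units of 10,000 won):
###Code
import numpy as np

y_true = np.array([100, 80])  # actual sales of store 1 and store 2
y_pred = np.array([95, 70])   # model predictions
print(np.mean(np.abs(y_true - y_pred)))  # 7.5 -> 75,000 won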
###Markdown
4.1.3. Preliminary research for the competition

__Preliminary research__:
- Investigate the papers, regulations and domain knowledge related to the competition, then use what was learned to approach the data: from preprocessing and feature engineering all the way to model choice. Gathering information in many ways and putting it to use in this stage is therefore essential.

Useful references
- [Forecasting: Principles and Practice](https://otexts.com/fppkr/) (statistics)
- [Econometrics, by Kang Gi-jun](http://kanggc.iptime.org/em/kanggc.pdf): from page 200
  - Course page: http://kanggc.iptime.org/em/em.html
- Also professor Kim Chang-jin's lecture notes on econometrics

__Domain research summary__:
We grouped the findings into three perspectives.

1) __The store owner__
- The person running the store; many situations revolve around them.
- A store's sales move with when it operates (weekday/weekend, public holidays, morning/afternoon/late night, the four seasons, semester/vacation) and with the owner's own circumstances (remodeling, contracts, temporary or permanent closure).

|Derived variable|Why it might matter|
|:-----|:-----|
|Weekday vs. weekend sales|Assuming weekends sell more, compare weekday and weekend sales to see whether they actually differ|
|Quarterly sales by season|The target period is spring, so check what share of yearly sales falls in spring|
|Sales by time of day|Split the day into several bands and check how sales differ|
|Quarterly sales split by semester|Splitting by school semester surfaces peak seasons and other timing effects, so check the differences|
|Public-holiday indicator|Weekday public holidays behave differently from normal weekdays, so this needs checking|
|Opening / temporary closure / permanent closure|Identify stores with no sales before opening, stores missing sales during a temporary closure, and stores whose sales disappeared after closing, and check which of them affect the final result|

2) __The card-company employee__
- Consider the payment schemes the card company defines: installments, points, cash payments, refunds and re-payments, and so on.
  - Refunds: sales decrease while the transaction count increases, so this needs checking.
  - Re-payments caused by clerk or customer mistakes.
  - For installment payments, how the booking time of the revenue is defined matters.
  - Personally, I had not even considered the question of when an installment payment should count; during domain research, keep an eye out for derived variables like these.

|Derived variable|Why it might matter|
|-----|:-----|
|Point accrual / point usage|Discounts paid with points or purchases paid with points introduce unexpected variables; to keep the model simple this must be checked|
|Installment usage|Simple modelling requires deciding at which point an installment payment counts as revenue; the timing must be defined|
|Cash usage|Unrelated to card sales, but stores may produce unintended records for tax reasons, so it should be checked|
|Refund / re-payment|A refund raises the transaction count while lowering sales, which later distorts the final prediction; re-payments behave the same way, so both must be removed and checked|
|Network outages|Card-terminal failures can hide sales for short periods; anything that affects sales deserves a look|

3) __The customer__
- The one who generates the sales: steady routine payments, one-off payment situations, season-dependent spending, spending around payday, disposable income, and so on.
  - Steady payments: the routines repeated in daily life, such as transport, meals and coffee.
  - Disposable income: if customers get squeezed financially, store sales will fall.
  - Plus whatever purchases follow from hobbies and tastes.

|Derived variable|Why it might matter|
|-----|:-----|
|Routine spending (baseline)|The amounts spent habitually; a steady, smooth sales curve that forms the base of a store's revenue|
|Unusual spending (unexpected purchases)|These sales are sensitive to internal and external factors and fluctuate heavily, so examine the sales flow from several angles|
|Customer disposition|Check how sales change between customers who save and customers who do not|
|Spending around payday|People spend heavily for a week or two after payday and cut back in the week before it; variables that move sales like this must be checked|
|Spending vs. disposable income|Sales rise when well-off customers dominate and fall otherwise; confirm with external data|
|Spending vs. national economic indicators|Growth rate, consumer price index, stock indices, the Big Mac index, the Starbucks index: many indicators describe the state of an economy. Collecting external data to estimate conditions in the target period, and from that the customers' propensity to spend, can raise prediction accuracy, so this __must__ be checked|

4.1.4. Introducing the approach to the problem

Predicting store sales from the competition data requires understanding the data precisely. So we inspect the data to grasp the characteristics of its attributes, look at the noise present in credit-card sales data, and set the direction of the analysis.

Inspecting the data

__Time-series data__: a set of observations collected over a period of time and ordered in time; consecutive observations are correlated with each other. The aim is to find the regularities the series carries, model them, and use the estimated model to predict future values.
- Each observation carries a temporal order. A record such as "at time A, customer B bought product C" bundles information about customer B (gender, age, marital status, region, customer tier, nationality, frequently bought product groups) and about product C (category, price, characteristics, popularity, and so on).

Working directory
- Because we will transform data, create new variables and rename or resize files as we go, setting up the working directory first shortens the work.
- We load the initial training data once and then reload the preprocessed, EDA-ready data for each task, which also saves loading time; and since several versions of the submission file will be kept, working in one tidy place is more efficient.
###Code
# Hide warning messages
import warnings
warnings.filterwarnings("ignore")

# Set the working directory
import os
os.chdir('D:/data lab/9. Dacon/ch04')

# Load the csv data
import pandas as pd
train = pd.read_csv('./funda_train.csv')
submission = pd.read_csv('./submission.csv')
###Output
_____no_output_____
###Markdown
- os.chdir(): pass the desired path as the argument to change the working directory.
- pd.read_csv(): load csv data.
###Code
# Check the size of the data
train.shape
# The training data has 6,556,613 rows, with 9 variables per record

# First 5 rows of the data
train.head()

# Inspect the data
train.info()
# Shows the size of the data and the type of each attribute
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 6556613 entries, 0 to 6556612
Data columns (total 9 columns):
store_id            int64
card_id             int64
card_company        object
transacted_date     object
transacted_time     object
installment_term    int64
region              object
type_of_business    object
amount              float64
dtypes: float64(1), int64(3), object(5)
memory usage: 450.2+ MB
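###Markdown
A side note on the 450 MB footprint reported above: most of it comes from the object (string) columns, and converting the low-cardinality ones to category dtype shrinks memory considerably (an optional sketch, not part of the book's pipeline):
###Code
# Downcast low-cardinality string columns to category dtype (sketch)
for col in ['card_company', 'region', 'type_of_business']:
    train[col] = train[col].astype('category')
train.info(memory_usage='deep')  # compare against the 450 MB above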
###Markdown
|Variable|Description|
|-----|:-----|
|store_id|Unique store id (identifies each of the 1,967 stores)|
|card_id|Unique id of the card used|
|card_company|De-identified card company (masked to protect the card companies)|
|transacted_date|Transaction date|
|transacted_time|Transaction time (hour:minute)|
|installment_term|Number of installment months; the actual number is recorded and the full amount is booked at the transaction time (when points are used, '60 months + actual installment months' is recorded)|
|region|Region of the store|
|type_of_business|Business type of the store|
|amount|Payment amount, i.e. sales (note the unit is not won; the real unit is unknown)|

Noise in the data

The data reflects noise created during collection, so preprocessing is needed:
- missing values (NA, nan), outliers that do not follow the distribution, fields left blank (unknown), values mistyped by people, attributes anonymized for privacy, and so on.
###Code
# Check missing values
train[:5]  # first 5 rows
###Output
_____no_output_____
###Markdown
- The region attribute contains the missing value NaN.
  - Ways to handle missing values: impute with the mean, impute with 0, drop, etc.
###Code
# Check outliers
train[train['amount']<0]
###Output
_____no_output_____
###Markdown
- amount contains values far from the mean:
  - Check the max and min; outliers likely to hurt the prediction are best cleaned up during preprocessing.
- amount contains negative values:
  - In card payment history a negative value means a refund, so it can be treated as an outlier.
  - Ways to handle outliers: impute with the mean, impute with 0, drop, etc.

Direction of the analysis

1. In time-series competitions, tree-based ensemble models tend to perform better.
- Because of individual dispositions, external variables and one-off events, the data is rarely sufficient, and tree-based models then outperform deep-learning models.
  - Tree-based models: decision trees, random forests, etc.
  - Recently developed tree-based models: XGBoost, LightGBM, etc.
2. Recently developed tree-based models: XGBoost, LightGBM
  - XGBoost: widely used for its strong accuracy and speed
  - LightGBM: open-sourced by Microsoft and widely used for training even faster than XGBoost
3. Our team nevertheless modelled with a time-series model (ARIMA)
- Reasons:
  - The data was generated along the flow of time and the observations are affected by time.
  - As the domain research showed (the owner running the store, the payment schemes the card company defines, the varied customer profiles), there is much to consider, so each store's sales pattern is individual and independent, and hard to generalize across stores.

4.1.5. Building the analysis environment

Setting up a virtual environment
- Install commands:
  - conda create -n store_amount_prediction python=3.7
  - conda activate store_amount_prediction
- Registering the environment's kernel:
  - python -m ipykernel install --user --name store_amount_prediction --display-name "[store_amount_prediction]"

rpy2 (using R objects from Python)
- rpy2: a module that lets Python use R packages and functions (a minimal usage sketch appears just before section 4.2.1 below).
- We will use R's forecast and forecastHybrid packages for the time-series modelling.
- tzlocal: returns a tzlocal object with local time-zone information on Windows.
- Install commands:
  - conda install -c r rpy2==2.9.4
  - pip install tzlocal

pandas and numpy
- pandas: a library for data handling with three data structures: Series, DataFrame and Panel.
  - Converting these structures to R objects is incompatible with pandas 1.0.0 and above, so install pandas 0.25.1.
- numpy: a library for matrix operations. Install numpy 1.19.1, which is compatible with pandas 0.25.1.

pmdarima (time-series analysis)
- pmdarima: a package for analyzing time-series data in Python.
- It makes auto_arima, previously available only in R, usable from Python.
- Install command:
  - pip install pmdarima==1.5.3

statsmodels (statistical analysis)
- statsmodels: provides statistical analyses such as tests and estimation, regression and time-series analysis.
- Regression and time-series methods previously available only in R become usable.
- Install command:
  - pip install statsmodels

seaborn and tqdm
- seaborn: data visualization
- tqdm: progress bars
- Install commands:
  - pip install seaborn==0.11.0
  - pip install tqdm==4.51.0
###Code
import rpy2
rpy2.__version__

import pandas
pandas.__version__
# Converting data structures to R objects is incompatible with pandas >= 1.0.0,
# so install pandas 0.25.1.
# I ignored this at first and got stuck; conda search pandas and
# pip install pandas==0.25.1 fixed it. The same goes for numpy.

import numpy
numpy.__version__  # numpy 1.19.1, compatible with pandas 0.25.1

import pmdarima
pmdarima.__version__

import statsmodels
statsmodels.__version__

import seaborn
seaborn.__version__

import tqdm
tqdm.__version__
###Output
_____no_output_____
###Markdown
4.2. Data preprocessing
- Preprocessing is the stage where many data analysts invest the most time and thought.
  - Preprocessing sometimes follows EDA, but given the nature of time-series data it pays to preprocess on a date or time basis first, since that makes derived variables easier to build; so we performed the preprocessing first.
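###Markdown
Before moving on, a quick smoke test of the rpy2 bridge set up in 4.1.5 (a sketch; it assumes R plus the forecast package are installed on the machine):
###Code
# Confirm the embedded R session works and the forecast package loads (sketch)
from rpy2.robjects.packages import importr
from rpy2.robjects import r, pandas2ri

pandas2ri.activate()            # enable pandas <-> R data.frame conversion
forecast = importr('forecast')  # auto.arima and friends live here
print(r('R.version.string')[0])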
###Markdown
4.2.1. Noise removal
- Remove the noise present in the given data.
- Noise: not just outliers and missing values, but anything that lowers prediction accuracy when the data is fed to the learning algorithm.
###Code
# Modules and data needed for preprocessing
import pandas as pd
import numpy as np
import seaborn as sns
from tqdm import tqdm
import matplotlib.pyplot as plt

train = pd.read_csv('./funda_train.csv')  # load the data into train
train.head()  # first 5 rows
###Output
_____no_output_____
###Markdown
- There are 9 columns, and the region variable contains missing values (NaN).

Checking every column for missing values with a plot
###Code
plt.figure(figsize=(13, 4))  # 13 x 4 figure
plt.bar(train.columns, train.isnull().sum())
# bar plot: per column, count missing values with isnull() and total them with sum()
plt.xticks(rotation=45)  # tilt the x-axis labels by 45 degrees
###Output
_____no_output_____
###Markdown
- region and type_of_business contain missing values.
- More than half of the store region and business-type entries are missing.
- These are categorical attributes with no sensible imputation, so we decided to drop the whole columns.
###Code
train = train.drop(['region', 'type_of_business'], axis=1)  # drop the columns (axis=1)
train.head()
###Output
_____no_output_____
###Markdown
Checking amount for outliers with a box plot ([box plots](https://boxnwhis.kr/2019/02/19/boxplot.html))
###Code
plt.figure(figsize=(8, 4))
sns.boxplot(train['amount'])
###Output
_____no_output_____
###Markdown
- The box plot checks for outliers in store sales, i.e. amounts below 0.
- A negative amount in card transaction data means a refund occurred.
- Negative amounts are noise that gets in the way of predicting store sales and can over-count the tally of sales transactions.
- So, to see where refunds occur, print the rows whose amount is below 0.
###Code
# Rows with amount below 0, to see where refunds occur
train[train['amount']<0].head()
###Output
_____no_output_____
###Markdown
Rule for removing card payments recorded as negative (a toy illustration follows at the end of this subsection):
- Among the transactions before the refund time, remove the row whose positive amount equals the absolute value of the negative (refund) amount: if the earlier purchase is not removed, sales are over-counted when aggregated.
- Concretely: among the data earlier than the refund date (transacted_date) and refund time (transacted_time), find the most recent transaction with the same card id (card_id) whose positive amount equals the absolute value of the refund amount, and drop that index.

Build a datetime variable combining the transaction date and time, and define a function that removes refund transactions
- Merging attributes of a similar nature reduces the number of columns.
- The time variable is then downsampled to month level.
###Code
train.transacted_date.dtype
###Output
_____no_output_____
###Markdown
- 'O': stands for (Python) objects
###Code
# Build a variable combining the transaction date and time.
# Converts the object type to datetime64[ns].
train['datetime'] = pd.to_datetime(train.transacted_date + " " + train.transacted_time,
                                   format='%Y-%m-%d %H:%M:%S')
train['datetime'].head()

# train.describe()
# train.corr()
# # Not in the book, but worth checking the correlations between columns
# # on the data with negative amounts removed.
# # Based on the code of Ji Seong-min, who finished 3rd in this competition.
# train_corr = train.corr()
# # Correlation heatmap
# fig, ax = plt.subplots()
# fig.set_size_inches(10, 7)
# sns.heatmap(train_corr, annot=True)
# # Draws seaborn's heatmap; annot decides whether each cell's value is shown.
###Output
_____no_output_____
###Markdown
Why check the correlations between columns?
- Time-series data is a set of observations collected over time and ordered in time, and consecutive observations are correlated with each other, so the extra check is worthwhile.

Things to keep in mind with correlation:
- Correlation analysis only applies to continuous (numeric) data.
- It expresses the strength of the relationship as a value between -1 and 1; above 0.7 is usually read as a strong positive correlation and below -0.7 as a strong negative one (one goes up as the other goes down).
- It says nothing about causality, only about co-movement: X and Y tend to rise with a similar tendency, or one tends to fall as the other rises; it does not say that X causes Y or vice versa.
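###Markdown
To make the refund-matching rule concrete before the full implementation below, a toy illustration on hypothetical mini-data (not the competition set): the -500 refund matches the most recent earlier +500 payment on the same card, so that payment is dropped along with the refund itself.
###Code
# Toy illustration of the refund-matching rule (hypothetical mini-data)
import pandas as pd

toy = pd.DataFrame({
    'card_id':  [1, 1, 1, 1],
    'amount':   [500, 500, -500, 300],
    'datetime': pd.to_datetime(['2016-06-01 10:00', '2016-06-01 10:30',
                                '2016-06-01 11:00', '2016-06-01 12:00']),
})
refund = toy[toy['amount'] < 0].iloc[0]
candidates = toy[(toy['amount'] == -refund['amount']) &
                 (toy['card_id'] == refund['card_id']) &
                 (toy['datetime'] <= refund['datetime'])]
drop_idx = candidates['datetime'].idxmax()       # most recent matching payment
cleaned = toy.drop(index=[drop_idx, refund.name])
print(cleaned)  # keeps the 10:00 payment and the 12:00 payment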
Define the function that removes refund transactions

###Code
## Define a function that removes refund transactions
def remove_refund(df): # takes df as its argument
    refund = df[df['amount']<0]      # rows with negative amounts (refunds)
    non_refund = df[df['amount']>0]  # rows with positive amounts (sales)
    removed_data = pd.DataFrame()    # DataFrame that will hold the cleaned data

    for i in tqdm(df.store_id.unique()): # tqdm shows how far the loop has progressed
        divided_data = non_refund[non_refund['store_id']==i]  # positive-amount (sales) rows for this store
        divided_data2 = refund[refund['store_id']==i]         # negative-amount (refund) rows for this store

        for neg in divided_data2.to_records()[:]: # examine the refund rows one record at a time
            refund_store = neg['store_id']        # store id
            refund_id = neg['card_id']            # card id of the refund
            refund_datetime = neg['datetime']     # refund time
            refund_amount = abs(neg['amount'])    # absolute value of the negative amount

            ## Among the rows before the refund time, pick candidates with the same card id and amount.
            refund_pay_list = divided_data[divided_data['datetime']<=refund_datetime]    # sales before the refund time
            refund_pay_list = refund_pay_list[refund_pay_list['card_id']==refund_id]     # with the same card id
            refund_pay_list = refund_pay_list[refund_pay_list['amount']==refund_amount]  # and the same amount

            # If candidates exist, remove the most recent one with matching card id and amount.
            if(len(refund_pay_list)!=0): # if the candidate list is non-empty (a matching sale exists)
                refund_datetime = max(refund_pay_list['datetime'])                   # most recent time
                noise_list = divided_data[divided_data['datetime']==refund_datetime] # rows at that time
                noise_list = noise_list[noise_list['card_id']==refund_id]            # with the refund card id
                noise_list = noise_list[noise_list['amount']==refund_amount]         # and the refund amount
                divided_data = divided_data.drop(index=noise_list.index)             # drop them by index

        # Append the cleaned store data to the DataFrame.
        removed_data = pd.concat([removed_data,divided_data],axis=0)
        # pd.concat(df) concatenates DataFrames; the default is row-wise,
        # keeping the original indexes (use ignore_index=True to renumber them).

    return removed_data

###Output
_____no_output_____

###Markdown
Check the result with a box plot after removing the noise

###Code
## Remove the refund transactions with the function above.
positive_data = remove_refund(train) # pass train, remove the noise, store the result in positive_data
plt.figure(figsize=(8, 4)) # 8-by-4-inch figure
sns.boxplot(positive_data['amount']) # sales

###Output
100%|██████████████████████████████████████████████████████████████████████████████| 1967/1967 [13:52<00:00,  2.36it/s]

###Markdown
- The negative sales have been removed from the card-transaction data!

###Code
# On the data with negative sales removed, count the unique store ids and
# divide the number of rows after 2018-12-01 (the most recent 3 months) by it.
store_count=len(positive_data.store_id.unique())
len(positive_data[positive_data['datetime']>="2018-12-01"])//store_count

###Output
_____no_output_____

###Markdown
4.2.2 Downsampling
- Downsampling: widening the time interval of a time series to reduce the number of samples.
  - Shrinking the interval increases the amount of data, hence up-sampling;
  - widening it decreases the amount of data, hence down-sampling. [reference](https://seong6496.tistory.com/85)
  - e.g., downsampling [year, month, day, hour, minute, second] data to monthly frequency leaves [year, month].
- Building a time-series model on a narrow interval enlarges the span to predict and increases uncertainty, so we downsample to reduce the sample count, shorten the prediction span, and reduce the uncertainty.

Check the time interval of the given data

###Code
# Print five rows.
positive_data.head()

###Output
_____no_output_____

###Markdown
- The payment history is recorded at minute granularity by transaction time.
- The competition target is total sales over the next 3 months — about 90 days, or 2,160 hours.
- Because the data is at minute granularity, there are many intervals to predict.
- A time-series model loses accuracy as the number of prediction intervals grows, because uncertainty grows.
- We therefore need to re-adjust the time interval to minimize the prediction span.
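As an aside (not in the original): for data indexed by datetime, pandas' built-in resample can perform a similar monthly aggregation in one chain. A hedged sketch — note it would not fill the zero-sales months, which is why the custom function below handles that with fillna(2):

###Code
# Hedged sketch: pandas' built-in monthly downsampling, for comparison.
monthly = (positive_data
           .set_index('datetime')
           .groupby('store_id')['amount']
           .resample('M')   # 'M' = calendar month end
           .sum())
monthly.head()

###Output
_____no_output_____

###Markdown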
Define a monthly downsampling function (to minimize the prediction span)
- df.str.slice(): returns the characters between indexes (for 2016-06-01, stop=7 keeps up to the 7th index, i.e., 2016-06) [reference](https://blog.naver.com/wideeyed/221603778414)
- df.drop_duplicates(): removes duplicate values of the 'year_month' column
- df.reset_index(): preprocessing often leaves the index scrambled; this handy function renumbers it from the start [reference](https://kongdols-room.tistory.com/123)
  - drop: whether to delete the column set as the index from the DataFrame
  - inplace: whether to modify the original object
- While downsampling each store, months with no sales are replaced with 2 (fillna(2)).
  - Why not 0: later we log-normalize the series to stabilize its swings,
  - and values of 1 or less yield negative numbers or infinities under the log, so we use 2.
- Data is kept from the month each store's sales began.
- The time interval is re-adjusted to monthly frequency to perform the downsampling.

###Code
# Define the monthly downsampling function
def month_resampling(df): # takes df as its argument
    new_data = pd.DataFrame()
    # Create a variable combining year and month.
    df['year_month'] = df['transacted_date'].str.slice(stop=7) # keep up to the 7th index (see above)
    # Extract the full period covered by the data.
    year_month = df['year_month'].drop_duplicates() # remove duplicates (see above)
    # Total monthly sales per store id.
    downsampling_data = df.groupby(['store_id', 'year_month']).amount.sum() # total sales
    downsampling_data = pd.DataFrame(downsampling_data)
    downsampling_data = downsampling_data.reset_index(drop=False,inplace=False)
    # df.reset_index(): reset the row index
    # drop: whether to delete the column set as the index
    # inplace: whether to modify the original object

    for i in tqdm(df.store_id.unique()): # iterate over the unique store ids
        # Process each store.
        store = downsampling_data[downsampling_data['store_id']==i]
        # The first month in which this store had sales.
        start_time = min(store['year_month'])
        # Expand every store to the full period.
        store = store.merge(year_month,how='outer')
        # Sort the data chronologically.
        store = store.sort_values(by=['year_month'], axis=0, ascending=True) # ascending = chronological
        store['amount'] = store['amount'].fillna(2)       # fill months with no sales with 2
        store['store_id'] = store['store_id'].fillna(i)   # fill the missing store ids
        store = store[store['year_month']>=start_time]    # keep only months from the first sale onward
        new_data = pd.concat([new_data,store],axis=0)
    return new_data

# Downsample the refund-free data to monthly frequency.
resampling_data = month_resampling(positive_data)
resampling_data['store_id'] = resampling_data['store_id'].astype(int) # cast the type to int
resampling_data

col=['store_id','datetime','amount'] # choose these columns
positive_data.loc[:,col] # inspect: all rows, the columns chosen above

###Output
_____no_output_____

###Markdown
- We can see the result of re-adjusting the card-payment data to a monthly interval.
- Predicting the next 3 months of sales now takes only 3 prediction intervals.
- Downsampling minimizes the prediction span and reduces uncertainty. But re-adjusting the interval too coarsely hurts accuracy because the sample becomes too small, so a key task is to keep exploring for a suitable downsampling interval.

4.2.3 Creating the date range and converting to Series objects
To analyze time-series data, the DataFrame must be converted into Series objects.
- Series objects are used in the unit-root test that checks the __stationarity__ of the series and when building the time-series models.
  - __Stationarity__ of a time series: statistical properties such as the mean and variance stay constant over time.

###Code
## Print the data type
print(type(resampling_data)) # it is a DataFrame, so it must be converted to a Series

###Output
<class 'pandas.core.frame.DataFrame'>

###Markdown
Define a function that converts the DataFrame into a Series
- Before converting to a Series object, define the index as a time variable. We need a date range for the time variable, which pandas' date_range function provides. We will use it to build the date-range index and complete the conversion to a Series object.
- For each store id, create a date-range index and convert to a Series.
- The given data has 1,967 stores to convert, so we define a function that does this.
- In date_range, the freq option sets the frequency.
  - BM: business month end frequency — the last non-holiday day of the month [reference1](https://workingwithpython.com/date_range/), [reference2](https://rfriend.tistory.com/503)

###Code
# Function that converts a DataFrame into a Series
def time_series(df, i): # takes the downsampled DataFrame and a store id as arguments
    # Extract the data for one store.
    store = df[df['store_id']==i]
    # Use pandas' date_range function to build the date range for the Series index.
    ## The date range runs from the first month of business up to 2019-03, at business month ends.
    index = pd.date_range(min(store['year_month']),'2019-03',freq='BM')
    # In date_range, the freq option sets the frequency.
    # BM: business month end frequency — the last non-holiday day of the month
    ## Convert to a Series object.
    ts = pd.Series(store['amount'].values,index=index)
    return ts

###Output
_____no_output_____

###Markdown
Why convert to Series objects?
- The later time-series analysis (the ADF test) requires Series objects (the test takes a Series as its argument).

###Code
# Convert the data for store id 0 into a Series object
store_0 = time_series(resampling_data, 0)
store_0

###Output
_____no_output_____

###Markdown
- The index prints as the specified date range, at business month ends.

###Code
# Convert the data for store id 1 into a Series object and print its type
store_1 = time_series(resampling_data, 1)
print(type(store_1))
store_1.plot()
sns.boxplot(store_1)

# Convert the data for store id 2 into a Series object and draw its time-series plot
store_2 = time_series(resampling_data, 2)
store_2.plot()
sns.boxplot(store_2)

###Output
_____no_output_____

###Markdown
Looking at the plot,
- the x-axis shows the specified (monthly) date range and the y-axis shows sales.
  - The plot shows that sales fluctuate:
  - 2017 shows small fluctuations rather than big changes;
  - from March 2018 the rise in sales accelerates, peaking in July 2018;
  - after the July 2018 peak, sales drop sharply.
- The 2017 data shows small fluctuations; these are not a major trend, so log normalization will stabilize them and make the larger trend easier to see.
- When building the time-series models, we will apply log normalization — taking each store's coefficient of variation into account — to stabilize the small fluctuations.
  - Coefficient of variation: the standard deviation of sales divided by the mean.
  - Standard deviation: a measure of dispersion, defined as the positive square root of the variance; the smaller it is, the closer the values sit to the mean. (Wikipedia)

Why log normalization? [reference](https://leebaro.tistory.com/entry/%EB%8D%B0%EC%9D%B4%ED%84%B0-%EB%B6%84%EC%84%9D-%EC%8B%9C-%EC%8B%9D%EC%97%90-%EB%A1%9C%EA%B7%B8%EB%A5%BC-%EC%B7%A8%ED%95%98%EB%8A%94-%EC%9D%B4%EC%9C%A0)
- It reduces the spread between data points, keeps the data from piling up on one side, improves normality, and yields more accurate results in analyses such as regression.
- Logarithms make large numbers small and simplify complicated calculations. Taking the log turns the number into an exponent, so the value shrinks.

Summary of the preprocessing
1. Found and removed the data's unnecessary noise.
2. Downsampled to reduce the uncertainty of the time-series models.
3. Created per-store date ranges and converted to Series objects, ready for the models to run without trouble.

4.3 Exploratory data analysis
- Examine each store's sales characteristics through per-store time-series plots.
- Run the ADF-TEST (explained later) to check whether the mean and variance of each series are stable, i.e., whether the series is stationary.

4.3.1 Per-store sales characteristics
- We examine what characterizes each of the 1,967 stores.
- We grouped them into three broad types.

1) Stores with seasonality
- The period we must predict falls in spring (March-May): the first school term, the first half of the year, a season full of flower and plant festivals such as cherry-blossom festivals, and the family-month holidays that bring peak-season long weekends.
- We will check whether some stores do business centered on particular seasons — or avoid particular seasons.
- That is, we identify the seasonal stores and use their spring sales characteristics to inform the predictions.

###Code
# As an example, print the data for store id 257, which shows seasonality, as a Series object
store_257 = time_series(resampling_data, 257)
store_257

###Output
_____no_output_____

###Markdown
- During preprocessing, months with no sales were all replaced with 2, so a 2 marks a month with no sales.
- In other words, store 257 has zero sales from November through March.

###Code
# Time-series plot for store 257
store_plot_257 = store_257.plot()
fig = store_plot_257.get_figure()
fig.set_size_inches(13.5,5)

###Output
_____no_output_____

###Markdown
Looking at the plot,
- no sales occur in the winter season (Nov-Mar) and large sales occur in the summer season (Jul-Aug).
  - So we can expect little revenue in the March-May window this competition asks us to predict.

###Code
# Print the data for store id 2096 as a Series object
store_2096 = time_series(resampling_data, 2096)
store_2096

# Time-series plot for store 2096
store_plot_2096 = store_2096.plot()
fig = store_plot_2096.get_figure()
fig.set_size_inches(13.5,5)

###Output
_____no_output_____

###Markdown
Store 2096 shows
- low sales in the winter season (Jan-Mar) followed by a sharp jump in April.
- So for the 2019 March-May window we can expect a sales surge.

To sum up,
- we recorded the ids and traits of the seasonal stores, and reduced the error by checking whether the prediction model reflected the seasonality correctly.
2) Stores with a trend
Stores with a trend: stores whose sales rise steadily, or fall steadily.
- Drivers of growth: SNS marketing, YouTube ads, or carrying a product with explosive demand.
- Drivers of decline: hygiene violations, publicized product defects, a damaged store image.

###Code
# Convert the data for store id 335 into a Series object and draw its time-series plot
store_335 = time_series(resampling_data, 335)
store_plot_335 = store_335.plot()
fig = store_plot_335.get_figure()
fig.set_size_inches(13.5,5)

###Output
_____no_output_____

###Markdown
The plot shows both rising and falling trends.
- Sales rose and fell month by month; rather than these small monthly moves, it helps to look at the trend over roughly one-year spans.
- Splitting the full period in half:
  - June 2016 through October 2017: downward trend;
  - November 2017 through February 2019: upward trend.
- The upward trend should be taken into account when predicting March-May 2019.

###Code
# Convert the data for store id 510 into a Series object and draw its time-series plot
store_510 = time_series(resampling_data, 510)
store_plot_510 = store_510.plot()
fig = store_plot_510.get_figure()
fig.set_size_inches(13.5,5)

###Output
_____no_output_____

###Markdown
The plot shows
- growth from December 2016 through September 2017,
- then a steady decline after September 2017.

To sum up,
- we recorded the ids and traits of the trending stores, and reduced the error by checking whether the model reflected the trend correctly.

3) Stores on hiatus
Stores on hiatus: stores with no sales for several months.
- Some appear to have closed mid-way; others had no sales for months for some reason and then resumed.
  - e.g., stores closed for remodeling, stores shut down by poor sales, stores closed or suspended when a lease expired.
- These are stores whose January-February 2019 sales are 0, or whose sales stopped even earlier.

###Code
# Print the data for store id 111 as a Series object
store_111 = time_series(resampling_data, 111)
store_111

# Time-series plot for store 111
store_plot_111 = store_111.plot()
fig = store_plot_111.get_figure()
fig.set_size_inches(13.5,5)

###Output
_____no_output_____

###Markdown
The data and the plot show
- zero sales from October 2017 through December 2017 and from October 2018 through February 2019;
- compared with 2017, the 2018 hiatus was longer.
- Intuitively, we can predict that no sales will occur in March-May 2019.

###Code
# Print the data for store id 279 as a Series object
store_279 = time_series(resampling_data, 279)
store_279

# Time-series plot for store 279
store_plot_279 = store_279.plot()
fig = store_plot_279.get_figure()
fig.set_size_inches(13.5,5)

###Output
_____no_output_____

###Markdown
The data and the plot show
- a long hiatus from March 2018 through October 2018, with business resuming in November 2018;
- sales were falling before the hiatus and keep falling after reopening, so another hiatus looks possible.

To sum up,
- we recorded the ids and traits of the stores that looked closed or suspended, and reduced the error by checking whether the model reflected this correctly.

4.3.2 Stationarity of the time series
Stationarity of a time series: the mean and variance stay constant as time passes.
- We used the ADF-TEST to judge whether each series is stationary.
- We then converted the non-stationary series into stationary ones through differencing.

ADF-TEST [reference](https://chukycheese.github.io/translation/statistics/augmented-dickey-fuller-test/)
ADF-TEST: Augmented Dickey-Fuller test
- It is used to judge whether a series is stationary, using a coefficient from a regression as the test statistic. If the series is not stationary, differencing must be applied to make the mean constant.
  - Differencing: a technique that turns a non-stationary series into a stationary one with a constant mean.
  - The hypotheses of the test are:
    - Null hypothesis (H0): the series has a unit root.
    - Alternative hypothesis (H1): the series is stationary (or trend-stationary). The exact alternative varies slightly depending on which equation is used.

###Code
# Time-series plot for store 0
store_0 = time_series(resampling_data, 0)
store_plot_0 = store_0.plot()
fig = store_plot_0.get_figure()
fig.set_size_inches(13.5,5)

# Time-series plot for store 257
store_257 = time_series(resampling_data, 257)
store_plot_257 = store_257.plot()
fig = store_plot_257.get_figure()
fig.set_size_inches(13.5,5)

###Output
_____no_output_____

###Markdown
Comparing stores 0 and 257:
- store 0's sales series has a stable mean and variance;
- store 257 is seasonal, and its mean and variance swing widely.

Now we use the ADF test to check whether the two stores' series are stationary.
- The ADF test follows the usual hypothesis-testing procedure of statistics.
- Before running it, set up the hypotheses for store 257's sales series:
  - Null hypothesis: the series is not stationary.
  - Alternative hypothesis: the series is stationary.
- ADFTest(): the alpha parameter is the p-value cutoff used in the hypothesis test;
the default is 0.05 and it takes a float.
- p_val: the p-value is the probability, under the assumption that the null hypothesis is true, of observing a statistic as extreme as (or more extreme than) the one actually observed in the sample. The probability here is the frequentist kind; the p-value expresses, between 0 and 1, how compatible the observed data is with the null hypothesis. [Wikipedia](https://ko.wikipedia.org/wiki/%EC%9C%A0%EC%9D%98_%ED%99%95%EB%A5%A0)
- should_diff(): takes a Series object and returns the p-value together with True/False for whether differencing is needed.

###Code
# Import the ADFTest class from the pmdarima package
from pmdarima.arima import ADFTest

# Convert the data for store id 0 into a Series object
store_0 = time_series(resampling_data, 0)
# Run the ADF test
p_val, should_diff = ADFTest().should_diff(store_0)
print('p_val : %f , should_diff : %s' %(p_val, should_diff))

# Convert the data for store id 257 into a Series object
store_257 = time_series(resampling_data, 257)
# Run the ADF test
p_val, should_diff = ADFTest().should_diff(store_257)
print('p_val : %f , should_diff : %s' %(p_val, should_diff))

###Output
p_val : 0.093614 , should_diff : True

###Markdown
Looking at the p-values of stores 0 and 257:
- the alpha parameter, the p-value cutoff, is 0.05; below it the alternative hypothesis is accepted, above it the null hypothesis is accepted.
- Store 257 is above the cutoff, so it is not stationary and needs differencing.

|store id|p-value|stationary|differencing (should_diff)|
|:---:|:---:|:---:|:---:|
|0|0.022488|O|not needed|
|257|0.093614|X|needed|

Testing all 1,967 stores one by one like this would be inefficient, so:
- store the p-values of the ADF tests for the 1,967 stores in a Python list and inspect the distribution with a box plot;
- count the stores whose p-value is below 0.05;
- stores with months of zero sales make the ADF test error out, so handle the exception.
- We define a function implementing this.

Function for deciding whether each store id needs differencing

###Code
## Unit-root test for deciding the differencing of the ARIMA models
def adf_test(y):
    return ADFTest().should_diff(y)[0]

adf_p = []   # list of p-values from the tests
count = 0    # initialize
skipped = [] # store ids where the ADF test raised an error (comment corrected: not merely high p-values)
for i in tqdm(resampling_data['store_id'].unique()):
    ts = time_series(resampling_data,i)
    try:
        p_val = adf_test(ts)
        if p_val < 0.05:
            count += 1
        adf_p.append(p_val)
    except:
        skipped.append(i)

plt.figure(figsize=(8, 4))
sns.boxplot(adf_p)

###Output
100%|█████████████████████████████████████████████████████████████████████████████| 1967/1967 [00:06<00:00, 307.22it/s]

###Markdown
The distribution of p-values from the ADF tests on the 1,967 stores shows that most stores sit above the 0.05 cutoff.
- So before the time-series modeling, the non-stationary series were converted to stationary ones by differencing.

###Code
# Number of stores whose p-value is below 0.05
print(count)

# Stores where the ADF test errored
print(skipped)
print(f"WarningCount: {len(skipped)}, store_id_list:{skipped}")
# The code did not print this, so we print the store named in the textbook
# # Stores where the ADF test errored
# if skipped:
#     print(f"WarningCount: {len(skipped)}, store_id_list:{skipped}")
# WarningCount: 1, store_id_list:[795]

# Convert the data for store id 795 into a Series object
store_795 = time_series(resampling_data, 795)
# Run the ADF test
p_val, should_diff = ADFTest().should_diff(store_795)
print('p_val : %f , should_diff : %s' %(p_val, should_diff))

###Output
p_val : nan , should_diff : False

###Markdown
But errors did occur.
- Store 795 had no sales for several months.

4.4 Model building and validation
To make time-series modeling easy in Python, our team connected R's forecast package — which offers a wide range of time-series modeling functions — to Python.
- We predicted store sales with time-series models: the autoregressive integrated moving average (ARIMA) model, exponential smoothing, and exponential smoothing with STL decomposition.

4.4.1 Modeling with R's time-series package forecast from Python
Install it using the utils R package bundled with rpy2.

###Code
from rpy2.robjects.packages import importr # importr class that loads packages inside rpy2
utils = importr('utils') # import the utils package
utils.install_packages('forecast') # install R's forecast package.
utils.install_packages('forecastHybrid') # install R's forecastHybrid package

###Output
_____no_output_____

###Markdown
Use the install_packages function of the utils package to install R's forecast and forecastHybrid packages.
- forecast: provides a wide range of time-series modeling functions
- forecastHybrid: makes ensemble forecasting with time-series models easy

Running the code above opens a 'Secure CRAN mirrors' window; choose 0-cloud[https] and confirm to install the R packages.
![Secure CRAN mirrors](https://www.waterloohydrogeologic.com/help/hga/r_console_cran_mirrors.png)

R packages and R functions
- rpy2.robjects: a module that lets Python run R functions
- pandas2ri: a module that converts between Python and R data types

An example that uses R's forecast package to generate ARIMA predictions for store id 0:
- robjects.r():
  - lets Python use functions written as R code as well as R's built-ins;
  - gives access to R's ts function, which builds R time-series objects, and the c function, which builds vectors.
- auto_arima:
  - a function defined as R code inside a Python str;
  - loads the forecast package, builds a model with auto.arima, and returns the store's sales for the next 3 months;
  - the prediction comes back as an R data frame, so pandas2ri's conversion function turns the R object into a Python one.
- ts():
  - the start argument takes the first year and month of business as an R vector;
  - the frequency argument takes 12 because a year has 12 months.
- np.sum():
  - computes the 3-month sales total.

###Code
import rpy2.robjects as robjects # module that makes R functions usable from Python
from rpy2.robjects import pandas2ri # module that converts between Python and R data types

# Activate pandas2ri
pandas2ri.activate()

auto_arima = """
    function(ts){
        library(forecast) # load the forecast package
        d_params = ndiffs(ts) # ndiffs() computes the differencing order of the series
        model = auto.arima(ts, max.p=2, d=d_params) # build the auto.arima model (p: AR order, d: differencing order)
        forecasted_data = forecast(model, h=3) # predict the next 3 months (h=3)
        out_df = data.frame(forecasted_data$mean) # convert the predictions (3-month means) to an R data frame
        colnames(out_df) = c('amount') # name the column amount
        out_df
    }
"""

# The r() function makes the R object usable from Python
auto_arima = robjects.r(auto_arima)
ts = robjects.r('ts') # function that builds R time-series objects
c = robjects.r('c')   # function that builds R vectors

store_0 = resampling_data[resampling_data['store_id']==0] # store 0
start_year = int(min(store_0['year_month'])[:4])  # first year of business
start_month = int(min(store_0['year_month'])[5:]) # first month of business
# Convert to an R time-series object with R's ts function
train = ts(store_0['amount'], start=c(start_year, start_month), frequency=12)

store_0['year_month']

min(store_0['year_month'])

min(store_0['year_month'])[:4] # first four characters: the starting year

# Fit with auto_arima
forecast = auto_arima(train)

type(forecast)

np.sum(pandas2ri.ri2py(forecast).values) # sum the 3 months of sales

###Output
_____no_output_____

###Markdown
4.4.2 Choosing and validating the time-series models
With three approaches we generate each of the 1,967 stores' 3-month sales totals, create the submission files, and check the leaderboard scores.

1) The autoregressive integrated moving average (ARIMA) model
1. What are the AR and MA models?
- AR model: the autoregressive model, so called because it uses its own past to predict the future.
  - A model in which earlier observations of the series influence later ones.
  - __AR model equation__ $y_{t}$ = c + $\phi_{1}$$y_{t-1}$ + $\phi_{2}$$y_{t-2}$ + ··· + $\phi_{p}$$y_{t-p}$ + $ε_{t}$
- MA model: the moving-average process.
  - A model in which the current value of the series is a weighted average of past residuals.
  - __MA model equation__ $y_{t}$ = c + $ε_{t}$ + $θ_{1}$$ε_{t-1}$ + $θ_{2}$$ε_{t-2}$ + ··· + $θ_{q}$$ε_{t-q}$
  - Weighted average: a calculation that gives the numbers in a data set different levels of importance: before the final calculation, each number is multiplied by a predetermined weight; it is most often used to even out the frequencies of the values in a data set.
  - Residual term: white noise.
    - White noise: a series with no autocorrelation.
    - Autocorrelation: a measure of the linear relationship between the lagged values of a series.
  - Current value: a weighted combination of past white noise.

2. What is the autoregressive integrated moving average (ARIMA) model?
- A model that combines the AR and MA models. The combination improves accuracy considerably, and the equations combine just as simply.
- The middle letter I stands for integration, i.e., differencing.
- __ARIMA model equation__ $y_{t}'$ = c + $\phi_{1}$$y_{t-1}'$ + ··· + $\phi_{p}$$y_{t-p}'$ + $θ_{1}$$ε_{t-1}$ + ··· + $θ_{q}$$ε_{t-q}$ + $ε_{t}$
  - Differencing: a technique that makes the mean of a non-stationary series constant by taking the differences of consecutive observations.
  - The model includes both the past lagged values of $y_{t}$ and the past lagged errors.
  - It is written ARIMA(p,d,q).

|identifier|meaning|
|---|---|
|p|order of the autoregressive part, i.e., the AR order|
|d|degree of first differencing involved, i.e., the differencing order|
|q|order of the moving-average part, i.e., the MA order|

- It is mainly used when the series is stationary; one or more initial differencing steps can remove non-stationarity.
- For the number of differences, the ndiffs function in R's forecast package returns a suitable differencing count.
- The auto.arima function computes the optimized p for the AR term, q for the MA term, and d for the I term.

The ARIMA modeling process
1. Plot the data and look for unusual observations.
2. If the data is not stationary, difference it until it is: difference the data d times, then choose p and q by minimizing the AIC.
3. Inspect the ACF/PACF.
4. Plot the ACF of the residuals and run a portmanteau test on them to check the model's residuals. If they do not look like white noise, use a modified model.
5. Once the residuals look like white noise, compute the forecasts.

Code that computes each of the 1,967 stores' 3-month sales totals with the ARIMA model
- The auto_arima defined as a str is used to generate the predictions.
- Parameters:
  - max.p (maximum AR order) = 2
  - d (differencing order) is computed by ndiffs, stored in d_params, and fixed.

###Code
import rpy2.robjects as robjects # module that makes R functions usable from Python
from rpy2.robjects import pandas2ri # module that converts between Python and R data types

# Activate pandas2ri
pandas2ri.activate()

auto_arima = """
    function(ts){
        library(forecast) # load the forecast package
        d_params = ndiffs(ts) # compute the differencing order of the series
        model = auto.arima(ts, max.p=2, d=d_params) # build the auto.arima model
        forecasted_data = forecast(model, h=3) # predict the next 3 months (h=3)
        out_df = data.frame(forecasted_data$mean) # convert the predictions to an R data frame
        colnames(out_df) = c('amount') # name the column amount
        out_df
    }
"""

# The r() function makes the R object usable from Python
auto_arima = robjects.r(auto_arima) # auto_arima defined as a str
ts = robjects.r('ts') # function that builds R time-series objects
c = robjects.r('c')   # function that builds R vectors

final_pred = []

for i in tqdm(resampling_data.store_id.unique()):
    store = resampling_data[resampling_data['store_id']==i]
    start_year = int(min(store['year_month'])[:4])  ## first year of business
    start_month = int(min(store['year_month'])[5:]) ## first month of business
    # Convert to a time-series object with R's ts function
    train = ts(store['amount'], start=c(start_year, start_month), frequency=12)
    # ARIMA model
    forecast = auto_arima(train)
    # Sum the 3 months of sales and append to final_pred
    final_pred.append(np.sum(pandas2ri.ri2py(forecast).values))

###Output
100%|██████████████████████████████████████████████████████████████████████████████| 1967/1967 [02:12<00:00, 14.89it/s]

###Markdown
- The loop builds an ARIMA model per store id and appends each store's predicted 3-month sales total to the list final_pred.

###Code
submission = pd.read_csv('./submission.csv')
submission['amount'] = final_pred
submission.to_csv('submission.csv', index=False)
submission

###Output
_____no_output_____

###Markdown
- Submitting this scored 844384.6545 points, rank 66 on the leaderboard (as of 19:26, May 23, 2021).

2) Exponential smoothing
- Simple exponential smoothing: it treats the most recent observation as the most important and assumes all earlier observations provide no information about the future, so it gives the most recent observation the largest weight, with the weights decaying exponentially going back in time.
  - It is mainly used when there is no trend or seasonality.

  $\widehat{y}_{T+1|T}$ = $\alpha$ $y_{T}$ + $\alpha$$(1-\alpha)$$y_{T-1}$ + $\alpha$$(1-\alpha)^2$$y_{T-2}$ + ···

  - α lies between 0 and 1 and is called the smoothing parameter.
- The rate at which the weights decay is adjusted through α.
  - In simple exponential smoothing, the coefficients acting as weights are estimated by minimizing the sum of squared residuals (SSE).
    - Residual: $e_{t}$ = $y_{t}$ - $\widehat{y}_{t|t-1}$
    - Estimating coefficients by minimizing the squared residuals is the same approach often used to estimate regression coefficients.

    __SSE__ (sum of squared residuals) = $\sum_{t=1}^T$ ($y_{t}$ - $\widehat{y}_{t|t-1})^2$ = $\sum_{t=1}^T e_{t}^2$

Holt's linear trend method
- An extension of simple exponential smoothing that can forecast data with a trend.

|.|equation|
|---|:---|
|forecast equation|$\widehat{y}_{t+h\vert t}$ = $l_{t}$ + $hb_{t}$|
|level equation|$l_{t}$ = $\alpha$ $y_{t}$ + (1 - $\alpha$)($l_{t-1}$ + $b_{t-1}$)|
|trend equation|$b_{t}$ = $\beta^*$($l_{t}$ - $l_{t-1}$) + (1 - $\beta^*$)$b_{t-1}$|

- It consists of a forecast equation and two smoothing equations (level and trend).
  - Forecast equation: at horizon h, the level $l_{t}$ plus h times the trend $b_{t}$.
    - $l_{t}$: the estimate of the level of the series at time t.
    - $b_{t}$: the estimate of the trend of the series at time t.
  - Level equation: a weighted average of the observations.
    - α: the smoothing parameter for the level.
  - Trend equation: a moving average of the estimated trend.
    - β: the smoothing parameter for the trend.
  - That is, on top of simple exponential smoothing it considers the weighted average of the current observations and the moving average of the trend (a hedged Python sketch of Holt's method follows below).
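Not part of the original notebook — a hedged sketch of Holt's linear trend method on the Python side via statsmodels, for comparison with the R pipeline used below. It rebuilds the store-0 Series with the `time_series` function defined earlier:

###Code
# Hedged sketch (not the team's code): Holt's linear trend in statsmodels.
from statsmodels.tsa.holtwinters import Holt

store_0 = time_series(resampling_data, 0) # the Series built with the earlier helper
holt_fit = Holt(store_0).fit()    # smoothing parameters estimated by minimizing the SSE
holt_pred = holt_fit.forecast(3)  # forecast the next 3 periods (months)
print(holt_pred.sum())            # 3-month sales total, analogous to the R-side aggregation

###Output
_____no_output_____

###Markdown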
- R's ets() function: fits the exponential smoothing model that minimizes the AIC for the series.
  - AIC: a measure of the relative quality of a statistical model on a given dataset; lower is better.
  - AIC = -2ln(L) + 2k
    - -2ln(L): the model's goodness of fit
    - k: the number of estimated model parameters
    - L: the likelihood (a measure for weighing the possible hypotheses given the observed outcome) [reference](http://www.ktword.co.kr/abbr_view.php?m_temp1=3214)

Code that uses R's ets() function to find and forecast with the best exponential smoothing model for each of the 1,967 stores

###Code
import rpy2.robjects as robjects # module that makes R functions usable from Python
from rpy2.robjects import pandas2ri # module that converts between Python and R data types

# Activate pandas2ri
pandas2ri.activate()

ets = """
    function(ts){
        library(forecast) # load the forecast package
        model = ets(ts) # find the exponential smoothing model with the lowest AIC
        forecasted_data = forecast(model, h=3) # predict the next 3 months (h=3)
        out_df = data.frame(forecasted_data$mean) # convert the predictions to an R data frame
        colnames(out_df) = c('amount') # name the column amount
        out_df
    }
"""

# The r() function makes the R object usable from Python
ets = robjects.r(ets) # ets defined as a str
ts = robjects.r('ts') # function that builds R time-series objects
c = robjects.r('c')   # function that builds R vectors

final_pred = [] # list for the results

for i in tqdm(resampling_data.store_id.unique()):
    store = resampling_data[resampling_data['store_id']==i]
    start_year = int(min(store['year_month'])[:4])  # first year of business
    start_month = int(min(store['year_month'])[5:]) # first month of business
    # Convert to a time-series object with R's ts function
    train = ts(store['amount'], start=c(start_year, start_month), frequency=12)
    # Exponential smoothing:
    # fit the exponential smoothing model that minimizes the AIC for this series
    forecast = ets(train)
    # Sum the 3 months of sales and append to final_pred
    final_pred.append(np.sum(pandas2ri.ri2py(forecast).values))

###Output
100%|██████████████████████████████████████████████████████████████████████████████| 1967/1967 [16:14<00:00,  2.02it/s]

###Markdown
- The ets function, defined as R code in a str, finds an exponential smoothing model suited to the current series and returns the predictions.
- robjects' r() function makes it usable from Python.
- The loop applies the exponential smoothing model to each store, generates the predictions, and appends the predicted 3-month sales total to the list final_pred.

###Code
submission = pd.read_csv('./submission.csv')
submission['amount2'] = final_pred
submission.to_csv('submission.csv', index=False)
submission

###Output
_____no_output_____

###Markdown
3) Exponential smoothing with STL decomposition
STL (Seasonal and Trend decomposition using Loess):
- when the period of the series is known, a technique that decomposes it into seasonal, trend, and remainder components for analysis.
- The seasonal_decompose() function in the statsmodels package provides STL-style decomposition:
  - pass the sales data as a numpy array in the first argument,
  - and set freq=12 (12 months per year).

###Code
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib.pyplot as plt

store_0 = time_series(resampling_data, 0)

# STL-style decomposition
stl = seasonal_decompose(store_0.values, freq=12)
stl.plot()
plt.show()

###Output
_____no_output_____

###Markdown
The plot decomposes the series into
1. the observed sales,
2. the trend,
3. the seasonality,
4. and the remainder (residual) — that is, 'observed = trend + seasonality + remainder'.

Why STL decomposition?
- An accurate model has to account for the overall trend of sales, the seasonality, and so on.
  - Pros and cons of STL decomposition:
    - Pros:
      - It can handle any kind of seasonality, including monthly and quarterly data.
      - Even if the seasonal component changes over time, the user can control its rate of change.
      - The user can control the smoothness of the trend-cycle.
      - Occasional outliers can be kept from affecting the trend-cycle and seasonal components (i.e., the user can specify a robust decomposition); outliers do, however, affect the remainder.
    - Cons:
      - It does not automatically handle trading days or calendar variation, and it supports only additive decomposition.
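As an aside (not in the original notebook): recent statsmodels versions (0.11+, an assumption about the environment) also ship the Loess-based STL itself, distinct from the moving-average seasonal_decompose used above. A minimal hedged sketch reusing the `store_0` Series from the cell above:

###Code
# Hedged sketch: Loess-based seasonal-trend decomposition via statsmodels' STL class.
from statsmodels.tsa.seasonal import STL

stl_fit = STL(store_0, period=12).fit()
stl_fit.plot()
# observed = trend + seasonal + resid, as in the decomposition plot above

###Output
_____no_output_____

###Markdown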
Code that runs STL decomposition with the stlm function from R's forecast package and forecasts with exponential smoothing

###Code
import rpy2.robjects as robjects # module that makes R functions usable from Python
from rpy2.robjects import pandas2ri # module that converts between Python and R data types

# Activate pandas2ri
pandas2ri.activate()

stlm = """
    function(ts){
        library(forecast) # load the forecast package
        model = stlm(ts, s.window="periodic", method='ets') # STL decomposition, then exponential smoothing
        forecasted_data = forecast(model, h=3) # predict the next 3 months (h=3)
        out_df = data.frame(forecasted_data$mean) # convert the predictions to an R data frame
        colnames(out_df) = c('amount') # name the column amount
        out_df
    }
"""

ets = """
    function(ts){
        library(forecast) # load the forecast package
        model = ets(ts) # find the exponential smoothing model with the lowest AIC (lower = better model)
        forecasted_data = forecast(model, h=3) # predict the next 3 months (h=3)
        out_df = data.frame(forecasted_data$mean) # convert the predictions to an R data frame
        colnames(out_df) = c('amount') # name the column amount
        out_df
    }
"""

# The r() function makes the R objects usable from Python
stlm = robjects.r(stlm) # stlm defined as a str
ets = robjects.r(ets)   # ets defined as a str
ts = robjects.r('ts')   # function that builds R time-series objects
c = robjects.r('c')     # function that builds R vectors

final_pred = []

for i in tqdm(resampling_data.store_id.unique()):
    store = resampling_data[resampling_data['store_id']==i]
    data_len = len(store)
    start_year = int(min(store['year_month'])[:4])  # first year of business
    start_month = int(min(store['year_month'])[5:]) # first month of business
    # Convert to a time-series object with R's ts function
    train = ts(store['amount'], start=c(start_year, start_month), frequency=12)
    # Exponential smoothing with STL decomposition
    if data_len > 24:
        forecast = stlm(train)
    # plain exponential smoothing
    else:
        forecast = ets(train)
    # Sum the 3 months of sales and append to final_pred
    final_pred.append(np.sum(pandas2ri.ri2py(forecast).values))

###Output
100%|██████████████████████████████████████████████████████████████████████████████| 1967/1967 [02:09<00:00, 15.18it/s]

###Markdown
stlm():
- Applying STL decomposition requires more than two full seasons (24 months) of data, so we computed each store's data length and applied STL only where it exceeds 24 months.
- s.window and t.window are the seasonal and trend windows. They control how much the seasonal and trend components may change; smaller values allow faster change. Both arguments must be odd.
  - To fix the seasonal component as periodic, we set s.window to periodic.
  - t.window: left at its default.

###Code
submission = pd.read_csv('./submission.csv')
submission['amount3'] = final_pred
submission.to_csv('submission.csv', index=False)
submission

###Output
_____no_output_____

###Markdown
4.5 Ways to improve performance
There are two broad ways to improve performance:
- 1. improve performance through data preprocessing;
- 2. ensemble — combine several models' predictions.
- Our team log-normalized the store sales in preprocessing and used R's forecastHybrid package to run an ensemble prediction of the ARIMA model, exponential smoothing, and exponential smoothing with STL decomposition.

4.5.1 Log normalization of store sales
Why log-normalize a time series:
- to stabilize the small fluctuations in sales so the larger trend stands out.

Why logarithms? [reference](https://leebaro.tistory.com/entry/%EB%8D%B0%EC%9D%B4%ED%84%B0-%EB%B6%84%EC%84%9D-%EC%8B%9C-%EC%8B%9D%EC%97%90-%EB%A1%9C%EA%B7%B8%EB%A5%BC-%EC%B7%A8%ED%95%98%EB%8A%94-%EC%9D%B4%EC%9C%A0)
- They shrink the spread between data points, keep the data from piling up on one side, improve normality, and yield more accurate results in analyses such as regression.
- Logarithms make large numbers small and simplify complicated calculations. Taking the log turns the number into an exponent, so the value shrinks.
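A tiny numeric illustration (not from the original) of the point above — the log compresses large swings, and exp() undoes it exactly:

###Code
# Illustration only: how the log stabilizes a spiky series.
import numpy as np

sales = np.array([100., 120., 90., 1000.])  # one spike dominates the scale
print(sales.std())                          # large spread, driven by the spike
print(np.log(sales).std())                  # far smaller relative spread
print(np.exp(np.log(sales)))                # exp() inverts the transform exactly

###Output
_____no_output_____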
###Code
import rpy2.robjects as robjects # module that makes R functions usable from Python
from rpy2.robjects import pandas2ri # module that converts between Python and R data types
import numpy as np

# Activate pandas2ri
pandas2ri.activate()

auto_arima = """
    function(ts){
        library(forecast) # load the forecast package
        d_params = ndiffs(ts) # compute the differencing order of the series
        model = auto.arima(ts, max.p=2, d=d_params) # build the auto.arima model (max AR order, differencing order)
        forecasted_data = forecast(model, h=3) # predict the next 3 months (h=3)
        out_df = data.frame(forecasted_data$mean) # convert the predictions to an R data frame
        colnames(out_df) = c('amount') # name the column amount
        out_df
    }
"""

# The r() function makes the R object usable from Python
auto_arima = robjects.r(auto_arima)
ts = robjects.r('ts') # function that builds R time-series objects
c = robjects.r('c')   # function that builds R vectors
# c(): constructor that combines values into a single vector or list
log = robjects.r('log') # log transform
exp = robjects.r('exp') # inverse of the log transform

# Extract store 0
store_0 = resampling_data[resampling_data['store_id']==0]
start_year = int(min(store_0['year_month'])[:4])  # first year of business
start_month = int(min(store_0['year_month'])[5:]) # first month of business

# Split into train and test
train = store_0[store_0.index <= len(store_0)-4] # data before December 2018
test = store_0[store_0.index > len(store_0)-4]   # December 2018 - February 2019

# Convert to R time-series objects with R's ts function
train_log = ts(log(train['amount']), start=c(start_year, start_month), frequency=12) # log-normalized
train = ts(train['amount'], start=c(start_year, start_month), frequency=12)          # not log-normalized

# ARIMA models
forecast_log = auto_arima(train_log)
forecast = auto_arima(train)

# Predictions
pred_log = np.sum(pandas2ri.ri2py(exp(forecast_log)).values) # invert the log, then sum the 3 months
pred = np.sum(pandas2ri.ri2py(forecast).values)              # sum the 3 months of sales

# test (2018-12 to 2019-02)
test = np.sum(test['amount'])

# MAE
print('log-regularization mae: ', abs(test-pred_log))
print('mae:', abs(test-pred))

###Output
log-regularization mae:  2401.9664819482714
mae: 5884.674066892825

###Markdown
1. Extract store 0 and split it: data before December 2018 becomes train, December 2018 - February 2019 becomes test.
2. The log transform is applied via the log() function; after predicting, it must be inverted with exp().
  - train_log: the sales after log normalization.
  - train: without log normalization.
  - Both are fed to the ARIMA model and compared against the actual values in test.
3. The loss function is the competition metric, MAE.
4. The results show a smaller gap from the actual values with log normalization, i.e., better performance.

Log normalization does not automatically improve performance.
- It can deliver a gain when stabilizing the small swings in sales makes the larger trend easier to capture.
- We applied it based on the coefficient of variation of sales.
  - Coefficient of variation: the standard deviation divided by the mean.
  - In our experiments, log-normalizing stores with a high coefficient of variation hurt prediction, so we log-normalized and predicted only the stores whose coefficient of variation is below 0.3.

Why invert the log normalization with the exponential function? [reference](https://m.blog.naver.com/sw4r/221026265991)
- The log transform makes the data more symmetric and removes the extreme spikes — as explained above, it stabilizes the small swings and exposes the larger trend.
- Predictions from a model built on such data can simply be mapped back afterward with the exp() function.
- In the code above, when comparing the MAE with and without log normalization, the predicted sales are on different scales, so the exponential maps the log-scale predictions back before comparing.

Code that uses the best-performing method, exponential smoothing, with log normalization applied according to the coefficient of variation

###Code
# Function that computes the coefficient of variation of sales
def coefficient_variation(df, i):
    cv_data = df.groupby(['store_id']).amount.std()/df.groupby(['store_id']).amount.mean()
    cv = cv_data[i]
    return cv

###Output
_____no_output_____

###Markdown
- The coefficient of variation is the standard deviation divided by the mean; the function returns it per store.

ets:
- fits the exponential smoothing model that minimizes the AIC for the series.
  - AIC: a measure of the relative quality of a statistical model on a given dataset; lower is better.
  - AIC = -2ln(L) + 2k
    - -2ln(L): the model's goodness of fit
    - k: the number of estimated model parameters
    - L: the likelihood (a measure for weighing the possible hypotheses given the observed outcome)

###Code
import rpy2.robjects as robjects # module that makes R functions usable from Python
from rpy2.robjects import pandas2ri # module that converts between Python and R data types
import numpy as np

# Activate pandas2ri
pandas2ri.activate()

ets = """
    function(ts){
        library(forecast) # load the forecast package
        model = ets(ts) # find the exponential smoothing model with the lowest AIC
        forecasted_data = forecast(model, h=3) # predict the next 3 months (h=3)
        out_df = data.frame(forecasted_data$mean) # convert the predictions to an R data frame
        colnames(out_df) = c('amount') # name the column amount
        out_df
    }
"""

# The r() function makes the R object usable from Python
ets = robjects.r(ets)   # fit the exponential smoothing model that minimizes the AIC
ts = robjects.r('ts')   # function that builds R time-series objects
c = robjects.r('c')     # function that builds R vectors
log = robjects.r('log') # log transform
exp = robjects.r('exp') # inverse of the log transform

final_pred = []

for i in tqdm(resampling_data.store_id.unique()):
    store = resampling_data[resampling_data['store_id']==i]
    start_year = int(min(store['year_month'])[:4])  # first year of business
    start_month = int(min(store['year_month'])[5:]) # first month of business
    cv = coefficient_variation(resampling_data, i)
    # Apply the log only when the coefficient of variation is below 0.3
    if cv < 0.3:
        train_log = ts(log(store['amount']), start=c(start_year,start_month), frequency=12)
        # ets model
        forecast_log = ets(train_log)
        final_pred.append(np.sum(pandas2ri.ri2py(exp(forecast_log)).values))
    # Coefficient of variation 0.3 or above
    else:
        train = ts(store['amount'], start=c(start_year,start_month), frequency=12)
        # Exponential smoothing
        forecast = ets(train)
        final_pred.append(np.sum(pandas2ri.ri2py(forecast).values))

###Output
100%|██████████████████████████████████████████████████████████████████████████████| 1967/1967 [17:48<00:00,  1.84it/s]

###Markdown
- The loop applies exponential smoothing to each store.
- The coefficient-of-variation function computes each store's coefficient of variation, and the stores below 0.3 are log-normalized.

###Code
submission = pd.read_csv('./submission.csv')
submission['amount4'] = final_pred
submission.to_csv('submission.csv', index=False)
submission

###Output
_____no_output_____

###Markdown
- Exponential smoothing without log normalization scored 794,63;
- with log normalization the score is 793,546.
- Our team believes log normalization is what separated 1st from 2nd place in this competition.

4.5.2 Ensembling with R's time-series package forecastHybrid from Python
- Ensembling several models' predictions is the method that brings the largest performance gains.
- Our team generated three predictions — with the ARIMA model, exponential smoothing, and exponential smoothing with STL decomposition — then averaged them for the final sales figure.
- An ensemble combines several models for prediction and guards against overfitting; when individual models underperform, ensembling can still lift performance.
- R's forecastHybrid package lowers the barrier to entry for ensembling time-series models (a conceptual sketch of the equal-weight average follows below).
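Conceptually, the equal-weight ensemble is nothing more than averaging the three forecasts. A hedged sketch (pred_arima, pred_ets, and pred_stlm are hypothetical arrays standing in for one store's three 3-month forecasts — they are not variables from this notebook):

###Code
# Conceptual sketch only: what hybridModel(..., weights="equal") amounts to.
import numpy as np

pred_arima = np.array([10., 12., 11.])  # hypothetical 3-month forecasts per model
pred_ets   = np.array([ 9., 13., 10.])
pred_stlm  = np.array([11., 11., 12.])

ensemble = np.vstack([pred_arima, pred_ets, pred_stlm]).mean(axis=0) # equal weights
total = ensemble.sum()  # the 3-month sales total submitted per store
print(ensemble, total)

###Output
_____no_output_____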
###Code
import rpy2.robjects as robjects # module that makes R functions usable from Python
from rpy2.robjects import pandas2ri # module that converts between Python and R data types
import numpy as np

# Activate pandas2ri
pandas2ri.activate()

hybridModel = """
    function(ts){
        library(forecast)
        library(forecastHybrid)
        d_params=ndiffs(ts)
        hb_mdl<-hybridModel(ts, models="aes",                # auto_arima, ets, stlm
                            a.arg=list(max.p=2, d=d_params), # auto_arima parameters
                            weight="equal")                  # equal weights (an average)
        forecasted_data<-forecast(hb_mdl, h=3) # predict the next 3 months (h=3)
        outdf<-data.frame(forecasted_data$mean)
        colnames(outdf)<-c('amount')
        outdf
    }
"""

# The r() function makes the R object usable from Python
hybridModel = robjects.r(hybridModel)
ts = robjects.r('ts')   # function that builds R time-series objects
c = robjects.r('c')     # function that builds R vectors
log = robjects.r('log') # log transform
exp = robjects.r('exp') # inverse of the log transform

final_pred = []

for i in tqdm(resampling_data.store_id.unique()):
    store = resampling_data[resampling_data['store_id']==i]
    start_year = int(min(store['year_month'])[:4])  # first year of business
    start_month = int(min(store['year_month'])[5:]) # first month of business
    cv = coefficient_variation(resampling_data, i)
    # Apply the log only when the coefficient of variation is below 0.3
    if cv < 0.3:
        train_log = ts(log(store['amount']), start=c(start_year,start_month), frequency=12)
        # Ensemble prediction
        forecast_log = hybridModel(train_log)
        final_pred.append(np.sum(pandas2ri.ri2py(exp(forecast_log)).values))
    # Coefficient of variation 0.3 or above
    else:
        train = ts(store['amount'], start=c(start_year,start_month), frequency=12)
        # Ensemble prediction
        forecast = hybridModel(train)
        final_pred.append(np.sum(pandas2ri.ri2py(forecast).values))

###Output
100%|██████████████████████████████████████████████████████████████████████████████| 1967/1967 [20:58<00:00,  1.56it/s]

###Markdown
hybridModel():
- takes an R time-series object as its first argument;
- the aes passed to models combines the first letters of the forecast-package functions auto.arima, ets, and stlm — i.e., it selects the models to ensemble.
- So to ensemble the ARIMA model, exponential smoothing, and exponential smoothing with STL decomposition, set this argument to aes.
- a.arg: sets the parameters of auto.arima.
- weight: equal gives the models equal weights (an average).
- Log normalization was applied when the coefficient of variation (cv) is below 0.3.
- The final predictions were generated with the hybridModel function.

###Code
submission = pd.read_csv('./submission.csv')
submission['amount5'] = final_pred
submission.to_csv('submission.csv', index=False)
submission

###Output
_____no_output_____
Day1/09-Errors-and-Exceptions.ipynb
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).*

*The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).*

Errors and Exceptions

No matter your skill as a programmer, you will eventually make a coding mistake. Such mistakes come in three basic flavors:

- *Syntax errors:* Errors where the code is not valid Python (generally easy to fix)
- *Runtime errors:* Errors where syntactically valid code fails to execute, perhaps due to invalid user input (sometimes easy to fix)
- *Semantic errors:* Errors in logic: code executes without a problem, but the result is not what you expect (often very difficult to track down and fix)

Here we're going to focus on how to deal cleanly with *runtime errors*. As we'll see, Python handles runtime errors via its *exception handling* framework.

Runtime Errors

If you've done any coding in Python, you've likely come across runtime errors. They can happen in a lot of ways. For example, if you try to reference an undefined variable:

###Code
print(Q)

###Output
_____no_output_____

###Markdown
Or if you try an operation that's not defined:

###Code
1 + 'abc'

###Output
_____no_output_____

###Markdown
Or you might be trying to compute a mathematically ill-defined result:

###Code
2 / 0

###Output
_____no_output_____

###Markdown
Or maybe you're trying to access a sequence element that doesn't exist:

###Code
L = [1, 2, 3]
L[1000]

###Output
_____no_output_____

###Markdown
Note that in each case, Python is kind enough to not simply indicate that an error happened, but to spit out a *meaningful* exception that includes information about what exactly went wrong, along with the exact line of code where the error happened. Having access to meaningful errors like this is immensely useful when trying to trace the root of problems in your code.

Catching Exceptions: ``try`` and ``except``

The main tool Python gives you for handling runtime exceptions is the ``try``...``except`` clause. Its basic structure is this:

###Code
try:
    print("this gets executed first")
except:
    print("this gets executed only if there is an error")

###Output
this gets executed first

###Markdown
Note that the second block here did not get executed: this is because the first block did not return an error. Let's put a problematic statement in the ``try`` block and see what happens:

###Code
try:
    print("let's try something:")
    x = 1 / 0 # ZeroDivisionError
except:
    print("something bad happened!")

###Output
let's try something:
something bad happened!

###Markdown
Here we see that when the error was raised in the ``try`` statement (in this case, a ``ZeroDivisionError``), the error was caught, and the ``except`` statement was executed. One way this is often used is to check user input within a function or another piece of code. For example, we might wish to have a function that catches zero-division and returns some other value, perhaps a suitably large number like $10^{100}$:

###Code
def safe_divide(a, b):
    try:
        return a / b
    except:
        return 1E100

safe_divide(1, 2)

safe_divide(2, 0)

###Output
_____no_output_____

###Markdown
There is a subtle problem with this code, though: what happens when another type of exception comes up?
For example, this is probably not what we intended:

###Code
safe_divide(1, '2')

###Output
_____no_output_____

###Markdown
Dividing an integer and a string raises a ``TypeError``, which our over-zealous code caught and assumed was a ``ZeroDivisionError``! For this reason, it's nearly always a better idea to catch exceptions *explicitly*:

###Code
def safe_divide(a, b):
    try:
        return a / b
    except ZeroDivisionError:
        return 1E100

safe_divide(1, 0)

# Exploration: catch everything and inspect the exception object itself
def safe_divide(a, b):
    try:
        return a / b
    except Exception as e:
        print('Error:', e)
        print(type(e))
        print(dir(e)) # dir() lists all the attributes and methods of the exception object

# dir() works on any object — e.g., it prints all the methods available for an int
y = 5
dir(y)

safe_divide(1, '2')

###Output
_____no_output_____

###Markdown
We're now catching zero-division errors only, and letting all other errors pass through un-modified.

Raising Exceptions: ``raise``

We've seen how valuable it is to have informative exceptions when using parts of the Python language. It's equally valuable to make use of informative exceptions within the code you write, so that users of your code (foremost yourself!) can figure out what caused their errors. The way you raise your own exceptions is with the ``raise`` statement. For example:

###Code
raise RuntimeError("my error message")

###Output
_____no_output_____

###Markdown
As an example of where this might be useful, let's return to our ``fibonacci`` function that we defined previously:

###Code
def fibonacci(N):
    L = []
    a, b = 0, 1
    while len(L) < N:
        a, b = b, a + b
        L.append(a)
    return L

###Output
_____no_output_____

###Markdown
One potential problem here is that the input value could be negative. This will not currently cause any error in our function, but we might want to let the user know that a negative ``N`` is not supported. Errors stemming from invalid parameter values, by convention, lead to a ``ValueError`` being raised:

###Code
def fibonacci(N):
    if N < 0:
        raise ValueError("N must be non-negative")
    L = []
    a, b = 0, 1
    while len(L) < N:
        a, b = b, a + b
        L.append(a)
    return L

fibonacci(10)

fibonacci(-10)

###Output
_____no_output_____

###Markdown
Now the user knows exactly why the input is invalid, and could even use a ``try``...``except`` block to handle it!

###Code
N = -10
try:
    print("trying this...")
    print(fibonacci(N))
except ValueError:
    print("Bad value: need to do something else")

###Output
trying this...
Bad value: need to do something else

###Markdown
Diving Deeper into Exceptions

Briefly, I want to mention here some other concepts you might run into. I'll not go into detail on these concepts and how and why to use them, but instead simply show you the syntax so you can explore more on your own.

Accessing the error message

Sometimes in a ``try``...``except`` statement, you would like to be able to work with the error message itself. This can be done with the ``as`` keyword:

###Code
try:
    x = 1 / 0
except ZeroDivisionError as err:
    print("Error class is:  ", type(err))
    print("Error message is:", err)

###Output
Error class is:   <class 'ZeroDivisionError'>
Error message is: division by zero

###Markdown
With this pattern, you can further customize the exception handling of your function.
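One related pattern worth knowing, as an aside not covered in the excerpt above: when you catch an exception, you can re-raise a more informative one while preserving the original as context, using ``raise ... from``:

###Code
# Standard-library pattern (an aside, not from the original excerpt): exception chaining.
def safe_divide(a, b):
    try:
        return a / b
    except ZeroDivisionError as err:
        # The original traceback is preserved as the new exception's __cause__
        raise ValueError(f"cannot divide {a} by {b}") from err

###Output
_____no_output_____

###Markdown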
Defining custom exceptions

In addition to built-in exceptions, it is possible to define custom exceptions through *class inheritance*. For instance, if you want a special kind of ``ValueError``, you can do this:

###Code
class MySpecialError(ValueError):
    pass

raise MySpecialError("here's the message")

###Output
_____no_output_____

###Markdown
This would allow you to use a ``try``...``except`` block that only catches this type of error:

###Code
try:
    print("do something")
    raise MySpecialError("[informative error message here]")
except MySpecialError:
    print("do something else")

###Output
do something
do something else

###Markdown
You might find this useful as you develop more customized code.

``try``...``except``...``else``...``finally``

In addition to ``try`` and ``except``, you can use the ``else`` and ``finally`` keywords to further tune your code's handling of exceptions. The basic structure is this:

###Code
try:
    print("try something here")
except:
    print("this happens only if it fails")
else:
    print("this happens only if it succeeds")
finally:
    print("this happens no matter what")

###Output
try something here
this happens only if it succeeds
this happens no matter what
2D Code/supportCorrections.ipynb
###Markdown
Weights for convolution quadrature:

###Code
import math
import numpy as np

def weights(N,T,Ms,Ks):
    dt = T/N
    # Generating function of the underlying multistep method
    if Ms == 0:
        gam = lambda z: 1-z #BDF1
    elif Ms == 1:
        gam = lambda z: 0.5 * (1-z) * (3-z) #BDF2
    elif Ms == 2:
        gam = lambda z: 2 * (1-z) / (1+z) #Trapezoidal
    m = np.arange(0,(4*N)+1,1)
    zs = np.exp(-1j * 2 * math.pi * (m) / (4*N+1)) # sample points on the unit circle
    lam = 10 ** (-15 / (5*N)) # contour radius
    tmp = (np.fft.ifft(Ks(gam(lam * zs)/dt))).real
    p = np.arange(0,-N-1,-1)
    g = (lam ** p) * tmp[0:N+1]
    return g

###Output
_____no_output_____

###Markdown
Correction weights $w_{n1}$ and $w_{n0}$:

###Code
# NOTE: stored_weights and dt are assumed to be defined elsewhere in the notebook
# (e.g. stored_weights = weights(N, T, Ms, Ks) and dt = T/N); they are used as globals here.
# gamma is assumed to be nonzero in corr_weights1.
def corr_weights1(n,gamma):
    if gamma<0:
        wn1 = 0
    elif gamma>0:
        wj = 0
        for k in range (0,n+1):
            wj += stored_weights[n-k] * k
        wn1 = (dt**(-gamma)*(n)**(1-gamma))/math.gamma(2-gamma) - wj
    return float(wn1)

def corr_weights0(n,gamma):
    w = 0
    for k in range (0,n+1):
        w += stored_weights[n-k]
    if gamma<0:
        wn0 = (dt*n)**(-gamma)/math.gamma(1-gamma) - w
    else:
        wn0 = -corr_weights1(n,gamma) - w
    return wn0

###Output
_____no_output_____

###Markdown
Gauss Quadrature:

###Code
def gauss(N):
    # Golub-Welsch: nodes and weights from the Jacobi matrix of the Legendre recurrence
    beta = 0.5 / np.sqrt(1-1/((2*np.arange(1,N))**(2)))
    T = np.diag(beta,1) + np.diag(beta,-1)
    D,V = np.linalg.eigh(T) # eigenvalues and eigenvectors
    j = np.argsort(D[:]) # index arrangement
    D.sort(axis=0) # sort D
    w = 2*(V[0,j]**2)
    return (D,w)

###Output
_____no_output_____

###Markdown
Gauss-Jacobi Quadrature:

###Code
def gaussj(n,alf,bet):
    apb = alf + bet
    a1 = (bet-alf)/(apb+2)
    N1 = np.arange(2,n+1)
    aN = (apb)*(bet-alf) / ((apb+2*N1)*(apb+2*N1-2))
    a = np.append(a1,aN)
    b1 = math.sqrt(4*(1+alf)*(1+bet) / ((apb+3)*(apb+2)**2))
    N2 = np.arange(2,n)
    bN = np.sqrt(4*N2*(N2+alf)*(N2+bet)*(N2+apb)/(((apb+2*N2)**2-1)*(apb+2*N2)**2))
    b = np.append(b1,bN)
    if n>1:
        D,V = np.linalg.eigh(np.diag(a) + np.diag(b,1) + np.diag(b,-1))
    else:
        V = 1
        D = a
    c = 2**(apb+1)*math.gamma(alf+1)*math.gamma(bet+1)/math.gamma(apb+2)
    j = np.argsort(D[:]) # index arrangement
    D.sort(axis=0) # sort D
    if type(V)==int:
        w = c * (V**2)
    else:
        w = c * (V[0,j]**2)
    return (D,w)

###Output
_____no_output_____

###Markdown
Compute the fractional integral:

###Code
def simple_fint(t, g, alpha, nq=40):
    # t = time
    # g = function
    # alpha = fractional integral power
    # nq = number of quadrature nodes
    # (fixed: the original signature `**nq` unconditionally reset nq to 40)
    t = np.array([t])
    f = np.zeros(np.shape(t))
    (xj,wj) = gaussj(nq,alpha-1,0)
    (x,w) = gauss(nq)
    x = (x+1)/2
    w = w/2
    gv = np.vectorize(g)
    for j in range (0,len(t)):
        t1 = t[j]*3/4
        t1d = t[j]-t1
        f[j] = ((t1d/2)**alpha)*np.dot(wj,gv((xj+1)*t1d/2+t1))
        if t[j]>0:
            f[j] = f[j]+(t1/2)*np.dot(w,(gv(x*t1/2)*(t[j]-t1*x/2)**(alpha-1)))
            f[j] = f[j]+(t1/2)*np.dot(w,(gv((x+1)*t1/2)*(t[j]-t1*(x+1)/2)**(alpha-1)))
    f = (1/math.gamma(alpha))*f
    return f[0]

###Output
_____no_output_____
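###Markdown
A quick sanity check (not part of the original notebook): Gauss-Legendre nodes and weights from `gauss` should integrate low-degree polynomials on [-1, 1] essentially exactly, e.g. $\int_{-1}^{1} x^2\,dx = 2/3$:

###Code
# Hedged verification sketch using the gauss() defined above.
D, w = gauss(5)              # 5-point Gauss-Legendre rule
print(np.dot(w, D**2))       # expect ~0.6666667 = 2/3
print(np.dot(w, np.ones(5))) # weights sum to the interval length, 2

###Output
_____no_output_____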
docs/tutorials/deform_source_mesh_to_target_mesh.ipynb
###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.> NOTE: We use the chamfer distance of points sampled from src & tgt meshes as optimization cost. There is no diff-mesh-renderer involved.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: ###Code import os import sys import torch need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: if torch.__version__.startswith("1.11.") and sys.platform.startswith("linux"): # We try to install PyTorch3D via a released wheel. pyt_version_str=torch.__version__.split("+")[0].replace(".", "") version_str="".join([ f"py3{sys.version_info.minor}_cu", torch.version.cuda.replace(".",""), f"_pyt{pyt_version_str}" ]) !pip install fvcore iopath !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html else: # We try to install PyTorch3D from source. !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:6") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code # !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. 
trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() %matplotlib inline # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) 
    laplacian_losses.append(float(loss_laplacian.detach().cpu()))

    # Plot mesh
    if i % plot_period == 0:
        plot_pointcloud(new_src_mesh, title="iter: %d" % i)

    # Optimization step
    loss.backward()
    optimizer.step()

###Output
_____no_output_____

###Markdown
4. Visualize the loss

###Code
fig = plt.figure(figsize=(13, 5))
ax = fig.gca()
ax.plot(chamfer_losses, label="chamfer loss")
ax.plot(edge_losses, label="edge loss")
ax.plot(normal_losses, label="normal loss")
ax.plot(laplacian_losses, label="laplacian loss")
ax.legend(fontsize="16")
ax.set_xlabel("Iteration", fontsize="16")
ax.set_ylabel("Loss", fontsize="16")
ax.set_title("Loss vs iterations", fontsize="16");

###Output
_____no_output_____

###Markdown
5. Save the predicted mesh

###Code
# Fetch the verts and faces of the final predicted mesh
final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0)

# Scale normalize back to the original target size
final_verts = final_verts * scale + center

# Store the predicted mesh using save_obj
final_obj = os.path.join('./', 'final_model.obj')
save_obj(final_obj, final_verts, final_faces)

###Output
_____no_output_____
!curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. 
Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. 
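The per-vertex offset mechanism is worth seeing in isolation first: below is a minimal sketch of how an offset tensor yields a new, differentiable mesh (it assumes only that `torch` and `pytorch3d` are importable; the subdivision level is illustrative). ###Code
import torch
from pytorch3d.utils import ico_sphere

# A small icosphere: subdivision level 1 gives 42 vertices.
sphere = ico_sphere(1)
# One learnable 3D offset per vertex, initialized to zero (no deformation yet).
offsets = torch.zeros_like(sphere.verts_packed(), requires_grad=True)
# offset_verts builds a new Meshes whose vertices are verts + offsets,
# so gradients of any downstream loss flow back into `offsets`.
deformed = sphere.offset_verts(offsets)
print(sphere.verts_packed().shape, deformed.verts_packed().shape)
###Output _____no_output_____ ###Markdown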
To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: ###Code import os import sys import torch need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: if torch.__version__.startswith("1.11.") and sys.platform.startswith("linux"): # We try to install PyTorch3D via a released wheel. pyt_version_str=torch.__version__.split("+")[0].replace(".", "") version_str="".join([ f"py3{sys.version_info.minor}_cu", torch.version.cuda.replace(".",""), f"_pyt{pyt_version_str}" ]) !pip install fvcore iopath !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html else: # We try to install PyTorch3D from source. !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! 
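# Concretely: verts_norm = (verts - center) / scale below; section 5 undoes this with verts * scale + center.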
center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. 
Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: ###Code import os import sys import torch need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): # We try to install PyTorch3D via a released wheel. version_str="".join([ f"py3{sys.version_info.minor}_cu", torch.version.cuda.replace(".",""), f"_pyt{torch.__version__[0:5:2]}" ]) !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html else: # We try to install PyTorch3D from source. !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. 
It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. 
Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. 
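That comparison is driven by the chamfer distance between point clouds sampled from the two surfaces; here is a minimal standalone sketch of just that computation (assuming `pytorch3d` is installed; the meshes and sample count are illustrative). ###Code
from pytorch3d.loss import chamfer_distance
from pytorch3d.ops import sample_points_from_meshes
from pytorch3d.utils import ico_sphere

# Two unit spheres at different resolutions stand in for source and target.
coarse = ico_sphere(2)
fine = ico_sphere(4)
pts_a = sample_points_from_meshes(coarse, 1000)  # shape (1, 1000, 3)
pts_b = sample_points_from_meshes(fine, 1000)
# chamfer_distance returns (point loss, normals loss); no normals are passed here.
loss, _ = chamfer_distance(pts_a, pts_b)
print(loss)  # small, since both surfaces approximate the unit sphere
###Output _____no_output_____ ###Markdown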
To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: ###Code import os import sys import torch need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: if torch.__version__.startswith("1.7") and sys.platform.startswith("linux"): # We try to install PyTorch3D via a released wheel. version_str="".join([ f"py3{sys.version_info.minor}_cu", torch.version.cuda.replace(".",""), f"_pyt{torch.__version__[0:5:2]}" ]) !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html else: # We try to install PyTorch3D from source. !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! 
center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. 
Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape. We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such that the predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: ###Code !pip install torch torchvision !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm import tqdm_notebook %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device (fall back to the CPU when no GPU is available) if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0).
# (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm_notebook(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting as plain floats, so the lists do not retain the autograd graph chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4.
Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16") ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: ###Code !pip install torch torchvision !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. 
trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting (as floats, detached from the graph) chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh,
title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: ###Code import os import sys import torch need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: if torch.__version__.startswith("1.9") and sys.platform.startswith("linux"): # We try to install PyTorch3D via a released wheel. version_str="".join([ f"py3{sys.version_info.minor}_cu", torch.version.cuda.replace(".",""), f"_pyt{torch.__version__[0:5:2]}" ]) !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html else: # We try to install PyTorch3D from source. 
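# (the 1.10.0 below is the NVIDIA CUB release being downloaded, not a PyTorch version)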
!curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. 
Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. 
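Each of the smoothness terms listed below is a single function call on a `Meshes` object, so they are easy to probe on their own; here is a quick sketch evaluating them on a plain icosphere (assuming `pytorch3d` is installed; the mesh is illustrative). ###Code
from pytorch3d.loss import (
    mesh_edge_loss,
    mesh_laplacian_smoothing,
    mesh_normal_consistency,
)
from pytorch3d.utils import ico_sphere

mesh = ico_sphere(3)
print(mesh_edge_loss(mesh))                              # mean squared edge length (target length 0)
print(mesh_normal_consistency(mesh))                     # disagreement between normals of adjacent faces
print(mesh_laplacian_smoothing(mesh, method="uniform"))  # magnitude of the uniform Laplacian
###Output _____no_output_____ ###Markdown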
To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: ###Code import os import sys import torch need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: if torch.__version__.startswith("1.10.") and sys.platform.startswith("linux"): # We try to install PyTorch3D via a released wheel. pyt_version_str=torch.__version__.split("+")[0].replace(".", "") version_str="".join([ f"py3{sys.version_info.minor}_cu", torch.version.cuda.replace(".",""), f"_pyt{pyt_version_str}" ]) !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html else: # We try to install PyTorch3D from source. !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! 
center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. 
Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: ###Code import os import sys import torch need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: if torch.__version__.startswith("1.7") and sys.platform.startswith("linux"): # We try to install PyTorch3D via a released wheel. version_str="".join([ f"py3{sys.version_info.minor}_cu", torch.version.cuda.replace(".",""), f"_pyt{torch.__version__[0:5:2]}" ]) !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html else: # We try to install PyTorch3D from source. !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. 
It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. 
Optimization loop
###Code
# We will learn to deform the source mesh by offsetting its vertices
# The shape of the deform parameters is equal to the total number of vertices in src_mesh
deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True)

# The optimizer
optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)

# Number of optimization steps
Niter = 2000
# Weight for the chamfer loss
w_chamfer = 1.0
# Weight for mesh edge loss
w_edge = 1.0
# Weight for mesh normal consistency
w_normal = 0.01
# Weight for mesh laplacian smoothing
w_laplacian = 0.1
# Plot period for the losses
plot_period = 250
loop = tqdm(range(Niter))

chamfer_losses = []
laplacian_losses = []
edge_losses = []
normal_losses = []

%matplotlib inline

for i in loop:
    # Initialize optimizer
    optimizer.zero_grad()

    # Deform the mesh
    new_src_mesh = src_mesh.offset_verts(deform_verts)

    # We sample 5k points from the surface of each mesh
    sample_trg = sample_points_from_meshes(trg_mesh, 5000)
    sample_src = sample_points_from_meshes(new_src_mesh, 5000)

    # We compare the two sets of pointclouds by computing (a) the chamfer loss
    loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)

    # and (b) the edge length of the predicted mesh
    loss_edge = mesh_edge_loss(new_src_mesh)

    # mesh normal consistency
    loss_normal = mesh_normal_consistency(new_src_mesh)

    # mesh laplacian smoothing
    loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform")

    # Weighted sum of the losses
    loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

    # Print the losses
    loop.set_description('total_loss = %.6f' % loss)

    # Save the losses for plotting, detached to plain floats (keeping the raw
    # tensors here would retain the autograd graph and break the plotting cell below)
    chamfer_losses.append(float(loss_chamfer.detach().cpu()))
    edge_losses.append(float(loss_edge.detach().cpu()))
    normal_losses.append(float(loss_normal.detach().cpu()))
    laplacian_losses.append(float(loss_laplacian.detach().cpu()))

    # Plot mesh
    if i % plot_period == 0:
        plot_pointcloud(new_src_mesh, title="iter: %d" % i)

    # Optimization step
    loss.backward()
    optimizer.step()
###Output
_____no_output_____
###Markdown
4. Visualize the loss
###Code
fig = plt.figure(figsize=(13, 5))
ax = fig.gca()
ax.plot(chamfer_losses, label="chamfer loss")
ax.plot(edge_losses, label="edge loss")
ax.plot(normal_losses, label="normal loss")
ax.plot(laplacian_losses, label="laplacian loss")
ax.legend(fontsize="16")
ax.set_xlabel("Iteration", fontsize="16")
ax.set_ylabel("Loss", fontsize="16")
ax.set_title("Loss vs iterations", fontsize="16");
###Output
_____no_output_____
###Markdown
5. Save the predicted mesh
###Code
# Fetch the verts and faces of the final predicted mesh
final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0)

# Scale normalize back to the original target size
final_verts = final_verts * scale + center

# Store the predicted mesh using save_obj
final_obj = os.path.join('./', 'final_model.obj')
save_obj(final_obj, final_verts, final_faces)
###Output
_____no_output_____
###Markdown
Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape. We will cover: - How to **load a mesh** from an `.obj` file - How to use the PyTorch3D **Meshes** datastructure - How to use 4 different PyTorch3D **mesh loss functions** - How to set up an **optimization loop** Starting from a sphere mesh, we learn the offset to each vertex in the mesh such that the predicted mesh is closer to the target mesh at each optimization step.
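As a brief editor's aside (a sketch, not part of the original tutorial), the chamfer term used below can be written out in a few lines of plain PyTorch. The name `naive_chamfer` is introduced here purely for illustration; the real `pytorch3d.loss.chamfer_distance` is batched, far more efficient, and its exact reductions differ in detail.
###Code
import torch

def naive_chamfer(p1, p2):
    # p1: (N, 3) and p2: (M, 3) point clouds sampled from the two surfaces.
    d = torch.cdist(p1, p2)  # (N, M) pairwise Euclidean distances
    # Squared distance from each point to its nearest neighbour in the other
    # cloud, averaged over both directions.
    return (d.min(dim=1).values ** 2).mean() + (d.min(dim=0).values ** 2).mean()

# Tiny smoke test on two random clouds.
print(naive_chamfer(torch.rand(100, 3), torch.rand(120, 3)))
###Output
_____no_output_____
###Markdown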
To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: ###Code !pip install torch torchvision import os import sys import torch if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): !pip install pytorch3d else: need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! 
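# (Added aside.) The (scale, center) pair computed below is exactly what
# section 5 later uses to undo this normalization:
#   original_verts = normalized_verts * scale + center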
center = verts.mean(0)
verts = verts - center
scale = max(verts.abs().max(0)[0])
verts = verts / scale

# We construct a Meshes structure for the target mesh
trg_mesh = Meshes(verts=[verts], faces=[faces_idx])

# We initialize the source shape to be a sphere of radius 1
src_mesh = ico_sphere(4, device)
###Output
_____no_output_____
###Markdown
Visualize the source and target meshes
###Code
def plot_pointcloud(mesh, title=""):
    # Sample points uniformly from the surface of the mesh.
    points = sample_points_from_meshes(mesh, 5000)
    x, y, z = points.clone().detach().cpu().squeeze().unbind(1)
    fig = plt.figure(figsize=(5, 5))
    ax = Axes3D(fig)
    ax.scatter3D(x, z, -y)
    ax.set_xlabel('x')
    ax.set_ylabel('z')
    ax.set_zlabel('y')
    ax.set_title(title)
    ax.view_init(190, 30)
    plt.show()

# %matplotlib notebook
plot_pointcloud(trg_mesh, "Target mesh")
plot_pointcloud(src_mesh, "Source mesh")
###Output
_____no_output_____
###Markdown
3. Optimization loop
###Code
# We will learn to deform the source mesh by offsetting its vertices
# The shape of the deform parameters is equal to the total number of vertices in src_mesh
deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True)

# The optimizer
optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)

# Number of optimization steps
Niter = 2000
# Weight for the chamfer loss
w_chamfer = 1.0
# Weight for mesh edge loss
w_edge = 1.0
# Weight for mesh normal consistency
w_normal = 0.01
# Weight for mesh laplacian smoothing
w_laplacian = 0.1
# Plot period for the losses
plot_period = 250
loop = tqdm(range(Niter))

chamfer_losses = []
laplacian_losses = []
edge_losses = []
normal_losses = []

%matplotlib inline

for i in loop:
    # Initialize optimizer
    optimizer.zero_grad()

    # Deform the mesh
    new_src_mesh = src_mesh.offset_verts(deform_verts)

    # We sample 5k points from the surface of each mesh
    sample_trg = sample_points_from_meshes(trg_mesh, 5000)
    sample_src = sample_points_from_meshes(new_src_mesh, 5000)

    # We compare the two sets of pointclouds by computing (a) the chamfer loss
    loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)

    # and (b) the edge length of the predicted mesh
    loss_edge = mesh_edge_loss(new_src_mesh)

    # mesh normal consistency
    loss_normal = mesh_normal_consistency(new_src_mesh)

    # mesh laplacian smoothing
    loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform")

    # Weighted sum of the losses
    loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

    # Print the losses
    loop.set_description('total_loss = %.6f' % loss)

    # Save the losses for plotting, detached to plain floats (keeping the raw
    # tensors here would retain the autograd graph and break the plotting cell below)
    chamfer_losses.append(float(loss_chamfer.detach().cpu()))
    edge_losses.append(float(loss_edge.detach().cpu()))
    normal_losses.append(float(loss_normal.detach().cpu()))
    laplacian_losses.append(float(loss_laplacian.detach().cpu()))

    # Plot mesh
    if i % plot_period == 0:
        plot_pointcloud(new_src_mesh, title="iter: %d" % i)

    # Optimization step
    loss.backward()
    optimizer.step()
###Output
_____no_output_____
###Markdown
4. Visualize the loss
###Code
fig = plt.figure(figsize=(13, 5))
ax = fig.gca()
ax.plot(chamfer_losses, label="chamfer loss")
ax.plot(edge_losses, label="edge loss")
ax.plot(normal_losses, label="normal loss")
ax.plot(laplacian_losses, label="laplacian loss")
ax.legend(fontsize="16")
ax.set_xlabel("Iteration", fontsize="16")
ax.set_ylabel("Loss", fontsize="16")
ax.set_title("Loss vs iterations", fontsize="16");
###Output
_____no_output_____
###Markdown
5.
Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: ###Code import os import sys import torch need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: if torch.__version__.startswith("1.10.") and sys.platform.startswith("linux"): # We try to install PyTorch3D via a released wheel. pyt_version_str=torch.__version__.split("+")[0].replace(".", "") version_str="".join([ f"py3{sys.version_info.minor}_cu", torch.version.cuda.replace(".",""), f"_pyt{pyt_version_str}" ]) !pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html else: # We try to install PyTorch3D from source. !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. 
Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. 
Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. 
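As another editor's sketch (an illustration under stated assumptions, not the library implementation), the edge regularizer listed below (called `mesh_edge_length` in the prose, imported in code as `mesh_edge_loss`) essentially penalizes the squared deviation of each edge length from a target length, zero by default. A naive single-mesh version:
###Code
import torch
from pytorch3d.utils import ico_sphere

def naive_edge_loss(mesh, target_length=0.0):
    verts = mesh.verts_packed()   # (V, 3) vertex positions
    edges = mesh.edges_packed()   # (E, 2) indices of the unique edges
    lengths = (verts[edges[:, 0]] - verts[edges[:, 1]]).norm(dim=1)
    return ((lengths - target_length) ** 2).mean()

print(naive_edge_loss(ico_sphere(2)))
###Output
_____no_output_____
###Markdown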
To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: ###Code !pip install torch torchvision !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm import tqdm_notebook %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device device = torch.device("cuda:0") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. 
Optimization loop
###Code
# We will learn to deform the source mesh by offsetting its vertices
# The shape of the deform parameters is equal to the total number of vertices in src_mesh
deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True)

# The optimizer
optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)

# Number of optimization steps
Niter = 2000
# Weight for the chamfer loss
w_chamfer = 1.0
# Weight for mesh edge loss
w_edge = 1.0
# Weight for mesh normal consistency
w_normal = 0.01
# Weight for mesh laplacian smoothing
w_laplacian = 0.1
# Plot period for the losses
plot_period = 250
loop = tqdm_notebook(range(Niter))

chamfer_losses = []
laplacian_losses = []
edge_losses = []
normal_losses = []

%matplotlib inline

for i in loop:
    # Initialize optimizer
    optimizer.zero_grad()

    # Deform the mesh
    new_src_mesh = src_mesh.offset_verts(deform_verts)

    # We sample 5k points from the surface of each mesh
    sample_trg = sample_points_from_meshes(trg_mesh, 5000)
    sample_src = sample_points_from_meshes(new_src_mesh, 5000)

    # We compare the two sets of pointclouds by computing (a) the chamfer loss
    loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)

    # and (b) the edge length of the predicted mesh
    loss_edge = mesh_edge_loss(new_src_mesh)

    # mesh normal consistency
    loss_normal = mesh_normal_consistency(new_src_mesh)

    # mesh laplacian smoothing
    loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform")

    # Weighted sum of the losses
    loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

    # Print the losses
    loop.set_description('total_loss = %.6f' % loss)

    # Save the losses for plotting, detached to plain floats (keeping the raw
    # tensors here would retain the autograd graph and break the plotting cell below)
    chamfer_losses.append(float(loss_chamfer.detach().cpu()))
    edge_losses.append(float(loss_edge.detach().cpu()))
    normal_losses.append(float(loss_normal.detach().cpu()))
    laplacian_losses.append(float(loss_laplacian.detach().cpu()))

    # Plot mesh
    if i % plot_period == 0:
        plot_pointcloud(new_src_mesh, title="iter: %d" % i)

    # Optimization step
    loss.backward()
    optimizer.step()
###Output
_____no_output_____
###Markdown
4. Visualize the loss
###Code
fig = plt.figure(figsize=(13, 5))
ax = fig.gca()
ax.plot(chamfer_losses, label="chamfer loss")
ax.plot(edge_losses, label="edge loss")
ax.plot(normal_losses, label="normal loss")
ax.plot(laplacian_losses, label="laplacian loss")
ax.legend(fontsize="16")
ax.set_xlabel("Iteration", fontsize="16")
ax.set_ylabel("Loss", fontsize="16")
ax.set_title("Loss vs iterations", fontsize="16")
###Output
_____no_output_____
###Markdown
5. Save the predicted mesh
###Code
# Fetch the verts and faces of the final predicted mesh
final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0)

# Scale normalize back to the original target size
final_verts = final_verts * scale + center

# Store the predicted mesh using save_obj
final_obj = os.path.join('./', 'final_model.obj')
save_obj(final_obj, final_verts, final_faces)
###Output
_____no_output_____
###Markdown
Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape. We will cover: - How to **load a mesh** from an `.obj` file - How to use the PyTorch3D **Meshes** datastructure - How to use 4 different PyTorch3D **mesh loss functions** - How to set up an **optimization loop** Starting from a sphere mesh, we learn the offset to each vertex in the mesh such that the predicted mesh is closer to the target mesh at each optimization step.
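One more editor's sketch before the list of objective terms below: with method="uniform", the laplacian regularizer measures how far each vertex sits from the average of its neighbours. The function `naive_uniform_laplacian` is an illustrative assumption, not PyTorch3D's implementation (which additionally handles batching and per-mesh weighting).
###Code
import torch
from pytorch3d.utils import ico_sphere

def naive_uniform_laplacian(mesh):
    verts = mesh.verts_packed()   # (V, 3)
    edges = mesh.edges_packed()   # (E, 2) unique undirected edges
    nbr_sum = torch.zeros_like(verts)
    nbr_cnt = torch.zeros(verts.shape[0], 1)
    ones = torch.ones(edges.shape[0], 1)
    for a, b in ((0, 1), (1, 0)):  # accumulate both directions of each edge
        nbr_sum.index_add_(0, edges[:, a], verts[edges[:, b]])
        nbr_cnt.index_add_(0, edges[:, a], ones)
    # Uniform Laplacian: mean neighbour position minus the vertex itself.
    return (nbr_sum / nbr_cnt - verts).norm(dim=1).mean()

print(naive_uniform_laplacian(ico_sphere(2)))
###Output
_____no_output_____
###Markdown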
To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules Ensure `torch` and `torchvision` are installed. If `pytorch3d` is not installed, install it using the following cell: ###Code import os import sys import torch need_pytorch3d=False try: import pytorch3d except ModuleNotFoundError: need_pytorch3d=True if need_pytorch3d: if torch.__version__.startswith("1.10.") and sys.platform.startswith("linux"): # We try to install PyTorch3D via a released wheel. pyt_version_str=torch.__version__.split("+")[0].replace(".", "") version_str="".join([ f"py3{sys.version_info.minor}_cu", torch.version.cuda.replace(".",""), f"_pyt{pyt_version_str}" ]) !pip install fvcore iopath !pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html else: # We try to install PyTorch3D from source. !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz !tar xzf 1.10.0.tar.gz os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! 
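# (Added note; this is an assumption, not something the tutorial states.)
# Unit scale also matters because the SGD step size used later (lr=1.0) is
# tuned for a roughly unit-sized shape; an unnormalized mesh would likely
# need a different learning rate.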
center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output _____no_output_____ ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(float(loss_chamfer.detach().cpu())) edge_losses.append(float(loss_edge.detach().cpu())) normal_losses.append(float(loss_normal.detach().cpu())) laplacian_losses.append(float(loss_laplacian.detach().cpu())) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. 
Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: ###Code !pip install torch torchvision import sys import torch if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'): !pip install pytorch3d else: !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm.notebook import tqdm %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device if torch.cuda.is_available(): device = torch.device("cuda:0") else: device = torch.device("cpu") print("WARNING: CPU only, this will be slow!") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. 
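# (Added comment.) faces.verts_idx is an (F, 3) LongTensor holding one row of
# vertex indices per triangular face; both tensors are moved to the device
# chosen above before the Meshes object is built.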
faces_idx = faces.verts_idx.to(device)
verts = verts.to(device)

# We scale-normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0).
# (scale, center) will be used to bring the predicted mesh to its original center and scale
# Note that normalizing the target mesh speeds up the optimization, but is not necessary!
center = verts.mean(0)
verts = verts - center
scale = max(verts.abs().max(0)[0])
verts = verts / scale

# We construct a Meshes structure for the target mesh
trg_mesh = Meshes(verts=[verts], faces=[faces_idx])

# We initialize the source shape to be a sphere of radius 1
src_mesh = ico_sphere(4, device)
###Output
_____no_output_____
###Markdown
Visualize the source and target meshes
###Code
def plot_pointcloud(mesh, title=""):
    # Sample points uniformly from the surface of the mesh.
    points = sample_points_from_meshes(mesh, 5000)
    x, y, z = points.clone().detach().cpu().squeeze().unbind(1)
    fig = plt.figure(figsize=(5, 5))
    ax = Axes3D(fig)
    ax.scatter3D(x, z, -y)
    ax.set_xlabel('x')
    ax.set_ylabel('z')
    ax.set_zlabel('y')
    ax.set_title(title)
    ax.view_init(190, 30)
    plt.show()

# %matplotlib notebook
plot_pointcloud(trg_mesh, "Target mesh")
plot_pointcloud(src_mesh, "Source mesh")
###Output
_____no_output_____
###Markdown
3. Optimization loop
###Code
# We will learn to deform the source mesh by offsetting its vertices
# The shape of the deform parameters is equal to the total number of vertices in src_mesh
deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True)

# The optimizer
optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)

# Number of optimization steps
Niter = 2000
# Weight for the chamfer loss
w_chamfer = 1.0
# Weight for mesh edge loss
w_edge = 1.0
# Weight for mesh normal consistency
w_normal = 0.01
# Weight for mesh laplacian smoothing
w_laplacian = 0.1
# Plot period for the losses
plot_period = 250
loop = tqdm(range(Niter))

chamfer_losses = []
laplacian_losses = []
edge_losses = []
normal_losses = []

%matplotlib inline

for i in loop:
    # Initialize optimizer
    optimizer.zero_grad()

    # Deform the mesh
    new_src_mesh = src_mesh.offset_verts(deform_verts)

    # We sample 5k points from the surface of each mesh
    sample_trg = sample_points_from_meshes(trg_mesh, 5000)
    sample_src = sample_points_from_meshes(new_src_mesh, 5000)

    # We compare the two sets of pointclouds by computing (a) the chamfer loss
    loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)

    # and (b) the edge length of the predicted mesh
    loss_edge = mesh_edge_loss(new_src_mesh)

    # mesh normal consistency
    loss_normal = mesh_normal_consistency(new_src_mesh)

    # mesh laplacian smoothing
    loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform")

    # Weighted sum of the losses
    loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

    # Print the losses
    loop.set_description('total_loss = %.6f' % loss)

    # Save the losses for plotting, detached to plain floats (keeping the raw
    # tensors here would retain the autograd graph and break the plotting cell below)
    chamfer_losses.append(float(loss_chamfer.detach().cpu()))
    edge_losses.append(float(loss_edge.detach().cpu()))
    normal_losses.append(float(loss_normal.detach().cpu()))
    laplacian_losses.append(float(loss_laplacian.detach().cpu()))

    # Plot mesh
    if i % plot_period == 0:
        plot_pointcloud(new_src_mesh, title="iter: %d" % i)

    # Optimization step
    loss.backward()
    optimizer.step()
###Output
_____no_output_____
###Markdown
4.
Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16"); ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: ###Code !pip install torch torchvision !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm import tqdm_notebook %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device device = torch.device("cuda:0") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. 
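# (Added comment.) load_obj below returns a (verts, faces, aux) triple: verts
# is a float tensor of vertex positions, faces bundles several LongTensors,
# and aux carries the normals/texture data that this tutorial discards.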
trg_obj = os.path.join('dolphin.obj')

# We read the target 3D model using load_obj
verts, faces, aux = load_obj(trg_obj)

# verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh
# faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx
# For this tutorial, normals and textures are ignored.
faces_idx = faces.verts_idx.to(device)
verts = verts.to(device)

# We scale-normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0).
# (scale, center) will be used to bring the predicted mesh to its original center and scale
# Note that normalizing the target mesh speeds up the optimization, but is not necessary!
center = verts.mean(0)
verts = verts - center
scale = max(verts.abs().max(0)[0])
verts = verts / scale

# We construct a Meshes structure for the target mesh
trg_mesh = Meshes(verts=[verts], faces=[faces_idx])

# We initialize the source shape to be a sphere of radius 1
src_mesh = ico_sphere(4, device)
###Output
_____no_output_____
###Markdown
Visualize the source and target meshes
###Code
def plot_pointcloud(mesh, title=""):
    # Sample points uniformly from the surface of the mesh.
    points = sample_points_from_meshes(mesh, 5000)
    x, y, z = points.clone().detach().cpu().squeeze().unbind(1)
    fig = plt.figure(figsize=(5, 5))
    ax = Axes3D(fig)
    ax.scatter3D(x, z, -y)
    ax.set_xlabel('x')
    ax.set_ylabel('z')
    ax.set_zlabel('y')
    ax.set_title(title)
    ax.view_init(190, 30)
    plt.show()

# %matplotlib notebook
plot_pointcloud(trg_mesh, "Target mesh")
plot_pointcloud(src_mesh, "Source mesh")
###Output
_____no_output_____
###Markdown
3. Optimization loop
###Code
# We will learn to deform the source mesh by offsetting its vertices
# The shape of the deform parameters is equal to the total number of vertices in src_mesh
deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True)

# The optimizer
optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)

# Number of optimization steps
Niter = 2000
# Weight for the chamfer loss
w_chamfer = 1.0
# Weight for mesh edge loss
w_edge = 1.0
# Weight for mesh normal consistency
w_normal = 0.01
# Weight for mesh laplacian smoothing
w_laplacian = 0.1
# Plot period for the losses
plot_period = 250
loop = tqdm_notebook(range(Niter))

chamfer_losses = []
laplacian_losses = []
edge_losses = []
normal_losses = []

%matplotlib inline

for i in loop:
    # Initialize optimizer
    optimizer.zero_grad()

    # Deform the mesh
    new_src_mesh = src_mesh.offset_verts(deform_verts)

    # We sample 5k points from the surface of each mesh
    sample_trg = sample_points_from_meshes(trg_mesh, 5000)
    sample_src = sample_points_from_meshes(new_src_mesh, 5000)

    # We compare the two sets of pointclouds by computing (a) the chamfer loss
    loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)

    # and (b) the edge length of the predicted mesh
    loss_edge = mesh_edge_loss(new_src_mesh)

    # mesh normal consistency
    loss_normal = mesh_normal_consistency(new_src_mesh)

    # mesh laplacian smoothing
    loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform")

    # Weighted sum of the losses
    loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

    # Print the losses
    loop.set_description('total_loss = %.6f' % loss)

    # Save the losses for plotting, detached to plain floats (keeping the raw
    # tensors here would retain the autograd graph and break the plotting cell below)
    chamfer_losses.append(float(loss_chamfer.detach().cpu()))
    edge_losses.append(float(loss_edge.detach().cpu()))
    normal_losses.append(float(loss_normal.detach().cpu()))
    laplacian_losses.append(float(loss_laplacian.detach().cpu()))

    # Plot mesh
    if i % plot_period == 0:
plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16") ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3d **Meshes** datastructure- How to use 4 different PyTorch3d **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: ###Code !pip install torch torchvision !pip install 'git+https://github.com/facebookresearch/pytorch3d.git' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm import tqdm_notebook %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device device = torch.device("cuda:0") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. 
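# (Added comment.) os.path.join with a single argument is a no-op here: the
# mesh is simply read from 'dolphin.obj' in the current working directory,
# i.e. the file fetched by the wget cell above.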
trg_obj = os.path.join('dolphin.obj')

# We read the target 3D model using load_obj
verts, faces, aux = load_obj(trg_obj)

# verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh
# faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx
# For this tutorial, normals and textures are ignored.
faces_idx = faces.verts_idx.to(device)
verts = verts.to(device)

# We scale-normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0).
# (scale, center) will be used to bring the predicted mesh to its original center and scale
# Note that normalizing the target mesh speeds up the optimization, but is not necessary!
center = verts.mean(0)
verts = verts - center
scale = max(verts.abs().max(0)[0])
verts = verts / scale

# We construct a Meshes structure for the target mesh
trg_mesh = Meshes(verts=[verts], faces=[faces_idx])

# We initialize the source shape to be a sphere of radius 1
src_mesh = ico_sphere(4, device)
###Output
_____no_output_____
###Markdown
Visualize the source and target meshes
###Code
def plot_pointcloud(mesh, title=""):
    # Sample points uniformly from the surface of the mesh.
    points = sample_points_from_meshes(mesh, 5000)
    x, y, z = points.clone().detach().cpu().squeeze().unbind(1)
    fig = plt.figure(figsize=(5, 5))
    ax = Axes3D(fig)
    ax.scatter3D(x, z, -y)
    ax.set_xlabel('x')
    ax.set_ylabel('z')
    ax.set_zlabel('y')
    ax.set_title(title)
    ax.view_init(190, 30)
    plt.show()

# %matplotlib notebook
plot_pointcloud(trg_mesh, "Target mesh")
plot_pointcloud(src_mesh, "Source mesh")
###Output
_____no_output_____
###Markdown
3. Optimization loop
###Code
# We will learn to deform the source mesh by offsetting its vertices
# The shape of the deform parameters is equal to the total number of vertices in src_mesh
deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True)

# The optimizer
optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)

# Number of optimization steps
Niter = 2000
# Weight for the chamfer loss
w_chamfer = 1.0
# Weight for mesh edge loss
w_edge = 1.0
# Weight for mesh normal consistency
w_normal = 0.01
# Weight for mesh laplacian smoothing
w_laplacian = 0.1
# Plot period for the losses
plot_period = 250
loop = tqdm_notebook(range(Niter))

chamfer_losses = []
laplacian_losses = []
edge_losses = []
normal_losses = []

%matplotlib inline

for i in loop:
    # Initialize optimizer
    optimizer.zero_grad()

    # Deform the mesh
    new_src_mesh = src_mesh.offset_verts(deform_verts)

    # We sample 5k points from the surface of each mesh
    sample_trg = sample_points_from_meshes(trg_mesh, 5000)
    sample_src = sample_points_from_meshes(new_src_mesh, 5000)

    # We compare the two sets of pointclouds by computing (a) the chamfer loss
    loss_chamfer, _ = chamfer_distance(sample_trg, sample_src)

    # and (b) the edge length of the predicted mesh
    loss_edge = mesh_edge_loss(new_src_mesh)

    # mesh normal consistency
    loss_normal = mesh_normal_consistency(new_src_mesh)

    # mesh laplacian smoothing
    loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform")

    # Weighted sum of the losses
    loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian

    # Print the losses
    loop.set_description('total_loss = %.6f' % loss)

    # Save the losses for plotting, detached to plain floats (keeping the raw
    # tensors here would retain the autograd graph and break the plotting cell below)
    chamfer_losses.append(float(loss_chamfer.detach().cpu()))
    edge_losses.append(float(loss_edge.detach().cpu()))
    normal_losses.append(float(loss_normal.detach().cpu()))
    laplacian_losses.append(float(loss_laplacian.detach().cpu()))

    # Plot mesh
    if i % plot_period == 0:
plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output _____no_output_____ ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16") ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____ ###Markdown Deform a source mesh to form a target mesh using 3D loss functions In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape.We will cover: - How to **load a mesh** from an `.obj` file- How to use the PyTorch3D **Meshes** datastructure- How to use 4 different PyTorch3D **mesh loss functions**- How to set up an **optimization loop**Starting from a sphere mesh, we learn the offset to each vertex in the mesh such thatthe predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize:+ `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add:+ `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh.+ `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces.+ `mesh_laplacian_smoothing`, which is the laplacian regularizer. 0. Install and Import modules If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: ###Code !pip install torch torchvision !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable2' import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm import tqdm_notebook %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") ###Output _____no_output_____ ###Markdown 1. Load an obj file and create a Meshes object Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. ###Code !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # Load the dolphin mesh. 
trg_obj = os.path.join('dolphin.obj') # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) ###Output /opt/conda/conda-bld/pytorch_1587428190859/work/aten/src/ATen/native/BinaryOps.cpp:81: UserWarning: Integer division of tensors using div or / is deprecated, and in a future release div will perform true division as in Python 3. Use true_divide or floor_divide (// in Python) instead. ###Markdown Visualize the source and target meshes ###Code def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") ###Output _____no_output_____ ###Markdown 3. 
Optimization loop ###Code # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm_notebook(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(loss_chamfer) edge_losses.append(loss_edge) normal_losses.append(loss_normal) laplacian_losses.append(loss_laplacian) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() ###Output /home/alexis/anaconda3/envs/pytorch3d/lib/python3.7/site-packages/ipykernel_launcher.py:13: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook` del sys.path[0] ###Markdown 4. Visualize the loss ###Code fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16") ###Output _____no_output_____ ###Markdown 5. Save the predicted mesh ###Code # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) ###Output _____no_output_____
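###Markdown As an optional sanity check (a sketch added here, not part of the original tutorial; `reloaded_mesh` and `trg_mesh_orig` are names introduced for illustration), we can reload the saved `final_model.obj` and confirm the fit by recomputing the chamfer distance between point clouds sampled from the reloaded prediction and the target, after undoing the (scale, center) normalization of the target. This reuses only functions imported above. ###Code
# Optional check (sketch): reload the saved mesh and compare it to the target
reloaded_verts, reloaded_faces, _ = load_obj(final_obj)
reloaded_mesh = Meshes(verts=[reloaded_verts.to(device)],
                       faces=[reloaded_faces.verts_idx.to(device)])
# Undo the (scale, center) normalization so both meshes share one coordinate frame
trg_verts_orig = trg_mesh.verts_packed() * scale + center
trg_mesh_orig = Meshes(verts=[trg_verts_orig], faces=[trg_mesh.faces_packed()])
# Sample points from both surfaces; a small chamfer distance indicates a good fit
pts_pred = sample_points_from_meshes(reloaded_mesh, 5000)
pts_trg = sample_points_from_meshes(trg_mesh_orig, 5000)
chamfer, _ = chamfer_distance(pts_pred, pts_trg)
print('chamfer distance between saved prediction and target: %.6f' % chamfer.item())
###Output _____no_output_____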
University of Washington - Machine Learning Specialization/University of Washington - Machine Learning Regression/module6- kNN and Kernel Regression/REG06-NB01.ipynb
###Markdown Predicting house prices using k-nearest neighbors regression salimtIn this notebook, you will implement k-nearest neighbors regression. You will: * Find the k-nearest neighbors of a given query input * Predict the output for the query input using the k-nearest neighbors * Choose the best value of k using a validation set Fire up Turi Create ###Code import turicreate ###Output _____no_output_____ ###Markdown Load in house sales data For this notebook, we use a subset of the King County housing dataset created by randomly selecting 40% of the houses in the full dataset. ###Code sales = turicreate.SFrame('home_data_small.sframe/') ###Output _____no_output_____ ###Markdown Import useful functions from previous notebooks To efficiently compute pairwise distances among data points, we will convert the SFrame into a 2D Numpy array. First import the numpy library and then copy and paste `get_numpy_data()` from the second notebook of Week 2. ###Code import numpy as np # note this allows us to refer to numpy as np instead def get_numpy_data(data_sframe, features, output): data_sframe['constant'] = 1 # this is how you add a constant column to an SFrame # add the column 'constant' to the front of the features list so that we can extract it along with the others: features = ['constant'] + features # this is how you combine two lists # select the columns of data_SFrame given by the features list into the SFrame features_sframe (now including constant): features_sframe = data_sframe[features] # the following line will convert the features_SFrame into a numpy matrix: feature_matrix = features_sframe.to_numpy() # assign the column of data_sframe associated with the output to the SArray output_sarray output_sarray = data_sframe[output] # the following will convert the SArray into a numpy array by first converting it to a list output_array = output_sarray.to_numpy() return(feature_matrix, output_array) ###Output _____no_output_____ ###Markdown We will also need the `normalize_features()` function from Week 5 that normalizes all feature columns to unit norm. Paste this function below. ###Code def normalize_features(feature_matrix): norms = np.linalg.norm(feature_matrix, axis=0) return (feature_matrix / norms, norms) ###Output _____no_output_____ ###Markdown Split data into training, test, and validation sets ###Code (train_and_validation, test) = sales.random_split(.8, seed=1) # initial train/test split (train, validation) = train_and_validation.random_split(.8, seed=1) # split training set into training and validation sets ###Output _____no_output_____ ###Markdown Extract features and normalize Using all of the numerical inputs listed in `feature_list`, transform the training, test, and validation SFrames into Numpy arrays: ###Code feature_list = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'waterfront', 'view', 'condition', 'grade', 'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated', 'lat', 'long', 'sqft_living15', 'sqft_lot15'] features_train, output_train = get_numpy_data(train, feature_list, 'price') features_test, output_test = get_numpy_data(test, feature_list, 'price') features_valid, output_valid = get_numpy_data(validation, feature_list, 'price') ###Output _____no_output_____ ###Markdown In computing distances, it is crucial to normalize features. Otherwise, for example, the `sqft_living` feature (typically on the order of thousands) would exert a much larger influence on distance than the `bedrooms` feature (typically on the order of ones). 
We divide each column of the training feature matrix by its 2-norm, so that the transformed column has unit norm.IMPORTANT: Make sure to store the norms of the features in the training set. The features in the test and validation sets must be divided by these same norms, so that the training, test, and validation sets are normalized consistently. ###Code features_train, norms = normalize_features(features_train) # normalize training set features (columns) features_test = features_test / norms # normalize test set by training set norms features_valid = features_valid / norms # normalize validation set by training set norms ###Output _____no_output_____ ###Markdown Compute a single distance To start, let's just explore computing the "distance" between two given houses. We will take our **query house** to be the first house of the test set and look at the distance between this house and the 10th house of the training set.To see the features associated with the query house, print the first row (index 0) of the test feature matrix. You should get an 18-dimensional vector whose components are between 0 and 1. ###Code query_house = features_test[0] query_house ###Output _____no_output_____ ###Markdown Now print the 10th row (index 9) of the training feature matrix. Again, you get an 18-dimensional vector with components between 0 and 1. ###Code target = features_train[9] # 10th house of the TRAINING set, as asked above target ###Output _____no_output_____ ###Markdown ***QUIZ QUESTION ***What is the Euclidean distance between the query house and the 10th house of the training set? Note: Do not use the `np.linalg.norm` function; use `np.sqrt`, `np.sum`, and the power operator (`**`) instead. The latter approach is more easily adapted to computing multiple distances at once. ###Code np.sqrt(np.sum((features_train[9]-features_test[0])**2)) ###Output _____no_output_____ ###Markdown Compute multiple distances Of course, to do nearest neighbor regression, we need to compute the distance between our query house and *all* houses in the training set. To visualize this nearest-neighbor search, let's first compute the distance from our query house (`features_test[0]`) to the first 10 houses of the training set (`features_train[0:10]`) and then search for the nearest neighbor within this small set of houses. By restricting ourselves to a small set of houses to begin with, we can visually scan the list of 10 distances to verify that our code for finding the nearest neighbor is working.Write a loop to compute the Euclidean distance from the query house to each of the first 10 houses in the training set. ###Code dist_list = [np.sqrt(np.sum((features_train[i]-features_test[0])**2)) for i in range(0,10)] print(len(dist_list), dist_list) ###Output 10 [0.0602747091729555, 0.08546881148827083, 0.06149946437120284, 0.05340273978820058, 0.05844484063938139, 0.05987921510184001, 0.05463140497261526, 0.05543108324159792, 0.05238362784097273, 0.05972359371666126] ###Markdown *** QUIZ QUESTION ***Among the first 10 training houses, which house is the closest to the query house? ###Code idx = dist_list.index(min(dist_list)) # index of the smallest distance print(dist_list[idx],":", idx) ###Output 0.05238362784097273 : 8 ###Markdown It is computationally inefficient to loop over computing distances to all houses in our training dataset. Fortunately, many of the Numpy functions can be **vectorized**, applying the same operation over multiple values or vectors. We now walk through this process.
Consider the following loop that computes the element-wise difference between the features of the query house (`features_test[0]`) and the first 3 training houses (`features_train[0:3]`): ###Code for i in range(3): print (features_train[i]-features_test[0]) # should print 3 vectors of length 18 ###Output [ 0.00000000e+00 -3.87821276e-03 -1.20498190e-02 -1.05552733e-02 2.08673616e-04 -8.52950206e-03 0.00000000e+00 -5.10236549e-02 0.00000000e+00 -3.47633726e-03 -5.50336860e-03 -2.48168183e-02 -1.63756198e-04 0.00000000e+00 -1.70072004e-05 1.30577772e-05 -5.14364795e-03 6.69281453e-04] [ 0.00000000e+00 -3.87821276e-03 -4.51868214e-03 -2.26610387e-03 7.19763456e-04 0.00000000e+00 0.00000000e+00 -5.10236549e-02 0.00000000e+00 -3.47633726e-03 1.30705004e-03 -1.45830788e-02 -1.91048898e-04 6.65082271e-02 4.23240653e-05 6.22415897e-06 -2.89330197e-03 1.47606982e-03] [ 0.00000000e+00 -7.75642553e-03 -1.20498190e-02 -1.30002801e-02 1.60518166e-03 -8.52950206e-03 0.00000000e+00 -5.10236549e-02 0.00000000e+00 -5.21450589e-03 -8.32384500e-03 -2.48168183e-02 -3.13866046e-04 0.00000000e+00 4.71047219e-05 1.56530415e-05 3.72914476e-03 1.64764925e-03] ###Markdown The subtraction operator (`-`) in Numpy is vectorized as follows: ###Code print (features_train[0:3] - features_test[0]) ###Output [[ 0.00000000e+00 -3.87821276e-03 -1.20498190e-02 -1.05552733e-02 2.08673616e-04 -8.52950206e-03 0.00000000e+00 -5.10236549e-02 0.00000000e+00 -3.47633726e-03 -5.50336860e-03 -2.48168183e-02 -1.63756198e-04 0.00000000e+00 -1.70072004e-05 1.30577772e-05 -5.14364795e-03 6.69281453e-04] [ 0.00000000e+00 -3.87821276e-03 -4.51868214e-03 -2.26610387e-03 7.19763456e-04 0.00000000e+00 0.00000000e+00 -5.10236549e-02 0.00000000e+00 -3.47633726e-03 1.30705004e-03 -1.45830788e-02 -1.91048898e-04 6.65082271e-02 4.23240653e-05 6.22415897e-06 -2.89330197e-03 1.47606982e-03] [ 0.00000000e+00 -7.75642553e-03 -1.20498190e-02 -1.30002801e-02 1.60518166e-03 -8.52950206e-03 0.00000000e+00 -5.10236549e-02 0.00000000e+00 -5.21450589e-03 -8.32384500e-03 -2.48168183e-02 -3.13866046e-04 0.00000000e+00 4.71047219e-05 1.56530415e-05 3.72914476e-03 1.64764925e-03]] ###Markdown Note that the output of this vectorized operation is identical to that of the loop above, which can be verified below: ###Code # verify that vectorization works results = features_train[0:3] - features_test[0] print (results[0] - (features_train[0]-features_test[0])) # should print all 0's if results[0] == (features_train[0]-features_test[0]) print (results[1] - (features_train[1]-features_test[0])) # should print all 0's if results[1] == (features_train[1]-features_test[0]) print (results[2] - (features_train[2]-features_test[0])) # should print all 0's if results[2] == (features_train[2]-features_test[0]) ###Output [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] ###Markdown Aside: it is a good idea to write tests like this cell whenever you are vectorizing a complicated operation. Perform 1-nearest neighbor regressionNow that we have the element-wise differences, it is not too hard to compute the Euclidean distances between our query house and all of the training houses. First, write a single-line expression to define a variable `diff` such that `diff[i]` gives the element-wise difference between the features of the query house and the `i`-th training house. 
###Code diff = features_train - features_test[0] # one row of feature differences per training house ###Output _____no_output_____ ###Markdown To test the code above, run the following cell, which should output a value -0.0934339605842: ###Code print (diff[-1].sum()) # sum of the feature differences between the query and last training house # should print -0.0934339605842 ###Output -0.0934339605841801 ###Markdown The next step in computing the Euclidean distances is to take these feature-by-feature differences in `diff`, square each, and take the sum over feature indices. That is, compute the sum of square feature differences for each training house (row in `diff`).By default, `np.sum` sums up everything in the matrix and returns a single number. To instead sum only over a row or column, we need to specify the `axis` parameter described in the `np.sum` [documentation](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.sum.html). In particular, `axis=1` computes the sum across each row.Below, we compute this sum of square feature differences for all training houses and verify that the output for the 16th house in the training set is equivalent to having examined only the 16th row of `diff` and computing the sum of squares on that row alone. ###Code print (np.sum(diff**2, axis=1)[15]) # take sum of squares across each row, and print the 16th sum print (np.sum(diff[15]**2)) # print the sum of squares for the 16th row -- should be same as above ###Output 0.003307059028786791 0.0033070590287867904 ###Markdown With this result in mind, write a single-line expression to compute the Euclidean distances between the query house and all houses in the training set. Assign the result to a variable `distances`.**Hint**: Do not forget to take the square root of the sum of squares. ###Code distances = np.sqrt(np.sum(diff**2, axis=1)) # vectorized: one Euclidean distance per training house ###Output _____no_output_____ ###Markdown To test the code above, run the following cell, which should output a value 0.0237082324496: ###Code print (distances[100]) # Euclidean distance between the query house and the 101st training house # should print 0.0237082324496 ###Output 0.023708232449603735 ###Markdown Now you are ready to write a function that computes the distances from a query house to all training houses. The function should take two parameters: (i) the matrix of training features and (ii) the single feature vector associated with the query. ###Code def calcDist(features_matrix, query_house): # Return a dict mapping training-house index -> Euclidean distance to the query return {i: np.sqrt(np.sum((features_matrix[i]-query_house)**2)) for i in range(len(features_matrix))} def findClosest(dist_dict, query_house): # Return the index with the smallest distance # (query_house is kept in the signature only so existing calls still work) return min(dist_dict, key=dist_dict.get) ###Output _____no_output_____ ###Markdown *** QUIZ QUESTIONS ***1. Take the query house to be third house of the test set (`features_test[2]`). What is the index of the house in the training set that is closest to this query house?2. What is the predicted value of the query house based on 1-nearest neighbor regression?
###Code dists = calcDist(features_train, features_test[2]) # distances from the query to every training house closestIdx = findClosest(dists, features_test[2]) closestIdx output_train[closestIdx] # price of the closest training house, i.e. the 1-NN prediction ###Output _____no_output_____ ###Markdown Perform k-nearest neighbor regression For k-nearest neighbors, we need to find a *set* of k houses in the training set closest to a given query house. We then make predictions based on these k nearest neighbors. Fetch k-nearest neighborsUsing the functions above, implement a function that takes in * the value of k; * the feature matrix for the training houses; and * the feature vector of the query house and returns the indices of the k closest training houses. For instance, with 2-nearest neighbor, a return value of [5, 10] would indicate that the 6th and 11th training houses are closest to the query house.**Hint**: Look at the [documentation for `np.argsort`](http://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html). ###Code def kClosest(k, feature_matrix, query_house): # Repeatedly take the closest remaining house and remove it from the candidate dict d = calcDist(feature_matrix, query_house) kNN = [] for i in range(k): kNN.append(findClosest(d, query_house)) d.pop(kNN[i], None) return kNN ###Output _____no_output_____ ###Markdown *** QUIZ QUESTION ***Take the query house to be third house of the test set (`features_test[2]`). What are the indices of the 4 training houses closest to the query house? ###Code kClosest(4,features_train,features_test[2]) ###Output _____no_output_____ ###Markdown Make a single prediction by averaging k nearest neighbor outputs Now that we know how to find the k-nearest neighbors, write a function that predicts the value of a given query house. **For simplicity, take the average of the prices of the k nearest neighbors in the training set**. The function should have the following parameters: * the value of k; * the feature matrix for the training houses; * the output values (prices) of the training houses; and * the feature vector of the query house, whose price we are predicting. The function should return a predicted value of the query house.**Hint**: You can extract multiple items from a Numpy array using a list of indices. For instance, `output_train[[6, 10]]` returns the prices of the 7th and 11th training houses. ###Code def avgKnn(k, feature_matrix, output, query_house): # Compute the neighbour list once, then average the corresponding prices klosest = kClosest(k, feature_matrix, query_house) return sum(output[klosest])/len(klosest) ###Output _____no_output_____ ###Markdown *** QUIZ QUESTION ***Again taking the query house to be third house of the test set (`features_test[2]`), predict the value of the query house using k-nearest neighbors with `k=4` and the simple averaging method described and implemented above. ###Code avgKnn(4, features_train, output_train, features_test[2]) ###Output _____no_output_____ ###Markdown Compare this predicted value using 4-nearest neighbors to the predicted value using 1-nearest neighbor computed earlier. Make multiple predictions Write a function to predict the value of *each and every* house in a query set. (The query set can be any subset of the dataset, be it the test set or validation set.) The idea is to have a loop where we take each house in the query set as the query house and make a prediction for that specific house. The new function should take the following parameters: * the value of k; * the feature matrix for the training houses; * the output values (prices) of the training houses; and * the feature matrix for the query set.
The function should return a set of predicted values, one for each house in the query set.**Hint**: To get the number of houses in the query set, use the `.shape` field of the query features matrix. See [the documentation](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.ndarray.shape.html). ###Code def multiPredictions(k, feature_matrix, output, querySet): return {q: avgKnn(k, feature_matrix, output, querySet[q]) for q in range(len(querySet))} def getMin(d): return min(d, key=lambda k: d[k]) ###Output _____no_output_____ ###Markdown *** QUIZ QUESTION ***Make predictions for the first 10 houses in the test set using k-nearest neighbors with `k=10`. 1. What is the index of the house in this query set that has the lowest predicted value? 2. What is the predicted value of this house? ###Code getMin(multiPredictions(10, features_train, output_train, features_test[0:10])) ###Output _____no_output_____ ###Markdown Choosing the best value of k using a validation set There remains a question of choosing the value of k to use in making predictions. Here, we use a validation set to choose this value. Write a loop that does the following:* For `k` in [1, 2, ..., 15]: * Makes predictions for each house in the VALIDATION set using the k-nearest neighbors from the TRAINING set. * Computes the RSS for these predictions on the VALIDATION set * Stores the RSS computed above in `rss_all`* Report which `k` produced the lowest RSS on VALIDATION set. (Depending on your computing environment, this computation may take 10-15 minutes.) ###Code rss_all = {k: sum((output_valid-list(multiPredictions(k, features_train, output_train, features_valid).values()))**2) for k in range(1,16)} rss_all k_val = getMin(rss_all) k_val ###Output _____no_output_____ ###Markdown To visualize the performance as a function of `k`, plot the RSS on the VALIDATION set for each considered `k` value: ###Code import matplotlib.pyplot as plt %matplotlib inline kvals = range(1, 16) plt.plot(kvals, rss_all.values(),'bo-') ###Output _____no_output_____ ###Markdown ***QUIZ QUESTION ***What is the RSS on the TEST data using the value of k found above? To be clear, sum over all houses in the TEST set. ###Code print('{:.2e}'.format(sum(((output_test-list(multiPredictions(k_val, features_valid, output_valid, features_test).values()))**2)))) ###Output 1.45e+14
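###Markdown As an optional extra (a sketch, not part of the original assignment; `knn_predict` is a name introduced here), the helper functions above can be replaced by a fully vectorised implementation built on `np.argsort`, as the earlier hint suggested. Note that predictions for the test set should be based on neighbours drawn from the *training* set. ###Code
def knn_predict(k, features, output, queries):
    # Pairwise Euclidean distances: one row per query house, one column per training house
    dists = np.sqrt(((queries[:, np.newaxis, :] - features[np.newaxis, :, :])**2).sum(axis=2))
    # Indices of the k nearest training houses for every query (np.argsort, as hinted above)
    knn_idx = np.argsort(dists, axis=1)[:, :k]
    # Predict each query's price as the mean price of its k nearest training houses
    return output[knn_idx].mean(axis=1)

# Example: predict the first 10 test houses from the TRAINING set with k=10
print(knn_predict(10, features_train, output_train, features_test[0:10]))
###Output _____no_output_____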
20. Adv OOPs Concepts/06. ClassMethods.ipynb
###Markdown classmethod ###Code # Task: pass a string such as "10-14" and get a Complex object back %reset -f class Complex: def __init__(self, real, img): self.real = int(real) self.img = int(img) def __str__(self): return f"Complex {self.real} + {self.img}i" @classmethod def fromString(cls, c_string): # alternative constructor: parse "real-img" # (assumes both parts are non-negative, since "-" is the separator) real, img = c_string.split("-") return cls(real, img) C4 = Complex.fromString("10-14") print(C4) type(C4) # Task: add two Complex numbers and get their sum back as a Complex object class Complex: def __init__(self, real, img): self.real = int(real) self.img = int(img) def __add__(self, e): return Complex(self.real+e.real, self.img+e.img) def __str__(self): return f"Complex {self.real} + {self.img}i" @classmethod def fromString(cls, c_string): real, img = c_string.split("-") return cls(real, img) C1 = Complex(40, 60) C2 = Complex(100, 50) C4 = Complex.fromString("10-14") print(C4) C3 = C1 + C2 print(C3) type(C3) '10-14'.split("-") # quick check of what split("-") returns # Task: the same addition, but with the real and imaginary parts passed in as a single list class Complex: def __init__(self, *args): self.real = int(args[0][0]) self.img = int(args[0][1]) def __add__(self, e): return Complex([self.real+e.real, self.img+e.img]) def __str__(self): return f"Complex {self.real} + {self.img}i" @classmethod def fromString(cls, c_string): return cls(c_string.split("-")) C1 = Complex([40, 60]) C2 = Complex([100, 50]) C4 = Complex.fromString("10-14") print(C4) C3 = C1 + C2 print(C3) type(C3) %who ###Output _____no_output_____
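###Markdown The point of receiving `cls` instead of hard-coding `Complex` is that the alternative constructor then also works for subclasses. A small sketch (added; `PrettyComplex` is a hypothetical subclass, and it assumes the last `Complex` definition above is still in scope): ###Code
# Sketch (added): `cls` makes the alternative constructor work for subclasses too
class PrettyComplex(Complex):  # hypothetical subclass, for illustration only
    def __str__(self):
        return f"({self.real} + {self.img}i)"

P1 = PrettyComplex.fromString("3-7")  # here cls is PrettyComplex, not Complex
print(P1)        # uses the subclass __str__
print(type(P1))  # <class '__main__.PrettyComplex'>
###Output _____no_output_____ ###Markdown Note that `__add__` above hard-codes `Complex`, so adding two `PrettyComplex` objects still returns a plain `Complex` - exactly the kind of coupling that using `cls` avoids in `fromString`.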
02 - Decision Trees and Linear Regression (Solutions)(1).ipynb
###Markdown Introductory applied machine learning (INFR10069) Lab 2: Decision trees and linear regression In the first part of this lab we perform Decision trees classification on the [German credit](http://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29) data set. In the second part we learn how to train simple linear regression model by using the [CPU performance](https://archive.ics.uci.edu/ml/datasets/Computer+Hardware) data set. Both datasets (`credit.csv` and `cpu.csv`) are located within the `datasets` directory (adjacent to this file). As always, first activate the python environment if not already done so (remember to exclude 'source' if you're on windows): ```{bash}source activate py3iaml``` Now let's import the packages (*This will generate some warnings related to the libraries: ignore*): ###Code # Import packages import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import graphviz from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeClassifier, export_graphviz from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score, confusion_matrix, r2_score %matplotlib inline ###Output _____no_output_____ ###Markdown 1. Decision TreesOne of the great advantages of decision trees is their interpretability. The rules learnt for classification are easy for a person to follow, unlike the opaque "black box" of many other methods, such as neural networks. We demonstrate the utility of this using a German credit data set. You can read a description of this dataset at the [UCI site](http://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29). The task is to predict whether a loan approval is good or bad credit risk based on 20 attributes. We've simplified the data set somewhat, particularly making attribute names and values more meaningful. We will load the credit dataset into a pandas DataFrame structure. ###Code # Load the dataset data_path = os.path.join(os.getcwd(), 'datasets', 'credit.csv') credit = pd.read_csv(data_path, delimiter = ',') ###Output _____no_output_____ ###Markdown ========== Question 1.1 ==========Display the number of data points and attributes in the dataset. ###Code # Student needs to provide code similar to below print('Number of samples: {}, number of attributes: {}'.format(credit.shape[0], credit.shape[1])) ###Output Number of samples: 1001, number of attributes: 21 ###Markdown ========== Question 1.2 ==========Get a feeling of the data by using pandas `describe()` method. Be careful - there is a mixture of numeric and categorical data and hence will need to output it in two stages: ###Code # Output Numeric Data # Student needs to provide code similar to below credit.describe(include=[np.number]) # Output Categorical Data # Student needs to provide code similar to below credit.describe(include=['O']) ###Output _____no_output_____ ###Markdown ========== Question 1.3 ==========Display the first 10 data points of the dataset*TIP*: You may need to set the option to display all columns: look at [pandas.set_option](https://pandas.pydata.org/pandas-docs/version/1.3.1/reference/api/pandas.set_option.html). ###Code # Student needs to provide code similar to below pd.set_option('display.max_columns', None) credit.head(10) ###Output _____no_output_____ ###Markdown ========== Question 1.4 ==========When presented with a dataset, it is usually a good idea to visualise it first. 
By using seaborn's [pairplot](https://seaborn.github.io/generated/seaborn.pairplot.html?highlight=pairplotseaborn.pairplot) function, try visualising a scatter plot of the `Age` and `Duration` variables. You can use the `Approve` variable as the `hue` parameter to visualise results separately for each class. Do you notice anything unusual? ###Code # Student needs to provide code similar to below g = sns.pairplot(data=credit, vars=['Duration', 'Age'], hue='Approve', height=3) ###Output _____no_output_____ ###Markdown ***Student needs to answer similar to below:***There is a data point with negative age. A negative age makes no sense, so this data point has clearly been corrupted by noise. ========== Question 1.5 ==========In the previous point you should have found a data point which seems to be corrupted, as some of its values are nonsensical. Even a single point like this can significantly affect the performance of a classifier. How do you think it would affect Decision trees? How about Naive Bayes? A good way to check this is to test the performance of each classifier before and after removing this datapoint. ***Student needs to answer similar to below:***In such a continuous scenario, Naive Bayes fits Gaussian distributions and thus is very sensitive to outliers. Decision trees are expected to be less affected. ========== Question 1.6 ==========Now we want to remove this instance from the dataset by using a filter. In general, we want to remove all instances where the age of an applicant is lower than 0 years, as this suggests that the instance is corrupted. Use logical indexing to get rid of these instances without creating a new dataframe. Display the number of data points after any outliers have been removed. ###Code # Student needs to provide code similar to below credit = credit[credit['Age']>0] print('Number of data points after removal of outliers: {}'.format(credit.shape[0])) ###Output Number of data points after removal of outliers: 1000 ###Markdown Categorical Data and Encoding ========== Question 1.7 ==========You might have noticed that most of the attributes in the dataset are in fact discrete. Now we want to know which variables exactly are discrete (both categorical and numerical, look [here](http://stats.stackexchange.com/questions/206/what-is-the-difference-between-discrete-data-and-continuous-data) if you are unsure about the difference) and which are continuous variables. In order to do so, we will inspect the number of possible values that each attribute can take. Display the number of values each attribute takes in the dataset. *Hint: As a first step, you want to loop over the columns of the DataFrame.
Then you might find the numpy `unique` function quite useful.* ###Code # Student needs to provide code similar to below for column in credit: unique_vals = np.unique(credit[column]) nr_vals = len(unique_vals) if nr_vals < 11: print('Number of values for attribute {}: {} -- {}'.format(column, nr_vals, unique_vals)) else: print('Number of values for attribute {}: {}'.format(column, nr_vals)) ###Output Number of values for attribute CheckingAccount: 4 -- ['<0' '<200' '>=200' 'none'] Number of values for attribute Duration: 33 Number of values for attribute CreditHistory: 5 -- ['critical' 'ok' 'ok_at_this_bank' 'ok_til_now' 'past_delays'] Number of values for attribute Purpose: 10 -- ['appliances' 'business' 'car_new' 'car_used' 'education' 'furniture' 'others' 'repairs' 'retraining' 'television'] Number of values for attribute CreditAmount: 921 Number of values for attribute SavingsAccount: 5 -- ['<100' '<1000' '<500' '>=1000' 'unknown'] Number of values for attribute YearsEmployed: 5 -- ['<1' '<4' '<7' '>=7' 'unemployed'] Number of values for attribute InstallmentRate: 4 -- [1. 2. 3. 4.] Number of values for attribute PersonalStatus: 4 -- ['female' 'male_divorced' 'male_married' 'male_single'] Number of values for attribute OtherDebtors: 3 -- ['co' 'guarantor' 'none'] Number of values for attribute ResidentSince: 4 -- [1. 2. 3. 4.] Number of values for attribute Property: 4 -- ['car' 'real_estate' 'savings' 'unknown'] Number of values for attribute Age: 53 Number of values for attribute OtherPlans: 3 -- ['bank' 'none' 'stores'] Number of values for attribute Housing: 3 -- ['free' 'own' 'rent'] Number of values for attribute NumCreditsAtBank: 4 -- [1. 2. 3. 4.] Number of values for attribute Job: 4 -- ['management' 'skilled' 'unemployed' 'unskilled'] Number of values for attribute Dependents: 2 -- [1. 2.] Number of values for attribute Telephone: 2 -- ['no' 'yes'] Number of values for attribute Foreign: 2 -- ['no' 'yes'] Number of values for attribute Approve: 2 -- ['bad' 'good'] ###Markdown It seems like the variables `Duration`, `CreditAmount` and `Age` are continuous and all the rest are discrete. The discrete variables are not in a very convenient format though. Ideally we would want the discrete attributes to take values between `0` and `n_values-1`. Pandas has a handy way of dealing with [Categorical](https://pandas.pydata.org/pandas-docs/version/1.3.1/user_guide/categorical.html) data, which you are encouraged to get familiar with.We will create a new DataFrame called `credit_clean` and convert all the discrete variables from `credit` into pandas Categorical types. Remember, we want to change the discrete variables only, so we will have to exclude the `CreditAmount`, `Age` and `Duration` attributes. Also, we don't really mind if the target variable is categorical, so we won't be transforming the `Approve` attribute either. Execute the cell below and make sure you understand what it does. ###Code credit_clean = credit.copy(deep=True) # Make a copy of the DataFrame for column in credit_clean.columns: if column not in ['CreditAmount', 'Age', 'Duration', 'Approve']: # Exclude non discrete columns and the target credit_clean[column] = credit_clean[column].astype('category') # Convert using astype(...) method ###Output _____no_output_____ ###Markdown ========== Question 1.8 ==========Display the first 10 data points of the clean data. Does it look like what you expected? 
###Code # Student needs to provide code similar to below credit_clean.head(10) ###Output _____no_output_____ ###Markdown ========== Question 1.9 ==========There is a **conceptual** problem with using an (unordered) categorical input to train a decision tree. Can you figure it out? *Hint*: Look at the data-types supported by the `DecisionTreeClassifier`, specifically the input to the [fit](https://scikit-learn.org/0.24/modules/generated/sklearn.tree.DecisionTreeClassifier.htmlsklearn.tree.DecisionTreeClassifier.fit) method. ***Student needs to answer similar to below:***The Decision Tree algorithm requires numeric data: however, most of our categorical values have no notion of ordering, and hence using a floating-point representation as required by Scikit Learn's Decision Tree class would be enforcing the wrong assumptions. ========== Question 1.10 ==========Let us then convert the categorical values we just created into a one-hot-encoding. Pandas provides this functionality conveniently via the [`get_dummies`](https://pandas.pydata.org/pandas-docs/version/1.3.1/reference/api/pandas.get_dummies.html) method. Use it to generate a matrix `X` containing the one-hot encoded categorical features as well as the floating-point attributes (`CreditAmount`, `Age` and `Duration`). Also, (separately) extract the target variable (`Approve`) into a vector `y`. In each case, display the shape of the matrix/vector `X` and `y`. *TIP: It will be useful to keep track of the column names in the new one-hot-encoded format for later.* ###Code # Extract (and print the shape of) X # Student needs to provide code similar to below # Extract 1-Hot Encoding into X and add floating-point columns X = pd.get_dummies(credit_clean.drop('Approve', axis=1)) X[['CreditAmount', 'Age', 'Duration']] = credit_clean[['CreditAmount', 'Age', 'Duration']] column_names = X.columns # Convert to Numpy Array X = X.values print('X shape: {}'.format(X.shape)) # Extract the Target y (and print its shape) # Student needs to provide code similar to below y = credit_clean['Approve'].values # Target vector print('y shape: {}'.format(np.shape(y))) ###Output y shape: (1000,) ###Markdown (Confirm that you have 71 input features, one target variable and 1000 data points). *Bonus Question*: where did the 71 come from? Hold-out validationIn the next step we will be using a Decision Tree classifier model. To get an accurate estimate of the model's classification performance we will use hold-out validation. Familiriase yourself with the logic behind [`train_test_split CV`](http://scikit-learn.org/0.24/modules/cross_validation.htmlcross-validation-evaluating-estimator-performance) (also called `Hold-out` validation) and [how it is used](http://scikit-learn.org/0.24/modules/generated/sklearn.cross_validation.train_test_split.html) in `Scikit-learn`. Execute the cell below to create your training/testing sets by assigning 10% of the data to the test set (and convince yourself you understand what is going on). ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.9, test_size=0.1, random_state=0) ###Output _____no_output_____ ###Markdown ========== Question 1.11 ==========Confirm that `X_train` and `X_test` matrices are subsets of `X` by displaying the number of rows in the three matrices (no need to make use of set theory). 
###Code # Student needs to provide code similar to below print('Number of instances in X: {}'.format(np.shape(X)[0])) print('Number of instances in X_train: {}'.format(X_train.shape[0])) print('Number of instances in X_test: {}'.format(X_test.shape[0])) print('Number of instances in X_train and X_test together: {}'.format(X_train.shape[0] + X_test.shape[0])) ###Output Number of instances in X: 1000 Number of instances in X_train: 900 Number of instances in X_test: 100 Number of instances in X_train and X_test together: 1000 ###Markdown ========== Question 1.12 ==========Now we will train a Decision Tree classifier on the training data. Read about [Decision Tree classifiers](http://scikit-learn.org/0.24/modules/tree.html) in `Scikit-learn` and how they are [used](http://scikit-learn.org/0.24/modules/generated/sklearn.tree.DecisionTreeClassifier.htmlsklearn.tree.DecisionTreeClassifier). Create a `DecisionTreeClassifier` instance, naming it `dt`, and train it by using training data only (i.e. `X_train` and `y_train`). Set the `criterion` attribute to `entropy` in order to measure the quality of splits by using the information gain. Use the default settings for the rest of the parameters. By default, trees are grown to full depth; this means that very fine splits are made involving very few data points. Not only does this make the trees hard to visualise (they'll be deep), but also we could be overfitting the data. For now, we arbitrarily choose a depth of 3 for our tree (to make it easier to interpret below), but this is a parameter we could tune. For consistency, use a `random_state=1000`. ###Code # Student needs to provide code similar to below dt = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=1000) dt.fit(X_train.astype(float), y_train) ###Output _____no_output_____ ###Markdown We have mentioned in the class that decision trees have the advantage of being interpretable by humans. Now we visualise the decision tree we have just trained. Scikit-learn can export the tree in a `.dot` format. Run the following code (replace `column_names` with whatever you used to store the names of the columns of the extended feature-space): ###Code dot_data = export_graphviz(dt, out_file=None, feature_names=column_names, class_names=sorted(credit_clean['Approve'].unique()), filled=True, rounded=True, special_characters=False) graph = graphviz.Source(dot_data) graph ###Output _____no_output_____ ###Markdown An alternative way to visualise the tree is to open the output .dot file with an editor such as [this online .dot renderer](http://dreampuf.github.io/GraphvizOnline/). You can use the code below to create a dot-file and then copy and paste its contents into the online site (you can double click on the tree once it has been produced to view it in full screen). ###Code with open("tree.dot", 'w') as f: f = export_graphviz(dt, out_file=f, feature_names=column_names, class_names=credit_clean['Approve'].unique(), filled=True, rounded=True, special_characters=False) ###Output _____no_output_____ ###Markdown ========== Question 1.13 ==========Inspect the tree and 1. describe what it shows, explaining how you interpret any one-hot encoded data 1. indicate which is the attribute with the highest information gain. ***Student needs to answer similar to below:***The tree branches first on the CheckingAccount value, specifically whether the applicant has a checking account or not (since the root split is based on the `_none` encoding, while the other CheckingAccount indicators all correspond to particular account values). Below is a png of the tree produced.
It shows that the root of the tree splits on `CheckingAccount_none <= 0.5`, i.e. on whether the applicant has a checking account at all, with further splits on `OtherPlans_none`, the `CreditHistory` indicators, `Duration` and `Age`. Because the categorical attributes are one-hot encoded, each split on them uses a threshold of 0.5, which simply tests whether a single category is present (value 1) or absent (value 0).The attribute with the highest information gain is the one at the top of the tree - `CheckingAccount_none`. ========== Question 1.14 ==========Tree-based estimators (i.e. decision trees and random forests) can be used to compute feature importances. The importance of a feature is computed as the (normalized) total reduction of entropy (or other used `criterion`) brought by that feature. Find the relevant attributes of the classifier you just trained (i.e. those which are actually used in this short tree) and display feature importances along with their names. ###Code # Student needs to provide code similar to below for i, column in enumerate(column_names): if dt.feature_importances_[i] > 0: print('Importance of feature {}:, {:.3f}'.format(column, dt.feature_importances_[i])) ###Output Importance of feature Duration:, 0.117 Importance of feature Age:, 0.046 Importance of feature CheckingAccount_<0:, 0.094 Importance of feature CheckingAccount_none:, 0.524 Importance of feature CreditHistory_critical:, 0.072 Importance of feature CreditHistory_ok_at_this_bank:, 0.058 Importance of feature OtherPlans_none:, 0.089 ###Markdown ========== Question 1.15 ==========Now we want to evaluate the performance of the classifier on unseen data. Use the trained model to predict the target variables for the test data set. Display the classification accuracy for both the training and test data sets. What do you observe? Are you surprised by the results? ###Code # Student needs to provide code similar to below prediction_train = dt.predict(X=X_train) prediction_test = dt.predict(X=X_test) print('Classification accuracy on training set: {:.3f}'.format(accuracy_score(y_train,prediction_train))) print('Classification accuracy on test set: {:.3f}'.format(accuracy_score(y_test,prediction_test))) ###Output Classification accuracy on training set: 0.738 Classification accuracy on test set: 0.610 ###Markdown ***Student needs to answer similar to below:***The decision tree classifier is (probably) not overfitting. The classification accuracy is similar for training and test. The decision tree has provided a very simple way to interpret the data - splitting it into 8 bins and applying a class to each bin. Clearly this is an oversimplification: even in the training data, the leaves of the tree contain many examples of the 'incorrect' class. **However, the very astute will make an observation:** *Hint: Have a look at the class distribution in the dataset.* ###Code fig, (ax1, ax2) = plt.subplots(1,2, figsize=(8,4)) sns.countplot(x=y_train, ax=ax1) ax1.set_title('Training set', size=12) ax1.set_xlabel(' ') sns.countplot(x=y_test, ax=ax2) ax2.set_xlabel(' ') ax2.set_ylabel('') ax2.set_title('Validation set', size=12) fig.suptitle('Target distribution', size=12) plt.show() ###Output _____no_output_____ ###Markdown The dummy classifier of predicting everything as 'good' **will beat this model (75% accuracy on Validation Set)!** In other words, our model is worse than the baseline. **Always compare your models with very simple baselines**. ========== Question 1.16 ==========Fit another `DecisionTreeClassifier` but this time grow it to full depth (i.e. remove the max_depth condition). Again, use a `random_state=1000`.
Display the classification accuracy for training and test data as above. Again, what do you observe and are you surprised? ###Code # Student needs to provide code similar to below dt_fulldepth = DecisionTreeClassifier(criterion='entropy', random_state=1000) dt_fulldepth.fit(X_train, y_train) prediction_train2 = dt_fulldepth.predict(X=X_train) prediction_test2 = dt_fulldepth.predict(X=X_test) print('Classification accuracy on training set: {:.3f}'.format(accuracy_score(y_train,prediction_train2))) print('Classification accuracy on test set: {:.3f}'.format(accuracy_score(y_test,prediction_test2))) ###Output Classification accuracy on training set: 1.000 Classification accuracy on test set: 0.670 ###Markdown ***Student needs to answer similar to below:***This is a classic case of overfitting. The tree is allowed to grow to full depth and perfectly fit the training data. The result is that the test accuracy is much lower than the train; in fact it is only slightly more accurate than the very simple depth 3 tree! It's important to note that, when grown to full depth, decision tree classifiers can perfectly separate the training data (if no two datapoints are identical but have different classes). Performance on unseen data is likely to be much lower however. ========== Question 1.17 ==========By using seaborn's heatmap function, plot the normalised confusion matrices for both the training and test data sets **for the max_depth=3 decision tree from question 1.12**. Make sure you label axes appropriately. *Hint: You can make use of the `plot_confusion_matrix` function introduced in a previous lab, reproduced below.* ###Code def plot_confusion_matrix(cm, classes=None, title='Confusion matrix'): """Plots a confusion matrix.""" if classes is not None: sns.heatmap(cm, xticklabels=classes, yticklabels=classes, vmin=0., vmax=1., annot=True) else: sns.heatmap(cm, vmin=0., vmax=1.) plt.title(title) plt.ylabel('True label') plt.xlabel('Predicted label') # Training data (normalised confusion matrix) # Student needs to provide code similar to below cm = confusion_matrix(y_train, prediction_train) cm_norm = cm/cm.sum(axis=1)[:, np.newaxis] plt.figure() plot_confusion_matrix(cm_norm, classes=dt.classes_, title='Training confusion') # Test data (normalised confusion matrix) # Student needs to provide code similar to below cm = confusion_matrix(y_test, prediction_test) cm_norm = cm/cm.sum(axis=1)[:, np.newaxis] plt.figure() plot_confusion_matrix(cm_norm, classes=dt.classes_, title='Test confusion') ###Output _____no_output_____ ###Markdown **N.B. it will be obvious if you have plotted the full depth decision tree as the training confusion matrix will be the identity** ========== Question 1.18 ==========Finally we will create a [`Random decision forest`](http://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.RandomForestClassifier.html) classifier and compare the performance of this classifier to that of the decision tree. The random decision forest is an ensemble classifier that consists of many decision trees and outputs the class that is the mode of the classes output by the individual trees. Start with `n_estimators = 100`, use the `entropy` criterion and the same train/test split as before. Report the classification accuracy of the random forest model on the test set and show the confusion matrix. How does the random decision forest compare performance-wise to the decision tree?
###Code # Student needs to provide code similar to below rf = RandomForestClassifier(n_estimators=100, criterion='entropy') rf.fit(X_train, y_train) prediction_test = rf.predict(X=X_test) print('Classification accuracy on test set: {:.3f}'.format(accuracy_score(y_test,prediction_test))) cm = confusion_matrix(y_test, prediction_test) cm_norm = cm/cm.sum(axis=1)[:, np.newaxis] plt.figure() plot_confusion_matrix(cm_norm, classes=rf.classes_) ###Output Classification accuracy on test set: 0.750 ###Markdown ========== Question 1.19 ==========How high can you get the performance of the classifier by changing the max depth of the trees (`max_depth`), or the `max_features` parameters? Try a few values just to get a look. *Don't do a grid search or anything in-depth, just get a feel*. Try the same settings twice...do you get the same accuracy? ###Code # Student needs to provide code similar to below from itertools import product n_estimators = 500 max_features = [1, 'sqrt', 'log2'] max_depths = [None, 2, 5, 10] for f, d in product(max_features, max_depths): # with product we can iterate through all possible combinations rf = RandomForestClassifier(n_estimators=n_estimators, criterion='entropy', max_features=f, max_depth=d, n_jobs=2, random_state=1337) rf.fit(X_train, y_train) prediction_test = rf.predict(X=X_test) print('Classification accuracy on test set with max features = {} and max_depth = {}: {:.3f}'.format(f, d, accuracy_score(y_test,prediction_test))) cm = confusion_matrix(y_test, prediction_test) cm_norm = cm/cm.sum(axis=1)[:, np.newaxis] plt.figure() plot_confusion_matrix(cm_norm, classes=rf.classes_, title='Confusion matrix accuracy on test set with max features = {} and max_depth = {}: {:.3f}'.format(f, d, accuracy_score(y_test,prediction_test))) ###Output Classification accuracy on test set with max features = 1 and max_depth = None: 0.730 Classification accuracy on test set with max features = 1 and max_depth = 2: 0.750 Classification accuracy on test set with max features = 1 and max_depth = 5: 0.750 Classification accuracy on test set with max features = 1 and max_depth = 10: 0.750 Classification accuracy on test set with max features = sqrt and max_depth = None: 0.740 Classification accuracy on test set with max features = sqrt and max_depth = 2: 0.750 Classification accuracy on test set with max features = sqrt and max_depth = 5: 0.740 Classification accuracy on test set with max features = sqrt and max_depth = 10: 0.740 Classification accuracy on test set with max features = log2 and max_depth = None: 0.740 Classification accuracy on test set with max features = log2 and max_depth = 2: 0.750 Classification accuracy on test set with max features = log2 and max_depth = 5: 0.750 Classification accuracy on test set with max features = log2 and max_depth = 10: 0.720 ###Markdown N.B. Observing these confusion matrices you'll see something very important - for some configurations, the Random Forest **always predicts the majority class**: incidentally these are also the cases which do the best. This highlights (again) the importance of always checking performance against a dummy classifier!!!Additionally, if you want to reproduce your results, you must set the random seed (you can do this with the `random_state` argument). Random forests are...random! ========== Question 1.20 ==========Compare the feature importances as estimated with the decision tree and random forest classifiers. 
###Code # Student needs to provide code similar to below rf = RandomForestClassifier(n_estimators=500, criterion='entropy', max_features=1, max_depth=10, n_jobs=2) rf.fit(X_train, y_train) _, ax1 = plt.subplots(1, 1, figsize=(16,6)) xx = np.arange(len(dt.feature_importances_)) ax1.bar(xx, dt.feature_importances_) ax1.set_xticks(xx) ax1.set_xticklabels(column_names, rotation='vertical') ax1.set_title('Decision tree depth 3 importances') plt.show() _, ax2 = plt.subplots(1, 1, figsize=(16,6)) ax2.bar(xx, rf.feature_importances_) ax2.set_xticks(xx) ax2.set_xticklabels(column_names, rotation='vertical') ax2.set_title('Random forest max features=1 , max depth=10') plt.show() for i, column in enumerate(column_names): if dt.feature_importances_[i] > 0 or rf.feature_importances_[i] > 0: print('Importance of feature {}, DT: {:.3f}, RF: {:.3f}'.format(column, dt.feature_importances_[i], rf.feature_importances_[i])) ###Output _____no_output_____ ###Markdown 2. Linear regression
In the second part of the lab we use the [CPU performance](https://archive.ics.uci.edu/ml/datasets/Computer+Hardware) dataset for a simple regression task. Familiarise yourself with the dataset before moving on to the next step. Note that the version we will be using is missing the `Model Name` and `PRP` attributes. Our task will be to use the remaining attributes to predict `ERP` values. Download the dataset and save it in a directory called `datasets` in the same folder where your notebooks live. Alternatively, you can save the dataset in any folder you wish and modify the `data_path` variable below accordingly. We will load our data into a pandas DataFrame structure. ###Code # Load the dataset data_path = os.path.join(os.getcwd(), 'datasets', 'cpu.csv') cpu = pd.read_csv(data_path, delimiter = ',') ###Output _____no_output_____ ###Markdown ========== Question 2.1 ==========Display the number of data points and attributes in the dataset. ###Code # Student needs to provide code similar to below print('Number of samples: {}, number of attributes: {}'.format(cpu.shape[0], cpu.shape[1])) ###Output Number of samples: 209, number of attributes: 8 ###Markdown ========== Question 2.2 ==========Get a feel for the data by using pandas' `describe()` method. ###Code # Student needs to provide code similar to below cpu.describe() ###Output _____no_output_____ ###Markdown ========== Question 2.3 ==========Display the first 10 data points of the dataset. ###Code # Student needs to provide code similar to below cpu.head(10) ###Output _____no_output_____ ###Markdown ========== Question 2.4 ========== You might have noticed that the `vendor` attribute is categorical. This will cause problems when using a linear regression model. For now we can simply remove this attribute. Create a new DataFrame called `cpu_clean` by copying `cpu` but omitting the `vendor` attribute. Display the number of samples and attributes in the clean dataset as a sanity check. ###Code # Student needs to provide code similar to below cpu_clean = cpu.copy(deep=True) cpu_clean=cpu_clean.drop('vendor', axis=1) print('Clean dataset, number of samples: {}, number of attributes: {}'.format(cpu_clean.shape[0], cpu_clean.shape[1])) ###Output Clean dataset, number of samples: 209, number of attributes: 7 ###Markdown ========== Question 2.5 ==========Now -as always- we want to perform some exploratory data analysis. Remember that our task is to predict `ERP` values, so it's a good idea to inspect individual scatter plots of the target variable (`ERP`) against our input features.
For this purpose we will use once again seaborn's pairplot implementation. Create a series of [pairplots](https://seaborn.github.io/generated/seaborn.pairplot.html?highlight=pairplot#seaborn.pairplot) showing the pairwise relationship of `ERP` and the remaining attributes in the data. You can do so by specifying explicitly the `x_vars` and `y_vars` input arguments in the pairplot. *Hint: Your final plot will consist of 6 subplots, each containing a scatter plot.* ###Code # Student needs to provide code similar to below g2 = sns.pairplot(data=cpu, x_vars=['MYCT', 'MMIN', 'MMAX', 'CACH', 'CHMIN', 'CHMAX'], y_vars = 'ERP', height=3) ###Output _____no_output_____ ###Markdown ========== Question 2.6 ==========* Do you think that ERP should be at least partially predictable from the input attributes?* Do any attributes exhibit significant correlations? ***Student needs to answer similar to below:***Yes, it seems that some input features are positively correlated with the target variable, so it should be possible to partially predict it. The `MMIN` and `MMAX` features appear to exhibit the strongest correlations. On the other hand, the `MYCT` feature exhibits a strongly nonlinear negative correlation. ========== Question 2.7 ==========Now that we have a feel for the data, we will try fitting a simple linear regression model. Similarly to what we did in the first part of the lab, we want to use cross-validation to evaluate the goodness of the fit. By using the `cpu_clean` dataset, extract the raw values for the input features and the target variable and store them in two matrices, called `X` and `y` respectively. Then, split the dataset into training and testing sets by using a 75%-25% split (training/testing). Display the shapes of all matrices involved and double-check that all dimensionalities appear to be as expected. ###Code # Student needs to provide code similar to below X = cpu_clean.drop('ERP', axis=1).values y = cpu_clean['ERP'].values X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, test_size=0.25, random_state=0) print('Number of instances in X: {}'.format(np.shape(X)[0])) print('Number of instances in X_train: {}'.format(X_train.shape[0])) print('Number of instances in X_test: {}'.format(X_test.shape[0])) print('Number of instances in X_train and X_test together: {}'.format(X_train.shape[0] + X_test.shape[0])) ###Output Number of instances in X: 209 Number of instances in X_train: 156 Number of instances in X_test: 53 Number of instances in X_train and X_test together: 209 ###Markdown ========== Question 2.8 ==========Fit a simple linear regressor by using the [`LinearRegression`](http://scikit-learn.org/0.24/modules/generated/sklearn.linear_model.LinearRegression.html) model in Scikit-learn. Report the training accuracy by using the `score` method. What does this represent? ###Code # Student needs to provide code similar to below lm = LinearRegression(fit_intercept=True, normalize=True, copy_X=True) lm.fit(X_train, y_train) print('Training accuracy: {:.3f}'.format(lm.score(X_train, y_train))) ###Output Training accuracy: 0.916 ###Markdown ***Student needs to answer similar to below:***The accuracy score represents the coefficient of determination ($R^2$). This is at most 1, but can be negative. It will be 0 if you predict the mean of y for all observations. ========== Question 2.9 ==========Now report the testing accuracy by using the `score` method of the regressor as well as the `r2_score` metric.
Confirm that these two yield identical results. How does the accuracy compare to the one reported on the training dataset? Do you think that your model does well at generalising to unseen data? ###Code # Student needs to provide code similar to below print('Testing accuracy by using score function: {:.3f}'.format(lm.score(X_test, y_test))) print('Testing accuracy by using r2_score metric: {:.3f}'.format(r2_score(y_test, lm.predict(X_test)))) ###Output Testing accuracy by using score function: 0.857 Testing accuracy by using r2_score metric: 0.857 ###Markdown ***Student needs to answer similar to below:***The accuracy on the test dataset is very similar to the training accuracy. From that we can conclude that the model has not overfitted the training data. ========== Question 2.10 ==========Now we want to get a feel for how good the fit is, so we will plot the measured values against the predicted ones. Make use of the function provided below, which takes as input arguments the measured (`y_true`) and predicted (`y_pred`) values of a target variable and produces a scatter plot of the two, also including a straight line going through the origin. Where would you expect the points to be for a perfect fit? ###Code def fit_scatter(y_true, y_pred): assert y_true.shape == y_pred.shape fig, ax = plt.subplots() ax.scatter(y_true, y_pred) ax.plot([y_true.min(), y_true.max()], [y_true.min(), y_true.max()], 'k--', lw=4) ax.set_xlabel('Measured') ax.set_ylabel('Predicted') # Student needs to provide code similar to below prediction = lm.predict(X_test) fit_scatter(y_test, prediction) ###Output _____no_output_____ ###Markdown ***Student needs to answer similar to below:***If the fit were perfect, all the points would be sitting on the straight line going through the origin. ========== Question 2.11 ==========Another way of assessing the performance of the model is to inspect the distribution of the errors. Make a histogram plot by using seaborn's `histplot` function. This will also show an estimate of the underlying distribution. Does it look like the errors are normally distributed? Would you trust the fit of the distribution on the graph? Explain why. ###Code # Student needs to provide code similar to below g = sns.histplot(y_test-prediction, kde=True, color="b") sns.rugplot(data=y_test-prediction, color="b") ###Output _____no_output_____ ###Markdown ***Student needs to answer similar to below:***We should be very careful before making any judgements since the number of samples is relatively small in this case. However, the errors seem to follow a Gaussian (normal) distribution. There is some evidence to suggest the model is very slightly over-predicting more than under-predicting (given the skew). ========== Question 2.12 ==========Above we deleted the vendor variable. However, we can use nominal attributes in regression by converting them to numeric, exactly in the same way that we did in the first part of this lab. Now, use the original `cpu` dataset and convert the `vendor` attribute to a numeric one by means of one-hot encoding. Then train a linear regression model on the data and compare its performance to the one we had previously. Did adding the *binarized vendor* variable help?
###Code # Student needs to provide code similar to below # First extract the Data vendor_1hot = pd.get_dummies(cpu['vendor']) X = pd.concat([cpu.drop(['ERP', 'vendor'], axis=1), vendor_1hot], axis=1).values y = cpu['ERP'].values # Split into Training/Testing X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, test_size=0.25, random_state=0) # Train & Predict lm = LinearRegression(fit_intercept=True, normalize=True, copy_X=True).fit(X_train,y_train) prediction_new = lm.predict(X_test) print('New accuracy on test set: {:.3f}'.format(lm.score(X_test,y_test))) ###Output New accuracy on test set: 0.762
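###Markdown A hedged aside (not part of the original lab): Question 2.7 mentioned cross-validation, but only a single train/test split was used above. A minimal 5-fold cross-validation sketch on the same one-hot-encoded `X` and `y` could look like the following. ###Code
# Sketch only: k-fold CV gives a less split-dependent estimate of R^2
# than a single hold-out split.
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(LinearRegression(fit_intercept=True, normalize=True), X, y, cv=5, scoring='r2')
print('CV R^2 scores: {}'.format(cv_scores))
print('Mean CV R^2: {:.3f}'.format(cv_scores.mean()))
###Output _____no_output_____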
benchmarks/data/codes/multiple_neutrino_distances.ipynb
###Markdown Produce comoving distances and distance moduli for cosmologies with multiple non-degenerate massive neutrinos, using astropy. Set the cosmological parameters we will use. Here `w0` and `wa` parametrize the dark energy equation of state, $w(a) = w_0 + w_a (1 - a)$, as implemented by astropy's `w0waCDM`: ###Code # Use these for each test OmegaC = 0.25 OmegaB = 0.05 h = 0.7 As = 2.1*10**(-9) # We set the total number of neutrinos to 3 instead of 3.046 # This is just because astropy only allows splitting Neff equally between all neutrino species # So we need an integer to match our code. Nnu_tot = 3 # Use 5 different sets of these parameters OmegaL = [0.7, 0.7, 0.7, 0.65, 0.75] w0 = [-1., -0.9, -0.9, -0.9, -0.9] wa = [0., 0., 0.1, 0.1, 0.1] mnu_set_0 = [0.04, 0., 0.] * u.eV mnu_set_1 = [0.05, 0.01, 0.] * u.eV mnu_set_2 = [0.03, 0.02, 0.04] * u.eV mnu_set_3 = [0.05, 0., 0.] * u.eV mnu_set_4 = [0.03, 0.02, 0.] * u.eV mnu = [mnu_set_0, mnu_set_1, mnu_set_2, mnu_set_3, mnu_set_4] ###Output _____no_output_____ ###Markdown Set up the 5 cosmologies with astropy cosmology objects ###Code cosmo0 = cosmo.w0waCDM(100*h, OmegaC + OmegaB, OmegaL[0], w0 = w0[0], wa = wa[0], Tcmb0 = 2.725, Neff = Nnu_tot, m_nu= mnu_set_0, Ob0 = OmegaB) cosmo1 = cosmo.w0waCDM(100*h, OmegaC + OmegaB, OmegaL[1], w0 = w0[1], wa = wa[1], Tcmb0 = 2.725, Neff = Nnu_tot, m_nu= mnu_set_1, Ob0 = OmegaB) cosmo2 = cosmo.w0waCDM(100*h, OmegaC + OmegaB, OmegaL[2], w0 = w0[2], wa = wa[2], Tcmb0 = 2.725, Neff = Nnu_tot, m_nu= mnu_set_2, Ob0 = OmegaB) cosmo3 = cosmo.w0waCDM(100*h, OmegaC + OmegaB, OmegaL[3], w0 = w0[3], wa = wa[3], Tcmb0 = 2.725, Neff = Nnu_tot, m_nu= mnu_set_3, Ob0 = OmegaB) cosmo4 = cosmo.w0waCDM(100*h, OmegaC + OmegaB, OmegaL[4], w0 = w0[4], wa = wa[4], Tcmb0 = 2.725, Neff = Nnu_tot, m_nu= mnu_set_4, Ob0 = OmegaB) ###Output _____no_output_____ ###Markdown Set the redshifts ###Code z = [1., 2., 3., 4., 5.]
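# (Added comment) These are the low-redshift benchmark points; a separate high-redshift set is defined further below.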
###Output _____no_output_____ ###Markdown Get the comoving distances ###Code # Loop over models chi0 = (cosmo0.comoving_distance(z)).value chi1 = (cosmo1.comoving_distance(z)).value chi2 = (cosmo2.comoving_distance(z)).value chi3 = (cosmo3.comoving_distance(z)).value chi4 = (cosmo4.comoving_distance(z)).value ###Output _____no_output_____ ###Markdown Output these to a benchmark file ###Code with open('./benchmark/chi_mnu_model1-5.txt', 'a') as out_file: out_file.write('#z chi(z,model1) chi(z,model2) chi(z,model3) chi(z,model4) chi(z,model5)\n') out_file.write(str(z[0])+'\t'+str(chi0[0])+'\t'+str(chi1[0])+'\t'+str(chi2[0])+'\t'+str(chi3[0])+'\t'+str(chi4[0])+'\n') out_file.write(str(z[1])+'\t'+str(chi0[1])+'\t'+str(chi1[1])+'\t'+str(chi2[1])+'\t'+str(chi3[1])+'\t'+str(chi4[1])+'\n') out_file.write(str(z[2])+'\t'+str(chi0[2])+'\t'+str(chi1[2])+'\t'+str(chi2[2])+'\t'+str(chi3[2])+'\t'+str(chi4[2])+'\n') out_file.write(str(z[3])+'\t'+str(chi0[3])+'\t'+str(chi1[3])+'\t'+str(chi2[3])+'\t'+str(chi3[3])+'\t'+str(chi4[3])+'\n') out_file.write(str(z[4])+'\t'+str(chi0[4])+'\t'+str(chi1[4])+'\t'+str(chi2[4])+'\t'+str(chi3[4])+'\t'+str(chi4[4])+'\n') ###Output _____no_output_____ ###Markdown Get also the distance moduli ###Code dm0 = (cosmo0.distmod(z)).value dm1 = (cosmo1.distmod(z)).value dm2 = (cosmo2.distmod(z)).value dm3 = (cosmo3.distmod(z)).value dm4 = (cosmo4.distmod(z)).value ###Output _____no_output_____ ###Markdown And output that to file ###Code with open('./benchmark/dm_mnu_model1-5.txt', 'a') as out_file: out_file.write('#z dm(z,model1) dm(z,model2) dm(z,model3) dm(z,model4) dm(z,model5)\n') out_file.write(str(z[0])+'\t'+str(dm0[0])+'\t'+str(dm1[0])+'\t'+str(dm2[0])+'\t'+str(dm3[0])+'\t'+str(dm4[0])+'\n') out_file.write(str(z[1])+'\t'+str(dm0[1])+'\t'+str(dm1[1])+'\t'+str(dm2[1])+'\t'+str(dm3[1])+'\t'+str(dm4[1])+'\n') out_file.write(str(z[2])+'\t'+str(dm0[2])+'\t'+str(dm1[2])+'\t'+str(dm2[2])+'\t'+str(dm3[2])+'\t'+str(dm4[2])+'\n') out_file.write(str(z[3])+'\t'+str(dm0[3])+'\t'+str(dm1[3])+'\t'+str(dm2[3])+'\t'+str(dm3[3])+'\t'+str(dm4[3])+'\n') out_file.write(str(z[4])+'\t'+str(dm0[4])+'\t'+str(dm1[4])+'\t'+str(dm2[4])+'\t'+str(dm3[4])+'\t'+str(dm4[4])+'\n') ###Output _____no_output_____ ###Markdown Now do this for high redshifts ###Code z_high = [10., 20., 50., 100., 200., 500., 1000.] 
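# (Added comment) At these early-universe redshifts the radiation and massive-neutrino
# contributions to the expansion rate are relatively more important, which is
# what these high-z benchmarks exercise.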
# Loop over models chi0_high = (cosmo0.comoving_distance(z_high)).value chi1_high = (cosmo1.comoving_distance(z_high)).value chi2_high = (cosmo2.comoving_distance(z_high)).value chi3_high = (cosmo3.comoving_distance(z_high)).value chi4_high = (cosmo4.comoving_distance(z_high)).value with open('./benchmark/chi_hiz_mnu_model1-5.txt', 'a') as out_file: out_file.write('#z chi(z,model1) chi(z,model2) chi(z,model3) chi(z,model4) chi(z,model5)\n') out_file.write(str(z_high[0])+'\t'+str(chi0_high[0])+'\t'+str(chi1_high[0])+'\t'+str(chi2_high[0])+'\t'+str(chi3_high[0])+'\t'+str(chi4_high[0])+'\n') out_file.write(str(z_high[1])+'\t'+str(chi0_high[1])+'\t'+str(chi1_high[1])+'\t'+str(chi2_high[1])+'\t'+str(chi3_high[1])+'\t'+str(chi4_high[1])+'\n') out_file.write(str(z_high[2])+'\t'+str(chi0_high[2])+'\t'+str(chi1_high[2])+'\t'+str(chi2_high[2])+'\t'+str(chi3_high[2])+'\t'+str(chi4_high[2])+'\n') out_file.write(str(z_high[3])+'\t'+str(chi0_high[3])+'\t'+str(chi1_high[3])+'\t'+str(chi2_high[3])+'\t'+str(chi3_high[3])+'\t'+str(chi4_high[3])+'\n') out_file.write(str(z_high[4])+'\t'+str(chi0_high[4])+'\t'+str(chi1_high[4])+'\t'+str(chi2_high[4])+'\t'+str(chi3_high[4])+'\t'+str(chi4_high[4])+'\n') out_file.write(str(z_high[5])+'\t'+str(chi0_high[5])+'\t'+str(chi1_high[5])+'\t'+str(chi2_high[5])+'\t'+str(chi3_high[5])+'\t'+str(chi4_high[5])+'\n') out_file.write(str(z_high[6])+'\t'+str(chi0_high[6])+'\t'+str(chi1_high[6])+'\t'+str(chi2_high[6])+'\t'+str(chi3_high[6])+'\t'+str(chi4_high[6])+'\n') dm0_high = (cosmo0.distmod(z_high)).value dm1_high = (cosmo1.distmod(z_high)).value dm2_high = (cosmo2.distmod(z_high)).value dm3_high = (cosmo3.distmod(z_high)).value dm4_high = (cosmo4.distmod(z_high)).value with open('./benchmark/dm_hiz_mnu_model1-5.txt', 'a') as out_file: out_file.write('#z dm(z,model1) dm(z,model2) dm(z,model3) dm(z,model4) dm(z,model5)\n') out_file.write(str(z_high[0])+'\t'+str(dm0_high[0])+'\t'+str(dm1_high[0])+'\t'+str(dm2_high[0])+'\t'+str(dm3_high[0])+'\t'+str(dm4_high[0])+'\n') out_file.write(str(z_high[1])+'\t'+str(dm0_high[1])+'\t'+str(dm1_high[1])+'\t'+str(dm2_high[1])+'\t'+str(dm3_high[1])+'\t'+str(dm4_high[1])+'\n') out_file.write(str(z_high[2])+'\t'+str(dm0_high[2])+'\t'+str(dm1_high[2])+'\t'+str(dm2_high[2])+'\t'+str(dm3_high[2])+'\t'+str(dm4_high[2])+'\n') out_file.write(str(z_high[3])+'\t'+str(dm0_high[3])+'\t'+str(dm1_high[3])+'\t'+str(dm2_high[3])+'\t'+str(dm3_high[3])+'\t'+str(dm4_high[3])+'\n') out_file.write(str(z_high[4])+'\t'+str(dm0_high[4])+'\t'+str(dm1_high[4])+'\t'+str(dm2_high[4])+'\t'+str(dm3_high[4])+'\t'+str(dm4_high[4])+'\n') out_file.write(str(z_high[5])+'\t'+str(dm0_high[5])+'\t'+str(dm1_high[5])+'\t'+str(dm2_high[5])+'\t'+str(dm3_high[5])+'\t'+str(dm4_high[5])+'\n') out_file.write(str(z_high[6])+'\t'+str(dm0_high[6])+'\t'+str(dm1_high[6])+'\t'+str(dm2_high[6])+'\t'+str(dm3_high[6])+'\t'+str(dm4_high[6])+'\n') ###Output _____no_output_____
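###Markdown A hedged aside (not part of the original benchmark script): the repeated `out_file.write` calls above could be generated programmatically instead. A minimal sketch, assuming the arrays computed above; the `_alt` file name is hypothetical. ###Code
# Sketch only: stack redshifts and per-model distance moduli column-wise,
# then write one row per redshift.
import numpy as np

dms_high = [dm0_high, dm1_high, dm2_high, dm3_high, dm4_high]
data = np.column_stack([z_high] + dms_high)
header = '#z ' + ' '.join('dm(z,model{})'.format(i + 1) for i in range(5))
with open('./benchmark/dm_hiz_mnu_model1-5_alt.txt', 'a') as out_file:
    out_file.write(header + '\n')
    for row in data:
        out_file.write('\t'.join(str(val) for val in row) + '\n')
###Output _____no_output_____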
docs/tutorials/03-Extracting and Using Learned Embeddings.ipynb
###Markdown Utility Functions ###Code def load_classification_data(): dataset = fetch_covtype(data_home="data") data = np.hstack([dataset.data, dataset.target.reshape(-1, 1)]) col_names = [f"feature_{i}" for i in range(data.shape[-1])] col_names[-1] = "target" data = pd.DataFrame(data, columns=col_names) data["feature_0_cat"] = pd.qcut(data["feature_0"], q=4) data["feature_0_cat"] = "feature_0_" + data.feature_0_cat.cat.codes.astype(str) test_idx = data.sample(int(0.2 * len(data)), random_state=42).index test = data[data.index.isin(test_idx)] train = data[~data.index.isin(test_idx)] return (train, test, ["target"]) def print_metrics(y_true, y_pred, tag): if isinstance(y_true, pd.DataFrame) or isinstance(y_true, pd.Series): y_true = y_true.values if isinstance(y_pred, pd.DataFrame) or isinstance(y_pred, pd.Series): y_pred = y_pred.values if y_true.ndim>1: y_true=y_true.ravel() if y_pred.ndim>1: y_pred=y_pred.ravel() val_acc = accuracy_score(y_true, y_pred) val_f1 = f1_score(y_true, y_pred, average="macro") print(f"{tag} Acc: {val_acc} | {tag} F1: {val_f1}") ###Output _____no_output_____ ###Markdown Load Forest Cover Data ###Code train, test, target_col = load_classification_data() train, val = train_test_split(train, random_state=42) cat_col_names = ["feature_0_cat"] num_col_names = [col for col in train.columns if col not in cat_col_names+target_col] encoder = ce.OneHotEncoder(cols=cat_col_names) train_transform = encoder.fit_transform(train) val_transform = encoder.transform(val) test_transform = encoder.transform(test) ###Output D:\miniconda3\envs\df_encoder\lib\site-packages\category_encoders\utils.py:21: FutureWarning: is_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead elif pd.api.types.is_categorical(cols): ###Markdown BaselineLet's use the default LightGBM model as a baseline. ###Code clf = lgb.LGBMClassifier(random_state=42, n_jobs=-1) clf.fit(train_transform.drop(columns=target_col), train_transform[target_col].values.ravel()) val_pred = clf.predict(val_transform.drop(columns=target_col)) print_metrics(val_transform[target_col], val_pred, "Validation") test_pred = clf.predict(test_transform.drop(columns='target')) print_metrics(test_transform[target_col], test_pred, "Holdout") ###Output Validation Acc: 0.8528953641472251 | Validation F1: 0.825508819288814 Holdout Acc: 0.8517409338909829 | Holdout F1: 0.8175438711213123 ###Markdown CategoryEmbedding Model ###Code from pytorch_tabular import TabularModel from pytorch_tabular.models import CategoryEmbeddingModelConfig, NodeConfig, TabNetModelConfig from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig, ExperimentConfig from pytorch_tabular.categorical_encoders import CategoricalEmbeddingTransformer data_config = DataConfig( target=target_col, #target should always be a list. Multi-targets are only supported for regression. Multi-Task Classification is not implemented continuous_cols=num_col_names, categorical_cols=cat_col_names, continuous_feature_transform="quantile_normal", normalize_continuous_features=True ) trainer_config = TrainerConfig( auto_lr_find=True, # Runs the LRFinder to automatically derive a learning rate batch_size=1024, max_epochs=1000, gpus=-1, #index of the GPU to use. 
-1 means all available GPUs, None, means CPU ) optimizer_config = OptimizerConfig() model_config = CategoryEmbeddingModelConfig( task="classification", layers="4096-4096-512", # Number of nodes in each layer activation="LeakyReLU", # Activation between each layer learning_rate = 1e-3, metrics=["accuracy", "f1"], metrics_params=[{},{"average":"micro"}] ) tabular_model = TabularModel( data_config=data_config, model_config=model_config, optimizer_config=optimizer_config, trainer_config=trainer_config, ) tabular_model.fit(train=train, test=test) result = tabular_model.evaluate(test) print(result) ###Output _____no_output_____ ###Markdown To get the prediction as a dataframe, we can use the `predict` method. This will add predictions to the same dataframe that was passed in. For classification problems, we get both the probabilities and the final prediction, taking 0.5 as the threshold. ###Code pred_df = tabular_model.predict(test) pred_df.head() print_metrics(test['target'], pred_df["prediction"], tag="Holdout") ###Output Holdout Acc: 0.6147828780915991 | Holdout F1: 0.3269562480388109 ###Markdown Extract the Learned Embedding
For the models that support it (CategoryEmbeddingModel and CategoryEmbeddingNODE), we can extract the learned embeddings into a scikit-learn style Transformer. You can use this in your scikit-learn pipelines and workflows as a drop-in replacement. ###Code transformer = CategoricalEmbeddingTransformer(tabular_model) train_transform = transformer.fit_transform(train) clf = lgb.LGBMClassifier(random_state=42) clf.fit(train_transform.drop(columns='target'), train_transform['target']) val_transform = transformer.transform(val) val_pred = clf.predict(val_transform.drop(columns=target_col)) print_metrics(val_transform[target_col], val_pred, "Validation") test_transform = transformer.transform(test) test_pred = clf.predict(test_transform.drop(columns=target_col)) print_metrics(test_transform[target_col], test_pred, "Holdout") ###Output Validation Acc: 0.8561396865829626 | Validation F1: 0.8260076319996745 Holdout Acc: 0.8555876835166348 | Holdout F1: 0.8233005227790506
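###Markdown A hedged aside (not in the original tutorial): because the transformer follows the scikit-learn `fit`/`transform` convention, it should also compose inside a standard `Pipeline`. This is only a sketch under that assumption - it presumes the transformer accepts a feature frame without the target column, which the tutorial does not demonstrate. ###Code
# Sketch only: chain the embedding transformer with the LightGBM classifier.
from sklearn.pipeline import Pipeline

pipe = Pipeline([
    ("embed", CategoricalEmbeddingTransformer(tabular_model)),
    ("clf", lgb.LGBMClassifier(random_state=42)),
])
pipe.fit(train.drop(columns=target_col), train[target_col].values.ravel())
pipe_pred = pipe.predict(test.drop(columns=target_col))
print_metrics(test[target_col], pipe_pred, "Pipeline Holdout")
###Output _____no_output_____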
DenseNets_With_TensorFlow.ipynb
###Markdown **From Paper To Keras: DenseNets With TensorFlow**[Medium post](https://medium.com/@equipintelligence/exploring-densenets-from-paper-to-keras-dcc01725488b) [arXiv:1608.06993](https://arxiv.org/abs/1608.06993)---In this notebook, we'll create the popular [DenseNet](https://arxiv.org/abs/1608.06993) architecture right from scratch! We'll understand the structure right from the beginning and implement it using `tf.keras`. We'll require a GPU hardware accelerator for training the model. Change the runtime type to GPU by going to `Tools > Change Runtime Type > Hardware Accelerator > GPU`. ***Note: It is highly recommended that you go through the research paper once, as you'll come across various expressions from the paper in this notebook.*** 1) Importing the Packages We import TensorFlow and NumPy. Other packages are imported as and when needed. ###Code import tensorflow as tf import numpy as np import os ###Output _____no_output_____ ###Markdown 2) Loading the Data We'll train our model on the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset, which can be easily loaded using the `tf.keras.datasets` module. You can try more datasets via [TensorFlow Datasets](https://www.tensorflow.org/datasets) and see the list of image datasets on their [catalog](https://www.tensorflow.org/datasets/catalog/overview). ###Code (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() # One-hot encoding for 10 classes. y_train = tf.keras.utils.to_categorical(y_train, 10) y_test = tf.keras.utils.to_categorical(y_test, 10) ###Output _____no_output_____ ###Markdown 3) The DenseNet Model a) The $H$ function ( $BatchNorm \to ReLU \to Conv$ ) The name "DenseNet" is enough to give an intuition of this. DenseNet is made of Dense Blocks. "Blocks" only refers to a group of different layers. Unlike traditional CNNs, where information (feature maps) flows in a single direction, Dense Blocks allow convolution layers to access the inputs of all the previous layers present in the network. Information flow improves because each layer is connected to all of its preceding layers, not just to the feature map of the immediately previous layer. In the paper, you'll come across the expression $x_l = H( x_{l-1} ) + x_{l-1}$ (this is the ResNet-style identity connection discussed in the paper). $H$ represents a composite function which takes in an image/feature map ( $x$ ) and performs some operations on it: $ x \to Batch \ Normalization \to ReLU \to Zero \ Padding \to 3 \times 3 \ Convolution \to Dropout$. A bottleneck layer could be added too; we've implemented it below. ###Code def H( inputs, num_filters , dropout_rate ): x = tf.keras.layers.BatchNormalization( epsilon=eps )( inputs ) x = tf.keras.layers.Activation('relu')(x) x = tf.keras.layers.ZeroPadding2D((1, 1))(x) x = tf.keras.layers.Conv2D(num_filters, kernel_size=(3, 3), use_bias=False , kernel_initializer='he_normal' )(x) x = tf.keras.layers.Dropout(rate=dropout_rate )(x) return x ###Output _____no_output_____ ###Markdown b) The Transition Layers The Transition layers perform the downsampling of the feature maps coming from the previous block. The `compression_factor` below is the $\theta$ value from the paper. Hence, if $m$ feature maps go into the transition layer, we'll produce $[m \theta]$ feature maps, where $[ \ ]$ represents the floor function. For example, with $m = 128$ and $\theta = 0.5$ the transition layer outputs $[64] = 64$ feature maps.
###Code def transition(inputs, num_filters , compression_factor , dropout_rate ): # compression_factor is the 'θ' x = tf.keras.layers.BatchNormalization( epsilon=eps )(inputs) x = tf.keras.layers.Activation('relu')(x) num_feature_maps = inputs.shape[1] # The value of 'm' x = tf.keras.layers.Conv2D( np.floor( compression_factor * num_feature_maps ).astype( np.int ) , kernel_size=(1, 1), use_bias=False, padding='same' , kernel_initializer='he_normal' , kernel_regularizer=tf.keras.regularizers.l2( 1e-4 ) )(x) x = tf.keras.layers.Dropout(rate=dropout_rate)(x) x = tf.keras.layers.AveragePooling2D(pool_size=(2, 2))(x) return x ###Output _____no_output_____ ###Markdown c) Finally, the Dense BlockEach block will get some feature maps as input from the previous transition layer. These inputs will then go through the $H$ function to produce an output ( $x_1$ ) .$x_1 = H( x_{0} )$Now, $x_1$ again goes into the $H$ function. But this time, its concatenated with $x_{0}$. So $x_2$ will be produced like,$x_2 = H( \ concat( \ x_1 , x_0 \ ) \ )$Similarly, $x_l$ will be produced by the concatenation of all output feature maps of the previous layers ( as well as the inputs $x_0$ )$x_l = H( \ concat( \ x_0 \ , x_1\ , \ x_2 , ... , \ x_{l-1} \ ) \ )$After getting $x_l$, it will be passed through the transition layer. From then onwards, the outputs of the transition layer again flow in another block. ###Code def dense_block( inputs, num_layers, num_filters, growth_rate , dropout_rate ): for i in range(num_layers): # num_layers is the value of 'l' conv_outputs = H(inputs, num_filters , dropout_rate ) inputs = tf.keras.layers.Concatenate()([conv_outputs, inputs]) num_filters += growth_rate # To increase the number of filters for each layer. return inputs, num_filters ###Output _____no_output_____ ###Markdown We'll add Dense Blocks and Transition layers one after the other. A `GlobalAveragePooling2D` layer ensures that the outputs are 2D and finally a softmax layer produces the class probabilities. ###Code input_shape = ( 32 , 32 , 3 ) num_blocks = 3 num_layers_per_block = 4 growth_rate = 16 dropout_rate = 0.4 compress_factor = 0.5 eps = 1.1e-5 num_filters = 16 inputs = tf.keras.layers.Input( shape=input_shape ) x = tf.keras.layers.Conv2D( num_filters , kernel_size=( 3 , 3 ) , use_bias=False, kernel_initializer='he_normal' , kernel_regularizer=tf.keras.regularizers.l2( 1e-4 ) )( inputs ) for i in range( num_blocks ): x, num_filters = dense_block( x, num_layers_per_block , num_filters, growth_rate , dropout_rate ) x = transition(x, num_filters , compress_factor , dropout_rate ) x = tf.keras.layers.GlobalAveragePooling2D()( x ) x = tf.keras.layers.Dense( 10 )( x ) # Num Classes for CIFAR-10 outputs = tf.keras.layers.Activation( 'softmax' )( x ) ###Output _____no_output_____ ###Markdown Everything compiled into a beautiful `tf.keras.models.Model`! ###Code model = tf.keras.models.Model( inputs , outputs ) model.compile( loss=tf.keras.losses.categorical_crossentropy ,optimizer=tf.keras.optimizers.Adam( lr=0.0001 ) ,metrics=[ 'acc' ]) model.summary() #Comment out the below line if you want to have an image of your model's structure. #tf.keras.utils.plot_model( model , show_shapes=True ) ###Output _____no_output_____ ###Markdown 4) Training the ModelWe'll train the model now. 
###Code batch_size = 64 epochs = 100 model.fit( x_train , y_train , epochs=epochs , batch_size=batch_size , validation_data=( x_test , y_test ) ) ###Output _____no_output_____ ###Markdown 5) Evaluate the Model ###Code results = model.evaluate(x_test, y_test, batch_size=batch_size) print( 'Loss = {} and Accuracy = {} %'.format( results[0] , results[1] * 100 ) ) ###Output _____no_output_____
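###Markdown As a small usage sketch (not part of the original notebook): once the model above is trained, we can sanity-check it on a single test image. ###Code
# Sketch only: predict class probabilities for the first test image and
# compare the argmax with the one-hot encoded label.
probs = model.predict(x_test[:1])
print('Predicted class: {}'.format(np.argmax(probs, axis=-1)[0]))
print('True class: {}'.format(np.argmax(y_test[:1], axis=-1)[0]))
###Output _____no_output_____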
examples/notebooks/Cifar10_Ax_hyperparam_tuning.ipynb
###Markdown FastResNet Hyperparameter tuning with [Ax](https://ax.dev/) on CIFAR10 In this notebook we provide an example of hyperparameter tuning with the [Ax](https://ax.dev/) package. We will train a ResNet model from [the awesome repository of David Page](https://github.com/davidcpage/cifar10-fast) on CIFAR10. Why Ax? This is a good question ... Maybe this page can answer it best: https://ax.dev/docs/why-ax.html > Ax is a platform for optimizing any kind of experiment, including machine learning experiments, A/B tests, and simulations. Ax can optimize discrete configurations (e.g., variants of an A/B test) using multi-armed bandit optimization, and continuous (e.g., integer or floating point)-valued configurations using Bayesian optimization. This makes it suitable for a wide range of applications. There are also interesting packages such as [ray-tune](https://ray.readthedocs.io/en/latest/tune.html), [optuna](https://github.com/pfnet/optuna) and many others. As a side note, optuna provides an example with Ignite [here](https://github.com/pfnet/optuna/blob/master/examples/ignite_simple.py). Fast ResNet model We will reimplement a ResNet model from David Page's [cifar-10 repository](https://github.com/davidcpage/cifar10-fast) which trains very fast (94% test accuracy in 26 seconds on an NVidia V100). For the sake of simplicity, we will not apply all preprocessing used in the repository (please see the [bag-of-trick notebook](https://github.com/davidcpage/cifar10-fast/blob/master/bag_of_tricks.ipynb) for details). Setup dependencies Please install - `torchvision`- `Ax`- `tensorboard` ###Code !pip install pytorch-ignite tensorboardX ax-platform import sys sys.path.insert(0, "../../") import torch import ignite torch.__version__, ignite.__version__ ###Output _____no_output_____ ###Markdown Setup model The cifar10-fast model is inspired by the ResNet family of models, and in order to run fast it uses various tricks like:- `conv + batch norm + activation + pool` -> `conv + pool + batch norm + activation`- `batchnorm` -> `ghost batchnorm` -> `frozen ghost batchnorm`- `ReLU` -> `CeLU`- data whitening as a non-learnable convolution operation (we will not implement it) The network architecture looks like this:![fastresnet](https://github.com/abdulelahsm/ignite/blob/update-tutorials/examples/notebooks/assets/fastresnet_v2.svg?raw=1) Please see the [bag-of-trick notebook](https://github.com/davidcpage/cifar10-fast/blob/master/bag_of_tricks.ipynb) for more details. ###Code import torch import torch.nn as nn import torch.nn.functional as F class GhostBatchNorm(nn.BatchNorm2d): """ From : https://github.com/davidcpage/cifar10-fast/blob/master/bag_of_tricks.ipynb Batch norm seems to work best with batch size of around 32. The reasons presumably have to do with noise in the batch statistics and specifically a balance between a beneficial regularising effect at intermediate batch sizes and an excess of noise at small batches. Our batches are of size 512 and we can't afford to reduce them without taking a serious hit on training times, but we can apply batch norm separately to subsets of a training batch. This technique, known as 'ghost' batch norm, is usually used in a distributed setting but is just as useful when using large batches on a single node. It isn't supported directly in PyTorch but we can roll our own easily enough.
""" def __init__(self, num_features, num_splits, eps=1e-05, momentum=0.1, weight=True, bias=True): super(GhostBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum) self.weight.data.fill_(1.0) self.bias.data.fill_(0.0) self.weight.requires_grad = weight self.bias.requires_grad = bias self.num_splits = num_splits self.register_buffer('running_mean', torch.zeros(num_features*self.num_splits)) self.register_buffer('running_var', torch.ones(num_features*self.num_splits)) def train(self, mode=True): if (self.training is True) and (mode is False): self.running_mean = torch.mean(self.running_mean.view(self.num_splits, self.num_features), dim=0).repeat(self.num_splits) self.running_var = torch.mean(self.running_var.view(self.num_splits, self.num_features), dim=0).repeat(self.num_splits) return super(GhostBatchNorm, self).train(mode) def forward(self, input): N, C, H, W = input.shape if self.training or not self.track_running_stats: return F.batch_norm( input.view(-1, C*self.num_splits, H, W), self.running_mean, self.running_var, self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits), True, self.momentum, self.eps).view(N, C, H, W) else: return F.batch_norm( input, self.running_mean[:self.num_features], self.running_var[:self.num_features], self.weight, self.bias, False, self.momentum, self.eps) class IdentityResidualBlock(nn.Module): def __init__(self, num_channels, conv_ksize=3, conv_pad=1, gbn_num_splits=16): super(IdentityResidualBlock, self).__init__() self.res1 = nn.Sequential( Conv2d(num_channels, num_channels, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), GhostBatchNorm(num_channels, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3) ) self.res2 = nn.Sequential( Conv2d(num_channels, num_channels, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), GhostBatchNorm(num_channels, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3) ) def forward(self, x): residual = x x = self.res1(x) x = self.res2(x) return x + residual # We override conv2d to get proper padding for kernel size = 2 class Conv2d(nn.Conv2d): def __init__(self, *args, **kwargs): super(Conv2d, self).__init__(*args, **kwargs) if self.kernel_size == (2, 2): self.forward = self.ksize_2_forward self.ksize_2_padding = (0, self.padding[0], 0, self.padding[1]) self.padding = (0, 0) def ksize_2_forward(self, x): x = F.pad(x, pad=self.ksize_2_padding) return super(Conv2d, self).forward(x) class FastResNet(nn.Module): def __init__(self, num_classes=10, fmap_factor=64, conv_ksize=3, conv_pad=1, gbn_num_splits=512 // 32, classif_scale=0.0625): super(FastResNet, self).__init__() self.prep = nn.Sequential( Conv2d(3, fmap_factor, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), GhostBatchNorm(fmap_factor, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3) ) self.layer1 = nn.Sequential( Conv2d(fmap_factor, fmap_factor * 2, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), nn.MaxPool2d(kernel_size=2), GhostBatchNorm(fmap_factor * 2, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3), IdentityResidualBlock(fmap_factor * 2, conv_ksize=conv_ksize, conv_pad=conv_pad, gbn_num_splits=gbn_num_splits) ) self.layer2 = nn.Sequential( Conv2d(fmap_factor * 2, fmap_factor * 4, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), nn.MaxPool2d(kernel_size=2), GhostBatchNorm(fmap_factor * 4, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3), ) self.layer3 = nn.Sequential( Conv2d(fmap_factor * 4, fmap_factor * 8, 
kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), nn.MaxPool2d(kernel_size=2), GhostBatchNorm(fmap_factor * 8, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3), IdentityResidualBlock(fmap_factor * 8, conv_ksize=conv_ksize, conv_pad=conv_pad, gbn_num_splits=gbn_num_splits) ) self.pool = nn.MaxPool2d(kernel_size=4) self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(fmap_factor * 8, num_classes) ) # fix: use the classif_scale argument (it was hard-coded to 0.0625, so the tuned value was ignored)
self.scale = torch.tensor(classif_scale, requires_grad=False)
def forward(self, x): x = self.prep(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.pool(x) y = self.classifier(x) return y * self.scale model = FastResNet(10, fmap_factor=64) def print_num_params(model, display_all_modules=False): total_num_params = 0 for n, p in model.named_parameters(): num_params = 1 for s in p.shape: num_params *= s if display_all_modules: print(f"{n}: {num_params}") total_num_params += num_params print("-" * 50) print(f"Total number of parameters: {total_num_params:.2e}") print_num_params(model) model = FastResNet(10, fmap_factor=64, conv_ksize=2) print_num_params(model) ###Output _____no_output_____ ###Markdown Setup dataflow We will set up the dataflow using `torchvision` transforms; we will not follow all the suggestions of the [bag-of-trick notebook](https://github.com/davidcpage/cifar10-fast/blob/master/bag_of_tricks.ipynb). Data augmentations used to transform the dataset are- Random Crop- Flip left-right- Cutout ###Code import torch from torchvision.transforms import Compose, Pad, RandomHorizontalFlip, RandomErasing, RandomCrop, Normalize from torch.utils.data import DataLoader from torchvision.transforms import ToTensor from torchvision.datasets.cifar import CIFAR10 train_transform = Compose([ Pad(4), RandomCrop(32), RandomHorizontalFlip(), ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), RandomErasing(scale=(0.0625, 0.0625), ratio=(1.0, 1.0)) ]) test_transform = Compose([ ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) train_ds = CIFAR10("/tmp/cifar10", train=True, download=True, transform=train_transform) # fix: evaluate on un-augmented data (test_transform was defined but unused)
test_ds = CIFAR10("/tmp/cifar10", train=False, download=True, transform=test_transform)
def get_train_test_loaders(): train_loader = DataLoader(train_ds, batch_size=512, num_workers=10, shuffle=True, drop_last=True, pin_memory=True) test_loader = DataLoader(test_ds, batch_size=512, num_workers=10, shuffle=False, drop_last=False, pin_memory=True) return train_loader, test_loader ###Output _____no_output_____ ###Markdown Setup criterion, optimizer and lr scheduling Following cifar10-fast, we will use the label smoothing trick for improving the training speed and generalization of neural nets in classification problems.
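As a clarification derived from the `forward` method of the criterion below (an added note, not text from the original tutorial): with smoothing rate $\alpha$ and $C$ classes, the loss computed is $L = (1-\alpha)\,\mathrm{CE}(p, y) + \alpha \cdot \frac{1}{C}\sum_{c=1}^{C}\left(-\log p_c\right)$, i.e. the usual cross-entropy mixed with a cross-entropy against a uniform target distribution.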
###Code import torch.nn as nn import torch.optim as optim class CriterionWithLabelSmoothing(nn.Module): def __init__(self, criterion, alpha=0.2): super(CriterionWithLabelSmoothing, self).__init__() self.criterion = criterion if self.criterion.reduction != 'none': raise ValueError("Input criterion should have reduction equal none") self.alpha = alpha def forward(self, logits, targets): loss = self.criterion(logits, targets) log_probs = torch.log_softmax(logits, dim=1) klloss = -log_probs.mean(dim=1) out = (1.0 - self.alpha) * loss + self.alpha * klloss return out.mean(dim=0) # fix: pass the requested alpha through (it was hard-coded to 0.2, so the tuned value was ignored)
def get_criterion(alpha): return CriterionWithLabelSmoothing(nn.CrossEntropyLoss(reduction='none'), alpha=alpha)
def get_optimizer(model, momentum, weight_decay, nesterov): biases = [p for n, p in model.named_parameters() if "bias" in n] others = [p for n, p in model.named_parameters() if "bias" not in n] return optim.SGD( [{"params": others, "lr": 1.0, "weight_decay": weight_decay}, {"params": biases, "lr": 1.0, "weight_decay": weight_decay / 64}], momentum=momentum, nesterov=nesterov ) ###Output _____no_output_____ ###Markdown There is an implementation difference between the current PyTorch SGD and the SGD from cifar10-fast. The latter uses the Sutskever et al. implementation:```new_w = w + mu * v - lr * (dw + weight_decay * w)v = mu * prev_v - lr * (dw + weight_decay * w)```and PyTorch's one is ```new_w = w - lr * (mu * v + dw + weight_decay * w)v = mu * prev_v + dw + weight_decay * w``` ###Code from ignite.contrib.handlers import PiecewiseLinear, ParamGroupScheduler def get_lr_scheduler(optimizer, lr_max_value, lr_max_value_epoch, num_epochs, epoch_length): milestones_values = [ (0, 0.0), (epoch_length * lr_max_value_epoch, lr_max_value), (epoch_length * num_epochs - 1, 0.0) ] lr_scheduler1 = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values, param_group_index=0) milestones_values = [ (0, 0.0), (epoch_length * lr_max_value_epoch, lr_max_value * 64), (epoch_length * num_epochs - 1, 0.0) ] lr_scheduler2 = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values, param_group_index=1) lr_scheduler = ParamGroupScheduler( [lr_scheduler1, lr_scheduler2], ["lr scheduler (non-biases)", "lr scheduler (biases)"] ) return lr_scheduler %matplotlib inline num_epochs = 25 lr_max_value = 0.4 milestones_values = [(0, 0.0), (num_epochs // 5, lr_max_value), (num_epochs - 1, 0.0)] PiecewiseLinear.plot_values(num_epochs, param_name="lr", milestones_values=milestones_values) ###Output _____no_output_____ ###Markdown Setup hyperparameter tuning Now we are ready to set up hyperparameter tuning to optimize the following parameters, in order to get higher accuracy on the test dataset while training is limited to a small, fixed number of epochs:- learning rate peak value: `[0.1, 1.0]`- SGD momentum: `[0.7, 1.0]`- weight decay: `[0.0, 1e-3]`- label smoothing `alpha`: `[0.1, 0.5]`- number of features (`fmap_factor`): `[16, 24, 32, 40, 48, 56, 64, 72, 80]`- convolution kernel size: `3` or `2`- ...
###Code from ax.plot.contour import plot_contour from ax.plot.trace import optimization_trace_single_method from ax.service.managed_loop import optimize from ax.utils.notebook.plotting import render, init_notebook_plotting init_notebook_plotting() ###Output _____no_output_____ ###Markdown First, we need to create evaluation function that receives experiment parameters and returns test accuracy.Input parameters search space is defined as a list of dictionaries that have the following required keys: - "name" - parameter name, - "type" - parameter type ("range", "choice" or "fixed"), - "bounds" for range parameters, - "values" for choice parameters, and - "value" for fixed parameters.Experiment parameters object provided for a single experiment is a dictionary `parameter name: value or values`. Links: - [Ax Parameters API](https://ax.dev/api/core.htmlmodule-ax.core.parameter)- [Ax optimize function](https://ax.dev/api/service.htmlax.service.managed_loop.optimize)- [Ax parameters search space example](https://ax.dev/tutorials/gpei_hartmann_service.html2.-Set-up-experiment) ###Code from ignite.engine import create_supervised_trainer, create_supervised_evaluator, Events, convert_tensor from ignite.metrics import Accuracy from ignite.contrib.handlers import TensorboardLogger, ProgressBar from ignite.contrib.handlers.tensorboard_logger import OutputHandler, OptimizerParamsHandler, GradsHistHandler, \ global_step_from_engine # Transfer batch to GPU and set floating-point 16 def prepare_batch_fp16(batch, device=None, non_blocking=True): x, y = batch return (convert_tensor(x, device=device, non_blocking=non_blocking).half(), convert_tensor(y, device=device, non_blocking=non_blocking)) torch.backends.cudnn.benchmark = True num_epochs = 17 def run_experiment(parameters): device = 'cuda' fast_mode = parameters.get("fast_mode", True) # setup model model = FastResNet( num_classes=10, fmap_factor=parameters.get("fmap_factor"), conv_ksize=parameters.get("conv_ksize"), classif_scale=parameters.get("classif_scale") ).to(device).half() # setup dataloaders train_loader, test_loader = get_train_test_loaders() # setup solver criterion = get_criterion(parameters.get("alpha")).to(device) optimizer = get_optimizer( model, parameters.get("momentum"), parameters.get("weight_decay"), parameters.get("nesterov") ) lr_scheduler = get_lr_scheduler( optimizer, parameters.get("lr_max_value"), parameters.get("lr_max_value_epoch"), num_epochs=num_epochs, epoch_length=len(train_loader) ) # setup ignite trainer trainer = create_supervised_trainer(model, optimizer, criterion, device=device, non_blocking=True, prepare_batch=prepare_batch_fp16) # setup learning rate scheduler trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler) # setup tensorboard logger exp_log_name = f"exp_{parameters.get('fmap_factor')}_{parameters.get('conv_ksize')}_" + \ f"{parameters.get('alpha'):.2}_{parameters.get('lr_max_value'):.4}" tb_logger = TensorboardLogger(log_dir=f"/tmp/tb_logs/{exp_log_name}") if not fast_mode: # - log learning rate tb_logger.attach(trainer, OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED) # - log training batch loss tb_logger.attach(trainer, OutputHandler(tag="training", output_transform=lambda x: {"batch loss": x}), event_name=Events.ITERATION_COMPLETED) # - log model grads tb_logger.attach(trainer, GradsHistHandler(model), event_name=Events.EPOCH_COMPLETED) # setup a progress bar ProgressBar().attach(trainer, event_name=Events.EPOCH_COMPLETED, closing_event_name=Events.COMPLETED) # setup 
evaluator def output_transform(output): y_pred, y = output y_pred = y_pred.float() return y_pred, y metrics = { "test accuracy": Accuracy(output_transform=output_transform) } evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True, prepare_batch=prepare_batch_fp16) # evaluate trained model each 3 epochs @trainer.on(Events.EPOCH_COMPLETED) def run_evaluation(engine): c1 = (engine.state.epoch - 1) % 3 == 0 c2 = engine.state.epoch == engine.state.max_epochs if (c1 and not fast_mode) or c2: evaluator.run(test_loader) if not fast_mode: # - log test accuracy tb_logger.attach(evaluator, OutputHandler(tag="validation", metric_names="all", global_step_transform=global_step_from_engine(trainer)), event_name=Events.EPOCH_COMPLETED) trainer.run(train_loader, max_epochs=num_epochs) test_acc = evaluator.state.metrics['test accuracy'] # dump hparams/result to Tensorboard tb_logger.writer.add_hparams(parameters, {'hparam/test_accuracy': test_acc}) tb_logger.close() return test_acc ###Output _____no_output_____ ###Markdown Original training configurations gives us the following result: ###Code batch_size = 512 num_epochs = 20 run_experiment( parameters={ "fmap_factor": 64, "conv_ksize": 3, "classif_scale": 0.0625, "alpha": 0.2, "momentum": 0.9, "weight_decay": 5e-4, "nesterov": True, "lr_max_value": 1.0, "lr_max_value_epoch": num_epochs // 5, "fast_mode": False } ) ###Output _____no_output_____ ###Markdown Setup parameters search space ###Code parameters_space = [ { "name": "fmap_factor", "type": "range", "bounds": [48, 80], }, { "name": "conv_ksize", "type": "choice", "values": [2, 3], }, { "name": "classif_scale", "type": "range", "bounds": [0.00625, 0.250], }, { "name": "alpha", "type": "range", "bounds": [0.1, 0.5], }, { "name": "momentum", "type": "range", "bounds": [0.7, 1.0], }, { "name": "weight_decay", "type": "range", "bounds": [1e-4, 1e-3], "value_type": "float", }, { "name": "nesterov", "type": "choice", "values": [True, False], }, { "name": "lr_max_value", "type": "range", "bounds": [0.1, 1.0], }, { "name": "lr_max_value_epoch", "type": "range", "bounds": [1, 10], }, ] ###Output _____no_output_____ ###Markdown Start tuning ###Code num_epochs = exp_num_epochs = 20 best_parameters, values, experiment, model = optimize( parameters=parameters_space, evaluation_function=run_experiment, objective_name='test accuracy', total_trials=30 ) ###Output _____no_output_____ ###Markdown We found the best parameters that give the following outcome: ###Code means, covariances = values print(f"\nBest parameters: {best_parameters}\n") print(f"Test accuracy: {means} ± {covariances}") ###Output _____no_output_____ ###Markdown Let's plot contours showing test accuracy as a function of the two hyperparameters. ###Code render(plot_contour(model=model, param_x='lr_max_value', param_y='momentum', metric_name='test accuracy')) ###Output _____no_output_____ ###Markdown Let's retrain the model with best found parameters and compare with previous baseline: ###Code batch_size = 512 num_epochs = 20 best_parameters_copy = dict(best_parameters) best_parameters_copy['fast_mode'] = False run_experiment( parameters=best_parameters_copy ) ###Output _____no_output_____ ###Markdown FastResNet Hyperparameters tuning with [Ax](https://ax.dev/) on CIFAR10In this notebook we provide an example of hyperparameter tuning with [Ax](https://ax.dev/) package. We will train a ResNet model from [awesome repository of David Page](https://github.com/davidcpage/cifar10-fast) on CIFAR10. 
Why Ax?This is a good question ... maybe this page can answer it better: https://ax.dev/docs/why-ax.html> Ax is a platform for optimizing any kind of experiment, including machine learning experiments, A/B tests, and simulations. Ax can optimize discrete configurations (e.g., variants of an A/B test) using multi-armed bandit optimization, and continuous (e.g., integer or floating point)-valued configurations using Bayesian optimization. This makes it suitable for a wide range of applications.There are also interesting packages such as [ray-tune](https://ray.readthedocs.io/en/latest/tune.html), [optuna](https://github.com/pfnet/optuna) and many others. As a side note, optuna provides an example with Ignite [here](https://github.com/pfnet/optuna/blob/master/examples/ignite_simple.py). Fast ResNet modelWe will reimplement a ResNet model from David Page's [cifar-10 repository](https://github.com/davidcpage/cifar10-fast) which trains very fast (94% test accuracy in 26 seconds on an NVidia V100). For the sake of simplicity, we will not apply all of the preprocessing used in the repository (please see the [bag-of-tricks notebook](https://github.com/davidcpage/cifar10-fast/blob/master/bag_of_tricks.ipynb) for details). Setup dependenciesPlease install - `torchvision`- `Ax`- tensorboard ###Code !pip install pytorch-ignite tensorboardX import sys sys.path.insert(0, "../../") import torch import ignite torch.__version__, ignite.__version__ ###Output _____no_output_____ ###Markdown Setup modelThe cifar10-fast model is inspired by the ResNet family of models, and in order to run fast it uses various tricks such as:- `conv + batch norm + activation + pool` -> `conv + pool + batch norm + activation`- `batchnorm` -> `ghost batchnorm` -> `frozen ghost batchnorm`- `ReLU` -> `CeLU`- data whitening as a non-learnable convolution operation (we will not implement it)Network architecture looks like this:![fastresnet](https://github.com/abdulelahsm/ignite/blob/update-tutorials/examples/notebooks/assets/fastresnet_v2.svg?raw=1)Please see the [bag-of-tricks notebook](https://github.com/davidcpage/cifar10-fast/blob/master/bag_of_tricks.ipynb) for more details. ###Code import torch import torch.nn as nn import torch.nn.functional as F class GhostBatchNorm(nn.BatchNorm2d): """ From : https://github.com/davidcpage/cifar10-fast/blob/master/bag_of_tricks.ipynb Batch norm seems to work best with batch size of around 32. The reasons presumably have to do with noise in the batch statistics and specifically a balance between a beneficial regularising effect at intermediate batch sizes and an excess of noise at small batches. Our batches are of size 512 and we can't afford to reduce them without taking a serious hit on training times, but we can apply batch norm separately to subsets of a training batch. This technique, known as 'ghost' batch norm, is usually used in a distributed setting but is just as useful when using large batches on a single node. It isn't supported directly in PyTorch but we can roll our own easily enough.
""" def __init__(self, num_features, num_splits, eps=1e-05, momentum=0.1, weight=True, bias=True): super(GhostBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum) self.weight.data.fill_(1.0) self.bias.data.fill_(0.0) self.weight.requires_grad = weight self.bias.requires_grad = bias self.num_splits = num_splits self.register_buffer('running_mean', torch.zeros(num_features*self.num_splits)) self.register_buffer('running_var', torch.ones(num_features*self.num_splits)) def train(self, mode=True): if (self.training is True) and (mode is False): self.running_mean = torch.mean(self.running_mean.view(self.num_splits, self.num_features), dim=0).repeat(self.num_splits) self.running_var = torch.mean(self.running_var.view(self.num_splits, self.num_features), dim=0).repeat(self.num_splits) return super(GhostBatchNorm, self).train(mode) def forward(self, input): N, C, H, W = input.shape if self.training or not self.track_running_stats: return F.batch_norm( input.view(-1, C*self.num_splits, H, W), self.running_mean, self.running_var, self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits), True, self.momentum, self.eps).view(N, C, H, W) else: return F.batch_norm( input, self.running_mean[:self.num_features], self.running_var[:self.num_features], self.weight, self.bias, False, self.momentum, self.eps) class IdentityResidualBlock(nn.Module): def __init__(self, num_channels, conv_ksize=3, conv_pad=1, gbn_num_splits=16): super(IdentityResidualBlock, self).__init__() self.res1 = nn.Sequential( Conv2d(num_channels, num_channels, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), GhostBatchNorm(num_channels, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3) ) self.res2 = nn.Sequential( Conv2d(num_channels, num_channels, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), GhostBatchNorm(num_channels, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3) ) def forward(self, x): residual = x x = self.res1(x) x = self.res2(x) return x + residual # We override conv2d to get proper padding for kernel size = 2 class Conv2d(nn.Conv2d): def __init__(self, *args, **kwargs): super(Conv2d, self).__init__(*args, **kwargs) if self.kernel_size == (2, 2): self.forward = self.ksize_2_forward self.ksize_2_padding = (0, self.padding[0], 0, self.padding[1]) self.padding = (0, 0) def ksize_2_forward(self, x): x = F.pad(x, pad=self.ksize_2_padding) return super(Conv2d, self).forward(x) class FastResNet(nn.Module): def __init__(self, num_classes=10, fmap_factor=64, conv_ksize=3, conv_pad=1, gbn_num_splits=512 // 32, classif_scale=0.0625): super(FastResNet, self).__init__() self.prep = nn.Sequential( Conv2d(3, fmap_factor, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), GhostBatchNorm(fmap_factor, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3) ) self.layer1 = nn.Sequential( Conv2d(fmap_factor, fmap_factor * 2, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), nn.MaxPool2d(kernel_size=2), GhostBatchNorm(fmap_factor * 2, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3), IdentityResidualBlock(fmap_factor * 2, conv_ksize=conv_ksize, conv_pad=conv_pad, gbn_num_splits=gbn_num_splits) ) self.layer2 = nn.Sequential( Conv2d(fmap_factor * 2, fmap_factor * 4, kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), nn.MaxPool2d(kernel_size=2), GhostBatchNorm(fmap_factor * 4, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3), ) self.layer3 = nn.Sequential( Conv2d(fmap_factor * 4, fmap_factor * 8, 
kernel_size=conv_ksize, padding=conv_pad, stride=1, bias=False), nn.MaxPool2d(kernel_size=2), GhostBatchNorm(fmap_factor * 8, num_splits=gbn_num_splits, weight=False), nn.CELU(alpha=0.3), IdentityResidualBlock(fmap_factor * 8, conv_ksize=conv_ksize, conv_pad=conv_pad, gbn_num_splits=gbn_num_splits) ) self.pool = nn.MaxPool2d(kernel_size=4) self.classifier = nn.Sequential( nn.Flatten(), nn.Linear(fmap_factor * 8, num_classes) ) self.scale = torch.tensor(classif_scale, requires_grad=False)  # was hardcoded to 0.0625, which ignored the classif_scale argument
def forward(self, x): x = self.prep(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.pool(x) y = self.classifier(x) return y * self.scale model = FastResNet(10, fmap_factor=64) def print_num_params(model, display_all_modules=False): total_num_params = 0 for n, p in model.named_parameters(): num_params = 1 for s in p.shape: num_params *= s if display_all_modules: print(f"{n}: {num_params}") total_num_params += num_params print("-" * 50) print(f"Total number of parameters: {total_num_params:.2e}") print_num_params(model) model = FastResNet(10, fmap_factor=64, conv_ksize=2) print_num_params(model) ###Output _____no_output_____ ###Markdown Setup dataflowWe will set up the dataflow using `torchvision` transformations and will not follow the suggestions of the [bag-of-tricks notebook](https://github.com/davidcpage/cifar10-fast/blob/master/bag_of_tricks.ipynb). Data augmentations used to transform the dataset are- Random Crop- Flip left-right- Cutout ###Code import torch from torchvision.transforms import Compose, Pad, RandomHorizontalFlip, RandomErasing, RandomCrop, Normalize from torch.utils.data import DataLoader from torchvision.transforms import ToTensor from torchvision.datasets.cifar import CIFAR10 train_transform = Compose([ Pad(4), RandomCrop(32), RandomHorizontalFlip(), ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), RandomErasing(scale=(0.0625, 0.0625), ratio=(1.0, 1.0)) ]) test_transform = Compose([ ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), ]) train_ds = CIFAR10("/tmp/cifar10", train=True, download=True, transform=train_transform) test_ds = CIFAR10("/tmp/cifar10", train=False, download=True, transform=test_transform)  # was train_transform; the test set should use test_transform
def get_train_test_loaders(): train_loader = DataLoader(train_ds, batch_size=512, num_workers=10, shuffle=True, drop_last=True, pin_memory=True) test_loader = DataLoader(test_ds, batch_size=512, num_workers=10, shuffle=False, drop_last=False, pin_memory=True) return train_loader, test_loader ###Output _____no_output_____ ###Markdown Setup criterion, optimizer and lr schedulingFollowing cifar10-fast, we will use the label smoothing trick for improving the training speed and generalization of neural nets in classification problems.
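Concretely, with smoothing factor `alpha` the criterion below computes `(1 - alpha) * CE(logits, target) + alpha * CE(logits, uniform)`, where the second term is simply the per-class mean of `-log_softmax(logits)`. Here is a minimal numeric sketch of that formula (an illustration added to this copy of the notebook, not part of the original): ###Code import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(4, 10)            # a fake batch: 4 samples, 10 classes
targets = torch.randint(0, 10, (4,))
alpha = 0.2

ce = F.cross_entropy(logits, targets, reduction='none')
# cross-entropy against the uniform distribution over classes
uniform_ce = -torch.log_softmax(logits, dim=1).mean(dim=1)
smoothed = ((1.0 - alpha) * ce + alpha * uniform_ce).mean()
print(smoothed) ###Output _____no_output_____ ###Markdown The criterion and optimizer used for training: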
###Code import torch.nn as nn import torch.optim as optim class CriterionWithLabelSmoothing(nn.Module): def __init__(self, criterion, alpha=0.2): super(CriterionWithLabelSmoothing, self).__init__() self.criterion = criterion if self.criterion.reduction != 'none': raise ValueError("Input criterion should have reduction equal none") self.alpha = alpha def forward(self, logits, targets): loss = self.criterion(logits, targets) log_probs = torch.log_softmax(logits, dim=1) klloss = -log_probs.mean(dim=1) out = (1.0 - self.alpha) * loss + self.alpha * klloss return out.mean(dim=0) def get_criterion(alpha): return CriterionWithLabelSmoothing(nn.CrossEntropyLoss(reduction='none'), alpha=alpha)  # was alpha=0.2, which silently ignored the tuned alpha
def get_optimizer(model, momentum, weight_decay, nesterov): biases = [p for n, p in model.named_parameters() if "bias" in n] others = [p for n, p in model.named_parameters() if "bias" not in n] return optim.SGD( [{"params": others, "lr": 1.0, "weight_decay": weight_decay}, {"params": biases, "lr": 1.0, "weight_decay": weight_decay / 64}], momentum=momentum, nesterov=nesterov ) ###Output _____no_output_____ ###Markdown There is an implementation difference between the current PyTorch SGD and the SGD from cifar10-fast. The latter uses the Sutskever et al. formulation:```
new_w = w + mu * v - lr * (dw + weight_decay * w)
v = mu * prev_v - lr * (dw + weight_decay * w)
```and PyTorch's is```
new_w = w - lr * (mu * v + dw + weight_decay * w)
v = mu * prev_v + dw + weight_decay * w
```(a tiny numeric illustration of this difference follows the hyperparameter list below) ###Code from ignite.contrib.handlers import PiecewiseLinear, ParamGroupScheduler def get_lr_scheduler(optimizer, lr_max_value, lr_max_value_epoch, num_epochs, epoch_length): milestones_values = [ (0, 0.0), (epoch_length * lr_max_value_epoch, lr_max_value), (epoch_length * num_epochs - 1, 0.0) ] lr_scheduler1 = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values, param_group_index=0) milestones_values = [ (0, 0.0), (epoch_length * lr_max_value_epoch, lr_max_value * 64), (epoch_length * num_epochs - 1, 0.0) ] lr_scheduler2 = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values, param_group_index=1) lr_scheduler = ParamGroupScheduler( [lr_scheduler1, lr_scheduler2], ["lr scheduler (non-biases)", "lr scheduler (biases)"] ) return lr_scheduler %matplotlib inline num_epochs = 25 lr_max_value = 0.4 milestones_values = [(0, 0.0), (num_epochs // 5, lr_max_value), (num_epochs - 1, 0.0)] PiecewiseLinear.plot_values(num_epochs, param_name="lr", milestones_values=milestones_values) ###Output _____no_output_____ ###Markdown Setup hyperparameter tuningNow we are ready to set up hyperparameter tuning to optimize the following parameters, in order to get higher accuracy on the test dataset while training is limited to a small number of epochs:- learning rate peak value: `[0.1, 1.0]`- SGD momentum: `[0.7, 1.0]`- weight decay: `[0.0, 1e-3]`- label smoothing `alpha`: `[0.1, 0.5]`- number of features (`fmap_factor`): `[16, 24, 32, 40, 48, 56, 64, 72, 80]`- convolution kernel size: `3` or `2`- ...
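As promised above, a tiny numeric illustration of the SGD implementation difference (an added sketch, not part of the original notebook): with a constant learning rate and no weight decay, the two momentum formulations produce identical iterates, so the difference only matters once the learning rate is scheduled, as it is in this notebook. ###Code # minimize f(w) = w**2 with both momentum formulations (constant lr, no weight decay)
lr, mu = 0.1, 0.9
w_pt, buf = 1.0, 0.0   # PyTorch style:   buf = mu * buf + g;  w -= lr * buf
w_su, v = 1.0, 0.0     # Sutskever style: v = mu * v - lr * g; w += v
for step in range(5):
    buf = mu * buf + 2 * w_pt
    w_pt -= lr * buf
    v = mu * v - lr * (2 * w_su)
    w_su += v
    print(step, round(w_pt, 6), round(w_su, 6))   # the two trajectories coincide ###Output _____no_output_____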
###Code from ax.plot.contour import plot_contour from ax.plot.trace import optimization_trace_single_method from ax.service.managed_loop import optimize from ax.utils.notebook.plotting import render, init_notebook_plotting init_notebook_plotting() ###Output _____no_output_____ ###Markdown First, we need to create an evaluation function that receives experiment parameters and returns test accuracy.The input parameters search space is defined as a list of dictionaries that have the following required keys: - "name" - parameter name, - "type" - parameter type ("range", "choice" or "fixed"), - "bounds" for range parameters, - "values" for choice parameters, and - "value" for fixed parameters.The experiment parameters object provided for a single experiment is a dictionary `parameter name: value or values`. Links: - [Ax Parameters API](https://ax.dev/api/core.html#module-ax.core.parameter)- [Ax optimize function](https://ax.dev/api/service.html#ax.service.managed_loop.optimize)- [Ax parameters search space example](https://ax.dev/tutorials/gpei_hartmann_service.html#2.-Set-up-experiment) ###Code from ignite.engine import create_supervised_trainer, create_supervised_evaluator, Events, convert_tensor from ignite.metrics import Accuracy from ignite.contrib.handlers import TensorboardLogger, ProgressBar from ignite.contrib.handlers.tensorboard_logger import OutputHandler, OptimizerParamsHandler, GradsHistHandler, \ global_step_from_engine # Transfer batch to GPU and cast inputs to float16
def prepare_batch_fp16(batch, device=None, non_blocking=True): x, y = batch return (convert_tensor(x, device=device, non_blocking=non_blocking).half(), convert_tensor(y, device=device, non_blocking=non_blocking)) torch.backends.cudnn.benchmark = True num_epochs = 17 def run_experiment(parameters): device = 'cuda' fast_mode = parameters.get("fast_mode", True) # setup model model = FastResNet( num_classes=10, fmap_factor=parameters.get("fmap_factor"), conv_ksize=parameters.get("conv_ksize"), classif_scale=parameters.get("classif_scale") ).to(device).half() # setup dataloaders train_loader, test_loader = get_train_test_loaders() # setup solver criterion = get_criterion(parameters.get("alpha")).to(device) optimizer = get_optimizer( model, parameters.get("momentum"), parameters.get("weight_decay"), parameters.get("nesterov") ) lr_scheduler = get_lr_scheduler( optimizer, parameters.get("lr_max_value"), parameters.get("lr_max_value_epoch"), num_epochs=num_epochs, epoch_length=len(train_loader) ) # setup ignite trainer trainer = create_supervised_trainer(model, optimizer, criterion, device=device, non_blocking=True, prepare_batch=prepare_batch_fp16) # setup learning rate scheduler trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler) # setup tensorboard logger exp_log_name = f"exp_{parameters.get('fmap_factor')}_{parameters.get('conv_ksize')}_" + \ f"{parameters.get('alpha'):.2}_{parameters.get('lr_max_value'):.4}" tb_logger = TensorboardLogger(log_dir=f"/tmp/tb_logs/{exp_log_name}") if not fast_mode: # - log learning rate tb_logger.attach(trainer, OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_STARTED) # - log training batch loss tb_logger.attach(trainer, OutputHandler(tag="training", output_transform=lambda x: {"batch loss": x}), event_name=Events.ITERATION_COMPLETED) # - log model grads tb_logger.attach(trainer, GradsHistHandler(model), event_name=Events.EPOCH_COMPLETED) # setup a progress bar ProgressBar().attach(trainer, event_name=Events.EPOCH_COMPLETED, closing_event_name=Events.COMPLETED) # setup
evaluator def output_transform(output): y_pred, y = output y_pred = y_pred.float() return y_pred, y metrics = { "test accuracy": Accuracy(output_transform=output_transform) } evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True, prepare_batch=prepare_batch_fp16) # evaluate trained model every 3 epochs @trainer.on(Events.EPOCH_COMPLETED) def run_evaluation(engine): c1 = (engine.state.epoch - 1) % 3 == 0 c2 = engine.state.epoch == engine.state.max_epochs if (c1 and not fast_mode) or c2: evaluator.run(test_loader) if not fast_mode: # - log test accuracy tb_logger.attach(evaluator, OutputHandler(tag="validation", metric_names="all", global_step_transform=global_step_from_engine(trainer)), event_name=Events.EPOCH_COMPLETED) trainer.run(train_loader, max_epochs=num_epochs) test_acc = evaluator.state.metrics['test accuracy'] # dump hparams/result to Tensorboard tb_logger.writer.add_hparams(parameters, {'hparam/test_accuracy': test_acc}) tb_logger.close() return test_acc ###Output _____no_output_____ ###Markdown The original training configuration gives us the following result: ###Code batch_size = 512 num_epochs = 20 run_experiment( parameters={ "fmap_factor": 64, "conv_ksize": 3, "classif_scale": 0.0625, "alpha": 0.2, "momentum": 0.9, "weight_decay": 5e-4, "nesterov": True, "lr_max_value": 1.0, "lr_max_value_epoch": num_epochs // 5, "fast_mode": False } ) ###Output _____no_output_____ ###Markdown Setup parameters search space ###Code parameters_space = [ { "name": "fmap_factor", "type": "range", "bounds": [48, 80], }, { "name": "conv_ksize", "type": "choice", "values": [2, 3], }, { "name": "classif_scale", "type": "range", "bounds": [0.00625, 0.250], }, { "name": "alpha", "type": "range", "bounds": [0.1, 0.5], }, { "name": "momentum", "type": "range", "bounds": [0.7, 1.0], }, { "name": "weight_decay", "type": "range", "bounds": [1e-4, 1e-3], "value_type": "float", }, { "name": "nesterov", "type": "choice", "values": [True, False], }, { "name": "lr_max_value", "type": "range", "bounds": [0.1, 1.0], }, { "name": "lr_max_value_epoch", "type": "range", "bounds": [1, 10], }, ] ###Output _____no_output_____ ###Markdown Start tuning ###Code num_epochs = exp_num_epochs = 20 best_parameters, values, experiment, model = optimize( parameters=parameters_space, evaluation_function=run_experiment, objective_name='test accuracy', total_trials=30 ) ###Output _____no_output_____ ###Markdown We found the best parameters that give the following outcome (a short note on reading the returned `values` appears at the end of this notebook): ###Code means, covariances = values print(f"\nBest parameters: {best_parameters}\n") print(f"Test accuracy: {means} ± {covariances}") ###Output _____no_output_____ ###Markdown Let's plot contours showing test accuracy as a function of two of the hyperparameters. ###Code render(plot_contour(model=model, param_x='lr_max_value', param_y='momentum', metric_name='test accuracy')) ###Output _____no_output_____ ###Markdown Let's retrain the model with the best found parameters and compare with the previous baseline: ###Code batch_size = 512 num_epochs = 20 best_parameters_copy = dict(best_parameters) best_parameters_copy['fast_mode'] = False run_experiment( parameters=best_parameters_copy ) ###Output _____no_output_____
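###Markdown A closing note on the `values` tuple returned by `optimize` above (an added sketch, not part of the original notebook; it assumes Ax's documented return format, where `means` and `covariances` are dicts keyed by metric name). The `covariances` entry printed above is a variance, so a more readable summary is the mean plus or minus the standard error: ###Code import math

# assumes `means` and `covariances` come from Ax's optimize(), as above
mean_acc = means['test accuracy']
sem = math.sqrt(covariances['test accuracy']['test accuracy'])
print(f"Test accuracy: {mean_acc:.4f} +/- {sem:.4f}") ###Output _____no_output_____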
6-4-tut-graphtheory.ipynb
###Markdown ![image.png](attachment:image.png) ###Code A = matrix([[0,0,0,0,0], [0,0,2,1,0], [0,2,0,1,0], [0,1,1,0,1], [0,0,0,1,1]]) G = Graph(A) #S = vertex_coloring(G) G.show() ###Output _____no_output_____ ###Markdown The following graph has three vertices. This is a 3x3 adjacency matrix.![image.png](attachment:image.png) ###Code A = matrix([[0,1,1], [1,0,1], [1,1,0]]) G = Graph(A) G.relabel({0:"a",1:"b",2:"c"}) S = vertex_coloring(G) G.show(partition=S,figsize=3) ###Output _____no_output_____ ###Markdown ![image.png](attachment:image.png)To fill in the adjacency matrix, we look at the rows and the columns. If two vertices are connected by one or more edges, we count the number of edges and enter that number in the corresponding matrix entry. $V^a$ and $V^b$ share a common edge; in that case we say they are adjacent, and we enter the number of connections between $V^a$ and $V^b$ ![image.png](attachment:image.png) $V^a$ and $V^c$ are adjacent via an edge ![image.png](attachment:image.png) $V^b$ and $V^c$ are adjacent via an edge ![image.png](attachment:image.png) we finish by putting zeros in the empty entries; the result is the adjacency matrix (a short plain-Python sketch of this fill-in procedure appears at the end of this notebook) ![image.png](attachment:image.png) ###Code A = matrix([[0,1,1], [1,0,1], [1,1,0]]) G = Graph(A) G.relabel({0:"a",1:"b",2:"c"}) S = vertex_coloring(G) G.show(partition=S,figsize=3) d = {"a": [1,2], "b": [2,3], "c": [3,4], "d": [2,3], "e": [2,4], "f": [2,5], "g": [2,6], "h": [3,4]} G = Graph(d);G.show() H=Graph([(2,1),(3,1),(4,1), (1,2),(3,2),(4,2), (5,2),(6,2)],multiedges=True);H.show() g = Graph([(1,3),(3,8),(5,2)]);g.show() import igraph # optional - python_igraph
g = Graph(igraph.Graph([(1,3),(3,2),(0,2)])) g g=Graph(7) edges = [(1,2), (1,3), (1,4), (2,3), (2,4), (2,5), (2, 6), (3,4), (3,5), (3,6), (3,7), (4,6), (4,7), (5,6), (6,7)] g.add_edges(edges) g.show() g.adjacency_matrix(),g.incidence_matrix() H = graphs.HeawoodGraph();H.show() show(H.adjacency_matrix());show(H.incidence_matrix()) fa=g.adjacency_matrix();fa fi=g.incidence_matrix();fi fp=fa*fi #G.relabel({0:"a",1:"b",2:"c"})
fa=g.adjacency_matrix();fa FP = Graph(matrix(fa)) S = vertex_coloring(FP) FP.show(partition=S,figsize=3) H = graphs.HeawoodGraph() S = vertex_coloring(H) H.show(partition=S,figsize=3) ###Output _____no_output_____ ###Markdown ![image.png](attachment:image.png) ###Code fano=g.adjacency_matrix();show(fano) Fano=g.incidence_matrix();show(Fano) moebius_kantor = graphs.MoebiusKantorGraph() moebius_kantor.show(figsize=[4,4], graph_border=True) import networkx as nx import matplotlib.pyplot as plt G = nx.Graph() edges = [(1,2), (1,3), (1,4), (2,3), (2,4), (2,5), (2, 6), (3,4), (3,5), (3,6), (3,7), (4,6), (4,7), (5,6), (6,7)] G.add_edges_from(edges) nx.draw_networkx(G) limits = plt.axis('off') plt.show()
edges = [(1,2), (1,3), (1,4), (2,3), (2,4), (2,5), (2, 6), (3,4), (3,5), (3,6), (3,7), (4,6), (4,7), (5,6), (6,7)] Gamma = Graph(edges) S = vertex_coloring(Gamma) Gamma.show(partition=S,figsize=3,graph_border=True) Gamma.adjacency_matrix() ###Output _____no_output_____
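###Markdown As promised above, here is a short plain-Python sketch of the adjacency-matrix fill-in procedure (an illustration added here, not part of the original notebook; `adjacency_from_edges` is a hypothetical helper): ###Code # build the adjacency matrix of an undirected multigraph from its edge list,
# counting parallel edges, exactly as described above (loops aside)
def adjacency_from_edges(n, edges):
    A = [[0] * n for _ in range(n)]
    for u, v in edges:
        A[u][v] += 1
        A[v][u] += 1
    return A

# the triangle from above, with vertices 0="a", 1="b", 2="c"
for row in adjacency_from_edges(3, [(0, 1), (0, 2), (1, 2)]):
    print(row) ###Output _____no_output_____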
tutorials/notebook/cx_site_chart_examples/dotplot_11.ipynb
###Markdown Example: CanvasXpress dotplot Chart No. 11This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at:https://www.canvasxpress.org/examples/dotplot-11.htmlThis example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.Everything required for the chart to render is included in the code below. Simply run the code block. ###Code from canvasxpress.canvas import CanvasXpress from canvasxpress.js.collection import CXEvents from canvasxpress.render.jupyter import CXNoteBook cx = CanvasXpress( render_to="dotplot11", data={ "y": { "vars": [ "GATA3", "PTEN", "XBP1" ], "smps": [ "BRCA1", "BRCA2", "BRCA3", "BRCA4", "BRCA5", "BRCA6", "BRCA7", "BRCA8", "BRCA9", "BRCA10", "BRCA11", "BRCA12", "BRCA13", "BRCA14", "BRCA15", "BRCA16", "BRCA17", "BRCA18", "BRCA19", "BRCA20", "BRCA21", "BRCA22", "BRCA23", "BRCA24", "BRCA25", "BRCA26", "BRCA27", "BRCA28", "BRCA29", "BRCA30", "BRCA31", "BRCA32", "BRCA33", "BRCA34", "BRCA35", "BRCA36", "BRCA37", "BRCA38", "BRCA39", "BRCA40", "BRCA41", "BRCA42", "BRCA43", "BRCA44", "BRCA45", "BRCA46", "BRCA47", "BRCA48", "BRCA49", "BRCA50", "BRCA51", "BRCA52", "BRCA53", "BRCA54", "BRCA55", "BRCA56", "BRCA57", "BRCA58", "BRCA59", "BRCA60", "BRCA61", "BRCA62", "BRCA63", "BRCA64", "BRCA65", "BRCA66", "BRCA67", "BRCA68", "BRCA69", "BRCA70", "BRCA71", "BRCA72", "BRCA73", "BRCA74", "BRCA75", "BRCA76", "BRCA77", "BRCA78", "BRCA79", "BRCA80", "BRCA81", "BRCA82", "BRCA83", "BRCA84", "BRCA85", "BRCA86", "BRCA87", "BRCA88", "BRCA89", "BRCA90", "BRCA91", "BRCA92", "BRCA93", "BRCA94", "BRCA95", "BRCA96", "BRCA97", "BRCA98", "BRCA99", "BRCA100", "BRCA101", "BRCA102", "BRCA103", "BRCA104", "BRCA105", "BRCA106", "BRCA107", "BRCA108", "BRCA109", "BRCA110", "BRCA111", "BRCA112", "BRCA113", "BRCA114", "BRCA115", "BRCA116", "BRCA117", "BRCA118", "BRCA119", "BRCA120", "BRCA121", "BRCA122", "BRCA123", "BRCA124", "BRCA125", "BRCA126", "BRCA127", "BRCA128", "BRCA129", "BRCA130", "BRCA131", "BRCA132", "BRCA133", "BRCA134", "BRCA135", "BRCA136", "BRCA137", "BRCA138", "BRCA139", "BRCA140", "BRCA141", "BRCA142", "BRCA143", "BRCA144", "BRCA145", "BRCA146", "BRCA147", "BRCA148", "BRCA149", "BRCA150", "BRCA151", "BRCA152", "BRCA153", "BRCA154", "BRCA155", "BRCA156", "BRCA157", "BRCA158", "BRCA159", "BRCA160", "BRCA161", "BRCA162", "BRCA163", "BRCA164", "BRCA165", "BRCA166", "BRCA167", "BRCA168", "BRCA169", "BRCA170", "BRCA171", "BRCA172", "BRCA173", "BRCA174", "BRCA175", "BRCA176", "BRCA177", "BRCA178", "BRCA179", "BRCA180", "BRCA181", "BRCA182", "BRCA183", "BRCA184", "BRCA185", "BRCA186", "BRCA187", "BRCA188", "BRCA189", "BRCA190", "BRCA191", "BRCA192", "BRCA193", "BRCA194", "BRCA195", "BRCA196", "BRCA197", "BRCA198", "BRCA199", "BRCA200", "BRCA201", "BRCA202", "BRCA203", "BRCA204", "BRCA205", "BRCA206", "BRCA207", "BRCA208", "BRCA209", "BRCA210", "BRCA211", "BRCA212", "BRCA213", "BRCA214", "BRCA215", "BRCA216", "BRCA217", "BRCA218", "BRCA219", "BRCA220", "BRCA221", "BRCA222", "BRCA223", "BRCA224", "BRCA225", "BRCA226", "BRCA227", "BRCA228", "BRCA229", "BRCA230", "BRCA231", "BRCA232", "BRCA233", "BRCA234", "BRCA235", "BRCA236", "BRCA237", "BRCA238", "BRCA239", "BRCA240", "BRCA241", "BRCA242", "BRCA243", "BRCA244", "BRCA245", "BRCA246", "BRCA247", "BRCA248", "BRCA249", "BRCA250", "BRCA251", "BRCA252", "BRCA253", "BRCA254", "BRCA255", "BRCA256", "BRCA257", "BRCA258", "BRCA259", "BRCA260", "BRCA261", "BRCA262", 
"BRCA263", "BRCA264", "BRCA265", "BRCA266", "BRCA267", "BRCA268", "BRCA269", "BRCA270", "BRCA271", "BRCA272", "BRCA273", "BRCA274", "BRCA275", "BRCA276", "BRCA277", "BRCA278", "BRCA279", "BRCA280", "BRCA281", "BRCA282", "BRCA283", "BRCA284", "BRCA285", "BRCA286", "BRCA287", "BRCA288", "BRCA289", "BRCA290", "BRCA291", "BRCA292", "BRCA293", "BRCA294", "BRCA295", "BRCA296", "BRCA297", "BRCA298", "BRCA299", "BRCA300", "BRCA301", "BRCA302", "BRCA303", "BRCA304", "BRCA305", "BRCA306", "BRCA307", "BRCA308", "BRCA309", "BRCA310", "BRCA311", "BRCA312", "BRCA313", "BRCA314", "BRCA315", "BRCA316", "BRCA317", "BRCA318", "BRCA319", "BRCA320", "BRCA321", "BRCA322", "BRCA323", "BRCA324", "BRCA325", "BRCA326", "BRCA327", "BRCA328", "BRCA329", "BRCA330", "BRCA331", "BRCA332", "BRCA333", "BRCA334", "BRCA335", "BRCA336", "BRCA337", "BRCA338", "BRCA339", "BRCA340", "BRCA341", "BRCA342", "BRCA343", "BRCA344", "BRCA345", "BRCA346", "BRCA347", "BRCA348", "BRCA349", "BRCA350", "BRCA351", "BRCA352", "BRCA353", "BRCA354", "BRCA355", "BRCA356", "BRCA357", "BRCA358", "BRCA359", "BRCA360", "BRCA361", "BRCA362", "BRCA363", "BRCA364", "BRCA365", "BRCA366", "BRCA367", "BRCA368", "BRCA369", "BRCA370", "BRCA371", "BRCA372", "BRCA373", "BRCA374", "BRCA375", "BRCA376", "BRCA377", "BRCA378", "BRCA379", "BRCA380", "BRCA381", "BRCA382", "BRCA383", "BRCA384", "BRCA385", "BRCA386", "BRCA387", "BRCA388", "BRCA389", "BRCA390", "BRCA391", "BRCA392", "BRCA393", "BRCA394", "BRCA395", "BRCA396", "BRCA397", "BRCA398", "BRCA399", "BRCA400", "BRCA401", "BRCA402", "BRCA403", "BRCA404", "BRCA405", "BRCA406", "BRCA407", "BRCA408", "BRCA409", "BRCA410", "BRCA411", "BRCA412", "BRCA413", "BRCA414", "BRCA415", "BRCA416", "BRCA417", "BRCA418", "BRCA419", "BRCA420", "BRCA421", "BRCA422", "BRCA423", "BRCA424", "BRCA425", "BRCA426", "BRCA427", "BRCA428", "BRCA429", "BRCA430", "BRCA431", "BRCA432", "BRCA433", "BRCA434", "BRCA435", "BRCA436", "BRCA437", "BRCA438", "BRCA439", "BRCA440", "BRCA441", "BRCA442", "BRCA443", "BRCA444", "BRCA445", "BRCA446", "BRCA447", "BRCA448", "BRCA449", "BRCA450", "BRCA451", "BRCA452", "BRCA453", "BRCA454", "BRCA455", "BRCA456", "BRCA457", "BRCA458", "BRCA459", "BRCA460", "BRCA461", "BRCA462", "BRCA463", "BRCA464", "BRCA465", "BRCA466", "BRCA467", "BRCA468", "BRCA469", "BRCA470", "BRCA471", "BRCA472", "BRCA473", "BRCA474", "BRCA475", "BRCA476", "BRCA477", "BRCA478", "BRCA479", "BRCA480", "BRCA481", "BRCA482", "BRCA483", "BRCA484", "BRCA485", "BRCA486", "BRCA487", "BRCA488", "BRCA489", "BRCA490", "BRCA491", "BRCA492", "BRCA493", "BRCA494", "BRCA495", "BRCA496", "BRCA497", "BRCA498", "BRCA499", "BRCA500", "BRCA501", "BRCA502", "BRCA503", "BRCA504", "BRCA505", "BRCA506", "BRCA507", "BRCA508", "BRCA509", "BRCA510", "BRCA511", "BRCA512", "BRCA513", "BRCA514", "BRCA515", "BRCA516", "BRCA517", "BRCA518", "BRCA519", "BRCA520", "BRCA521", "BRCA522", "BRCA523", "BRCA524", "BRCA525", "BRCA526", "BRCA527", "BRCA528", "BRCA529", "BRCA530", "BRCA531", "BRCA532", "BRCA533", "BRCA534", "BRCA535", "BRCA536", "BRCA537", "BRCA538", "BRCA539", "BRCA540", "BRCA541", "BRCA542", "BRCA543", "BRCA544", "BRCA545", "BRCA546", "BRCA547", "BRCA548", "BRCA549", "BRCA550", "BRCA551", "BRCA552", "BRCA553", "BRCA554", "BRCA555", "BRCA556", "BRCA557", "BRCA558", "BRCA559", "BRCA560", "BRCA561", "BRCA562", "BRCA563", "BRCA564", "BRCA565", "BRCA566", "BRCA567", "BRCA568", "BRCA569", "BRCA570", "BRCA571", "BRCA572", "BRCA573", "BRCA574", "BRCA575", "BRCA576", "BRCA577", "BRCA578", "BRCA579", "BRCA580", "BRCA581", "BRCA582", "BRCA583", "BRCA584", "BRCA585", 
"BRCA586", "BRCA587", "BRCA588", "BRCA589", "BRCA590", "OV1", "OV2", "OV3", "OV4", "OV5", "OV6", "OV7", "OV8", "OV9", "OV10", "OV11", "OV12", "OV13", "OV14", "OV15", "OV16", "OV17", "OV18", "OV19", "OV20", "OV21", "OV22", "OV23", "OV24", "OV25", "OV26", "OV27", "OV28", "OV29", "OV30", "OV31", "OV32", "OV33", "OV34", "OV35", "OV36", "OV37", "OV38", "OV39", "OV40", "OV41", "OV42", "OV43", "OV44", "OV45", "OV46", "OV47", "OV48", "OV49", "OV50", "OV51", "OV52", "OV53", "OV54", "OV55", "OV56", "OV57", "OV58", "OV59", "OV60", "OV61", "OV62", "OV63", "OV64", "OV65", "OV66", "OV67", "OV68", "OV69", "OV70", "OV71", "OV72", "OV73", "OV74", "OV75", "OV76", "OV77", "OV78", "OV79", "OV80", "OV81", "OV82", "OV83", "OV84", "OV85", "OV86", "OV87", "OV88", "OV89", "OV90", "OV91", "OV92", "OV93", "OV94", "OV95", "OV96", "OV97", "OV98", "OV99", "OV100", "OV101", "OV102", "OV103", "OV104", "OV105", "OV106", "OV107", "OV108", "OV109", "OV110", "OV111", "OV112", "OV113", "OV114", "OV115", "OV116", "OV117", "OV118", "OV119", "OV120", "OV121", "OV122", "OV123", "OV124", "OV125", "OV126", "OV127", "OV128", "OV129", "OV130", "OV131", "OV132", "OV133", "OV134", "OV135", "OV136", "OV137", "OV138", "OV139", "OV140", "OV141", "OV142", "OV143", "OV144", "OV145", "OV146", "OV147", "OV148", "OV149", "OV150", "OV151", "OV152", "OV153", "OV154", "OV155", "OV156", "OV157", "OV158", "OV159", "OV160", "OV161", "OV162", "OV163", "OV164", "OV165", "OV166", "OV167", "OV168", "OV169", "OV170", "OV171", "OV172", "OV173", "OV174", "OV175", "OV176", "OV177", "OV178", "OV179", "OV180", "OV181", "OV182", "OV183", "OV184", "OV185", "OV186", "OV187", "OV188", "OV189", "OV190", "OV191", "OV192", "OV193", "OV194", "OV195", "OV196", "OV197", "OV198", "OV199", "OV200", "OV201", "OV202", "OV203", "OV204", "OV205", "OV206", "OV207", "OV208", "OV209", "OV210", "OV211", "OV212", "OV213", "OV214", "OV215", "OV216", "OV217", "OV218", "OV219", "OV220", "OV221", "OV222", "OV223", "OV224", "OV225", "OV226", "OV227", "OV228", "OV229", "OV230", "OV231", "OV232", "OV233", "OV234", "OV235", "OV236", "OV237", "OV238", "OV239", "OV240", "OV241", "OV242", "OV243", "OV244", "OV245", "OV246", "OV247", "OV248", "OV249", "OV250", "OV251", "OV252", "OV253", "OV254", "OV255", "OV256", "OV257", "OV258", "OV259", "OV260", "OV261", "OV262", "OV263", "OV264", "OV265", "OV266", "OV267", "OV268", "OV269", "OV270", "OV271", "OV272", "OV273", "OV274", "OV275", "OV276", "OV277", "OV278", "OV279", "OV280", "OV281", "OV282", "OV283", "OV284", "OV285", "OV286", "OV287", "OV288", "OV289", "OV290", "OV291", "OV292", "OV293", "OV294", "OV295", "OV296", "OV297", "OV298", "OV299", "OV300", "OV301", "OV302", "OV303", "OV304", "OV305", "OV306", "OV307", "OV308", "OV309", "OV310", "OV311", "OV312", "OV313", "OV314", "OV315", "OV316", "OV317", "OV318", "OV319", "OV320", "OV321", "OV322", "OV323", "OV324", "OV325", "OV326", "OV327", "OV328", "OV329", "OV330", "OV331", "OV332", "OV333", "OV334", "OV335", "OV336", "OV337", "OV338", "OV339", "OV340", "OV341", "OV342", "OV343", "OV344", "OV345", "OV346", "OV347", "OV348", "OV349", "OV350", "OV351", "OV352", "OV353", "OV354", "OV355", "OV356", "OV357", "OV358", "OV359", "OV360", "OV361", "OV362", "OV363", "OV364", "OV365", "OV366", "OV367", "OV368", "OV369", "OV370", "OV371", "OV372", "OV373", "OV374", "OV375", "OV376", "OV377", "OV378", "OV379", "OV380", "OV381", "OV382", "OV383", "OV384", "OV385", "OV386", "OV387", "OV388", "OV389", "OV390", "OV391", "OV392", "OV393", "OV394", "OV395", "OV396", "OV397", "OV398", "OV399", "OV400", 
"OV401", "OV402", "OV403", "OV404", "OV405", "OV406", "OV407", "OV408", "OV409", "OV410", "OV411", "OV412", "OV413", "OV414", "OV415", "OV416", "OV417", "OV418", "OV419", "OV420", "OV421", "OV422", "OV423", "OV424", "OV425", "OV426", "OV427", "OV428", "OV429", "OV430", "OV431", "OV432", "OV433", "OV434", "OV435", "OV436", "OV437", "OV438", "OV439", "OV440", "OV441", "OV442", "OV443", "OV444", "OV445", "OV446", "OV447", "OV448", "OV449", "OV450", "OV451", "OV452", "OV453", "OV454", "OV455", "OV456", "OV457", "OV458", "OV459", "OV460", "OV461", "OV462", "OV463", "OV464", "OV465", "OV466", "OV467", "OV468", "OV469", "OV470", "OV471", "OV472", "OV473", "OV474", "OV475", "OV476", "OV477", "OV478", "OV479", "OV480", "OV481", "OV482", "OV483", "OV484", "OV485", "OV486", "OV487", "OV488", "OV489", "OV490", "OV491", "OV492", "OV493", "OV494", "OV495", "OV496", "OV497", "OV498", "OV499", "OV500", "OV501", "OV502", "OV503", "OV504", "OV505", "OV506", "OV507", "OV508", "OV509", "OV510", "OV511", "OV512", "OV513", "OV514", "OV515", "OV516", "OV517", "OV518", "OV519", "OV520", "OV521", "OV522", "OV523", "OV524", "OV525", "OV526", "OV527", "OV528", "OV529", "OV530", "OV531", "OV532", "OV533", "OV534", "OV535", "OV536", "OV537", "OV538", "OV539", "OV540", "OV541", "OV542", "OV543", "OV544", "OV545", "OV546", "OV547", "OV548", "OV549", "OV550", "OV551", "OV552", "OV553", "OV554", "OV555", "OV556", "OV557", "OV558", "OV559", "OV560", "OV561", "LUSC1", "LUSC2", "LUSC3", "LUSC4", "LUSC5", "LUSC6", "LUSC7", "LUSC8", "LUSC9", "LUSC10", "LUSC11", "LUSC12", "LUSC13", "LUSC14", "LUSC15", "LUSC16", "LUSC17", "LUSC18", "LUSC19", "LUSC20", "LUSC21", "LUSC22", "LUSC23", "LUSC24", "LUSC25", "LUSC26", "LUSC27", "LUSC28", "LUSC29", "LUSC30", "LUSC31", "LUSC32", "LUSC33", "LUSC34", "LUSC35", "LUSC36", "LUSC37", "LUSC38", "LUSC39", "LUSC40", "LUSC41", "LUSC42", "LUSC43", "LUSC44", "LUSC45", "LUSC46", "LUSC47", "LUSC48", "LUSC49", "LUSC50", "LUSC51", "LUSC52", "LUSC53", "LUSC54", "LUSC55", "LUSC56", "LUSC57", "LUSC58", "LUSC59", "LUSC60", "LUSC61", "LUSC62", "LUSC63", "LUSC64", "LUSC65", "LUSC66", "LUSC67", "LUSC68", "LUSC69", "LUSC70", "LUSC71", "LUSC72", "LUSC73", "LUSC74", "LUSC75", "LUSC76", "LUSC77", "LUSC78", "LUSC79", "LUSC80", "LUSC81", "LUSC82", "LUSC83", "LUSC84", "LUSC85", "LUSC86", "LUSC87", "LUSC88", "LUSC89", "LUSC90", "LUSC91", "LUSC92", "LUSC93", "LUSC94", "LUSC95", "LUSC96", "LUSC97", "LUSC98", "LUSC99", "LUSC100", "LUSC101", "LUSC102", "LUSC103", "LUSC104", "LUSC105", "LUSC106", "LUSC107", "LUSC108", "LUSC109", "LUSC110", "LUSC111", "LUSC112", "LUSC113", "LUSC114", "LUSC115", "LUSC116", "LUSC117", "LUSC118", "LUSC119", "LUSC120", "LUSC121", "LUSC122", "LUSC123", "LUSC124", "LUSC125", "LUSC126", "LUSC127", "LUSC128", "LUSC129", "LUSC130", "LUSC131", "LUSC132", "LUSC133", "LUSC134", "LUSC135", "LUSC136", "LUSC137", "LUSC138", "LUSC139", "LUSC140", "LUSC141", "LUSC142", "LUSC143", "LUSC144", "LUSC145", "LUSC146", "LUSC147", "LUSC148", "LUSC149", "LUSC150", "LUSC151", "LUSC152", "LUSC153", "LUSC154" ], "data": [ [ 2.87, 2.17, 1.32, 1.84, -6.03, 1.8, -4.88, -3.14, 2.03, -0.29, -0.8, 2.65, -2.31, -3.76, 3.93, -1.28, 1.62, 1.87, 1.47, -2.96, 2.35, 2.46, 3.37, 2.98, 2.26, 1.99, 1.54, 1.48, 0.04, 2.31, -2.2, 1.82, -1.29, 2.98, 2.13, 1.99, 1.38, 1.8, 1.83, 2.95, 0.79, 2.79, 2.64, 1.71, 1.55, 1.76, 1.22, -0.08, 2.83, 1.83, 0.36, -1.74, 2.09, 0.34, 0.11, 0.16, 2.12, 2.37, 1.94, 1.93, 1.7, 2.25, 2.19, -2.94, 1.73, 1.43, 2.02, 2.33, -4.57, 1.43, 1.97, -2.71, 1.5, 3.14, 0, -1.03, 1.22, 0.32, 2.48, 2.42, -3.25, 2.84, 
-1.06, -3.86, 1.93, -1.75, -0.43, -4.4, -0.75, 2.26, 3.32, 0.79, 1.6, 0.89, 1.1, 2.38, 2.67, 3.71, 2.98, 1.44, 1.71, 1.62, 2.6, 2.12, -4.39, 1.21, 1.93, 1.74, 1.54, 2.47, 2.15, -2.05, 1.14, -0.93, 2.73, -0.69, 0.89, 3.15, 1.48, 2.34, 3.87, 2.33, 2.07, 1.82, 2.42, 2.15, 2.65, 1.51, 3.34, -1.37, 1.4, 2.34, 0.88, 2.41, 0.92, -3.77, 2.63, 2.28, -1.91, 2.44, 2.42, 3.51, 1.58, 2.27, -0.4, 2.12, 1.46, 1.27, 2.35, 2.36, 2.38, 2.13, 2.24, 2.27, 0.34, 2.35, 3.49, 0.92, 2.12, 1.64, 1.98, 2.04, 2.52, 1.12, 0.01, 2.45, 2.7, 2.84, 1.7, 1.9, -1.11, 1.96, 2.49, 1.52, 1.69, 3, 2.47, 1.72, 2.17, 1.87, -0.16, -4.94, 1.62, 2.24, -1.6, 1.8, 0.36, 1.29, -3.62, 2.7, 3.17, -3.62, 2.37, 0.27, 1.25, 2.32, 2.31, 1.19, 1.27, -1.87, 2.18, 2.38, -1.71, 2.05, 0.86, 1.68, 2.57, 2.28, 1.54, 2.82, -2.24, 2.4, 1.4, 1.7, 2.33, 2.72, 2.27, 2.76, 2.84, 2.18, -1, 2.18, 0.15, 3.51, -3.02, 3.15, -0.58, 2.47, 2.52, 1.66, 1.43, 2.07, 0.78, 1.52, -0.55, 1.58, 3.77, 2.97, 2.33, -1.8, 2.26, -1.76, 2.84, 3.25, -2.92, -0.21, 2.57, 2, 2.55, -1.18, 1.81, -0.19, 1.69, 3.4, 2.86, -1.16, 0.67, -5.25, 2.84, 2.95, -2.98, 2.47, -0.72, 1.99, 2.35, 1.22, 1.59, 3.29, -3.1, 1.86, 2.78, -1.23, -0.53, -0.77, -0.67, 1.67, 1.39, 1.22, -1.23, 2.42, -0.41, -0.16, 1.72, 1.31, 1.36, 1.56, 1.56, 1.15, -2.38, -2.09, 2.66, -3.81, 1.64, 1.9, 2.51, 2.52, 2.38, 1.36, 2.6, 2.54, -1.99, -0.13, 2.47, 1.1, 2.33, 2.14, -0.55, 0.44, 3.11, 2.13, 2.25, 3.48, 2.93, 2.87, 1.48, 2.21, 1.46, 0.94, -0.69, -1.36, 1.88, 2.62, 1.9, 2.65, 1.89, -1.39, 1.62, 2.04, 2.57, -0.77, 2.65, 1.9, 2.99, 0.12, 1.53, -3.82, 0.46, 0.44, 2.02, 2.45, 2.4, 2.07, 1.58, -0.14, 1.86, -3.23, 2.02, -0.34, 0.65, -2.32, 2.63, -2.58, 0.06, 2.3, -0.5, 1.83, 2.48, 1.23, -2.03, 1.01, 2.17, -0.28, 1.28, 2.49, 0.18, 1.1, 0.51, 2.27, 1.59, 0.92, 2.07, 2.5, 1.25, 2.28, -0.24, -1.13, 0.27, 0.65, 0.83, 2.25, 2.01, -0.16, -0.57, 2.6, -3.36, 2.39, 1.47, 1.63, 0.2, 2.32, 1.21, 0.54, 2.31, 0.62, -0.79, 0.44, 1.71, 1.05, 2.54, 0.48, 2.2, 2.24, 0.3, 2.14, 0.53, 2.02, 0.55, 1.42, 0.85, 0.58, 0.5, 1.98, 2.97, 0.38, 2.49, 2.48, 3.88, 2.52, -1.17, 2.87, 1.82, 2.16, 2.64, 2.68, 1.23, 1.54, 1.66, 0.6, 1.19, -3.09, 2.39, 0.36, 2.65, 2.06, 1.81, 2.14, 0.52, 2.51, 1.49, 2.09, 2.01, 1.3, 2.48, 2.28, 1.21, 0.03, 1.98, 2.77, 1.73, 1.71, -3.23, 1.62, 0.63, 1.29, 2.51, -0.36, 2.25, 0.97, -1.01, 0.77, 1.34, 0.14, 1, 0.4, 2.01, 0.22, 2.05, 1.22, -1.49, 0.09, 2.28, -0.02, 2.75, 1.93, 0.03, 1.68, 0.36, -2.18, -3.45, 1.39, 2.24, 0.66, 2.57, 0.01, 1.53, 1.91, 0.31, 1.51, 0.23, -2.97, -1.5, -0.7, 3.43, 2.95, 0.2, -3.54, 0.36, 1.99, 2.42, -1.07, 2.07, 3, 1.84, 1.09, 1.05, -0.7, 1.51, 2.98, -1.12, -0.36, 1.67, 2.19, 1.23, 1.01, 2.05, 1.69, 1.84, 2.46, -1.04, -3.91, 2.72, 1.59, -1.39, -2.5, 2.38, 2.23, -3.5, 2.42, 2.39, 2.39, 1.03, 1.82, 3.55, 2.72, 1.79, 3.11, 2.31, 0.41, 1.91, 0.36, 2.86, -2.52, 2.63, 2.69, 1.54, 2, -2.53, -2.21, 2.57, -0.44, 1.34, -0.19, 1.36, 2.95, 3.5, 2.62, -3.9, -2.85, -0.96, 1.02, 1.24, 1.53, 2.91, 1.64, 1.61, 2.11, 2.94, 2.69, -1.88, 2.86, 2.03, 2.59, 1.84, 1.91, -0.7, 1.68, 2.23, 1.7, 2.82, 2.62, 3.39, -1.3, -2.81, 1.24, 3.05, -1.2, 0.15, -2.94, 1.47, 1.7, -3.46, -3.69, -3.81, -4.35, -4.53, -3.88, -3.45, -4.25, -3.15, -4.25, -3.99, -3.43, -3.21, -4.31, -4.46, -4.13, -3.83, -5.24, -4.72, -4.29, -3.51, -4.22, -5.55, -4.7, -4.72, -4.44, -5.3, -5.27, -4.52, -5.96, -4.5, -4.01, -4.98, -4.66, -3.73, -2.66, -4.68, -2.27, -4.87, -3.95, -4.51, -3.62, -2.9, -3.66, -3.83, -4.78, -4.7, -4.56, -4.69, -4.22, -4.15, -4.49, -4.53, -4.32, -4.66, -4.95, -4.89, -5.23, -5.34, -4.7, -4.23, -4.85, -4.52, -5.21, -5.35, -5.58, -3.84, 
-4.48, -5.71, -5, -5.15, -4.16, -3.19, -3.48, -3.47, -3.53, -3.8, -3.84, -3.28, -2.39, -3.36, -3.4, -3.7, -3.34, -3.36, -2.76, -3.5, -3.41, -3.91, -3.62, -3.67, -3, -2.97, -3.8, -3.06, -3.17, -3.53, -3.49, -3.33, -3.95, -2.08, -4.34, -3.8, -3.26, -3.47, -3.57, -3.94, -3.36, -3.5, -5.02, -3.28, -4.09, -3.82, -3.95, -3.08, -3.61, -4.17, -3.66, -3.33, -4.13, -3.47, -3.36, -3.59, -4.75, -4.72, -4.67, -5.96, -3.1, -3.19, -4.41, -3.99, -4.42, -3.92, -4.38, -4.8, -4.09, -3.83, -4.1, -5.21, -4.52, -4.41, -4.21, -5.2, -3.09, -4.77, -4.91, -4.5, -4.4, -4.34, -3.85, -4.46, -4.23, -4.12, -5.03, -5.24, -4.01, -4.23, -4.42, -4.28, -2.76, -4.56, -4.39, -5.07, -4.5, -4.56, -4.41, -4.72, -4.7, -4.21, -5.21, -4.31, -3.82, -4.56, -3.91, -3.84, -3.27, -3.66, -5.78, -4.83, -4.37, -4.35, -3.17, -3.69, -2.65, -3.81, -4.16, -4.18, -4.14, -5.06, -4.11, -4.85, -4.86, -3.51, -4.7, -5.18, -4.21, -3.97, -5.47, -3.54, -3.46, -4.04, -3.98, -3.88, -4.25, -3.94, -3.91, -3.62, -4.44, -5.9, -4.86, -3.71, -4.39, -3.75, -5.39, -3.52, -4.47, -2.92, -4.29, -4.35, -3.45, -3.5, -3.86, -3.41, -4.55, -3.58, -3.5, -4.68, -3.57, -3.87, -5.09, -4.69, -4.43, -4.95, -5.45, -4.14, -4.35, -5.3, -4.53, -4.54, -4.28, -5.12, -5.67, -4.95, -3.53, -3.85, -4.62, -3.29, -2.79, -4.2, -5.18, -4.07, -4.04, -4.84, -4.64, -4.88, -4.19, -4.08, -5.18, -4.46, -4.86, -4.19, -5.04, -3.64, -4.14, -4.47, -4.16, -3.32, -4.83, -3.31, -4.13, -3.63, -3.95, -5.25, -5.14, -5, -4.63, -4.71, -3.48, -4.4, -5.24, -3.95, -4.34, -4.6, -3.78, -3.99, -4.25, -4.49, -3.76, -4.55, -4.08, -3.57, -4.05, -3.41, -5.69, -4.99, -5.08, -5.18, -5.06, -4.91, -4.14, -5.69, -5.73, -4.23, -5.01, -4.36, -5.91, -4.44, -4.25, -3.79, -4.12, -4.57, -4.46, -3.5, -4.52, -4.74, -4.44, -4.39, -4.1, -4.58, -4.24, -5.84, -4.6, -3.79, -4.55, -0.94, -4.63, -4.33, -5.25, -4.48, -5.77, -4.26, -4.71, -2.99, -3.93, -3.94, -4.86, -5.48, -3.68, -4.17, -3.68, -3.82, -4.34, -4.81, -4.51, -4.67, -4.21, -4.26, -3.44, -3.99, -3.53, -4.22, -3.27, -4.32, -4.6, -5.59, -3.66, -4.55, -4.82, -5.17, -3.83, -5.16, -4.19, -4.22, -4.72, -4.65, -3.93, -3.79, -4.68, -4.74, -4.2, -4.82, -4.1, -4.71, -4.78, -3.94, -3.17, -3.97, -4.88, -3.94, -4.85, -4.37, -3.95, -3.74, -4.36, -4.61, -4, -4.11, -3.95, -4.43, -4.54, -5.05, -4.46, -4.34, -4.96, -3.96, -3.75, -4.29, -4.93, -4.91, -4.98, -3.6, -4.12, -3.94, -4.41, -3.66, -4.09, -3.59, -4.47, -5.03, -4.49, -4.73, -4.07, -4.63, -4.13, -4.38, -3.93, -5.42, -3.29, -4.8, -4.02, -3.68, -4.55, -4.18, -3.03, -4.85, -2.59, -3.45, -3.54, -4.45, -4.09, -2.86, -3.57, -3.69, -4.1, -4.52, -4.34, -4.71, -4.3, -3.89, -4.45, -3.37, -3.7, -4.29, -4.27, -5.07, -4.5, -4.42, -5.1, -3.93, -4.84, -4.49, -4.12, -4.27, -3.99, -4.46, -4.01, -4.13, -4.25, -4.42, -5.12, -3.89, -3.95, -4.14, -4.5, -4.75, -3.49, -4.73, -3.98, -4.27, -3.72, -4.46, -4.66, -3.18, -3.94, -3.76, -4.12, -5.25, -3.29, -3.39, -4.92, -4.88, -4.05, -2.78, -5.12, -2.89, -4.09, -4.45, 1.4, -4.99, -4.78, -5.5, -3.81, -3.9, -5.76, -5.68, -4.69, -3.99, -4.22, -4.11, -3.26, -3.98, -3.76, -4.87, -5.2, -4.48, -4.37, -4.43, -4.04, -4.51, -5.04, -4.09, -4.5, -5.54, -4.39, -4.3, -3.5, -3.55, -3.83, -4.11, -2.1, -4, -4.35, -4.32, -4.5, -4.18, -3.84, -4.28, -3.72, -3.33, -5.25, -4.13, -4.05, -4.57, -5.71, -4.55, -4.21, -4.34, -4.47, -5.19, -4, -4.69, -3.33, -5.43, -3.39, -4.4, -3.49, -4.36, -4.89, -5.37, -4.58, -2.61, -5.21, -4.38, -5.15, -5.02, -3.89, -1.59, -4.31, -3.53, -3.85, -3.96, -3.66, -4.77, -4.48, -0.55, -2.25, -4.28, -4.53, -2.23, -4.39, -4.18, -3.42, -4.47, -4.17, -3.27, -4.02, -3.36, -4.09, -4.2, -4.77, -3.82, -4.38, 1.2, -3.85, 
-4.6, -3.71, -1.57, -2.34, -2.17, -4.84, -3.54, -1.78, -3.01, -3.77, -3.66, -3.08, -4.39, -3.96, -3.9, -2.68, -3.38, -3.88, -4.16, -3.88, -2.97, -4.44, -4.9, -3.9, -3.58, -2.6, -3.12, -4.87, -3.83, -3.88, -1.86, -4.58, -4.39, -2.8, -3.44, -4.26, -4.41, -3.81, -3.56, -3.78, -4.41, -3.58, -4.16, -4.08, -4.05, -4.33, -3.5, -4.37, -4.19, -3.34, -3.08, -4.38, -3.7, -4.37, -4.5, -2.06, -3.81, -3.54, -4.82, -3.8, -4.26, -3.25, -2.75, -4.94, -3.49, -4.48, -4.23, -4.76, -4.8, -4.18, -3.96, -4.46, -4.27, -3.41, -4.12, -4.66, -3.88, -3.4, -4.01, -4.48, -3.09, -4.72, -4.76, -2.94, -3.67, -4.68, -3.95, -4.32, -3.03, -3.36, -4.03, -3.11, -4.5, -3.29, -4.13, -4.43, -2.1, -4.82, -4.87, -3.56, -4.54, -4.58, -4.44, -4.48, -4.75, -4.22, -3.07, -3.64, -2.98, -2.57, -4.4, -4.33, -3.23, -4.12, -4.34, -3, -5.06, -3.36, -4.29, -0.8, -2.32, -3.83, -3.68, -4.05, -2.87, -3.95, -4.74, -3.53, -3.05, -3.72, -4.42 ], [ 1.36, 0.43, 1.31, 0.81, 0.25, 1.31, -0.24, -1.24, 1.21, 0.29, -0.17, 0.58, 0, 0.46, 0.71, 0.37, 0.92, -0.04, 0.15, 1.36, 0.94, 0.64, -0.14, 0.27, 0.47, 0.98, 0.46, 0.86, 0.64, 0.98, 0.34, 1.1, -0.7, 0.27, -0.13, 1.36, 0.78, 1.25, 0.73, 1.03, 1.12, 1.01, 0.55, 0.93, 1.34, 1.09, 0.4, 1.11, 0.56, 0.34, 0.82, 1.22, 1.25, -0.55, 0.39, -1.12, 1.27, 1.14, 0.94, 1.39, 1.09, 2.2, 0.86, -1.58, 0.85, 0.28, 0.9, 0.2, 0.66, 1.19, 1.56, 0.46, 0.42, 0.62, 1.69, 0.05, 1.57, 0.88, 0.62, 0.02, 2.04, 0.92, 0.48, 1.7, -0.42, -0.43, 1.75, 0.83, 1.92, 1.07, -0.1, -0.26, 0.84, -0.04, 0.53, 1.04, 0, 0.65, 0.52, 0.73, 0.59, 0.67, 0.44, 0.89, -0.18, 0.77, 1.09, 0.88, 1.23, 0.96, 0.67, 0.73, 0.76, -0.1, 1.31, 0.55, 0.83, 0.82, 0.04, 0.48, 0.5, 0.41, 1.18, 0.89, -0.73, 0.29, 0.73, 0.08, 0.56, 0.73, 0.63, 0.87, 0.56, 1.05, 0.56, 0.42, 1.1, 0.52, 0.57, 0.94, 0.86, 0.61, 0.79, 0.46, 0.35, 1.18, 1.66, 0.55, 1.13, 0.71, 0.52, 0.68, 1.07, -0.21, 1.4, 0.77, -0.11, -1.34, 1.29, 0.68, 1.14, -0.5, 0.57, 0.48, -0.02, -0.37, 0.89, -0.77, 0.84, 0.97, 0.64, 0.77, 0.45, 0.64, 0.73, -0.07, 0.98, 0.67, 1.49, 1.17, 0.61, -2.4, 0.49, 0.15, 0.79, -0.3, -2.32, 0.69, -1.7, 1.07, 1.17, 2.18, 1.04, 0.13, 1.32, 1.01, 0.34, 0.55, 0.85, 0.45, 0.34, 0.53, 1.14, 0.96, 1.23, 1.61, 0.3, 0.69, 0.75, 0.92, 0.26, 0.75, 1.53, 1.14, 0.34, -0.4, 0.18, 0.15, 0.9, -0.26, 1.45, 0.85, -1.62, 0.55, 0.91, 1.13, -0.97, 0.01, 1.04, 0.93, 0.73, 0.26, 0.55, -1.66, 1.14, 1.61, 1.16, 1.22, 1.72, -0.26, 1, -1.53, 0.59, 0.35, -0.35, -0.29, 1.68, 1.02, 0.69, 0.23, 0.88, -0.06, 0.34, 0.44, 0.3, -0.22, 0.2, 0.71, 0.63, 0.65, 0.92, 0.41, 0.72, 0.45, 0.06, 1.55, 1.43, 1.03, -0.62, 0.41, 1.26, 0.59, -0.59, -0.73, 0.7, 1.03, 1.99, 0.84, 1.25, 1.34, 0.89, 0.76, 0.16, 1.4, 1.41, 1.1, 1.13, 0.8, -1.03, 1.33, 0.47, 0.43, -0.36, 1.59, 1.28, -0.62, 1.81, 0.9, 0.66, 0.83, 0.59, 0.29, 1.31, 0.58, -0.08, 0.76, 0.21, 0.6, 0.82, 0.91, 1.48, -0.51, 0.65, 1.01, 1.14, 1.21, 1.14, 0.27, 0.55, -0.3, 0.89, 0.64, 1.07, 1.35, 0.6, 0.12, 0.9, 1.22, 0.41, -0.88, 1, 1.26, 0.45, 1.16, 1.42, 0.96, 0.38, 1.27, 1.19, 1.2, 0.82, 0.78, 0.94, -0.48, 0.92, 2.8, 1.29, 0.8, 0.6, 2.02, 1.35, 0.91, 1.32, 0.79, 2.32, 1.09, 1.33, 0.5, 0.71, 1.36, 0.78, 0.26, 1.19, 0.86, 0.8, 0.76, 0.93, 0.99, 1.38, 0.92, 0.54, 1.2, 1.54, 1.11, 0.74, -0.45, 1.42, 1.64, 1.19, -0.6, 1.08, 1.56, 1.86, 0.73, 2.25, 1.83, 0.76, 1.44, 1.34, 1.32, 1.59, 1.2, 1.53, 1.41, 0.86, 1.27, 0.73, 1.33, 1.17, 1.41, 1.21, 0.5, 1.45, 1.48, 0.68, 0.7, 1.28, 0.96, 1.37, 0.46, 1.07, 1.14, 0.96, 0.49, 1.51, 0.21, 0.92, 0.95, 0.72, 0.37, 0.48, 0.95, 0.35, 1.13, 0.46, 1.45, 0.8, 1.14, 1.17, 1.67, 0.34, 0.68, 1.19, 0.78, 0.93, 1.13, 1.03, 1.3, 1.61, 0.27, 1.06, 0.49, 
0.53, 0.52, 0.96, 0.58, 1.39, 1.18, 0.45, 1.14, -0.66, 1, 0.6, 0.34, 1.12, 1.84, 1.26, 2.47, 0.42, 1.45, 1.51, 1.24, 1.74, 1.66, 0.96, 1.64, 0.88, 1.44, 0.23, 2.32, 0.75, 1.29, 0.86, -0.18, 1.6, 0.41, 0.13, 1.21, 1.69, 1.02, 1.46, 1.6, 0.91, 1.16, 0.98, 1.43, 1.74, 1.12, 1.98, 1.13, 1.18, 1.22, 0.5, 0.52, 1.82, 1.34, 1.3, 1.29, 0.22, 0.4, 0.47, 0.12, 1.5, 1.76, 0.82, 0.3, 0.38, 0.7, -1.33, 1.03, 0.39, -0.32, 1.13, 1.33, 0.82, 0.98, -0.72, 0.34, 0.3, 0.15, 1.5, 0.85, -0.18, 0.78, 0.87, 0.77, 1.57, 0.63, 0.8, 0.76, 0.59, 0.58, 0.82, -0.22, -0.11, 0.85, 0.89, -1.99, 1.48, 0.63, -0.34, 0, 0.36, 0, 1.11, 0.89, 0.05, 1.08, -0.08, 1.28, 0.93, 1.36, 0.68, 0.47, 0.9, 0.9, 1.78, 0.27, -0.46, -0.11, 1.26, 0.92, 0.78, 1.97, 1.85, 1.48, 0.53, 0.46, 2.69, 1.64, 1.28, 1.2, 0.93, 0.99, 2.5, 0.72, 0.22, 0.35, 0.36, 0.69, 0.15, 1.65, 0.88, 0.74, 0.76, -0.51, 0.64, 2.64, 1.45, 0.24, 0.82, 0.86, 0.88, -1.9, 0.79, -0.54, 0.6, 0.83, 0.65, 0.55, 0.23, -0.01, -0.38, 0.03, -0.21, 0.02, 1.27, -0.14, 1.14, 0.43, 1.23, 1.54, 0.45, 0.75, -0.59, -1.35, -0.75, -0.01, -0.05, 0.54, 0.51, 1.91, 1.64, 0.31, -0.11, 1.05, 1.14, -0.01, 0.39, 0.94, -0.03, 0.61, 0.66, 0.9, -0.43, 0.41, 0.78, 1.01, 1.19, 0.92, -0.65, 1.6, 0.75, -0.08, 1.21, 1.12, 1.12, 0.66, -0.1, 0.09, -0.05, 1.6, -0.37, 0.43, 0.36, 0.32, 1.81, 0.41, 1.27, 1.66, 0.71, 0.65, -0.01, 1.05, 0.52, -0.35, 0.35, 0.28, 0.11, -0.79, -0.75, 0.5, -0.63, -2.75, -0.52, -0.34, 0.71, 0.82, 0.24, 0.88, 0.84, -0.27, 0.03, 1.18, -0.02, 0.38, -0.34, -0.4, 1.27, 1.22, -0.14, 0.56, 0.19, 1.17, -0.23, 0.07, 1.27, 0.4, -0.46, -2.72, 0.13, 1.24, 0.28, -0.11, -0.23, -0.44, -0.39, 0.73, 0.63, -0.67, -0.34, -0.07, -1.13, 0.35, 1.23, -0.45, -0.61, 0.68, 1.38, 1.93, 1.28, -0.16, 0.64, 0.24, 1.43, 0.83, 0.85, 0.7, 1.02, -0.4, -0.18, 0.16, 0.98, -0.7, 0.84, 1.06, 0.12, -1.31, 0.26, 0.09, 0.38, -0.58, 1.11, 0.97, -0.02, 0.43, -1.26, -1.92, 1.12, 0.24, 1.36, 1.29, 1.27, 1.37, -0.99, 0.48, 0.35, 0.17, 1.09, 0.62, 0.8, 0.87, 1.48, 1.04, 0.81, -1.17, 1, -0.04, 0.58, 0.13, 0.49, 0.51, 0.24, -0.21, -0.3, 0.97, 0.98, 1.23, 1.43, 0, 0.14, -1.54, -0.48, 0.21, 0.64, 0.44, 1, 0.01, 0.5, 0, 1.43, -0.43, 0.69, 1.81, 0.13, -0.17, 0.23, 0.29, 0.45, 0.34, 0.79, 0.55, -0.82, 1.57, -1.7, 0.74, -0.02, 0.88, 0.42, 0.2, -0.91, 0.42, 1.37, 0.35, 0.19, 0.08, -2.61, 0.34, 0.55, 1.01, 1.21, 1.07, 0.73, 1.5, 1.05, 0.89, 0.27, 1.2, 0.63, 1.15, 1.19, 0.97, 0.48, 1.01, 1.48, -0.17, 0.99, 0.36, 0.12, 0.09, 0.3, 0.76, 0.48, 0.87, 0.04, 1.35, 1.95, 2.08, 1.06, 0.26, 1.01, 0.07, 1.33, 1.22, 0.77, 0.6, 1.28, 0.8, 1.56, 0.45, 1.01, 0.88, -1.06, 1.32, 1.09, 1.35, 1.81, 1.55, 0.32, 0.44, 0.92, 0.91, 0.21, 1.38, 0.37, 0.69, 0.7, 0.95, 0.07, 0.87, -0.62, 0.73, 1.33, 0.86, -1.11, 1.32, 0.47, -0.03, 0.59, 1.04, -1.94, 0.82, 1.21, -1.67, 0.55, 0.41, 0.48, 1.75, -1.24, 1.11, 1.65, 0.16, 0.21, 1.27, 0.65, 0.4, 1.15, 0.48, -1.42, 0.51, 0.29, 0.87, 1.77, 0.7, 0.97, 1.67, 1.49, 0.76, 0.74, 0.72, 1.27, 1.88, 0.15, 0.92, 0.97, 1.42, 0.12, 1.32, 1.92, 1.65, 1.2, 0.96, 1.21, 0.92, 0.19, 1.26, 1.87, 1.65, 1, -0.3, -0.29, 0.19, 1.91, 0.77, -1.46, 0.69, 0.76, 0.34, 1.73, 1.16, 0.8, 1.48, 1.5, 1.27, 1.56, 0.92, 0.77, 1.26, -0.18, 1.7, 1.46, 1.11, 1.4, 1.82, -0.02, -0.14, 0.26, 0.43, 0.49, -0.12, 0.28, 0.3, 0.77, 1.77, 0.28, -0.04, 1.29, 1.13, 0.27, -0.39, 1.19, 0.23, 0.4, 0.7, 1.59, -0.2, 0.74, 0.92, 0.88, 0.28, -0.67, 1.38, 1.04, 0.58, 0.14, 0.87, 0.42, -0.09, 1.48, 0.96, 0.82, 1.16, 1.3, -0.31, 1.01, 0.51, 0.91, 1.7, -0.14, 0.74, 0.93, 0.48, -0.27, 0.48, 0.38, -1.34, 1.77, 0.75, 0.08, 0.5, 1.17, 0.11, 1.84, 1.03, -0.13, 0.16, 0.41, -0.89, 0.64, 
1.74, 1.29, 0.34, 1.56, 1.37, -1.38, 1.01, 0.26, 0.97, 0.84, 0.11, 0.83, 1.27, 0.67, 1.92, 1.16, 0.33, 0.37, 0.83, 0.57, 1.23, 0.28, 1.25, 0.5, 1.13, 0.11, 0.4, 0.33, 1.27, 1.37, -0.5, 0.61, 0.31, 1.23, 1.4, 0.18, 0.61, 0.2, 0.68, 0.32, 0.96, 1.68, 1.34, 0.21, -0.17, 0.84, -0.02, 2.73, 0.04, 1.03, 1.79, 1.7, 1.27, 1.26, 1.53, 0.44, 1.08, 0.74, 1.68, 0.44, -0.07, 0, 1.35, 0.64, -1.4, 1.29, 0.04, 1.04, 0.95, 0.23, -0.12, -0.57, 1.58, 0.13, 0.57, 1.08, 0.96, 1.26, 0.58, 0.51, 0.58, 0.34, 0.86, 1.36, 0.75, 0.66, -0.22, 0.4, 1.2, 1.36, 0.78, 0.38, 1.09, 1.67, 1.35, 0.57, 0.6, -0.08, 1.41, 0.55, 0.34, 0.93, -0.13, 1.39, -1.09, 0.74, 0.91, 1.46, 0.42, 0.66, 2.53, 0.7, 1.11, 1.06, 1.34, 0.72, 0.23, -1.09, 1.37, 0.75, 1.51, 0.7, -0.25, 0.09, 0.57, 1.06, 0.08, 0.26, 0.28, -0.14, -1.08, 0.72, 0.54, 0.86, -0.07, -0.35, 0.21, 0.93, 0.85, 0.7, -1.94, 0.88, 0.58, 0.96, 0.74, 0.49, -0.9, -0.48, 0.25, 0.32, 0.32, 0.36, 1.18, -0.08, -0.99, 0.31, 0.75, -0.16, 1.03, 0.24, -0.21, 0.8, 0.12, 0.1, 0.65, 1.09, -0.3, 0.51, -0.49, -0.25, 0.94, 0.66, -0.43, -0.22, -0.63, 0.52, 0.87, 0.03, 0.84, 0.9, -0.22, 0.53, 0.41, 0.82, 0.79, 0.46, -0.25, 0.28, 0.1, -0.49, 0.69, -1.09, -0.13, 0.14, 0.41, -0.14, 0.28, 0.24, -0.43, -1.51, 0, 0.14, 0.65, -0.4, -0.24, -0.39, 1.47, 1.04, 0.32, 0.9, 0.04, -0.61, 0.28, -1.29, 0.18, 0.77, 0.18, -1.18, 0.97, -0.06, -0.46, 0.27, 1.36, -0.05, 0.39, 0.95, 0.77, -0.04, 0.83, 0.71, 0.36, 1.35, 1.25, 0.52, -0.41, -1.61, 0.12, -0.54, 0.3, 0.54, 0.92, 0.78, 1.21, -0.55, 1.26, -0.38, 0.08, -0.7, 0.34, 0.74, 1.43, 0, 0.46, 0.48, 0.59, 0.16, -0.86, 0.17, 0.77, -1.16, 1.39, 1.03, 0.62, -0.5, 0.48, 1.04, 0.22, 0.23, 0.53, 1.1, -0.43, 0.65, 0.6, 0.44, 0.71, 0.01 ], [ 2.98, 2.55, 3.02, 3.13, -1.45, 4.04, -0.72, -1.19, 2.28, -1.6, 0.33, 3.06, -1.18, -2.44, 2.09, 1.72, 2.01, 1.18, 1.51, -1.15, 0.96, 2.45, 2.45, 3.19, 2.78, 1.44, 1.62, 1.03, 1.14, 0.42, -1.81, 2.16, -0.87, 2.93, 2.48, 2.36, 1.93, 2.48, 1.88, 3.49, 3.35, 1.88, 3.47, 2.53, 2.54, 2.3, 2.71, 0.6, 2.29, 1.88, 1.5, 0.38, 3.7, 0.84, 1.44, -1.75, 2.98, 2.48, 2.68, 1.27, 1.91, 4.03, 2.15, -0.92, 2.74, 2.14, 3.27, 3.73, -1.4, 2.21, 2.26, -1.59, 2.03, 3.44, 3.2, -1.26, 1.05, 1.36, 3.02, 2.3, -1.67, 3.13, -0.78, -1.38, 2.06, -1.21, 0.85, -1.34, -0.07, 3.16, 3.65, 3.21, 1.85, 3.24, 1.28, 2.52, 2.71, 0.9, 1.29, 2.89, 2.24, 1.96, 2.93, 2.01, -1.12, 2.23, 2.88, 3.05, 4.3, 1.48, 3.34, -1, 3.73, -0.52, 3.77, 1.16, 2.3, 3.08, 2.97, 2.77, 3.52, 2.5, 2.29, 2.34, 2.67, 2.84, 2.38, 2.38, 3.45, -0.71, 2.51, 0.35, 2.05, 2.62, 2.29, 0.53, 3.12, 2.5, 1.61, 1.7, 1.73, 3.82, 3.12, 2.97, 2.69, 3.02, 3.33, 1.79, 4.49, 3.17, 3.37, 1.95, 2.83, 3.45, 3.06, 2.46, 3.82, 3.29, 3.13, 2.48, 2.36, 2.32, 3.42, 1.48, 1.63, 1.89, 3.35, 4.03, 1.39, 2.93, 0.74, 3.3, 2.87, 2.92, 3.03, 3.42, 3.76, 2.47, 2.36, 2.81, 1.27, -0.64, 2.28, 2.9, 0.58, 1.83, -1.43, 2.48, -1.38, 1.01, 1.67, -1.11, 2.49, 0.17, 1.68, 2.77, 1.66, 0.58, 2.38, -1.07, 2.03, 1.92, 0.73, 3.51, 1.39, 2.14, 3.94, 2.09, 2.51, 2.36, -0.23, 2.62, 1.67, 1.51, 3.12, 0.25, 3.51, 2.26, 1.92, 2.07, 0.06, 2.39, 2.05, 2.47, -0.26, 2.11, -0.96, 1.91, 2.12, 0.02, 1.73, 0.4, 0.22, 2.43, 1.31, 3.58, 2.69, 3.53, 2.69, -2.28, 2.39, -1.52, 2.81, 1.84, -0.48, -0.57, 1.36, 3.76, 1.88, 2.07, 2.26, -1.64, 3.65, 4.04, 4.34, -0.44, 1.32, 0.12, 2.97, 2.25, -0.87, 2.15, -2.21, 4.32, 2.34, 3.63, 1.22, 3.08, -0.18, 2.09, 2.39, -0.85, -1.83, 0.07, -0.76, 2.08, 2.21, 2.77, 0.55, 2.28, 0.74, -1.91, 2.59, 1.88, 2.32, 2.34, 2.9, 2.29, -1.51, -1.22, 1.58, -1.5, 3.17, 2.82, 2.18, 2.83, 3.13, 1.98, 1.69, 1.78, -0.04, 1.94, 2.94, 0.91, 2.89, 2.71, -1.92, 
-0.96, 3.07, 2.55, 2.46, 2.61, 3.58, 2.14, 3.43, 3.53, 2.17, 1.02, 0.45, -0.15, 3.78, 2.87, 3.22, 2.84, 2.56, -1.23, 1.95, 2.32, 3, -1.06, 3.14, 3.26, 2.88, 2, 2.58, -0.72, 2.5, 2.08, 2.2, 2.54, 0.87, 2.89, 2.33, 0.16, 1.1, -1.94, 1.91, 2.58, 1.79, -1.34, 3.02, -1.67, 1.55, 2.5, 0.19, 2.66, 3.29, 1.83, -0.9, 1.46, 2.21, -0.98, 1.35, 1.83, 3.25, 2.8, 1.61, 2.58, 0.78, 1.3, 1.51, 3.44, 1.97, 2.35, 0.79, -0.43, 1.86, 1.93, 2.12, 3.8, 2.82, 1.23, 1.83, 2.63, -1.61, 2.28, 2.39, 2.33, 0.7, 1.85, 1.82, 3.05, 2.7, 1.31, -0.43, 1.06, 3.7, 3, 2.46, 3.34, 2.17, 2.29, 2.1, 2.08, 0.65, 2.7, 2.38, 2.23, 2.4, -1.72, 1.75, 3.09, 2.55, -0.88, 3.52, 3.84, 1.81, 2.87, 1.05, 2.88, 1.14, 2.51, 2.64, 0.62, 2.68, 2.21, 3.28, 2.24, 3.19, -1.84, 2.82, 2.07, 2.48, 2.2, 3.25, 4.3, 2.25, 1.11, 2.61, 3.07, 2.29, 0.82, 2.07, 2.63, 2.29, 0.23, 3.32, 3.27, 2.4, 1.82, -0.27, 1.89, -1.04, 2.63, 2.36, 1.19, 1.77, 1.78, -1.97, 1.71, 1.92, 1.64, 1.61, 1.91, 2.97, 0.62, 3.29, 1.38, -0.29, 0.84, 1.56, 1.06, 3.47, 1.59, 1.68, 2.8, 0.83, -1.22, -2.26, 1.39, 1.89, 0.93, 2.6, 1.34, 2.34, 2.32, 1.31, 2.54, 0.86, -0.06, -0.73, 1.13, 2.52, 2.56, 2.67, -0.55, 1.73, 3.15, 1.28, 0.33, 3.27, 3.08, 1.17, 2.71, 1.05, -0.46, -0.76, 3.6, -1.83, 1.68, 1.82, 0.7, 1.51, 1.96, 2.54, 1.31, 1.93, 2.01, -0.71, 0.28, 2.97, 3, 0.98, 0.42, 3.52, 2.91, -0.6, 2.49, 2.66, 3.78, 1.75, 2.91, 2.59, 2.56, 0.7, 4.74, 1.95, -0.7, 1.99, 0.28, 1.86, -1.15, 3.19, 2.56, 2, 1.56, 1.14, -1.62, 2.04, -0.99, 3.53, 0.6, 1.36, 3.03, 3.27, 3.84, -2.08, -1.28, 0.75, 2.26, 1.92, 2.22, 2, 2.52, 2.35, 2.33, 3.4, 1.67, -1.59, 1.83, 2.88, 3.33, 2.24, 3.99, 1.93, 2.41, 0.84, 3.04, 3.99, 2.63, 4.57, -1.46, 1.71, 1.86, 3.12, -1.31, 0.36, -1.66, 2.7, 2.09, -0.6, 0.93, 0, -1.01, 0.16, -0.9, -1.06, -1.52, -0.98, -1.08, -1.72, -0.69, -1.01, -2.36, -0.83, -2.18, -2.05, -1.82, 0.35, -0.34, 0.98, -0.24, -1.29, -0.88, -0.39, -0.8, -2.54, -0.67, -2.46, -2.39, -1.11, -0.22, -1.32, -1.56, -0.8, -2.27, -1.71, -1.93, -2.54, -1.63, 0.15, -0.45, -0.3, -2.46, 0.41, -2.88, -0.26, -1.24, -1.75, -0.45, -0.38, -1.7, -0.1, -0.79, -0.73, -1.09, -0.52, -2.33, -2.31, -1.97, -0.48, -1.74, -0.57, -2.54, -0.39, -0.38, -0.85, -0.89, -1.2, 1.91, -1.05, -0.26, -1.87, -2.82, -2.11, -0.87, -2.59, -2.61, 0.51, 0.02, -1.22, -1.93, 0.68, -0.8, -2.49, -0.51, -1.9, -1.66, -0.4, -2.03, -2.6, -1.7, -1.63, -2.01, -1.56, -1.3, -0.73, -1.29, -0.67, -0.51, -1.58, -0.92, -1.77, -1.09, -0.68, -1.25, -3.19, -2.32, -1.42, 0.38, -1.17, -1.32, -2.2, -1.61, -2.62, 0.22, -1.16, -1.7, -1.2, 0.23, -2.17, -0.03, -0.9, -0.26, -1.87, -0.39, -1.81, -0.49, -2.81, 0.56, -0.4, -1.97, -2.16, -0.45, 0.22, -1.69, 0.16, -1.5, -1.55, -0.52, -0.65, -0.12, -1.9, -1.62, -1.36, -0.86, -1.22, -0.74, -0.98, -1.04, -1.54, -2.01, -0.13, -1.15, 0.59, 0.07, -1.69, -1.53, 1.84, -1.44, 0.24, -0.79, -0.68, -1.28, -2.65, -1.6, -1.31, 0.29, -1.79, -1.78, -1, -0.04, -2.23, 0.16, -0.76, 0.21, 0.02, -2.11, -0.79, -1.52, -1.47, -1.57, -1.26, 0.15, -1.48, -0.86, -1.58, -0.8, -0.44, -1.63, -0.98, -1.67, -0.58, -2.17, -1.5, -0.83, -2.1, -0.64, -0.34, -2.19, -0.54, 0.31, 0.56, -1.24, -0.6, -0.47, -1.68, -1.82, -2.83, -0.26, -0.55, -2.16, -1.29, -1.04, -0.92, -1.08, -1.71, -1.81, -0.18, -1.08, -2.31, -2.08, -1.56, -0.83, -1.45, -1.33, 0.72, -1.8, -1.25, -0.19, -0.12, -0.72, -1.04, -1.13, -0.55, 0.2, -1.01, -0.98, -2.35, -1.35, -1.32, -0.74, -1.56, -1.26, -1.34, -0.74, -0.38, -1.97, -2.01, 0.07, 0.55, -0.96, -2.34, -2.33, -1.81, 0.73, 0.4, -0.28, -0.37, -0.82, -0.69, -1.6, -0.61, -1.93, -0.04, -0.75, -0.54, -2.4, -1.66, -0.42, -0.14, -2.48, 0.02, -0.92, -0.14, -3.03, 
-1.02, 0.7, -0.21, -0.98, 0.44, -2.17, 1.32, -0.44, -0.95, -2.68, -1.06, -0.05, -1.91, 0.89, -1.01, 0.34, 0.24, -0.68, 1.64, 0.11, -0.44, -0.32, -0.14, 0.45, -2.77, -1.05, -1.72, -1.13, -2.05, -2.1, -1.72, -1.56, -0.24, -0.18, -0.51, -0.54, 0.54, 0.02, -0.39, 0.28, -0.15, -0.62, -0.37, -0.28, -0.73, 0.35, -1.31, -1.17, -1.34, 0.33, -0.13, 0.39, -0.9, -0.22, -0.05, 0.34, -1.71, -1.13, 0.84, -1.17, 0.96, -0.39, 0.05, -0.65, 0.94, -1.21, -0.5, 0.41, -0.18, -1.93, -0.95, -1.29, -0.78, -1.57, -1.24, 0.89, -0.44, -1, -0.31, -0.07, -1.54, -0.42, -1.47, 0.74, -0.92, -1.08, -0.53, -1.46, -2.05, -1.68, -0.01, -0.51, -1.21, 0.66, -0.62, 0.23, -1.39, -0.24, -0.6, -2.57, -1.06, 0.1, 0.35, 0.03, -0.98, 0, -1.66, -1.34, -1.82, -1.43, -0.09, -0.14, -0.35, -1.33, 0.45, -0.71, -0.07, -0.17, 0.3, -1.55, -0.79, -0.68, 0.14, -1.51, -2.35, -0.85, -0.92, -0.12, 0.9, -0.94, 0.62, -0.22, -0.62, -0.6, -0.37, 1.23, -0.36, 0.05, -2.51, -0.63, 0.32, -2.05, -0.33, 0.51, -1.91, -1.46, -0.16, -1.21, 0.09, -0.05, -0.92, -0.82, -1.09, -1.81, -2.22, -0.17, -1.04, -1.96, -0.61, 0.04, -0.45, -0.3, -0.79, -0.38, 0.44, -0.59, -0.15, -0.7, -0.86, 0.59, -0.61, -2.44, 0.24, -1.73, -0.53, -0.78, 0.84, -0.7, -2.5, -2.4, 0.44, -0.33, -0.18, -1.3, -0.9, -2.01, -0.84, -0.17, -1.85, 0, -0.4, -0.72, -0.86, -1.94, -1.94, -1.36, -1.07, -1.59, -0.27, -0.84, -0.96, -0.34, -0.66, -2.23, 0, 0.14, -0.52, 0.32, -1.56, -0.76, -0.25, -0.47, -1.64, 0.69, 0.06, -0.19, -0.79, -0.79, -1.3, -1.35, 0.26, 0.72, -2.09, 0.29, -0.69, -2.11, -2.39, -0.4, -0.89, -0.27, -0.68, -0.6, -1.21, -0.3, -0.34, 0.19, -1.31, -0.55, 0.8, 0.38, -1.28, -1.8, -1.74, 1.68, -1.36, 0.4, -1.46, -1.95, -1.85, -0.8, -1.06, -0.07, -2.37, -1.95, 0.16, -1.43, -1.08, -1.13, -0.1, 0.72, 0.42, -1.3, -0.5, -1.81, -2.46, -2.11, -0.83, 1.3, -0.6, -0.79, -1.24, 0.26, -0.78, 1.72, -1.22, -0.62, -1.68, -2.62, -0.15, -0.15, -2.26, -1.04, -1.26, -1.11, -0.48, 0.88, 0.53, -0.02, -0.72, -0.61, -0.38, -0.47, -1, 0.17, 0.02, -1.14, 0.87, -1.02, -0.78, -0.55, 0.5, -0.59, -0.62, -0.92, -0.28, 0.23, -1.25, -0.68, -1.99, -1.29, 0.99, -0.48, -0.34, -0.51, 0.8, -0.39, 1.87, 2.19, -1.05, -0.27, 0.39, 0.3, 1.84, -0.27, 0.46, -0.03, -0.29, -0.47, -0.25, -0.34, -1.22, -1.04, -0.47, -0.45, -1.3, -1.66, -0.59, -1.29, -1.45, -0.8, -0.03, 0.55, -0.11, -0.6, -0.76, -0.92, -0.61, -1.14, -1.04, 0.1, -0.37, 0.38, -1.05, -0.74, -0.67, -1.06, 0.14, -0.64, 1.85, 1.7, -0.53, -1.12, -1.46, 0.78, -0.8, 0.92, -0.88, 0.1, 0.25, 1.24, 0.44, -1.29, -0.4, -0.52, -1.81, -1.55, 1.13, -0.22, -0.44, -0.41, 0.99, -0.16, -0.8, -0.6, -0.66, 1.09, -0.57, 0.63, -0.47, -2.29, -0.08, -0.61, 0, -0.5, 0.2, -0.96, 0.21, -1.85, -1.14, -0.74, -0.7, -0.02, -0.01, -0.04, 0.62, -0.09, 1.03, -0.76, 0.2, -1.13, -0.12, -1.39, -0.09, -0.58, -1.07, 0.31, -1.28, 0.13, -0.01, 0.76, 0.17, -0.07, -0.96, -0.09, 0.63, -0.73, 0.52, 1.54, -0.69, -0.41, 0.97, -0.25, 0.19, 0.36, -0.5, -2.11, -0.8, -0.69, -0.64 ] ] }, "x": { "dataset": [ "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", 
"BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", 
"BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "BRCA", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", 
"OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "OV", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC", "LUSC" ] } }, config={ "binned": True, "graphOrientation": "vertical", "graphType": "Dotplot", "groupingFactors": [ "dataset" ], "histogramBins": 150, "jitter": False }, width=613, height=613, events=CXEvents(), after_render=[], other_init_params={ "version": 35, "events": False, "info": False, "afterRenderInit": False, "noValidate": True } ) display = CXNoteBook(cx) display.render(output_file="dotplot_11.html") ###Output _____no_output_____
mninst_tf2.ipynb
###Markdown ###Code
!pip install tensorflow==2.0.0
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import numpy as np
import os

# TensorFlow 2.0 eager execution
# MNIST classifier using tf.keras
class Trainer(object):
    def __init__(self, inputs, outputs):
        self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
        self.accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
        self.optimizer = tf.keras.optimizers.Adam()
        self.model_path = './my_model'
        self.build_model(inputs, outputs)
        self.checkpoint_prefix = os.path.join(self.model_path, "ckpt")
        self.ckptroot = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)

    def build_model(self, inputs, outputs) -> None:
        # Simple MLP: inputs -> 128 -> 64 -> outputs, with a softmax head
        inputs_ = layers.Input((inputs,))
        x = layers.Dense(128, activation="relu")(inputs_)
        x = layers.Dense(64, activation="relu")(x)
        x = layers.Dense(outputs, activation="softmax")(x)
        self.model = tf.keras.models.Model(inputs_, x)

    @tf.function
    def train_step(self, x, y) -> float:
        # One optimization step: forward pass, loss, gradients, weight update
        with tf.GradientTape() as tape:
            prediction = self.model(x, training=True)
            loss = self.loss(y, prediction)
        gradients = tape.gradient(loss, self.model.trainable_weights)
        self.optimizer.apply_gradients(zip(gradients, self.model.trainable_weights))
        self.accuracy.update_state(y, prediction)
        return loss

    def train(self, dataset, epochs) -> None:
        for epoch in range(epochs):
            self.accuracy.reset_states()
            for step, (x, y) in enumerate(dataset):
                loss = self.train_step(x, y)
                if step % 100 == 0:
                    print("epoch = {} step = {} loss = {} accuracy = {}".format(
                        epoch, step, loss, self.accuracy.result()))

    def save(self) -> None:
        self.ckptroot.save(self.checkpoint_prefix)

    def load(self) -> None:
        self.ckptroot.restore(tf.train.latest_checkpoint(self.model_path))

# Load MNIST and flatten each 28x28 image into a 784-d float vector in [0, 1]
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype(np.float32).reshape(-1, 784) / 255.0
x_test = x_test.astype(np.float32).reshape(-1, 784) / 255.0

train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(128)

test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(128)

trainer = Trainer(inputs=784, outputs=10)
trainer.train(train_dataset, 5)

import matplotlib.pyplot as plt

def plot_mnist(x, y, plt_num=5):
    # Show the first plt_num images side by side with their predicted labels
    plt.figure(figsize=(20, 4))
    for index, (image, label) in enumerate(zip(x[0:plt_num], y[0:plt_num])):
        plt.subplot(1, plt_num, index + 1)
        plt.imshow(np.reshape(image, (28, 28)), cmap=plt.cm.gray)
        plt.title('Predict: {}'.format(label), fontsize=20)
    plt.show()

predict_ = trainer.model.predict(x_test)
predict = [tf.argmax(e).numpy() for e in predict_]
plot_mnist(x_test, predict)
###Output _____no_output_____
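###Markdown An added sanity check, not part of the original notebook: a minimal sketch that reuses the `trainer` and `test_dataset` objects defined above to report held-out accuracy, using the same tf.keras metric API already used inside `Trainer`. ###Code
# Hedged addition: evaluate the trained model on the held-out test set.
# Assumes `trainer` and `test_dataset` from the cells above.
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
for x, y in test_dataset:
    # Forward pass in inference mode; the metric accumulates over batches
    test_accuracy.update_state(y, trainer.model(x, training=False))
print("test accuracy = {}".format(test_accuracy.result().numpy()))
###Output _____no_output_____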
ECS129 Knot or Not Final No Time.ipynb
###Markdown Optimization Trick: Check whether a line segment crosses the plane before checking for the triangle intersection ###Code
import numpy as np

# helper functions to help us detect whether or not a line segment crosses a specified plane
def normal(p0, p1, p2):
    x0, y0, z0 = p0
    x1, y1, z1 = p1
    x2, y2, z2 = p2
    ux, uy, uz = u = [x1 - x0, y1 - y0, z1 - z0]  # first vector
    vx, vy, vz = v = [x2 - x0, y2 - y0, z2 - z0]  # second vector
    u_cross_v = [uy * vz - uz * vy, uz * vx - ux * vz, ux * vy - uy * vx]  # cross product
    point = np.array(p1)
    norm_vec = np.array(u_cross_v)
    return point, norm_vec

def point_check(point, norm, p_plane):
    # In this instance we assume that the plane points 'up' and we classify
    # the arbitrary point relative to the plane
    dot_product = np.dot(norm, (point - p_plane))
    if dot_product < 0:     # point is below the plane
        return -1
    elif dot_product == 0:  # point is on the plane
        return 0
    else:                   # point is above the plane
        return 1

# This function verifies whether or not the line segment crosses the plane
def segment_verify(l1, l2, p1, p2, p3):
    plane_point, plane_norm = normal(p1, p2, p3)
    check1 = point_check(l1, plane_norm, plane_point)
    check2 = point_check(l2, plane_norm, plane_point)
    # If both checks return the same answer, both endpoints are on the same side,
    # so the line segment does not cross the plane
    if check1 == check2:
        return False
    # If the checks differ, the endpoints lie on opposite sides, so the segment crosses the plane
    else:
        return True
###Output _____no_output_____
###Markdown Algorithm to Check for Knots ###Code
# determines if a line segment crosses the triangle
def intersect_line_triangle(q1, q2, p1, p2, p3):
    def signed_tetra_volume(a, b, c, d):
        return np.sign(np.dot(np.cross(b - a, c - a), d - a))

    numknots = 0
    val = segment_verify(q1, q2, p1, p2, p3)
    # If we know the line segment crosses the plane, check whether it crosses
    # the triangle within that plane to see if there's a knot
    if val == True:
        s3 = signed_tetra_volume(p1, p2, q1, q2)
        s4 = signed_tetra_volume(p2, p3, q1, q2)
        s5 = signed_tetra_volume(p3, p1, q1, q2)
        if s3 == s4 and s4 == s5:
            numknots = numknots + 1
    return numknots
###Output _____no_output_____
###Markdown Optimization Trick: Once the threshold is reached, remove the point ###Code
# Checking if the threshold is reached so that we can skip and delete the index i+1
def lineseg_dist(p, a, b):
    # normalized tangent vector
    d = np.divide(b - a, np.linalg.norm(b - a))
    # signed parallel distance components
    s = np.dot(a - p, d)
    t = np.dot(p - b, d)
    # clamped parallel distance
    h = np.maximum.reduce([s, t, 0])
    # perpendicular distance component
    c = np.cross(p - a, d)
    return np.hypot(h, np.linalg.norm(c))
###Output _____no_output_____
###Markdown Run Code ###Code
# declaring necessary variables
avgCoords = []
indexofthresholds = []
filename = input("Enter in filename: ")
iterations = int(input("Enter in # of iterations: "))
dataVals = np.genfromtxt(filename, delimiter='', dtype=float)
totalKnotsFnd = 0

# Arbitrary # of iterations
for k in range(0, iterations):
    nproblem = 0
    for i in range(0, len(dataVals) - 2):
        # Attempting to straighten out the triangle
        xCoord = (dataVals[i][0] + dataVals[i + 1][0] + dataVals[i + 2][0]) / 3
        yCoord = (dataVals[i][1] + dataVals[i + 1][1] + dataVals[i + 2][1]) / 3
        zCoord = (dataVals[i][2] + dataVals[i + 1][2] + dataVals[i + 2][2]) / 3
        avgCoords = [xCoord, yCoord, zCoord]

        # Generating the triangle in 3d space to see if a line segment crosses it
        A = dataVals[i]
        B = dataVals[i + 1]
        C = avgCoords
        nk = 0

        # Checking all line segments up until the indexes used for the triangle;
        # the range stops at i-2 since the line segment right before i can never cross
        for j in range(0, i - 2):
            E = dataVals[j]
            F = dataVals[j + 1]
            nk += intersect_line_triangle(E, F, A, B, C)

        # Checking all line segments after the indexes used for the triangle
        for j in range(i + 2, len(dataVals) - 1):
            E = dataVals[j]
            F = dataVals[j + 1]
            nk += intersect_line_triangle(E, F, A, B, C)

        # Generating the other part of the triangle in 3d space to see if a line segment crosses it
        A = dataVals[i + 1]
        B = avgCoords
        C = dataVals[i + 2]

        # Checking all line segments up until the indexes used for the triangle;
        # the range stops at i-1 since the line segment right before i can never cross
        for j in range(0, i - 1):
            E = dataVals[j]
            F = dataVals[j + 1]
            nk += intersect_line_triangle(E, F, A, B, C)

        # Checking all line segments after the indexes used for the triangle
        for j in range(i + 3, len(dataVals) - 1):
            E = dataVals[j]
            F = dataVals[j + 1]
            nk += intersect_line_triangle(E, F, A, B, C)

        # If no knots were detected, we "pull" down the point at i+1 to avgCoords
        # to begin "straightening" the sequence
        if nk == 0:
            dataVals[i + 1] = avgCoords
        nproblem += nk

        # Check if the distance is short enough for the threshold trick
        distance = lineseg_dist(avgCoords, dataVals[i], dataVals[i + 2])
        if distance < 0.01:
            # the pulled-down middle point i+1 now lies on the segment from i to i+2,
            # so it is the redundant point to delete (as the comment above states)
            indexofthresholds.append(i + 1)

    # deletes the indexes that have reached the threshold, since each such point sits
    # in the middle of a straight line; deleting them all at once avoids the index
    # shifting that one-at-a-time deletion would cause
    if indexofthresholds:
        dataVals = np.delete(dataVals, indexofthresholds, 0)
    indexofthresholds = []

    if nproblem > 0:
        print("On iteration {}, {} knot(s) were detected".format(k, nproblem))
    totalKnotsFnd += nproblem

if totalKnotsFnd == 0:
    print("There were no knots found within the file")
###Output
Enter in filename: 1zncA.crd
Enter in # of iterations: 50
On iteration 12, 1 knot(s) were detected
On iteration 13, 1 knot(s) were detected
On iteration 14, 1 knot(s) were detected
On iteration 15, 1 knot(s) were detected
On iteration 16, 2 knot(s) were detected
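###Markdown An added sanity check (a sketch, not part of the original analysis): a hand-built triangle in the z = 0 plane and two segments with known answers, one piercing the triangle's interior and one passing well outside it, to confirm `segment_verify` and `intersect_line_triangle` behave as described above. The coordinates are made up for illustration. ###Code
# Hedged addition: hand-constructed test cases for the intersection routine.
# Triangle with vertices in the z = 0 plane; the first segment crosses its
# interior, the second crosses the plane but far outside the triangle.
tri = (np.array([0.0, 0.0, 0.0]),
       np.array([4.0, 0.0, 0.0]),
       np.array([0.0, 4.0, 0.0]))
piercing = (np.array([1.0, 1.0, -1.0]), np.array([1.0, 1.0, 1.0]))
missing = (np.array([10.0, 10.0, -1.0]), np.array([10.0, 10.0, 1.0]))

print(intersect_line_triangle(*piercing, *tri))  # expect 1 (crossing found)
print(intersect_line_triangle(*missing, *tri))   # expect 0 (no crossing)
###Output _____no_output_____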
notebooks/figures/chapter10_figures.ipynb
###Markdown Cloning the pyprobml repo ###Code !git clone https://github.com/probml/pyprobml %cd pyprobml/scripts ###Output _____no_output_____ ###Markdown Installing required software (This may take few minutes) ###Code !apt install octave -qq > /dev/null !apt-get install liboctave-dev -qq > /dev/null ###Output _____no_output_____ ###Markdown Figure 10.1: (a) Visualization of a 2d plane in a 3d space with surface normal $\mathbf w $ going through point $\mathbf x _0=(x_0,y_0,z_0)$. See text for details. (b) Visualization of optimal linear decision boundary induced by logistic regression on a 2-class, 2-feature version of the iris dataset. Figure(s) generated by [iris_logreg.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_logreg.py) ###Code %run ./iris_logreg.py ###Output _____no_output_____ ###Markdown Figure 10.2: Plots of $\sigma (w_1 x_1 + w_2 x_2)$. Here $\mathbf w = (w_1,w_2)$ defines the normal to the decision boundary. Points to the right of this have $\sigma (\mathbf w ^\top \mathbf x )>0.5$, and points to the left have $\sigma (\mathbf w ^\top \mathbf x ) < 0.5$. Adapted from Figure 39.3 of \citep MacKay03 . Figure(s) generated by [sigmoid_2d_plot.py](https://github.com/probml/pyprobml/blob/master/scripts/sigmoid_2d_plot.py) ###Code %run ./sigmoid_2d_plot.py ###Output _____no_output_____ ###Markdown Figure 10.4: Polynomial feature expansion applied to a two-class, two-dimensional logistic regression problem. (a) Degree $K=1$. (a) Degree $K=2$. (a) Degree $K=4$. (d) Train and test error vs degree. Figure(s) generated by [logreg_poly_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_poly_demo.py) ###Code %run ./logreg_poly_demo.py ###Output _____no_output_____ ###Markdown Figure 10.5: NLL loss surface for binary logistic regression applied to Iris dataset with 1 feature and 1 bias term. The goal is to minimize the function. Figure(s) generated by [iris_logreg_loss_surface.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_logreg_loss_surface.py) ###Code %run ./iris_logreg_loss_surface.py ###Output _____no_output_____ ###Markdown Figure 10.6: Weight decay with variance $C$ applied to two-class, two-dimensional logistic regression problem with a degree 4 polynomial. (a) $C=1$. (a) $C=316$. (a) $C=100,000$. (d) Train and test error vs $C$. Figure(s) generated by [logreg_poly_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_poly_demo.py) ###Code %run ./logreg_poly_demo.py ###Output _____no_output_____ ###Markdown Figure 10.7: Example of 3-class logistic regression with 2d inputs. (a) Original features. (b) Quadratic features. Figure(s) generated by [logreg_multiclass_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_multiclass_demo.py) ###Code %run ./logreg_multiclass_demo.py ###Output _____no_output_____ ###Markdown Figure 10.11: (a) Logistic regression on some data with outliers (denoted by x). Training points have been (vertically) jittered to avoid overlapping too much. Vertical line is the decision boundary, and its posterior credible interval. (b) Same as (a) but using robust model, with a mixture likelihood. Adapted from Figure 4.13 of \citep Martin2018 . Figure(s) generated by [logreg_iris_bayes_robust_1d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_iris_bayes_robust_1d_pymc3.py) ###Code %run ./logreg_iris_bayes_robust_1d_pymc3.py ###Output _____no_output_____ ###Markdown Figure 10.14: (a) Illustration of the data. 
(b) Log-likelihood for a logistic regression model. The line is drawn from the origin in the direction of the MLE (which is at infinity). The numbers correspond to 4 points in parameter space, corresponding to the lines in (a). (c) Unnormalized log posterior (assuming vague spherical prior). (d) Laplace approximation to posterior. Adapted from a figure by Mark Girolami. Figure(s) generated by [logregLaplaceGirolamiDemo.m](https://github.com/probml/pmtk3/blob/master/demos/logregLaplaceGirolamiDemo.m) ###Code !octave -W logregLaplaceGirolamiDemo.m >> _ ###Output _____no_output_____ ###Markdown Figure 10.15: Posterior predictive distribution for a logistic regression model in 2d. Top left: contours of $p(y=1|\mathbf x , \mathbf w _ map )$. Top right: samples from the posterior predictive distribution. Bottom left: Averaging over these samples. Bottom right: moderated output (probit approximation). Adapted from a figure by Mark Girolami. Figure(s) generated by [logregLaplaceGirolamiDemo.m](https://github.com/probml/pmtk3/blob/master/demos/logregLaplaceGirolamiDemo.m) ###Code !octave -W logregLaplaceGirolamiDemo.m >> _ ###Output _____no_output_____ ###Markdown Figure 10.16: Illustration of the posterior over the decision boundary for classifying iris flowers (setosa vs versicolor) using 2 input features. (a) 25 examples per class. Adapted from Figure 4.5 of \citep Martin2018 . (b) 5 examples of class 0, 45 examples of class 1. Adapted from Figure 4.8 of \citep Martin2018 . Figure(s) generated by [logreg_iris_bayes_2d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_iris_bayes_2d_pymc3.py) ###Code %run ./logreg_iris_bayes_2d_pymc3.py ###Output _____no_output_____ ###Markdown Figure 10.17: Quadratic lower bounds on the sigmoid (logistic) function. In solid red, we plot $\sigma (x)$ vs $x$. In dotted blue, we plot the lower bound $L(x, \boldsymbol \xi )$ vs $x$ for $\boldsymbol \xi =2.5$. (a) JJ bound. This is tight at $\boldsymbol \xi = \pm 2.5$. (b) Bohning bound (\cref sec:bohningBinary . This is tight at $\boldsymbol \xi =2.5$. Figure(s) generated by [sigmoidLowerBounds.m](https://github.com/probml/pmtk3/blob/master/demos/sigmoidLowerBounds.m) ###Code !octave -W sigmoidLowerBounds.m >> _ ###Output _____no_output_____ ###Markdown Cloning the pyprobml repo ###Code !git clone https://github.com/probml/pyprobml %cd pyprobml/scripts ###Output _____no_output_____ ###Markdown Installing required software (This may take few minutes) ###Code !apt-get install octave -qq > /dev/null !apt-get install liboctave-dev -qq > /dev/null %%capture %load_ext autoreload %autoreload 2 DISCLAIMER = 'WARNING : Editing in VM - changes lost after reboot!!' from google.colab import files def interactive_script(script, i=True): if i: s = open(script).read() if not s.split('\n', 1)[0]=="## "+DISCLAIMER: open(script, 'w').write( f'## {DISCLAIMER}\n' + '#' * (len(DISCLAIMER) + 3) + '\n\n' + s) files.view(script) %run $script else: %run $script ###Output _____no_output_____ ###Markdown Figure 10.1: (a) Visualization of a 2d plane in a 3d space with surface normal $\mathbf w $ going through point $\mathbf x _0=(x_0,y_0,z_0)$. See text for details. (b) Visualization of optimal linear decision boundary induced by logistic regression on a 2-class, 2-feature version of the iris dataset. 
Figure(s) generated by [iris_logreg.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_logreg.py) ###Code interactive_script("iris_logreg.py") ###Output _____no_output_____ ###Markdown Figure 10.2: Plots of $\sigma (w_1 x_1 + w_2 x_2)$. Here $\mathbf w = (w_1,w_2)$ defines the normal to the decision boundary. Points to the right of this have $\sigma (\mathbf w ^\top \mathbf x )>0.5$, and points to the left have $\sigma (\mathbf w ^\top \mathbf x ) [Mac03] . Figure(s) generated by [sigmoid_2d_plot.py](https://github.com/probml/pyprobml/blob/master/scripts/sigmoid_2d_plot.py) ###Code interactive_script("sigmoid_2d_plot.py") ###Output _____no_output_____ ###Markdown Figure 10.3: Illustration of how we can transform a quadratic decision boundary into a linear one by transforming the features from $\mathbf x =(x_1,x_2)$ to $\boldsymbol \phi (\mathbf x )=(x_1^2,x_2^2)$. Used with kind permission of Jean-Philippe Vert Figure 10.4: Polynomial feature expansion applied to a two-class, two-dimensional logistic regression problem. (a) Degree $K=1$. (a) Degree $K=2$. (a) Degree $K=4$. (d) Train and test error vs degree. Figure(s) generated by [logreg_poly_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_poly_demo.py) ###Code interactive_script("logreg_poly_demo.py") ###Output _____no_output_____ ###Markdown Figure 10.5: NLL loss surface for binary logistic regression applied to Iris dataset with 1 feature and 1 bias term. The goal is to minimize the function. Figure(s) generated by [iris_logreg_loss_surface.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_logreg_loss_surface.py) ###Code interactive_script("iris_logreg_loss_surface.py") ###Output _____no_output_____ ###Markdown Figure 10.6: Weight decay with variance $C$ applied to two-class, two-dimensional logistic regression problem with a degree 4 polynomial. (a) $C=1$. (a) $C=316$. (a) $C=100,000$. (d) Train and test error vs $C$. Figure(s) generated by [logreg_poly_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_poly_demo.py) ###Code interactive_script("logreg_poly_demo.py") ###Output _____no_output_____ ###Markdown Figure 10.7: Example of 3-class logistic regression with 2d inputs. (a) Original features. (b) Quadratic features. Figure(s) generated by [logreg_multiclass_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_multiclass_demo.py) ###Code interactive_script("logreg_multiclass_demo.py") ###Output _____no_output_____ ###Markdown Figure 10.8: A simple example of a label hierarchy. Nodes within the same ellipse have a mutual exclusion relationship between them. Figure 10.9: A flat and hierarchical softmax model $p(y|x)$, where $x$ are the input features (context) and $y$ is the output label. From https://www.quora.com/What-is-hierarchical-softmax Figure 10.10: Example of a term-document matrix, where raw counts have been replaced by their TF-IDF values (see \cref sec:tfidf ). Darker cells are larger values. From https://bit.ly/2kByLQI . Used with kind permission of Christoph Carl Kling. Figure 10.11: (a) Logistic regression on some data with outliers (denoted by x). Training points have been (vertically) jittered to avoid overlapping too much. Vertical line is the decision boundary, and its posterior credible interval. (b) Same as (a) but using robust model, with a mixture likelihood. Adapted from Figure 4.13 of [Mar18] . 
Figure(s) generated by [logreg_iris_bayes_robust_1d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_iris_bayes_robust_1d_pymc3.py) ###Code interactive_script("logreg_iris_bayes_robust_1d_pymc3.py") ###Output _____no_output_____ ###Markdown Figure 10.12: (a) Illustration of logistic and tempered logistic loss with $t_1=0.8$. (b) Illustration of sigmoid and tempered sigmoid transfer function with $t_2=2.0$. From https://ai.googleblog.com/2019/08/bi-tempered-logistic-loss-for-training.html . Used with kind permission of Ehsan Amid. Figure 10.13: Illustration of standard and bi-tempered logistic regression on data with label noise. From https://ai.googleblog.com/2019/08/bi-tempered-logistic-loss-for-training.html . Used with kind permission of Ehsan Amid. Figure 10.14: (a) Illustration of the data. (b) Log-likelihood for a logistic regression model. The line is drawn from the origin in the direction of the MLE (which is at infinity). The numbers correspond to 4 points in parameter space, corresponding to the lines in (a). (c) Unnormalized log posterior (assuming vague spherical prior). (d) Laplace approximation to posterior. Adapted from a figure by Mark Girolami. Figure(s) generated by [logregLaplaceGirolamiDemo.m](https://github.com/probml/pmtk3/blob/master/demos/logregLaplaceGirolamiDemo.m) ###Code !octave -W logregLaplaceGirolamiDemo.m >> _ ###Output _____no_output_____ ###Markdown Figure 10.15: Posterior predictive distribution for a logistic regression model in 2d. Top left: contours of $p(y=1|\mathbf x , \mathbf w _ map )$. Top right: samples from the posterior predictive distribution. Bottom left: Averaging over these samples. Bottom right: moderated output (probit approximation). Adapted from a figure by Mark Girolami. Figure(s) generated by [logregLaplaceGirolamiDemo.m](https://github.com/probml/pmtk3/blob/master/demos/logregLaplaceGirolamiDemo.m) ###Code !octave -W logregLaplaceGirolamiDemo.m >> _ ###Output _____no_output_____ ###Markdown Figure 10.16: Illustration of the posterior over the decision boundary for classifying iris flowers (setosa vs versicolor) using 2 input features. (a) 25 examples per class. Adapted from Figure 4.5 of [Mar18] . (b) 5 examples of class 0, 45 examples of class 1. Adapted from Figure 4.8 of [Mar18] . Figure(s) generated by [logreg_iris_bayes_2d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_iris_bayes_2d_pymc3.py) ###Code interactive_script("logreg_iris_bayes_2d_pymc3.py") ###Output _____no_output_____ ###Markdown Figure 10.17: Quadratic lower bounds on the sigmoid (logistic) function. In solid red, we plot $\sigma (x)$ vs $x$. In dotted blue, we plot the lower bound $L(x, \boldsymbol \xi )$ vs $x$ for $\boldsymbol \xi =2.5$. (a) JJ bound. This is tight at $\boldsymbol \xi = \pm 2.5$. (b) Bohning bound (\cref sec:bohningBinary . This is tight at $\boldsymbol \xi =2.5$. Figure(s) generated by [sigmoidLowerBounds.m](https://github.com/probml/pmtk3/blob/master/demos/sigmoidLowerBounds.m) ###Code !octave -W sigmoidLowerBounds.m >> _ ###Output _____no_output_____ ###Markdown Cloning the pyprobml repo ###Code !git clone https://github.com/probml/pyprobml %cd pyprobml/scripts ###Output _____no_output_____ ###Markdown Installing required software (This may take few minutes) ###Code !apt-get install octave -qq > /dev/null !apt-get install liboctave-dev -qq > /dev/null %%capture %load_ext autoreload %autoreload 2 DISCLAIMER = 'WARNING : Editing in VM - changes lost after reboot!!' 
from google.colab import files def interactive_script(script, i=True): if i: s = open(script).read() if not s.split('\n', 1)[0]=="## "+DISCLAIMER: open(script, 'w').write( f'## {DISCLAIMER}\n' + '#' * (len(DISCLAIMER) + 3) + '\n\n' + s) files.view(script) %run $script else: %run $script def show_image(img_path): from google.colab.patches import cv2_imshow import cv2 img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) img=cv2.resize(img,(600,600)) cv2_imshow(img) ###Output _____no_output_____ ###Markdown Figure 10.1: (a) Visualization of a 2d plane in a 3d space with surface normal $\mathbf w $ going through point $\mathbf x _0=(x_0,y_0,z_0)$. See text for details. (b) Visualization of optimal linear decision boundary induced by logistic regression on a 2-class, 2-feature version of the iris dataset. Figure(s) generated by [iris_logreg.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_logreg.py) ###Code interactive_script("iris_logreg.py") ###Output _____no_output_____ ###Markdown Figure 10.2: Plots of $\sigma (w_1 x_1 + w_2 x_2)$. Here $\mathbf w = (w_1,w_2)$ defines the normal to the decision boundary. Points to the right of this have $\sigma (\mathbf w ^\top \mathbf x )>0.5$, and points to the left have $\sigma (\mathbf w ^\top \mathbf x ) [Mac03] . Figure(s) generated by [sigmoid_2d_plot.py](https://github.com/probml/pyprobml/blob/master/scripts/sigmoid_2d_plot.py) ###Code interactive_script("sigmoid_2d_plot.py") ###Output _____no_output_____ ###Markdown Figure 10.3: Illustration of how we can transform a quadratic decision boundary into a linear one by transforming the features from $\mathbf x =(x_1,x_2)$ to $\boldsymbol \phi (\mathbf x )=(x_1^2,x_2^2)$. Used with kind permission of Jean-Philippe Vert ###Code show_image("/content/pyprobml/notebooks/figures/images/kernelTrickQuadratic.png") ###Output _____no_output_____ ###Markdown Figure 10.4: Polynomial feature expansion applied to a two-class, two-dimensional logistic regression problem. (a) Degree $K=1$. (a) Degree $K=2$. (a) Degree $K=4$. (d) Train and test error vs degree. Figure(s) generated by [logreg_poly_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_poly_demo.py) ###Code interactive_script("logreg_poly_demo.py") ###Output _____no_output_____ ###Markdown Figure 10.5: NLL loss surface for binary logistic regression applied to Iris dataset with 1 feature and 1 bias term. The goal is to minimize the function. Figure(s) generated by [iris_logreg_loss_surface.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_logreg_loss_surface.py) ###Code interactive_script("iris_logreg_loss_surface.py") ###Output _____no_output_____ ###Markdown Figure 10.6: Weight decay with variance $C$ applied to two-class, two-dimensional logistic regression problem with a degree 4 polynomial. (a) $C=1$. (a) $C=316$. (a) $C=100,000$. (d) Train and test error vs $C$. Figure(s) generated by [logreg_poly_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_poly_demo.py) ###Code interactive_script("logreg_poly_demo.py") ###Output _____no_output_____ ###Markdown Figure 10.7: Example of 3-class logistic regression with 2d inputs. (a) Original features. (b) Quadratic features. Figure(s) generated by [logreg_multiclass_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_multiclass_demo.py) ###Code interactive_script("logreg_multiclass_demo.py") ###Output _____no_output_____ ###Markdown Figure 10.8: A simple example of a label hierarchy. 
Nodes within the same ellipse have a mutual exclusion relationship between them. ###Code
show_image("/content/pyprobml/notebooks/figures/images/labelTree.png")
###Output _____no_output_____
###Markdown Figure 10.9: A flat and hierarchical softmax model $p(y|x)$, where $x$ are the input features (context) and $y$ is the output label. From https://www.quora.com/What-is-hierarchical-softmax ###Code
show_image("/content/pyprobml/notebooks/figures/images/softmaxFlat.png")
show_image("/content/pyprobml/notebooks/figures/images/softmaxHier.png")
###Output _____no_output_____
###Markdown Figure 10.10: Example of a term-document matrix, where raw counts have been replaced by their TF-IDF values (see \cref sec:tfidf ). Darker cells are larger values. From https://bit.ly/2kByLQI . Used with kind permission of Christoph Carl Kling. ###Code
show_image("/content/pyprobml/notebooks/figures/images/LSAorig.png")
###Output _____no_output_____
###Markdown Figure 10.11: (a) Logistic regression on some data with outliers (denoted by x). Training points have been (vertically) jittered to avoid overlapping too much. Vertical line is the decision boundary, and its posterior credible interval. (b) Same as (a) but using a robust model, with a mixture likelihood. Adapted from Figure 4.13 of [Mar18] . Figure(s) generated by [logreg_iris_bayes_robust_1d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_iris_bayes_robust_1d_pymc3.py) ###Code
interactive_script("logreg_iris_bayes_robust_1d_pymc3.py")
###Output _____no_output_____
###Markdown Figure 10.12: (a) Illustration of logistic and tempered logistic loss with $t_1=0.8$. (b) Illustration of sigmoid and tempered sigmoid transfer function with $t_2=2.0$. From https://ai.googleblog.com/2019/08/bi-tempered-logistic-loss-for-training.html . Used with kind permission of Ehsan Amid. ###Code
show_image("/content/pyprobml/notebooks/figures/images/binary_loss.png")
show_image("/content/pyprobml/notebooks/figures/images/binary_transfer_function.png")
###Output _____no_output_____
###Markdown Figure 10.13: Illustration of standard and bi-tempered logistic regression on data with label noise. From https://ai.googleblog.com/2019/08/bi-tempered-logistic-loss-for-training.html . Used with kind permission of Ehsan Amid. ###Code
show_image("/content/pyprobml/notebooks/figures/images/bi_tempered_blog.png")
###Output _____no_output_____
###Markdown Figure 10.14: (a) Illustration of the data. (b) Log-likelihood for a logistic regression model. The line is drawn from the origin in the direction of the MLE (which is at infinity). The numbers correspond to 4 points in parameter space, corresponding to the lines in (a). (c) Unnormalized log posterior (assuming vague spherical prior). (d) Laplace approximation to posterior. Adapted from a figure by Mark Girolami. Figure(s) generated by [logregLaplaceGirolamiDemo.m](https://github.com/probml/pmtk3/blob/master/demos/logregLaplaceGirolamiDemo.m) ###Code
!octave -W logregLaplaceGirolamiDemo.m >> _
###Output _____no_output_____
###Markdown Figure 10.15: Posterior predictive distribution for a logistic regression model in 2d. Top left: contours of $p(y=1|\mathbf x , \mathbf w _ map )$. Top right: samples from the posterior predictive distribution. Bottom left: Averaging over these samples. Bottom right: moderated output (probit approximation). Adapted from a figure by Mark Girolami.
Figure(s) generated by [logregLaplaceGirolamiDemo.m](https://github.com/probml/pmtk3/blob/master/demos/logregLaplaceGirolamiDemo.m) ###Code
!octave -W logregLaplaceGirolamiDemo.m >> _
###Output _____no_output_____
###Markdown Figure 10.16: Illustration of the posterior over the decision boundary for classifying iris flowers (setosa vs versicolor) using 2 input features. (a) 25 examples per class. Adapted from Figure 4.5 of [Mar18] . (b) 5 examples of class 0, 45 examples of class 1. Adapted from Figure 4.8 of [Mar18] . Figure(s) generated by [logreg_iris_bayes_2d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_iris_bayes_2d_pymc3.py) ###Code
interactive_script("logreg_iris_bayes_2d_pymc3.py")
###Output _____no_output_____
###Markdown Figure 10.17: Quadratic lower bounds on the sigmoid (logistic) function. In solid red, we plot $\sigma (x)$ vs $x$. In dotted blue, we plot the lower bound $L(x, \boldsymbol \xi )$ vs $x$ for $\boldsymbol \xi =2.5$. (a) JJ bound. This is tight at $\boldsymbol \xi = \pm 2.5$. (b) Bohning bound (\cref sec:bohningBinary ). This is tight at $\boldsymbol \xi =2.5$. Figure(s) generated by [sigmoidLowerBounds.m](https://github.com/probml/pmtk3/blob/master/demos/sigmoidLowerBounds.m) ###Code
!octave -W sigmoidLowerBounds.m >> _
###Output _____no_output_____
###Markdown Figure 10.18: A dynamic logistic regression model. $\mathbf w _t$ are the regression weights at time $t$, and $a_t = \mathbf w _t^\top \mathbf x _t$. ###Code
show_image("/content/pyprobml/notebooks/figures/images/dynamicLogregA.png")
###Output _____no_output_____
###Markdown Figure 10.19: (a) Data for logistic regression question. (b) Plot of $ w _k$ vs amount of correlation $c_k$ for three different estimators. ###Code
show_image("/content/pyprobml/notebooks/figures/images/logregQ1b.png")
show_image("/content/pyprobml/notebooks/figures/images/ridgeLassoOLS.png")
###Output _____no_output_____
###Markdown Figure 10.1: (a) Visualization of a 2d plane in a 3d space with surface normal $\mathbf w $ going through point $\mathbf x _0=(x_0,y_0,z_0)$. See text for details. (b) Visualization of optimal linear decision boundary induced by logistic regression on a 2-class, 2-feature version of the iris dataset. Figure(s) generated by [iris_logreg.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_logreg.py) ###Code
#@title Setup { display-mode: "form" }
%%time
# If you run this for the first time it would take ~25/30 seconds
!git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null
!pip3 install nbimporter -qqq
%cd -q /content/colab_powertoys
from colab_powertoys.probml_toys import probml_toys as pmlt
%cd -q /content/
pmlt.show_and_run("/pyprobml/scripts/iris_logreg.py")
###Output _____no_output_____
###Markdown Figure 10.2: Plots of $\sigma (w_1 x_1 + w_2 x_2)$. Here $\mathbf w = (w_1,w_2)$ defines the normal to the decision boundary. Points to the right of this have $\sigma (\mathbf w ^\top \mathbf x )>0.5$, and points to the left have $\sigma (\mathbf w ^\top \mathbf x ) < 0.5$. Adapted from Figure 39.3 of [Mac03] .
Figure(s) generated by [sigmoid_2d_plot.py](https://github.com/probml/pyprobml/blob/master/scripts/sigmoid_2d_plot.py) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_and_run("/pyprobml/scripts/sigmoid_2d_plot.py") ###Output _____no_output_____ ###Markdown Figure 10.3: Illustration of how we can transform a quadratic decision boundary into a linear one by transforming the features from $\mathbf x =(x_1,x_2)$ to $\boldsymbol \phi (\mathbf x )=(x_1^2,x_2^2)$. Used with kind permission of Jean-Philippe Vert ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/kernelTrickQuadratic.png") ###Output _____no_output_____ ###Markdown Figure 10.4: Polynomial feature expansion applied to a two-class, two-dimensional logistic regression problem. (a) Degree $K=1$. (a) Degree $K=2$. (a) Degree $K=4$. (d) Train and test error vs degree. Figure(s) generated by [logreg_poly_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_poly_demo.py) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_and_run("/pyprobml/scripts/logreg_poly_demo.py") ###Output _____no_output_____ ###Markdown Figure 10.5: NLL loss surface for binary logistic regression applied to Iris dataset with 1 feature and 1 bias term. The goal is to minimize the function. Figure(s) generated by [iris_logreg_loss_surface.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_logreg_loss_surface.py) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_and_run("/pyprobml/scripts/iris_logreg_loss_surface.py") ###Output _____no_output_____ ###Markdown Figure 10.6: Weight decay with variance $C$ applied to two-class, two-dimensional logistic regression problem with a degree 4 polynomial. (a) $C=1$. (a) $C=316$. (a) $C=100,000$. (d) Train and test error vs $C$. 
Figure(s) generated by [logreg_poly_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_poly_demo.py) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_and_run("/pyprobml/scripts/logreg_poly_demo.py") ###Output _____no_output_____ ###Markdown Figure 10.7: Example of 3-class logistic regression with 2d inputs. (a) Original features. (b) Quadratic features. Figure(s) generated by [logreg_multiclass_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_multiclass_demo.py) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_and_run("/pyprobml/scripts/logreg_multiclass_demo.py") ###Output _____no_output_____ ###Markdown Figure 10.8: A simple example of a label hierarchy. Nodes within the same ellipse have a mutual exclusion relationship between them. ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/labelTree.png") ###Output _____no_output_____ ###Markdown Figure 10.9: A flat and hierarchical softmax model $p(y|x)$, where $x$ are the input features (context) and $y$ is the output label. From https://www.quora.com/What-is-hierarchical-softmax ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/softmaxFlat.png") pmlt.show_image("/pyprobml/notebooks/figures/images/softmaxHier.png") ###Output _____no_output_____ ###Markdown Figure 10.10: Example of a term-document matrix, where raw counts have been replaced by their TF-IDF values (see \cref sec:tfidf ). Darker cells are larger values. From https://bit.ly/2kByLQI . Used with kind permission of Christoph Carl Kling. 
###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/LSAorig.png") ###Output _____no_output_____ ###Markdown Figure 10.11: (a) Logistic regression on some data with outliers (denoted by x). Training points have been (vertically) jittered to avoid overlapping too much. Vertical line is the decision boundary, and its posterior credible interval. (b) Same as (a) but using robust model, with a mixture likelihood. Adapted from Figure 4.13 of [Mar18] . Figure(s) generated by [logreg_iris_bayes_robust_1d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_iris_bayes_robust_1d_pymc3.py) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_and_run("/pyprobml/scripts/logreg_iris_bayes_robust_1d_pymc3.py") ###Output _____no_output_____ ###Markdown Figure 10.12: (a) Illustration of logistic and tempered logistic loss with $t_1=0.8$. (b) Illustration of sigmoid and tempered sigmoid transfer function with $t_2=2.0$. From https://ai.googleblog.com/2019/08/bi-tempered-logistic-loss-for-training.html . Used with kind permission of Ehsan Amid. ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/binary_loss.png") pmlt.show_image("/pyprobml/notebooks/figures/images/binary_transfer_function.png") ###Output _____no_output_____ ###Markdown Figure 10.13: Illustration of standard and bi-tempered logistic regression on data with label noise. From https://ai.googleblog.com/2019/08/bi-tempered-logistic-loss-for-training.html . Used with kind permission of Ehsan Amid. ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/bi_tempered_blog.png") ###Output _____no_output_____ ###Markdown Figure 10.14: (a) Illustration of the data. (b) Log-likelihood for a logistic regression model. The line is drawn from the origin in the direction of the MLE (which is at infinity). The numbers correspond to 4 points in parameter space, corresponding to the lines in (a). (c) Unnormalized log posterior (assuming vague spherical prior). (d) Laplace approximation to posterior. 
Adapted from a figure by Mark Girolami. Figure(s) generated by [logregLaplaceGirolamiDemo.m](https://github.com/probml/pmtk3/blob/master/demos/logregLaplaceGirolamiDemo.m) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/logregLaplaceGirolamiData.png") pmlt.show_image("/pyprobml/notebooks/figures/images/logregLaplaceGirolamiNLL.png") pmlt.show_image("/pyprobml/notebooks/figures/images/logregLaplaceGirolamiPost.png") pmlt.show_image("/pyprobml/notebooks/figures/images/logregLaplaceGirolamiPostLaplace.png") ###Output _____no_output_____ ###Markdown Figure 10.15: Posterior predictive distribution for a logistic regression model in 2d. Top left: contours of $p(y=1|\mathbf x , \mathbf w _ map )$. Top right: samples from the posterior predictive distribution. Bottom left: Averaging over these samples. Bottom right: moderated output (probit approximation). Adapted from a figure by Mark Girolami. Figure(s) generated by [logregLaplaceGirolamiDemo.m](https://github.com/probml/pmtk3/blob/master/demos/logregLaplaceGirolamiDemo.m) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/logregLaplaceGirolamiPlugin.png") pmlt.show_image("/pyprobml/notebooks/figures/images/logregLaplaceGirolamiSamples.png") pmlt.show_image("/pyprobml/notebooks/figures/images/logregLaplaceGirolamiMc.png") pmlt.show_image("/pyprobml/notebooks/figures/images/logregLaplaceGirolamiModerated.png") ###Output _____no_output_____ ###Markdown Figure 10.16: Illustration of the posterior over the decision boundary for classifying iris flowers (setosa vs versicolor) using 2 input features. (a) 25 examples per class. Adapted from Figure 4.5 of [Mar18] . (b) 5 examples of class 0, 45 examples of class 1. Adapted from Figure 4.8 of [Mar18] . Figure(s) generated by [logreg_iris_bayes_2d_pymc3.py](https://github.com/probml/pyprobml/blob/master/scripts/logreg_iris_bayes_2d_pymc3.py) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_and_run("/pyprobml/scripts/logreg_iris_bayes_2d_pymc3.py") ###Output _____no_output_____ ###Markdown Figure 10.17: Quadratic lower bounds on the sigmoid (logistic) function. In solid red, we plot $\sigma (x)$ vs $x$. In dotted blue, we plot the lower bound $L(x, \boldsymbol \xi )$ vs $x$ for $\boldsymbol \xi =2.5$. (a) JJ bound. This is tight at $\boldsymbol \xi = \pm 2.5$. (b) Bohning bound (\cref sec:bohningBinary . This is tight at $\boldsymbol \xi =2.5$. 
Figure(s) generated by [sigmoidLowerBounds.m](https://github.com/probml/pmtk3/blob/master/demos/sigmoidLowerBounds.m) ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/sigmoidBoundJJ.png") pmlt.show_image("/pyprobml/notebooks/figures/images/sigmoidBoundB.png") ###Output _____no_output_____ ###Markdown Figure 10.18: A dynamic logistic regression model. $\mathbf w _t$ are the regression weights at time $t$, and $a_t = \mathbf w _t^\top \mathbf x _t$. ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/dynamicLogregA.png") ###Output _____no_output_____ ###Markdown Figure 10.19: (a) Data for logistic regression question. (b) Plot of $ w _k$ vs amount of correlation $c_k$ for three different estimators. ###Code #@title Setup { display-mode: "form" } %%time # If you run this for the first time it would take ~25/30 seconds !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null && git clone https://github.com/Sekhen/colab_powertoys.git &> /dev/null !pip3 install nbimporter -qqq %cd -q /content/colab_powertoys from colab_powertoys.probml_toys import probml_toys as pmlt %cd -q /content/ pmlt.show_image("/pyprobml/notebooks/figures/images/logregQ1b.png") pmlt.show_image("/pyprobml/notebooks/figures/images/ridgeLassoOLS.png") ###Output _____no_output_____
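###Markdown The JJ bound shown in Figure 10.17 can be checked numerically. A minimal sketch (not one of the original pyprobml scripts), assuming the standard Jaakkola–Jordan form $\sigma(x) \geq \sigma(\xi)\exp\big((x-\xi)/2 - \lambda(\xi)(x^2-\xi^2)\big)$ with $\lambda(\xi)=\tanh(\xi/2)/(4\xi)$, which is quadratic in $x$ on the log scale and tight at $x=\pm\xi$: ###Code
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def jj_lower_bound(x, xi):
    # Jaakkola-Jordan lower bound on the sigmoid, tight at x = +/- xi
    lam = np.tanh(xi / 2.0) / (4.0 * xi)
    return sigmoid(xi) * np.exp((x - xi) / 2.0 - lam * (x ** 2 - xi ** 2))

xs = np.linspace(-6.0, 6.0, 121)
xi = 2.5
assert np.all(sigmoid(xs) >= jj_lower_bound(xs, xi) - 1e-12)  # bound holds everywhere
print(np.isclose(sigmoid(xi), jj_lower_bound(xi, xi)))        # tight at x = xi
###Output _____no_output_____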
examples/r1/period_search.ipynb
###Markdown Searching for period with narrowest folded profile (Rajwade et al 2020) ###Code cont_frac, p = riptide_search(all_bursts, pmin=2, pmax=2*365) plt.plot(p, cont_frac) plt.ylabel(r'Maximum continuous fraction') plt.title(f'Using {len(all_bursts)} burst MJDs') plt.grid() plt.xscale('log') cont_frac_uniq, p = riptide_search(unique_days, pmin=2, pmax=2*365) plt.plot(p, cont_frac_uniq) plt.ylabel(r'Maximum continuous fraction') plt.title(f'Using {len(unique_days)} burst MJDs from activity days') plt.xscale('log') plt.grid() ###Output _____no_output_____ ###Markdown Looks like we get a period of 156 days, which is consistent with 157±7 days reported by Rajwade et al (2020). Cool! Using P4J Using QMI based on Euclidean distance for the periodogram. See https://github.com/phuijse/P4J ###Code periodogram, _p = p4j_search(all_bursts, pmin=2, pmax=300*2, plot=True, save=False, mjd_err=0.1) periodogram, _p = p4j_search(unique_days, pmin=2, pmax=365*2, plot=True, save=False, mjd_err=0.1) ###Output _____no_output_____
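###Markdown `riptide_search` above is a project-local helper, so as a hedged illustration of the underlying idea only: fold the burst MJDs at each trial period and measure the largest burst-free stretch of phase. The sketch below assumes `all_bursts` is a 1-D array of MJDs and returns `(fraction, periods)` in the same order as the helper; the exact metric inside `riptide_search` may differ. ###Code
import numpy as np

def max_inactive_fraction(mjds, period):
    # Fold the burst epochs at the trial period and find the largest
    # contiguous (circular) fraction of phase with no bursts.
    phases = np.sort(np.mod(mjds, period) / period)
    gaps = np.diff(np.concatenate([phases, [phases[0] + 1.0]]))  # wrap around
    return gaps.max()

def simple_period_scan(mjds, pmin=2.0, pmax=730.0, n=5000):
    periods = np.linspace(pmin, pmax, n)
    frac = np.array([max_inactive_fraction(np.asarray(mjds), p) for p in periods])
    return frac, periods
###Output _____no_output_____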
notebooks/graph_preprocessing.ipynb
###Markdown Analyze heatmaps Load heatmaps ###Code import re # load heatmaps and graphs # combine both into graphic # add tsp plotter function def load_heatmaps(path, start_index, end_index): atoi = lambda text : int(text) if text.isdigit() else text natural_keys = lambda text : [atoi(c) for c in re.split('(\d+)', text)] try: fnames = [f for f in os.listdir(path) if os.path.isfile(f'{path}/{f}')] fnames.sort(key=natural_keys) except: print('\nBad heatmap directory!') return None if len(fnames) == 1: return np.load(f'{path}/{fnames[0]}') heat_maps = None for k, fname in enumerate(fnames): maps_start_index = int(fname.split('.')[0].split('-')[0].split('_')[-1]) maps_end_index = int(fname.split('.')[0].split('-')[-1]) index_diff = maps_end_index - maps_start_index if maps_end_index - 1 < start_index : continue if maps_start_index > end_index: continue cur_start_index = start_index - maps_start_index cur_end_index = np.minimum(index_diff, end_index - maps_start_index) if heat_maps is not None: heat_maps = np.concatenate([heat_maps, np.load(f'{path}/{fname}')[cur_start_index:cur_end_index]], axis=0) else: heat_maps = np.load(f'{path}/{fname}')[cur_start_index:cur_end_index] return heat_maps NUM_NODES = 10 n_graphs = 1000 path = f'./data/test_sets' heatmap_path = f'{path}/heatmaps' # folder = f'synthetic_nrange_{NUM_NODES}_{NUM_NODES}_{n_graphs}' folder = f'synthetic_n_{NUM_NODES}_{n_graphs}' start_index = 0 end_index = 10000 path = 'data/train_sets/synthetic_n_20_50000' heatmap_path = f'{path}/heatmaps' start_index = 0 end_index = 10000 path = 'data/valid_sets/synthetic_n_20_1000' heatmap_path = f'{path}/heatmaps' start_index = 0 end_index = 10000 path = 'data/test_sets/synthetic_n_50_1000' heatmap_path = f'{path}/heatmaps' start_index = 0 end_index = 10000 heat_maps = load_heatmaps(heatmap_path, start_index, end_index) tsp_loader = TSP_loader() g_list = tsp_loader.load_multi_tsp_as_nx(data_dir=f'{path}', scale_factor=0.000001, start_index=start_index, end_index=end_index) def load_tsp_solution(sol_filepath): solutions = [] for line in open(sol_filepath): solution = line.split(':')[-1].strip(' ').strip('\n').strip('[]') solution.replace(' ', '') solution = [int(node) for node in solution.split(',')] solutions.append(solution) return solutions all_solutions = load_tsp_solution(sol_filepath=f'{path}/solutions.txt') len(all_solutions) end_index = np.minimum(len(all_solutions), end_index) def prepare_heatmap(raw_heatmap): heat_map = np.maximum(raw_heatmap, raw_heatmap.transpose()) np.fill_diagonal(heat_map, 0) heat_map[heat_map < 10**(-10)] = 0.0 return heat_map num_probable_edges = [] edge_presence_rates = [] num_probable_edges_ = [] for i in range(end_index): heat_map = prepare_heatmap(heat_maps[i]) probable_edgelist = np.transpose(np.where(np.triu(heat_map) > 0)) # probable_edgelist = np.transpose(np.array(np.where(np.tril(heat_map) > 10**(-10)))) probable_edgelist = [list(edge) for edge in probable_edgelist] solution = all_solutions[i] tour_edges = list(zip(solution, solution[1:])) tour_edges.append((solution[-1], solution[0])) present_edges = 0 for edge in tour_edges: if list(edge) in probable_edgelist: present_edges += 1 elif list(edge)[::-1] in probable_edgelist: present_edges += 1 else: # print(edge) # print(probable_edgelist) pass edge_presence_rates.append(present_edges/len(tour_edges)) num_probable_edges.append(len(probable_edgelist)) num_probable_edges_.append(np.count_nonzero(heat_map)/2) print("Percentage of contained optimal edges:", np.mean(edge_presence_rates)) print("Mean number 
of probable edges per graph:", np.mean(num_probable_edges)) print("Mean number of probable edges per graph:", np.mean(num_probable_edges_)) print("Max number of probable edges:", np.max(num_probable_edges)) print("Min number of probable edges", np.min(num_probable_edges)) len(list(np.where(np.array([0,2,3,4,5,0,0,0]) == 0))) index = 20 heat_map = prepare_heatmap(heat_maps[index]) solution = all_solutions[index] print(solution) tsp_plotter = TSP_plotter() tsp_plotter.plot_nx_graph(g_list[index], solution=solution, edge_probs=heat_map, save_path='plots/edge_probability_test.png') ###Output [0, 5, 16, 4, 17, 9, 19, 13, 18, 10, 8, 3, 7, 6, 1, 14, 2, 11, 15, 12] [(0, 5), (5, 16), (16, 4), (4, 17), (17, 9), (9, 19), (19, 13), (13, 18), (18, 10), (10, 8), (8, 3), (3, 7), (7, 6), (6, 1), (1, 14), (14, 2), (2, 11), (11, 15), (15, 12), (12, 0)]
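###Markdown A natural next step (not shown in this notebook) is to turn the symmetrized heat map into a sparse candidate edge set for a TSP solver, e.g. by keeping the k most probable neighbours of every node. A minimal sketch with a hypothetical helper name, assuming `heat_map` comes from `prepare_heatmap` above: ###Code
import numpy as np

def top_k_candidate_edges(heat_map, k=5):
    # For each node keep its k highest-probability neighbours and return
    # a deduplicated list of undirected (i, j) edges with i < j.
    n = heat_map.shape[0]
    edges = set()
    for i in range(n):
        for j in np.argsort(heat_map[i])[::-1][:k]:
            if heat_map[i, j] > 0:
                edges.add((min(i, int(j)), max(i, int(j))))
    return sorted(edges)
###Output _____no_output_____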
tutorials/Beam Search and Sampling Search.ipynb
###Markdown You need to define a step function ###Code temperature = 1.0 def take_step(last_predictions, state): past = state['past'].permute(1, 2, 0, 3, 4, 5) past = [past[i] for i in range(12)] past_length = state["length"][0].item() logits, past = decoder(last_predictions.view(-1, 1), state["mask"], past=past, past_length=past_length) logits = logits.squeeze(1) / temperature log_probs = F.log_softmax(logits, dim=-1) state["mask"] = F.pad(state["mask"], (0, 1), "constant", 1.0) state["past"] = torch.stack(past, 0).permute(2, 0, 1, 3, 4, 5) state["length"] += 1 return log_probs, state tokenizer = BertTokenizer.from_pretrained("bert-base-chinese") beam_search = BeamSearch(end_index=102, max_steps=40, beam_size=4) encoder = Encoder() decoder = Decoder() encoder.eval() decoder.eval() # pretrained weights encoder.load_state_dict(torch.load("5encoder.pth")) decoder.load_state_dict(torch.load("5decoder.pth")) # load validation dataset val_data = torch.load("val_data.pth") batch_size = 1 val_dataset = TensorDataset(*val_data) val_dataloader = DataLoader(dataset=val_dataset, shuffle=False, batch_size=batch_size) device = torch.device("cuda") encoder = encoder.to(device) decoder = decoder.to(device) ###Output _____no_output_____ ###Markdown Beam Search ###Code loader = iter(val_dataloader) for i in range(530): next(loader) all_outputs = [] with torch.no_grad(): for batch in tqdm.tqdm_notebook(loader): batch = [item.to(device) for item in batch] encoder_input, \ third, \ mask_encoder_input, \ mask_third, \ encoder_type_ids, \ third_type_ids = batch _, past = encoder(encoder_input, mask_encoder_input, encoder_type_ids) state = {} start_predictions = torch.LongTensor([[101]]* batch_size).to(device) mask = torch.ones(batch_size, start_predictions.shape[1]).to(device) mask = torch.cat([mask_encoder_input.float(), mask], dim=1) state["mask"] = mask state["length"] = torch.LongTensor([[0]] * batch_size).to(device) state["past"] = torch.stack(past, 0).permute(2, 0, 1, 3, 4, 5) all_top_k_predictions, log_probabilities = beam_search.search(start_predictions, state, take_step) all_outputs.append("".join(tokenizer.convert_ids_to_tokens(all_top_k_predictions[0][0].tolist()) ).replace("##", "")) print("Generated beam-1:") print("".join(tokenizer.convert_ids_to_tokens(all_top_k_predictions[0][0].tolist()) ).replace("##", "")) print("Generated beam-2:") print("".join(tokenizer.convert_ids_to_tokens(all_top_k_predictions[0][-1].tolist()) ).replace("##", "")) print("原标题+summarization:") print("".join(tokenizer.convert_ids_to_tokens(encoder_input[0].tolist()) ).replace("##", "").replace("[PAD]", "")) print("运营:") print("".join(tokenizer.convert_ids_to_tokens(third[0].tolist()) ).replace("##", "").replace("[PAD]", "")) break all_outputs = [item.replace("[SEP]", "") for item in all_outputs] with open("results", "w") as f: for line in all_outputs: f.write(line) f.write("\n") ###Output _____no_output_____ ###Markdown Sampling Based Search ###Code loader = iter(val_dataloader) for i in range(530): batch = next(loader) batch = next(loader) batch = [item.to(device) for item in batch] length = 0 top_k = 10 total_prob = 0.0 encoder_input, \ third, \ mask_encoder_input, \ mask_third, \ encoder_type_ids, \ third_type_ids = batch _, past = encoder(encoder_input, mask_encoder_input, encoder_type_ids) start_predictions = torch.LongTensor([[101]]* batch_size).to(device) mask = torch.ones(batch_size, start_predictions.shape[1]).to(device) mask = torch.cat([mask_encoder_input.float(), mask], dim=1) logits, past = 
decoder(start_predictions, mask, past=past, past_length=0) logits = logits.squeeze(1) / 1.0 logits = top_k_logits(logits, k=top_k) sentence = [] probs = F.softmax(logits, dim=-1) prob, prev_pred = torch.topk(probs, k=1, dim=-1) sentence.append(prev_pred) length += 1 total_prob += np.log(prob.item()) for i in range(40): mask = F.pad(mask, (0, 1), "constant", 1.0) logits, past = decoder(prev_pred, mask, past=past, past_length=length) logits = logits.squeeze(1) / 1.0 logits = top_k_logits(logits, k=top_k) probs = F.softmax(logits, dim=-1) prev_pred = torch.multinomial(probs, num_samples=1) sentence.append(prev_pred) length += 1 total_prob += np.log(probs[0, prev_pred.item()].item()) sentence = torch.cat(sentence, dim=-1) print("Generated sampled") print(np.exp(total_prob)) print("".join(tokenizer.convert_ids_to_tokens(sentence[0].tolist()) ).replace("##", "")) print("原标题+summarization:") print("".join(tokenizer.convert_ids_to_tokens(encoder_input[0].tolist()) ).replace("##", "").replace("[PAD]", "")) print("运营:") print("".join(tokenizer.convert_ids_to_tokens(third[0].tolist()) ).replace("##", "").replace("[PAD]", "")) ###Output Generated sampled 1.39405121091997e-11 终于找到火箭失利的真因了!主力要被累死了,这才是最根本祸根[SEP]终结[SEP]终结[SEP]终场哨响[SEP] 原标题+summarization: [CLS]引爆火箭交易的不是争冠,而是主力要被累死了[SEP]休斯顿火箭队感恩节之前表现不错,五连胜终于打出了上赛季的火热状态,似乎新赛季一切都随着时间的推移变得好了起来。不仅被活塞队复仇,还输给了东部鱼腩骑士队,面对着矛盾丛生的奇才队也以失利告终。我们经常在比赛的开局阶段表现得很差,经常会打得非常软,从一开始就注定要输球"。[SEP] 运营: [CLS]势在必行!火箭交易只因1.6亿花得不值,这2人成全队最大障碍[SEP]
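###Markdown The sampling cells above call `top_k_logits`, which is not defined in this excerpt. A minimal sketch of the usual implementation (keep the k largest logits per row and mask the rest with `-inf`, so that softmax assigns them zero probability); the project's own helper may differ in detail: ###Code
import torch

def top_k_logits(logits, k):
    # Mask every logit outside the per-row top k with -inf
    if k == 0:
        return logits
    values, _ = torch.topk(logits, k)
    min_values = values[:, -1].unsqueeze(-1)
    return torch.where(logits < min_values,
                       torch.full_like(logits, float('-inf')),
                       logits)
###Output _____no_output_____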
site/_build/jupyter_execute/notebooks/09-deep-learning1/07-titanic-fastai.ipynb
###Markdown [![AnalyticsDojo](https://github.com/rpi-techfundamentals/fall2018-materials/blob/master/fig/final-logo.png?raw=1)](http://rpi.analyticsdojo.com)Fastai - Revisiting Titanicrpi.analyticsdojo.com Titanic Fastai ###Code from fastai import * from fastai.tabular import * import numpy as np import pandas as pd train= pd.read_csv('https://raw.githubusercontent.com/rpi-techfundamentals/fall2018-materials/master/input/train.csv') test = pd.read_csv('https://raw.githubusercontent.com/rpi-techfundamentals/fall2018-materials/master/input/test.csv') #Create a categorical variable from the family count def family(x): if x < 2: return 'Single' elif x == 2: return 'Couple' elif x <= 4: return 'InterM' else: return 'Large' for df in [train, test]: df['Title'] = df['Name'].str.split(',').str[1].str.split(' ').str[1] df['Title'] = df['Title'].replace(['Lady', 'the Countess', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona', 'Ms', 'Mme', 'Mlle'], 'Rare') df['Age']=df['Age'].fillna(df['Age'].median()) df['Fare']=df['Fare'].fillna(df['Fare'].median()) df['Embarked']=df['Embarked'].fillna('S') df['NameLength'] = df['Name'].map(lambda x: len(x)) df['FamilyS'] = df['SibSp'] + df['Parch'] + 1 df['FamilyS'] = df['FamilyS'].apply(family) train.isnull().sum(axis = 0) train.head() dep_var = 'Survived' cat_names = ['Pclass', 'Sex', 'Embarked', 'Title', 'FamilyS'] cont_names = ['Age', 'Fare', 'SibSp', 'Parch', 'NameLength'] procs = [FillMissing, Categorify, Normalize] test_data = (TabularList.from_df(test, path='.', cat_names=cat_names, cont_names=cont_names, procs=procs)) data = (TabularList.from_df(train, path='.', cat_names=cat_names, cont_names=cont_names, procs=procs) .split_by_idx(list(range(0,200))) .label_from_df(cols=dep_var) .add_test(test_data, label=0) .databunch()) #Shows the Data data.show_batch() #Define our Learner learn = tabular_learner(data, layers=[300,100], metrics=accuracy) learn.lr_find() learn.recorder.plot() #fit the learner learn.fit(7, 1e-2) #Number of epochs and the learning rate.
learn.save('final_train') #Show the results learn.show_results() #This will get predictions predictions, *_ = learn.get_preds(DatasetType.Test) labels = to_np(np.argmax(predictions, 1)) labels.shape #Writing to File submission=pd.DataFrame(test.loc[:,['PassengerId']]) submission['Survived']=labels #Any files you save will be available in the output tab below submission.to_csv('submission.csv', index=False) from google.colab import files files.download('submission.csv') ###Output _____no_output_____
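###Markdown As a side note (not in the original notebook): with the fastai v1 API used here, the one-cycle policy is a common alternative to plain `fit` after `lr_find`; a hedged sketch reusing the learning rate read off the plot above. ###Code
# Alternative training schedule (fastai v1 one-cycle policy).
# 1e-2 is taken from the lr_find plot above and may need adjusting.
learn.fit_one_cycle(7, 1e-2)
###Output _____no_output_____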
d2l/chapter_preliminaries/calculus.ipynb
###Markdown Calculus :label:`sec_calculus` 2,500 years ago, the ancient Greeks computed the area of a polygon by splitting it into triangles and summing their areas. To find the area of curved shapes, such as a circle, the ancient Greeks inscribed polygons in such shapes. As shown in :numref:`fig_circle_area`, an inscribed polygon with more sides of equal length better approximates the circle. This process is also known as the *method of exhaustion*. ![Finding the area of a circle with the method of exhaustion](../img/polygon-circle.svg) :label:`fig_circle_area` In fact, the method of exhaustion is where *integral calculus* originated; we will describe it in more detail in :numref:`sec_integral_calculus`. More than 2,000 years later, the other branch of calculus, *differential calculus*, was invented. Among the most important applications of differential calculus are optimization problems, i.e., those of doing something as well as possible. As discussed in :numref:`subsec_norms_and_objectives`, such problems are ubiquitous in deep learning. In deep learning, we *train* models, updating them successively so that they get better and better as they see more and more data. Usually, getting better means minimizing a *loss function*, a score that answers the question "how bad is our model?" This question is more subtle than it appears. Ultimately, what we really care about is producing a model that performs well on data we have never seen before. But we can only fit the model to data that we can actually see. Thus the task of fitting models can be decomposed into two key concerns: (1) *optimization*: the process of fitting our models to observed data; (2) *generalization*: the mathematical principles and practitioners' wisdom that guide us in producing models whose validity extends beyond the exact set of data examples used to train them. To help you better understand optimization problems and methods in later chapters, here we give a very brief primer on the differential calculus commonly used in deep learning. ## Derivatives and Differentiation We begin by addressing the calculation of derivatives, a crucial step in nearly all deep learning optimization algorithms. In deep learning, we typically choose loss functions that are differentiable with respect to our model's parameters. Put simply, this means that for each parameter, we can determine how rapidly the loss would increase or decrease were we to *increase* or *decrease* that parameter by an infinitesimally small amount. Suppose that we have a function $f: \mathbb{R} \rightarrow \mathbb{R}$, whose input and output are both scalars. (**The *derivative* of $f$ is defined as**) (**$$f'(x) = \lim_{h \rightarrow 0} \frac{f(x+h) - f(x)}{h},$$**) :eqlabel:`eq_derivative` if this limit exists. If $f'(a)$ exists, $f$ is said to be *differentiable* at $a$. If $f$ is differentiable at every number of an interval, then the function is differentiable on that interval. We can interpret the derivative $f'(x)$ in :eqref:`eq_derivative` as the *instantaneous* rate of change of $f(x)$ with respect to $x$. The so-called instantaneous rate of change is based on the variation $h$ in $x$, where $h$ approaches $0$. To better explain derivatives, let us experiment with an example. (**Define $u=f(x)=3x^2-4x$.**) ###Code
%matplotlib inline
import numpy as np
from IPython import display
from d2l import torch as d2l

def f(x):
    return 3 * x ** 2 - 4 * x
###Output _____no_output_____ ###Markdown [**By setting $x=1$ and letting $h$ approach $0$,**] the numerical result of (**$\frac{f(x+h)-f(x)}{h}$**) in :eqref:`eq_derivative` (**approaches $2$**). Though this experiment is not a mathematical proof, we will see later that the derivative $u'$ is indeed $2$ when $x=1$. ###Code
def numerical_lim(f, x, h):
    return (f(x + h) - f(x)) / h

h = 0.1
for i in range(5):
    print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')
    h *= 0.1
###Output h=0.10000, numerical limit=2.30000 h=0.01000, numerical limit=2.03000 h=0.00100, numerical limit=2.00300 h=0.00010, numerical limit=2.00030 h=0.00001, numerical limit=2.00003 ###Markdown Let us familiarize ourselves with a few equivalent notations for derivatives. Given $y=f(x)$, where $x$ and $y$ are the independent and dependent variables of the function $f$, respectively, the following expressions are equivalent: $$f'(x) = y' = \frac{dy}{dx} = \frac{df}{dx} = \frac{d}{dx} f(x) = Df(x) = D_x f(x),$$ where the symbols $\frac{d}{dx}$ and $D$ are *differentiation operators* that indicate the operation of *differentiation*. We can use the following rules to differentiate common functions: * $DC = 0$ ($C$ is a constant) * $Dx^n = nx^{n-1}$ (the *power rule*, $n$ is any real number) * $De^x = e^x$ * $D\ln(x) = 1/x$ To differentiate a function that is formed from a few simpler functions, such as the common functions above, the following rules come in handy. Suppose that the functions $f$ and $g$ are both differentiable and that $C$ is a constant. We have: the *constant multiple rule* $$\frac{d}{dx} [Cf(x)] = C \frac{d}{dx} f(x),$$ the *sum rule* $$\frac{d}{dx} [f(x) + g(x)] = \frac{d}{dx} f(x) + \frac{d}{dx} g(x),$$ the *product rule* $$\frac{d}{dx} [f(x)g(x)] = f(x) \frac{d}{dx} [g(x)] + g(x) \frac{d}{dx} [f(x)],$$ and the *quotient rule* $$\frac{d}{dx} \left[\frac{f(x)}{g(x)}\right] = \frac{g(x) \frac{d}{dx} [f(x)] - f(x) \frac{d}{dx} [g(x)]}{[g(x)]^2}.$$ Now we can apply a few of the above rules to find $u'=f'(x)=3\frac{d}{dx}x^2-4\frac{d}{dx}x=6x-4$. Thus, by setting $x=1$, we have $u'=2$: this is supported by our earlier experiment in this section, where the numerical result approached $2$. When $x=1$, this derivative is also the slope of the tangent line to the curve $u=f(x)$. [**To visualize this interpretation of derivatives,**] we will use `matplotlib`, a popular plotting library in Python. To configure properties of the figures produced by `matplotlib`, we need to (**define a few functions**). In the following, the `use_svg_display` function specifies that the `matplotlib` package should output figures in the svg format for sharper images. Note that the comment `#@save` is a special mark: the corresponding function, class, or statement is saved in the `d2l` package, so that it can be invoked later (e.g., `d2l.use_svg_display()`) without being redefined. ###Code
def use_svg_display():  #@save
    """Use the svg format to display plots in Jupyter."""
    display.set_matplotlib_formats('svg')
###Output _____no_output_____ ###Markdown We define the `set_figsize` function to specify figure sizes. Note that here we directly use `d2l.plt`, since the import statement `from matplotlib import pyplot as plt` has been marked for saving in the `d2l` package in the preface. ###Code
def set_figsize(figsize=(3.5, 2.5)):  #@save
    """Set the figure size for matplotlib."""
    use_svg_display()
    d2l.plt.rcParams['figure.figsize'] = figsize
###Output _____no_output_____ ###Markdown The `set_axes` function below sets properties of the axes of figures produced by `matplotlib`. ###Code
#@save
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    """Set the axes for matplotlib."""
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()
###Output _____no_output_____ ###Markdown With these three functions for figure configuration, we define the `plot` function to overlay multiple curves succinctly, since we will need to visualize many curves throughout the book. ###Code
#@save
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
         ylim=None, xscale='linear', yscale='linear',
         fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
    """Plot data points."""
    if legend is None:
        legend = []

    set_figsize(figsize)
    axes = axes if axes else d2l.plt.gca()

    # Return True if `X` has one axis
    def has_one_axis(X):
        return (hasattr(X, "ndim") and X.ndim == 1 or
                isinstance(X, list) and not hasattr(X[0], "__len__"))

    if has_one_axis(X):
        X = [X]
    if Y is None:
        X, Y = [[]] * len(X), X
    elif has_one_axis(Y):
        Y = [Y]
    if len(X) != len(Y):
        X = X * len(Y)
    axes.cla()
    for x, y, fmt in zip(X, Y, fmts):
        if len(x):
            axes.plot(x, y, fmt)
        else:
            axes.plot(y, fmt)
    set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
###Output _____no_output_____ ###Markdown Now we can [**plot the function $u=f(x)$ and its tangent line $y=2x-3$ at $x=1$**], where the coefficient $2$ is the slope of the tangent line. ###Code
x = np.arange(0, 3, 0.1)
plot(x, [f(x), 2 * x - 3], 'x', 'f(x)', legend=['f(x)', 'Tangent line (x=1)'])
###Output _____no_output_____
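###Markdown As a quick aside (not part of the original chapter): the forward difference above has error $O(h)$, while the central difference $\frac{f(x+h)-f(x-h)}{2h}$ has error $O(h^2)$; for the quadratic $f$ defined above it is in fact exact for every $h$, which is easy to verify numerically. ###Code
def central_lim(f, x, h):
    # Central difference quotient; for a quadratic f the h**2 terms cancel,
    # so this returns the exact derivative (here 6x - 4 = 2 at x = 1) for any h
    return (f(x + h) - f(x - h)) / (2 * h)

h = 0.1
for i in range(5):
    print(f'h={h:.5f}, central difference={central_lim(f, 1, h):.5f}')
    h *= 0.1
###Output _____no_output_____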
ds/Homework_2.ipynb
###Markdown Homework #2 Pandas This homework has no real problems as such — just a few simple exercises to get comfortable with the essential pandas library. #1 Import pandas as pd and read the file TSLA.csv (https://www.kaggle.com/efekurdoglu/teslastockprice/data) as a dataframe. #2 From the Close column, remove the values whose integer parts are divisible by 6. #3 Now try filling the resulting gaps with the column mean, then with the minimum, then with the median. #4 Now try filling these gaps with the average of two numbers: the mean of all values divisible by 5 and the mean of all values divisible by 7 (i.e., select all values divisible by 5 and compute their mean, then select all values divisible by 7 and compute their mean, and then average the two results). #5 Import matplotlib.pyplot as plt. Plot the resulting sets of values. To see them better, find out how to set the figure size in pyplot and make it larger. Evaluate the results and decide which variant seems the most reasonable. #6 Convert the High column to the string type. Find all rows that contain the digit 9. *Hint*: google pandas.str #7 Now find the rows in this column that contain the digit 4 but do not contain the digit 8. #8 Finally, in the High column (keep it as the string type), find the rows whose fractional part contains the digit 3 and replace it with 4. *Hint*: the split method may help. Then replace these values in the dataframe with some word. For example, "shovel". #9 Now you will learn how to normalize data. But first, let me explain why. Machine learning algorithms generally work better or converge faster when the various features (variables) are on a smaller scale. That is why data are usually normalized before machine learning models are trained on them. Normalization also makes the training process less sensitive to the scale of the features, which leads to better coefficients after training. This process of making features more suitable for training by rescaling them is called feature scaling. Here is a formula that is often used to normalize the values of a single column: $$ x_{norm} = \frac{x - x_{min}}{x_{max} - x_{min}}, $$ where, naturally, $x$ is the value in a particular row of the chosen column, $x_{max}$ is the maximum value of that column, and $x_{min}$ is the minimum value of that column. Apply this formula to the Open column (use the apply method). #10 And now, finally for real, create an empty dataframe with the same columns as TSLA. Let it have 10 rows, filled as follows: the i-th row of the j-th column should contain the j-th power of the number i+j. Append this dataframe to the end of the original one. Use the tail method to see the end of the resulting dataframe and rejoice that everything worked out and you are ready to process real dataframes and do some data science. ###Code ###Output _____no_output_____
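###Markdown A tiny illustration of the min–max formula from exercise #9 on a toy series (not the TSLA solution itself — the data here are made up): ###Code
import pandas as pd

s = pd.Series([10.0, 14.0, 22.0, 18.0])                 # toy data, not TSLA
s_norm = s.apply(lambda x: (x - s.min()) / (s.max() - s.min()))
print(s_norm.tolist())                                  # [0.0, 0.333..., 1.0, 0.666...]
###Output _____no_output_____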
COVID19-DataAnalysisForBrazil.ipynb
###Markdown Analysis of COVID-19 Infections and Death Data (Brazil) Project creation date: May 6, 2020 Dataset from Ministry of Health: https://covid.saude.gov.br Project author: Ricardo Szczerbacki ([email protected]) Project on Github: https://github.com/ricardocopa/Covid19 License: MIT License Project ObjectivesThis project does an Exploratory Data Analysis (and some simple predictions) of the dataset made available by the Brazilian Ministry of Health, which contains daily numbers of infections and deaths for the Brazilian states. The idea was to gain more insight into the COVID-19 situation in Brazil and Rio de Janeiro, where the author lives.Also, simple plots for comparison with other countries were made, using data available in **Our World in Data** (https://covid.ourworldindata.org/data/owid-covid-data.csv)This website was automatically updated (without human intervention) daily until the day before December 10, 2021, when a hacker attack disabled many Brazilian government portals, including the one used as the source for this portal. ###Code import math import numpy as np import pandas as pd from IPython.display import HTML, Markdown from datetime import datetime, timedelta import matplotlib.pyplot as plt import matplotlib.dates as mdates import seaborn as sns #from zipfile import ZipFile #from rarfile import RarFile from pyunpack import Archive import glob import os import sys import time import subprocess import locale locale.setlocale(locale.LC_ALL, 'en_us.utf-8') import warnings warnings.filterwarnings('ignore') display(HTML(''' <link rel="stylesheet" href="https://cdn.jupyter.org/notebook/5.1.0/style/style.min.css"> <script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide(); } else { $('div.input').show(); } code_show = !code_show } $( document ).ready(code_toggle); </script> Python source code is hidden by default for better visualization.
To toggle source code visualization click <a href="javascript:code_toggle()">here</a>.''')) ###Output _____no_output_____ ###Markdown DatasetFile __HIST_PAINEL_COVIDBR.zip__ or __HIST_PAINEL_COVIDBR.rar__ (a compressed CSV file) downloaded from the Brazilian Ministry of Health ###Code files = glob.glob("HIST_PAINEL_COVIDBR.*") datasetFileName = files[0] html_code = ''' You can download the dataset file originally used by clicking <a href="http://covid.rj1.info/{}">here</a> '''.format(datasetFileName) display(HTML(html_code)) ###Output _____no_output_____ ###Markdown File contents (first 5 rows): ###Code ## how files were made available until May 10, 2020 # covid = pd.read_csv('arquivo_geral.csv', delimiter = ';') # how files were made available until October (or until the excel files exceed the max rows limit) #covidFull = pd.read_excel(datasetFileName) #covidFull = pd.read_csv(datasetFileName, parse_dates = True, delimiter = ';') dataframes = [] for f in glob.glob('extractedFiles/*'): os.remove(f) Archive(datasetFileName).extractall('extractedFiles/') files = glob.glob("extractedFiles/*") for filename in files: df = pd.read_csv(filename, parse_dates = True, delimiter = ';') dataframes.append(df) covidFull = pd.concat(dataframes, axis=0, ignore_index=True) print('Columns: Region, State, City, State Code, City Code, Health Region Code, Health Region Name, Date, Week, Population 2019, Total Infections, New Infections, Total Deaths, New Deaths, Total Recovered, New Patients') display(covidFull.head()) # converting to the old file format covid = covidFull[covidFull.estado.notnull()] covid = covid[covid.codmun.isnull()] covid = covid[['regiao', 'estado', 'data', 'casosNovos', 'casosAcumulado', 'obitosNovos', 'obitosAcumulado']] covid.columns = ['regiao', 'estado', 'data', 'casosNovos', 'casosAcumulados', 'obitosNovos', 'obitosAcumulados'] # preparing a dataframe for the Rio de Janeiro city covidRioDeJaneiro = covidFull.loc[covidFull['municipio'] == 'Rio de Janeiro'] covidRioDeJaneiro = covidRioDeJaneiro[['regiao', 'estado', 'data', 'casosNovos', 'casosAcumulado', 'obitosNovos', 'obitosAcumulado']] covidRioDeJaneiro.columns = ['regiao', 'estado', 'data', 'casosNovos', 'casosAcumulados', 'obitosNovos', 'obitosAcumulados'] # preparing a dataframe for the Mangaratiba city covidMangaratiba = covidFull.loc[covidFull['municipio'] == 'Mangaratiba'] covidMangaratiba = covidMangaratiba[['regiao', 'estado', 'data', 'casosNovos', 'casosAcumulado', 'obitosNovos', 'obitosAcumulado']] covidMangaratiba.columns = ['regiao', 'estado', 'data', 'casosNovos', 'casosAcumulados', 'obitosNovos', 'obitosAcumulados'] firstDateData = datetime.strptime(covid['data'].min(), '%Y-%m-%d').strftime('%d/%m/%Y') lastDayData = datetime.strptime(covid['data'].max(), '%Y-%m-%d').strftime('%d/%m/%Y') numberOfDaysData = covid['data'].nunique() covidMaxDeaths = covid.sort_values('obitosNovos', ascending = False).iloc[0] maxDeathsValue = covidMaxDeaths['obitosNovos'] maxDeathsState = covidMaxDeaths['estado'] maxDeathsDay = datetime.strptime(covidMaxDeaths['data'], '%Y-%m-%d').strftime('%d/%m/%Y') covidRJ = covid.loc[covid['estado'] == 'RJ'] covidMaxDeathsRJ = covidRJ.sort_values('obitosNovos', ascending = False).iloc[0] maxDeathsRJValue = covidMaxDeathsRJ['obitosNovos'] maxDeathsRJDay = datetime.strptime(covidMaxDeathsRJ['data'], '%Y-%m-%d').strftime('%d/%m/%Y') covidMaxDeathsBR = covid.groupby('data')['obitosNovos'].sum().reset_index(name ='obitosNovos') covidMaxDeathsBR = covidMaxDeathsBR.sort_values('obitosNovos', ascending = 
False).iloc[0] maxDeathsValueBR = covidMaxDeathsBR['obitosNovos'] maxDeathsDayBR = datetime.strptime(covidMaxDeathsBR['data'], '%Y-%m-%d').strftime('%d/%m/%Y') html_code = ''' <div style=" background-color:LightGoldenRodYellow; border-style: solid; padding-top: 10px; padding-right: 10px; padding-bottom: 20px; padding-left: 10px;"> <img style="float: right;" width=30px, height=30px src="info2.png"> <h2> Some General Information About the Dataset </h2> <p style="font-size:18px"> The file has data from <b>{}</b> to <b>{}</b></p> <p style="font-size:18px"> <b>{}</b> was the state with the greatest number of deaths in one day. There were <b>{:n}</b> deaths on <b>{}</b>. </p> <p style="font-size:18px"> The day with the greatest number of deaths in <b>RJ</b> happened on <b>{}</b>, with a total of <b>{:n}</b> deaths registered. </p> <p style="font-size:18px"> The day with the greatest number of deaths all over Brazil happened on <b>{}</b>, with a total of <b>{:n}</b> deaths registered. </p> <p style="font-size:18px"> <b>PS</b>: All dates on this page are displayed in the format: DD/MM/YYYY</p> </div> '''.format(firstDateData, lastDayData, maxDeathsState, maxDeathsValue, maxDeathsDay, maxDeathsRJDay, maxDeathsRJValue, maxDeathsDayBR, maxDeathsValueBR) display(HTML(html_code)) # Creating normalized columns for infections and deaths by dividing them by the population multiplied by a million (cases per million people). Source for Brazilian population data: IBGE population estimation for 2019 # https://pt.wikipedia.org/wiki/Lista_de_unidades_federativas_do_Brasil_por_popula%C3%A7%C3%A3o data = {'state': ['AC','AL','AP','AM','BA','CE','DF', 'ES','GO','MA','MT','MS','MG','PA', 'PB','PR','PE','PI','RJ','RN','RS', 'RO','RR','SC','SP','SE','TO'], 'population': [881935, 3337357, 845731, 4144597, 14873064, 9132078, 3015268, 4018650, 7018354, 7075181, 3484466, 2778986, 21168791, 8602865, 4018127, 11433957, 9557071, 3273227, 17264943, 3506853, 11377239, 1777225, 605761, 7164788, 45919049, 2298696, 1572866] } populationByState = pd.DataFrame (data, columns = ['state','population']) covid = covid.merge(populationByState, left_on='estado', right_on='state') covid['normTotalInfections'] = covid['casosAcumulados']/covid['population'] * 1000000. covid['normTotalDeaths'] = covid['obitosAcumulados']/covid['population'] * 1000000. lastDay = covid['data'].max() lastDayFormatted = datetime.strptime(lastDay, '%Y-%m-%d').strftime('%d/%m/%Y') covidLastDay = covid.loc[covid['data'] == lastDay] infectionsBR = covidLastDay.sum()['casosAcumulados'] deathsBR = covidLastDay.sum()['obitosAcumulados'] infectionsLastDayBR = covidLastDay.sum()['casosNovos'] deathsLastDayBR = covidLastDay.sum()['obitosNovos'] covidRJ = covidLastDay.loc[covid['estado'] == 'RJ'] infectionsLastDayRJ = covidRJ.sum()['casosNovos'] deathsLastDayRJ = covidRJ.sum()['obitosNovos'] infectionsRJ = covidRJ.sum()['casosAcumulados'] deathsRJ = covidRJ.sum()['obitosAcumulados'] html_code = ''' <div style=" background-color:LightGoldenRodYellow; border-style: solid; padding-top: 10px; padding-right: 10px; padding-bottom: 20px; padding-left: 10px;"> <img style="float: right;" width=30px, height=30px src="info2.png"> <h2> Numbers for the Last 24 Hours (last day of the dataset) </h2> <p style="font-size:18px"> <b>{}</b> is the last day in the dataset.</p> <p> <p style="font-size:18px"> <b>{:,.0f} infections</b> were confirmed in <b>Brazil</b> on this day, of <b>{:,.0f} total infections</b>. 
</p> <p style="font-size:18px"> <b>{:,.0f} deaths</b> were confirmed in <b>Brazil</b> on this day, of <b>{:,.0f} total deaths</b>. </p> <p> <p style="font-size:18px"> <b>{:,.0f} infections</b> were confirmed in <b>RJ</b> on this day, of <b>{:,.0f} total infections</b>. </p> <p style="font-size:18px"> <b>{:,.0f} deaths</b> were confirmed in <b>RJ</b> on this day, of <b>{:,.0f} total deaths</b>. </p> </div> '''.format(lastDayFormatted, infectionsLastDayBR, infectionsBR, deathsLastDayBR, deathsBR, infectionsLastDayRJ, infectionsRJ, deathsLastDayRJ, deathsRJ) display(HTML(html_code)) ###Output _____no_output_____ ###Markdown Last Known Status for Some CountriesUsing the last information downloaded from **Our World in Data** (https://covid.ourworldindata.org/data/owid-covid-data.csv), I list below the 3 countries in the worst situation (deaths per million people), the 3 countries in the best situation and 3 other countries of interest (Brazil, France and Israel), showing their position in the world ranking in the first column. OBS: only countries with death rates greater than zero are included in the 2 rankings below. ###Code worldCovid = pd.read_csv('https://covid.ourworldindata.org/data/owid-covid-data.csv', parse_dates = True, delimiter = ',') worldCovid = worldCovid.loc[~worldCovid['location'].isin(['Africa', 'Asia', 'Europe', 'European Union', 'International', 'North America', 'Oceania', 'South America', 'World'])] lastDate = worldCovid['date'].max() # Ignore the last day with data (usually faulty) lastDate = (datetime.strptime(lastDate, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d') formattedLastDate = datetime.strptime(lastDate, '%Y-%m-%d').strftime('%B %d, %Y').replace(' 0', ' ') worldCovid = worldCovid.loc[worldCovid['date'] == lastDate] worldCovidTotal = worldCovid.copy() worldCovidTotal = worldCovid[['location', 'total_deaths_per_million']] worldCovidTotal.columns = ['Country', 'Deaths/MM inhab.'] worldCovidTotal.sort_values('Deaths/MM inhab.', ascending=False, inplace=True) worldCovidTotal.reset_index(inplace = True) worldCovidTotal['Ranking'] = worldCovidTotal.index+1 worldCovidTotal = worldCovidTotal.fillna(0) worldCovidTotal = worldCovidTotal.loc[worldCovidTotal['Deaths/MM inhab.'] > 0] worldCovidTotal = worldCovidTotal[['Ranking', 'Country', 'Deaths/MM inhab.']] # Get the 3 best and the 3 worst status countries worst1StatusCountry = worldCovidTotal.iloc[0]['Country'] worst2StatusCountry = worldCovidTotal.iloc[1]['Country'] worst3StatusCountry = worldCovidTotal.iloc[2]['Country'] best1StatusCountry = worldCovidTotal.iloc[-1]['Country'] best2StatusCountry = worldCovidTotal.iloc[-2]['Country'] best3StatusCountry = worldCovidTotal.iloc[-3]['Country'] # Filter only the desired countries (3 best, 3 worst, Brazil, France and Israel) covidByCountrySelected = worldCovidTotal.loc[worldCovidTotal['Country'].isin([worst1StatusCountry, worst2StatusCountry, worst3StatusCountry, best1StatusCountry, best2StatusCountry, best3StatusCountry, 'Brazil', 'France', 'Israel'])] display(Markdown('**Cumulative death rates (registered deaths from the beginning)** ')) display(HTML(covidByCountrySelected.to_html(index=False))) display(Markdown('*Information updated on ' + formattedLastDate)) # Plot the distribution of deaths/MM inhab. 
per country showing the countries of interest (Brazil, France and Israel) worldCovidTotal = worldCovidTotal[['Country', 'Deaths/MM inhab.']] covidByCountryBrazil = worldCovidTotal.loc[worldCovidTotal['Country'] == 'Brazil'] covidByCountryFrance = worldCovidTotal.loc[worldCovidTotal['Country'] == 'France'] covidByCountryIsrael = worldCovidTotal.loc[worldCovidTotal['Country'] == 'Israel'] axis = worldCovidTotal.plot() axis.axes.get_xaxis().set_visible(False) covidByCountryBrazil.plot(ax=axis, linestyle='',marker='o', markersize=12, color='green') covidByCountryFrance.plot(ax=axis, linestyle='',marker='o', markersize=12, color='red') covidByCountryIsrael.plot(ax=axis, linestyle='',marker='o', markersize=12, color='blue') axis.legend(['Deaths/MM inhab.', 'Brazil', 'France', 'Israel']) plt.show() worldCovidCurrent = worldCovid.copy() worldCovidCurrent = worldCovid[['location', 'new_deaths_smoothed_per_million']] worldCovidCurrent.columns = ['Country', 'Deaths/MM inhab.'] worldCovidCurrent.sort_values('Deaths/MM inhab.', ascending=False, inplace=True) worldCovidCurrent.reset_index(inplace = True) worldCovidCurrent['Ranking'] = worldCovidCurrent.index+1 worldCovidCurrent = worldCovidCurrent.fillna(0) worldCovidCurrent = worldCovidCurrent.loc[worldCovidCurrent['Deaths/MM inhab.'] > 0] worldCovidCurrent = worldCovidCurrent[['Ranking', 'Country', 'Deaths/MM inhab.']] # Get the 3 best and the 3 worst status countries worst1StatusCountry = worldCovidCurrent.iloc[0]['Country'] worst2StatusCountry = worldCovidCurrent.iloc[1]['Country'] worst3StatusCountry = worldCovidCurrent.iloc[2]['Country'] best1StatusCountry = worldCovidCurrent.iloc[-1]['Country'] best2StatusCountry = worldCovidCurrent.iloc[-2]['Country'] best3StatusCountry = worldCovidCurrent.iloc[-3]['Country'] # Filter only the desired countries (3 best, 3 worst, Brazil, France and Israel) covidByCountrySelected = worldCovidCurrent.loc[worldCovidCurrent['Country'].isin([worst1StatusCountry, worst2StatusCountry, worst3StatusCountry, best1StatusCountry, best2StatusCountry, best3StatusCountry, 'Brazil', 'France', 'Israel'])] display(Markdown('**Current death rates (average from the last 7 days)** ')) display(HTML(covidByCountrySelected.to_html(index=False))) display(Markdown('*Information updated on ' + formattedLastDate)) # Plot the distribution of deaths/MM inhab. 
per country showing the countries of interest (Brazil, France and Israel) worldCovidCurrent = worldCovidCurrent[['Country', 'Deaths/MM inhab.']] covidByCountryBrazil = worldCovidCurrent.loc[worldCovidCurrent['Country'] == 'Brazil'] covidByCountryFrance = worldCovidCurrent.loc[worldCovidCurrent['Country'] == 'France'] covidByCountryIsrael = worldCovidCurrent.loc[worldCovidCurrent['Country'] == 'Israel'] axis = worldCovidCurrent.plot() axis.axes.get_xaxis().set_visible(False) covidByCountryBrazil.plot(ax=axis, linestyle='',marker='o', markersize=12, color='green') covidByCountryFrance.plot(ax=axis, linestyle='',marker='o', markersize=12, color='red') covidByCountryIsrael.plot(ax=axis, linestyle='',marker='o', markersize=12, color='blue') axis.legend(['Deaths/MM inhab.', 'Brazil', 'France', 'Israel']) plt.show() covidLastDay = covidLastDay[['estado','casosAcumulados', 'obitosAcumulados', 'normTotalInfections', 'normTotalDeaths']] covidLastDay.sort_values('normTotalDeaths', ascending=False, inplace=True) covidLastDay.set_index('estado', inplace=True) covidLastDay2 = covidLastDay.copy() covidLastDay2.columns = ['Infections', 'Deaths', 'Infections/MM inh.', 'Deaths/MM inh.'] html_code = ''' <div style=" background-color:LightGoldenRodYellow; border-style: solid; padding-top: 10px; padding-right: 10px; padding-bottom: 40px; padding-left: 10px;"> <img style="float: right;" width=30px, height=30px src="info2.png"> <h2> Infections and Deaths by State </h2> <p style="font-size:18px"> Sorted by the severity (deaths per million people). States in worse situation are near the top. </p> <p style="font-size:18px"> The k-means method was used for clustering the states in 3 classes of severity. </p> </div> ''' display(HTML(html_code)) from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=3, random_state=42) covidLastDay['myClass'] = kmeans.fit_predict(covidLastDay[['normTotalDeaths']]) worstClass = covidLastDay.iloc[0]['myClass'] bestClass = covidLastDay.iloc[-1]['myClass'] def highlight(s): tamanho = s.size if s.myClass == worstClass: return ['background-color: orange']*tamanho elif s.myClass != bestClass: return ['background-color: yellow']*tamanho else: return ['background-color: white']*tamanho covidLastDay.reset_index(inplace=True) covidLastDay2 = covidLastDay.copy() covidLastDay2.columns=['State', 'Infections', 'Deaths', 'Infections/MM inhab.', 'Deaths/MM inhab.', 'myClass'] display(covidLastDay2.style.apply(highlight, axis=1)) ###Output _____no_output_____ ###Markdown Comparing Infections and Deaths by StateUsing absolute values ###Code covidLastDayPie1 = covidLastDay[['estado','casosAcumulados', 'obitosAcumulados']] covidLastDayPie1.set_index('estado', inplace=True) plot = covidLastDayPie1.plot.pie(fontsize=24, subplots=True, layout=(1, 2), figsize=(30, 15), legend=None) ###Output _____no_output_____ ###Markdown Comparing Infections and Deaths per Million Inhabitans, per State ###Code covidLastDayPie2 = covidLastDay[['estado','normTotalInfections', 'normTotalDeaths']] covidLastDayPie2.set_index('estado', inplace=True) plot = covidLastDayPie2.plot.pie(fontsize=24, subplots=True, layout=(1, 2), figsize=(30, 15), legend=None) ###Output _____no_output_____ ###Markdown Plots of cumulative infections and deaths ###Code covid['data'] = pd.to_datetime(covid['data'], format='%Y-%m-%d') covidByDayBR = covid[['data', 'casosAcumulados', 'obitosAcumulados']].groupby('data').sum() covidByDayBR.reset_index(inplace=True) covidByDayBR.set_index(['data'],inplace=True) covidByDayRJ = covid.loc[covid['estado'] 
== 'RJ'][['data', 'casosAcumulados', 'obitosAcumulados', 'normTotalInfections', 'normTotalDeaths']] covidByDayRJ.reset_index(inplace=True) covidByDayRJ.set_index(['data'],inplace=True) axis = covidByDayBR.plot(legend=False, style='o-', figsize=(18, 9), y='casosAcumulados', title='Infections in Brazil') axis.xaxis.get_label().set_visible(False) axis = covidByDayBR.plot(legend=False, style='o-',figsize=(18, 9), y='obitosAcumulados', title='Deaths in Brazil') axis.xaxis.get_label().set_visible(False) axis = covidByDayRJ.plot(legend=False,style='o-',figsize=(18, 9), y='casosAcumulados', title='Infections in RJ') axis.xaxis.get_label().set_visible(False) axis = covidByDayRJ.plot(legend=False, style='o-',figsize=(18, 9), y='obitosAcumulados', title='Deaths in RJ') axis.xaxis.get_label().set_visible(False) ###Output _____no_output_____ ###Markdown Plots of new deaths for Brazil, RJ state and Rio de Janeiro cityRolling mean of the last 7 days for the new deaths by day. ###Code newCovidByDayBR = covid[['data', 'obitosNovos']].groupby('data').sum() newCovidByDayBR.reset_index(inplace=True) newCovidByDayBR.set_index(['data'],inplace=True) newCovidByDayBR['rollingNewDeaths'] = newCovidByDayBR['obitosNovos'].rolling(window=7).mean() axis = newCovidByDayBR.plot(legend=False, style='o-', figsize=(18, 9), y='rollingNewDeaths', title='New deaths in Brazil by day (rolling mean of 7 days)') axis.xaxis.get_label().set_visible(False) newCovidByDayRJ = covid.loc[covid['estado'] == 'RJ'][['data', 'obitosNovos']] newCovidByDayRJ.reset_index(inplace=True) newCovidByDayRJ.set_index(['data'],inplace=True) newCovidByDayRJ['rollingNewDeaths'] = newCovidByDayRJ['obitosNovos'].rolling(window=7).mean() axis = newCovidByDayRJ.plot(legend=False, style='o-', figsize=(18, 9), y='rollingNewDeaths', title='New deaths in RJ state by day (rolling mean of 7 days)') axis.xaxis.get_label().set_visible(False) newCovidByDayRio = covidRioDeJaneiro[['data', 'obitosNovos']] newCovidByDayRio['data'] = pd.to_datetime(newCovidByDayRio['data'], format='%Y-%m-%d') newCovidByDayRio.reset_index(inplace=True) newCovidByDayRio.set_index(['data'],inplace=True) newCovidByDayRio['rollingNewDeaths'] = newCovidByDayRio['obitosNovos'].rolling(window=7).mean() axis = newCovidByDayRio.plot(legend=False, style='o-', figsize=(18, 9), y='rollingNewDeaths', title='New deaths in Rio de Janeiro City by day (rolling mean of 7 days)') axis.xaxis.get_label().set_visible(False) newCovidByDayMangaratiba = covidMangaratiba[['data', 'obitosNovos']] newCovidByDayMangaratiba['data'] = pd.to_datetime(newCovidByDayMangaratiba['data'], format='%Y-%m-%d') newCovidByDayMangaratiba = newCovidByDayMangaratiba[(newCovidByDayMangaratiba['obitosNovos'] >= 0)] newCovidByDayMangaratiba.reset_index(inplace=True) newCovidByDayMangaratiba.set_index(['data'],inplace=True) axis = newCovidByDayMangaratiba.plot(legend=False, style='o-', markersize=5, drawstyle="steps-mid", figsize=(18, 9), y='obitosNovos', title='New deaths in Mangaratiba City by day (absolute values)') axis.xaxis.get_label().set_visible(False) plt.grid(axis='y', linestyle='-') ###Output _____no_output_____ ###Markdown Comparing tendencies of all states in the last 10 daysFor the log of the deaths data of the last 10 days for all states, I use Linear Regressions to estimate and compare how the situation is evolving. Higher coefficients means that situation is getting worse. 
###Code deathsByDayStates = covid.pivot(index='data', columns='estado', values = 'normTotalDeaths') last10Days = deathsByDayStates.tail(10) axis = last10Days.plot(logy=True, figsize=(15,10), title='Last 10 Days Deaths per MM inhab., Log Scale').legend(bbox_to_anchor=(1.1, 1.01)) plt.gca().xaxis.get_label().set_visible(False) plt.tick_params( axis='x', which='both', bottom=False, top=False, labelbottom=False) plt.show() last10Days['day'] = range(1,11) X_train = last10Days['day'] X_train = X_train.values.reshape(-1, 1) data = {'state': [], 'coefficient': [] } coef10days = pd.DataFrame (data, columns = ['state','coefficient']) from sklearn.linear_model import LinearRegression regression_model = LinearRegression() # Evaluating coefficients for col in last10Days.columns: if col != 'day': last10Days[col + 'log'] = np.log(last10Days[col]) regression_model.fit(X_train, last10Days[col + 'log']) coef10days = coef10days.append(pd.Series([col, regression_model.coef_[0]], index=coef10days.columns), ignore_index=True) html_code = ''' <div style=" background-color:LightGoldenRodYellow; border-style: solid; padding-top: 10px; padding-right: 10px; padding-bottom: 40px; padding-left: 10px;"> <img style="float: right;" width=30px, height=30px src="info2.png"> <h2> Predictions for the Near Future </h2> <p style="font-size:18px"> Using the linear regression over the log data to estimate the number of deaths in 7, 14 e 30 days. </p> </div> ''' display(HTML(html_code)) ###Output _____no_output_____ ###Markdown All States ###Code from datetime import timedelta data = {'date': [], 'state': [], 'predictedDeaths': [] } predictedDeaths = pd.DataFrame (data, columns = ['date','state', 'predictedDeaths']) more7Days = 17; more14Days = 24; more30Days = 40; dateMore7Days = (datetime.strptime(lastDay, '%Y-%m-%d') + timedelta(days=7)).strftime("%d/%m/%Y") dateMore14Days = (datetime.strptime(lastDay, '%Y-%m-%d') + timedelta(days=14)).strftime("%d/%m/%Y") dateMore30Days = (datetime.strptime(lastDay, '%Y-%m-%d') + timedelta(days=30)).strftime("%d/%m/%Y") deathsByState = covid.pivot(index='data', columns='estado', values = 'obitosAcumulados') last10Days = deathsByState.tail(10) last10Days['day'] = range(1,11) for col in last10Days.columns: if col != 'day': last10Days[col + 'log'] = np.log(last10Days[col]) regression_model.fit(X_train, last10Days[col + 'log']) xpredict = np.array([more7Days]) xpredict = xpredict.reshape(-1,1) prediction = regression_model.predict(xpredict) predictedDeaths = predictedDeaths.append(pd.Series([dateMore7Days, col, int(np.exp(prediction[0]))], index=predictedDeaths.columns), ignore_index=True) xpredict = np.array([more14Days]) xpredict = xpredict.reshape(-1,1) prediction = regression_model.predict(xpredict) predictedDeaths = predictedDeaths.append(pd.Series([dateMore14Days, col, int(np.exp(prediction[0]))], index=predictedDeaths.columns), ignore_index=True) xpredict = np.array([more30Days]) xpredict = xpredict.reshape(-1,1) prediction = regression_model.predict(xpredict) predictedDeaths = predictedDeaths.append(pd.Series([dateMore30Days, col, int(np.exp(prediction[0]))], index=predictedDeaths.columns), ignore_index=True) predictedDeaths.index.name = None predictedDeathsOut = predictedDeaths.pivot(index='state', columns='date', values = 'predictedDeaths') predictedDeathsOut = predictedDeathsOut[[dateMore7Days, dateMore14Days, dateMore30Days]] predictedDeathsBrazil = predictedDeathsOut.sum().reset_index(name='predictedDeaths') predictedDeathsBrazil = 
predictedDeathsBrazil.append(pd.Series([lastDayFormatted, deathsBR], index=predictedDeathsBrazil.columns), ignore_index=True) predictedDeathsBrazil.set_index('date', inplace=True) predictedDeathsBrazilOut = predictedDeathsBrazil.transpose() predictedDeathsBrazilOut = predictedDeathsBrazilOut[[lastDayFormatted, dateMore7Days, dateMore14Days, dateMore30Days]] predictedDeathsOut['state'] = predictedDeathsOut.index covidLastDayOut = covidLastDay[['estado', 'obitosAcumulados']][['estado', 'obitosAcumulados']] covidLastDayOut.index.name = None predictedDeathsOut.index.name = None predictedDeathsOut = predictedDeathsOut.merge(covidLastDayOut, left_on='state', right_on='estado') predictedDeathsOut = predictedDeathsOut[['estado', 'obitosAcumulados', dateMore7Days, dateMore14Days, dateMore30Days]] predictedDeathsOut.columns = ['State', lastDayFormatted, dateMore7Days, dateMore14Days, dateMore30Days] pd.options.display.float_format = '{0:g}'.format display(predictedDeathsOut) ###Output _____no_output_____ ###Markdown Brazil ###Code predictedDeathsBrazilOut.columns = [lastDayFormatted, dateMore7Days, dateMore14Days, dateMore30Days] predictedDeathsBrazilOut.reset_index(inplace=True) predictedDeathsBrazilOut.drop('index', axis=1, inplace=True) display(predictedDeathsBrazilOut) coef10days.sort_values('coefficient',ascending=False,inplace=True) coef10days.set_index('state', inplace=True) html_code = ''' <div style=" background-color:LightGoldenRodYellow; border-style: solid; padding-top: 10px; padding-right: 10px; padding-bottom: 40px; padding-left: 10px;"> <img style="float: right;" width=30px, height=30px src="info2.png"> <h2> Clustering the states in 3 classes according to the calculated coefficients </h2> <p style="font-size:18px"> A higher coefficient means a worse tendency for the deaths. </p> </div> ''' display(HTML(html_code)) coef10daysData = pd.DataFrame(coef10days) coef10daysData['state'] = coef10daysData.index coef10daysData = coef10daysData.reset_index(level=0, drop=True) coef10daysData = coef10daysData[['state', 'coefficient']] coef10daysData['myClass'] = kmeans.fit_predict(coef10daysData[['coefficient']]) worstClass = coef10daysData.iloc[0]['myClass'] bestClass = coef10daysData.iloc[-1]['myClass'] display(coef10daysData.style.apply(highlight, axis=1)) coef10days['Normalized Coefficient'] = (coef10days['coefficient']-coef10days['coefficient'].min())/(coef10days['coefficient'].max()-coef10days['coefficient'].min()) _ = coef10days.plot.pie(y='Normalized Coefficient', figsize=(10, 10), legend=False) ###Output _____no_output_____ ###Markdown R curveEstimating R, that represents the basic reproduction rate of the disease. It roughly estimates how many people are infected by one infected person after 14 days, assuming a constant death rate for the infected.It is a very simple and imprecise approach. And, specially, the values in the begining of the plot should be disregarded, because the first notifications of the infections are not representative or reliable.The plot is more of a tendency indicator, than a real value estimator. 
###Code data = {'date': [], 'value': [] } R_Brazil = pd.DataFrame (data, columns = ['date','value']) # Generation Time gt = 14 covidDeathsBR = covid.groupby('data')['obitosNovos'].sum().reset_index(name ='obitosNovos') covidDeathsBR['rollingMean'] =covidDeathsBR['obitosNovos'].rolling(window=7).mean() for day in range(covidDeathsBR.shape[0]): if day >= gt: if covidDeathsBR.at[day-gt,'rollingMean'] > 0: Rvalue = float(covidDeathsBR.at[day,'rollingMean'])/float(covidDeathsBR.at[day-gt,'rollingMean']) if Rvalue <= 4: R_Brazil = R_Brazil.append(pd.Series([covidDeathsBR.at[day, 'data'], Rvalue], index=R_Brazil.columns), ignore_index=True) R_Brazil['date'] = pd.to_datetime(R_Brazil['date'], format='%Y-%m-%d') R_Brazil.set_index('date', inplace=True) axis = R_Brazil.plot(style='o-', figsize=(18, 9), legend=False, y='value', title='R for Brazil') axis.axhline(1.0, color='gray', lw=2, alpha=0.5) axis.axhline(2.0, color='gray', lw=2, alpha=0.5) axis.axhline(3.0, color='gray', lw=2, alpha=0.5) axis.axhline(4.0, color='red', lw=2, alpha=0.5) axis.fill_between(axis.get_xlim(), 0., 1., facecolor='lightgreen', alpha=0.5) axis.xaxis.get_label().set_visible(False) plt.show() data = {'date': [], 'state': [], 'value': [] } R_States = pd.DataFrame (data, columns = ['date','state', 'value']) for state in covid.estado.unique(): covidState = covid.loc[covid['estado'] == state] covidDeathsState = covidState.groupby('data')['obitosNovos'].sum().reset_index(name ='obitosNovos') covidDeathsState['rollingMean'] =covidDeathsState['obitosNovos'].rolling(window=7).mean() for day in range(covidDeathsState.shape[0]): if day >= gt: if covidDeathsState.at[day-gt,'rollingMean'] > 0: Rvalue = float(covidDeathsState.at[day,'rollingMean'])/float(covidDeathsState.at[day-gt,'rollingMean']) R_States = R_States.append(pd.Series([covidDeathsState.at[day, 'data'], state, Rvalue], index=R_States.columns), ignore_index=True) R_States['date'] = pd.to_datetime(R_States['date'], format='%Y-%m-%d') R_StatesGroup = R_States.loc[R_States['state'].isin(['RJ', 'SP'])] R_StatesGroup = R_StatesGroup.pivot(index='date', columns='state', values = 'value') axis = R_StatesGroup.plot(style='o-', figsize=(18, 9), title='R for RJ and SP') axis.axhline(1.0, color='gray', lw=2, alpha=0.5) axis.axhline(2.0, color='gray', lw=2, alpha=0.5) axis.axhline(3.0, color='gray', lw=2, alpha=0.5) axis.axhline(4.0, color='red', lw=2, alpha=0.5) axis.fill_between(axis.get_xlim(), 0., 1., facecolor='lightgreen', alpha=0.5) plt.ylim((0,4)) axis.xaxis.get_label().set_visible(False) _ = plt.show() ###Output _____no_output_____
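###Markdown
As a side note (an addition, not part of the original analysis): the loops above build the per-state R series with row-by-row `DataFrame.append`, which is slow and deprecated in newer pandas. A vectorized sketch of the same computation, assuming the same column names, could use `groupby` with `transform` and `shift`:
###Code
# Vectorized alternative: 7-day rolling mean per state, then the ratio to
# the value gt days earlier within each state
statesDeaths = covid.groupby(['estado', 'data'])['obitosNovos'].sum().reset_index()
statesDeaths['rollingMean'] = (statesDeaths.groupby('estado')['obitosNovos']
                                           .transform(lambda s: s.rolling(window=7).mean()))
statesDeaths['R'] = (statesDeaths['rollingMean'] /
                     statesDeaths.groupby('estado')['rollingMean'].shift(gt))
###Output
_____no_output_____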
Coin Change.ipynb
###Markdown
Coin Change
You are given coins of different denominations and a total amount of money `amount`. Write a function to compute the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return `-1`.
Example 1
```
Input: coins = [1, 2, 5], amount = 11
Output: 3
Explanation: 11 = 5 + 5 + 1
```
Example 2
```
Input: coins = [2], amount = 3
Output: -1
```
Note: You may assume you have an unlimited number of coins.
###Code
from typing import List, Tuple

class Solution:
    def coin_change(self, coins: List[int], amount: int) -> Tuple[int, List[int]]:
        # Greedy attempt: always take the largest coin that still fits
        # (this is not always optimal, as the examples below will show)
        coins.sort(reverse=True)
        coins_used = []
        current_amount = 0
        last_used_coin = None
        while current_amount < amount:
            if len(coins) < 1:
                break
            coin = coins[0]
            if (current_amount + coin) <= amount:
                coins_used.append(coin)
                current_amount += coin
            else:
                coins.pop(0)
        if current_amount == amount:
            return len(coins_used), coins_used
        return -1, coins_used
###Output
_____no_output_____
###Markdown
example 3 coins = [186,419,83,408] amount = 6249 output = 20
###Code
coins = [186,419,83,408]
amount = 6249
s = Solution()
output, used_coins = s.coin_change(coins, amount)
print(output)
print(used_coins)
assert output == 20
###Output
_____no_output_____
###Markdown
Oops! It seems the last example broke the algorithm; let's try another solution.
###Code
from typing import List, Tuple
import sys

class Solution2:
    """ Let's try some backtracking (top-down recursion with memoization) """
    def __init__(self):
        self.calculated_nodes = dict()
    def coin_change(self, coins: List[int], amount: int) -> int:
        if amount < 0:
            return -1
        if amount == 0:
            return 0
        if amount in self.calculated_nodes:
            return self.calculated_nodes[amount]
        minimum_level = sys.maxsize
        for coin in coins:
            n_minimum_level = self.coin_change(coins, amount - coin)
            if n_minimum_level >= 0 and n_minimum_level < minimum_level:
                minimum_level = n_minimum_level + 1
        if minimum_level < sys.maxsize:
            self.calculated_nodes[amount] = minimum_level
            return self.calculated_nodes[amount]
        return -1

## TEST 1
coins = [10, 20, 17]
amount = 5
s = Solution2()
output = s.coin_change(coins, amount)
print(output)
assert output == -1

## TEST 2
coins = [10, 20, 17]
amount = 10
s = Solution2()
output = s.coin_change(coins, amount)
print(output)
assert output == 1

## TEST 3
coins = [1, 2, 5]
amount = 11
s = Solution2()
output = s.coin_change(coins, amount)
print(output)
print(s.calculated_nodes)
assert output == 3

## TEST 4
coins = [1, 2, 3]
amount = 4
s = Solution2()
output = s.coin_change(coins, amount)
print(output)
print(s.calculated_nodes)
assert output == 2

## TEST 5
coins = [1, 2, 3]
amount = 17
s = Solution2()
output = s.coin_change(coins, amount)
print(output)
print(s.calculated_nodes)
assert output == 6

## TEST 6
coins = [186,111]
amount = 6249
s = Solution2()
output = s.coin_change(coins, amount)
print(output)
assert output == 34
# THIS APPROACH IS VERY SLOW ):

class Solution3(object):
    def coin_change(self, coins, amount):
        """
        :type coins: List[int]
        :type amount: int
        :rtype: int
        """
        # Bottom-up DP: rs[i] holds the fewest coins that sum to i
        rs = [amount+1] * (amount+1)
        rs[0] = 0
        for i in range(1, amount+1):
            for c in coins:
                if i >= c:
                    rs[i] = min(rs[i], rs[i-c] + 1)
        if rs[amount] == amount+1:
            return -1
        return rs[amount]

## TEST 6
coins = [186,111]
amount = 6249
s = Solution3()
output = s.coin_change(coins, amount)
print(output)
assert output == 34
# THIS ONE IS INSANELY FAST. COULD I RECREATE IT RECURSIVELY?
from typing import Tuple

def traverse_levels(limit: int, step: int = 0, level: int = 0) -> Tuple[int, int, int]:
    # Small recursion warm-up: walk down `limit` levels, counting steps
    if level == limit:
        return limit, step, level
    return traverse_levels(limit, step + 1, level + 1)

print(traverse_levels(3))

from typing import List

class Solution4():
    def coinChange(self, coins: List[int], amount: int) -> int:
        # Bottom-up DP: change_arr[i] holds the fewest coins that sum to i
        change_arr = [amount + 1] * (amount + 1)
        change_arr[0] = 0
        for i in range(1, amount + 1):
            for coin in coins:
                if i >= coin:
                    change_arr[i] = min(change_arr[i], change_arr[i - coin] + 1)
        if change_arr[amount] == amount + 1:
            return -1
        return change_arr[amount]

## TEST 6
coins = [186, 111]
amount = 6249
s = Solution4()
output = s.coinChange(coins, amount)
print(output)
assert output == 34
# THIS ONE IS INSANELY FAST. COULD I RECREATE IT RECURSIVELY? (see the sketch below)
###Output
34
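###Markdown
Answering the question in the comment above (a sketch added here, not part of the original notebook): the same recurrence can be written top-down, with the memoization handled by `functools.lru_cache` instead of a hand-rolled dictionary.
###Code
from functools import lru_cache
from typing import List

def coin_change_recursive(coins: List[int], amount: int) -> int:
    # Top-down version of Solution4's recurrence: best(rem) is the fewest
    # coins summing to rem, or -1 if rem cannot be made
    @lru_cache(maxsize=None)
    def best(rem: int) -> int:
        if rem == 0:
            return 0
        if rem < 0:
            return -1
        candidates = [best(rem - coin) for coin in coins]
        reachable = [c for c in candidates if c >= 0]
        return min(reachable) + 1 if reachable else -1
    return best(amount)

print(coin_change_recursive([186, 111], 6249))  # 34, matching Solution4
###Output
_____no_output_____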
NumMeth_Khomutov_Task_9.1-2.29.ipynb
###Markdown
Task 1
![image.png](attachment:image.png) ![image.png](attachment:image.png)
Define the initial data of the problem
###Code
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import derivative
%matplotlib inline

def f(x):
    return 2*np.sin(x) - 3*np.cos(x)

A = -1
B = 1
epselon = 10**(-6)
###Output
_____no_output_____
###Markdown
Plot the function $f(x)$ on the interval under consideration:
###Code
X = np.arange(A, B, 0.01)
fig = plt.figure()
plt.plot(X, f(X))
plt.show()
###Output
_____no_output_____
###Markdown
The plot shows that the minimum of the function is reached near the point -0.5, and the maximum at the right end of the interval. If the derivative has only one root on this interval, then our assumption is correct. The derivative is $\frac{df(x)}{dx} = 2\cos(x) + 3\sin(x)$
###Code
def df(x):
    return 2*np.cos(x) + 3*np.sin(x)
###Output
_____no_output_____
###Markdown
Plot of the derivative:
###Code
fig = plt.figure()
plt.plot(X, df(X))
plt.show()
###Output
_____no_output_____
###Markdown
Our assumption turned out to be correct, so the maximum of $f(x)$ is attained at the point 1:
###Code
Maxf = f(1)
###Output
_____no_output_____
###Markdown
Define Newton's method and apply it to the derivative of $f(x)$ to find its zero (the zero of the derivative):
###Code
def NewtoonMethod(f,x,epselon):
    n = 1
    x_old = x
    x_new = x_old - f(x_old)/derivative(f,x_old,dx = epselon)
    while abs(x_new - x_old) >= epselon:
        x_old = x_new
        x_new = x_old - f(x_old)/derivative(f,x_old,dx = epselon)
        n = n + 1
    return x_new, n

xmin, n = NewtoonMethod(df,-0.5,epselon)
print(xmin,n)
Minf = f(xmin)
print('Max:',Maxf)
print('Min:',Minf)
###Output
Max: 0.0620350520114
Min: -3.60555127546
###Markdown
Therefore,
$$\max f(x) = 0.0620350520114 \\ \min f(x) = -3.60555127546$$
Task 2
![image.png](attachment:image.png) ![image.png](attachment:image.png)
###Code
a = 0
b = 5
def g(t):
    return t*np.sin(t)
###Output
_____no_output_____
###Markdown
Plot the original function:
###Code
X = np.arange(a, b, 0.01)
fig = plt.figure()
plt.plot(X, g(X))
plt.show()
###Output
_____no_output_____
###Markdown
Function for computing Fibonacci numbers:
###Code
M = {0: 0, 1: 1}
def fib(n):
    if n in M:
        return M[n]
    M[n] = fib(n - 1) + fib(n - 2)
    return M[n]
###Output
_____no_output_____
###Markdown
Define the Fibonacci search method:
###Code
def Fibonacci(g,a,b,epselon):
    n = 1
    while (b-a)/fib(n) > epselon:
        n = n+1
    print(n)
    while n != 1:
        lamda = a + (b - a)*(fib(n-2)/fib(n))
        mu = a + (b - a)*(fib(n-1)/fib(n))
        if g(lamda) <= g(mu):
            b = mu
            x = lamda
        else:
            a = lamda
            x = mu
        n -= 1
        if n == 1:
            return((mu + lamda) / 2)
    return x
###Output
_____no_output_____
###Markdown
The Fibonacci method works for finding a minimum, so we define the function $-g(x)$:
###Code
def gminus(x):
    return -g(x)
###Output
_____no_output_____
###Markdown
Find the maximum point of the function. The plot shows that this point lies in the interval [1;3]:
###Code
xmax = Fibonacci(gminus,1,3,epselon)
xmax
###Output
32
###Markdown
Find the minimum point of the function. From the plot, this point lies in the interval [4;5]:
###Code
xmin = Fibonacci(g,4,5,epselon)
xmin
print("Max: ",g(xmax))
print("Min: ",g(xmin))
###Output
Max: 1.81970574116
Min: -4.81446988971
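###Markdown
As an analytic cross-check of both tasks (an addition, not part of the original assignment): for Task 1,
$$f(x)=2\sin(x)-3\cos(x)=\sqrt{13}\,\sin(x-\varphi),\qquad \varphi=\arctan\frac{3}{2}\approx 0.9828,$$
so on $[-1,1]$ the minimum $-\sqrt{13}\approx -3.6056$ is attained at $x=\varphi-\pi/2\approx -0.5880$, and the maximum is at the right endpoint, $f(1)=\sqrt{13}\sin(1-\varphi)\approx 0.0620$; both agree with the values computed above. For Task 2, setting $g'(t)=\sin(t)+t\cos(t)=0$ gives $\tan(t)=-t$, with roots $t\approx 2.0288$ (where $g\approx 1.8197$) and $t\approx 4.9132$ (where $g\approx -4.8145$), again matching the Fibonacci-search results.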
misc-notebooks/MR2007-final-exam-spring-2017.ipynb
###Markdown
Problem 1 (a)
Zero-order-hold sampling
###Code
import sympy as sy

s,z,h = sy.symbols('s,z,h')
sy.apart(1/(s*(s+1)*(s-1)))
num = (0.5 * (sy.exp(h) + sy.exp(-h)) - 1)*(z+1)
den = (z**2 - (sy.exp(h) + sy.exp(-h))*z + 1)
H = num/den
print(num)
H.subs(h, 0.1)
###Output
_____no_output_____
###Markdown
(b)
Backward difference approximation of the given continuous-time controller
###Code
s,z = sy.symbols('s, z')
h = sy.symbols('h', positive=True)
F = (16*s+1)/(100*s+1)
H = sy.simplify(F.subs(s, (z-1)/(z*h)))
print(H)
p1,p2,p3,p4 = sy.symbols('p1, p2, p3, p4')
sy.expand((z-0.7+sy.I*0.1)*(z-0.7-sy.I*0.1))
###Output
_____no_output_____
###Markdown
Problem 2
Calculate the pulse transfer function from the disturbance d to the output y
###Code
B = 0.6*z + 0.5
A = z*(z**2 - 1.9*z + 0.9)
S, R = sy.symbols('S, R')
H_dy = (B/A) / (1 + (B/A)*(S/R))
sy.simplify(H_dy)
H_dys = sy.simplify(H_dy)
s = sy.latex(H_dys)
print(s)
###Output
\frac{R \left(0.6 z + 0.5\right)}{R z \left(z^{2} - 1.9 z + 0.9\right) + S \left(0.6 z + 0.5\right)}
###Markdown
Problem 3
Find the diagonal form of the system in problem 2, but without the time delay
###Code
H = sy.simplify(z*B/A)
sy.apart(H)
###Output
_____no_output_____
###Markdown
Set up the state-space model. Make sure it is correct.
###Code
Phi = sy.Matrix([[0.9, 0], [0, 1]])
Gamma = sy.Matrix([[1],[1]])
Cm = sy.Matrix([[-10.4, 11.0]])
Htest = Cm*(z*sy.eye(2)-Phi).inv()*Gamma
Htest
H
###Output
_____no_output_____
###Markdown
(c) Finding the feedback gain
###Code
l1, l2 = sy.symbols('l1, l2')
L = sy.Matrix([[l1, l2]])
sy.factor((z*sy.eye(2) - (Phi - Gamma*L)).det(), z)
sy.simplify(sy.expand((z-0.6+sy.I*0.3)*(z-0.6-sy.I*0.3)))
###Output
_____no_output_____
###Markdown
Check with MATLAB
###Code
%load_ext pymatbridge
%%matlab --size 800,400
% Mean arterial pressure, automatic anesthesia model
% Plant
G = tf([1], [120 1 0]);
a = 1/100;
b = 1/160;
F0 = zpk([-b], [-a], a/b);
K = 1e-2;
F = K*F0;
Gc = feedback(G*F, 1);
step(Gc, 1000)
%%matlab --size 800,400 -o L,y,t
h = 40.0;
Phi = [0.9 0; 0 1];
Gamma = [1;1];
C = [-10.4 11.0];
D = 0;
sys = ss(Phi, Gamma, C, D, h);
L = place(Phi, Gamma, [0.6+i*0.3 0.6-i*0.3]);
sys_cl = ss(Phi-Gamma*L, Gamma, C, D, h);
[y, t] = step(sys_cl);
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure()
plt.plot(t, y)
###Output
_____no_output_____
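###Markdown
As a cross-check of the MATLAB `place` result (a sketch added here, not part of the original exam solution): for a single input, matching the closed-loop characteristic polynomial to the desired one, $z^2 - 1.2 z + 0.45$, determines the gain uniquely.
###Code
import sympy as sy

z, l1, l2 = sy.symbols('z l1 l2')
Phi = sy.Matrix([[0.9, 0], [0, 1]])
Gamma = sy.Matrix([[1], [1]])
L = sy.Matrix([[l1, l2]])
charpoly = sy.expand((z*sy.eye(2) - (Phi - Gamma*L)).det())
desired = sy.expand((z - 0.6 - sy.I*0.3)*(z - 0.6 + sy.I*0.3))
# Equate coefficients of the two monic quadratics and solve for l1, l2
diff = sy.Poly(charpoly - desired, z)
print(sy.solve(diff.coeffs(), [l1, l2]))  # expect l1 = -1.8, l2 = 2.5
###Output
_____no_output_____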
inclass/inclass3-abpwrs/1_Introduction_to_the_Insight_Toolkit.ipynb
###Markdown Introduction to the Insight Toolkit (ITK) Learning Objectives* Learn how to **run** cells in **a Jupyter Notebook*** Run a segmentation example that demonstrates **ITK**'s ability to provide **insight into images*** Understand the **purpose and capabilities** of the toolkit Jupyter Notebooks These are [Jupyter Notebooks](https://jupyter.org/), an open-source web application that allows you to create and share documents that contain live code, equations, visualizations and narrative text.To run cells in the notebook, press *shift + enter*.For more information, see the [Notebook Help](https://nbviewer.jupyter.org/github/ipython/ipython/blob/3.x/examples/Notebook/Index.ipynb) Insight Into Images ###Code import itk from itkwidgets import view file_name = 'data/brainweb165a10f17.mha' image = itk.imread(file_name, itk.ctype('float')) view(image, slicing_planes=True, gradient_opacity=0.8, ui_collapsed=True) # Smooth the image smoothed = itk.curvature_flow_image_filter(image, number_of_iterations=6, time_step=0.005) view(smoothed, slicing_planes=True, gradient_opacity=0.8, ui_collapsed=True) # Segment the white matter with a 3D region-growing algorithm confidence_connected = itk.ConfidenceConnectedImageFilter.New(smoothed) confidence_connected.SetMultiplier(2.5) confidence_connected.SetNumberOfIterations(5) confidence_connected.SetInitialNeighborhoodRadius(2) confidence_connected.SetReplaceValue(255) confidence_connected.AddSeed([118, 133, 92]) #confidence_connected.AddSeed([63, 135, 94]) #confidence_connected.AddSeed([63, 157, 90]) #confidence_connected.AddSeed([111, 150, 90]) #confidence_connected.AddSeed([111, 50, 88]) confidence_connected.Update() view(confidence_connected, ui_collapsed=True, cmap='BuPu', shadow=False, annotations=False) ###Output _____no_output_____
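###Markdown
As an optional follow-up (an addition to the original walkthrough; the output filename is illustrative), the segmentation can be written to disk with `itk.imwrite` for later use:
###Code
# Persist the white-matter label map produced by the region-growing filter
segmentation = confidence_connected.GetOutput()
itk.imwrite(segmentation, 'white_matter_segmentation.mha')
###Output
_____no_output_____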
1_2_Convolutional_Filters_Edge_Detection/1. Fourier Transform.ipynb
###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. 
###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. 
The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). 
The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. 
The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. 
''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.grid(False) ax2.grid(False) ax3.grid(False) ax4.grid(False) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ax1.grid(False) ax2.grid(False) ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. 
###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) # 1 row and 2 columns print(type(f)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). 
An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. 
###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. 
The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). 
The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. 
The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. 
''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. 
###Markdown

Filters

In addition to taking advantage of color information, we also have knowledge about patterns of grayscale intensity in an image. Intensity is a measure of light and dark, similar to brightness, and we can often identify the edges of an object by looking for an abrupt change in intensity, which happens when an image changes from a very dark to a light area, or vice versa.

To detect these changes, you'll be using and creating specific image filters that look at groups of pixels and detect big changes in intensity in an image. These filters produce an output that shows these edges.

Frequency in images

We have an intuition of what frequency means when it comes to sound. A high-frequency sound is a high-pitched noise, like a bird chirp or a violin, and low-frequency sounds are low-pitched, like a deep voice or a bass drum. For sound, frequency refers to how fast a sound wave is oscillating; oscillations are usually measured in cycles per second (Hz), and high pitches are made by high-frequency waves.

High and low frequency

Similarly, frequency in images is a rate of change. Images change in space, and a high-frequency image is one where the intensity changes a lot, with the level of brightness changing quickly from one pixel to the next. A low-frequency image may be one that is relatively uniform in brightness or changes very slowly.

* High-frequency components also correspond to the edges of objects in images, which can help us classify those objects.

Fourier Transforms

The Fourier Transform (FT) is an important image processing tool that is used to decompose an image into its frequency components. The output of an FT represents the image in the frequency domain, while the input image is the spatial domain (x, y) equivalent. In the frequency domain image, each point represents a particular frequency contained in the spatial domain image. So, for images with a lot of high-frequency components (edges, corners, and stripes), there will be many points in the frequency domain at high frequency values.
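###Markdown

To make "frequency is a rate of change" concrete, here is a small sketch (an illustrative addition, not part of the original notebook) that builds two synthetic images, a smooth horizontal gradient and a pixel-by-pixel checkerboard, and compares their spectra. A helper with a small epsilon is used in place of `ft_image`, since the synthetic spectra contain exact zeros and `np.log(0)` would produce `-inf`.

###Code

# a sketch: synthetic low- vs. high-frequency images
def ft_safe(img):
    # same idea as ft_image, with a small epsilon so exact-zero
    # frequency components don't produce log(0) = -inf
    fshift = np.fft.fftshift(np.fft.fft2(img))
    return 20*np.log(np.abs(fshift) + 1e-9)

size = 64

# low frequency: brightness changes slowly across the image
gradient = np.tile(np.linspace(0, 1, size), (size, 1))

# high frequency: brightness flips between 0 and 1 every pixel
checker = np.indices((size, size)).sum(axis=0) % 2

f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20,10))
ax1.set_title('gradient (low frequency)')
ax1.imshow(gradient, cmap='gray')
ax2.set_title('gradient spectrum')
ax2.imshow(ft_safe(gradient), cmap='gray')
ax3.set_title('checkerboard (high frequency)')
ax3.imshow(checker, cmap='gray')
ax4.set_title('checkerboard spectrum')
ax4.imshow(ft_safe(checker), cmap='gray')

###Output

_____no_output_____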
###Markdown

It can also help to see each intermediate processing step. The display below shows the original bird image next to its grayscale version, the normalized version, and the final frequency transform.

###Code

# Display each processing step alongside the frequency transform
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20,10))

ax1.set_title('original image')
ax1.imshow(image)

ax2.set_title('gray')
ax2.imshow(gray, cmap='gray')

ax3.set_title('normalized image')
ax3.imshow(norm_image, cmap='gray')

ax4.set_title('frequency transform image')
ax4.imshow(f_image, cmap='gray')

###Output

_____no_output_____
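###Markdown

Since edges correspond to high frequencies, we can also filter an image directly in the frequency domain. The sketch below (an illustrative addition, not from the original notebook) zeroes out a square of low frequencies at the center of the shifted spectrum and inverts the transform; the cutoff of 30 pixels is an arbitrary choice. What remains is a rough edge image.

###Code

# a sketch: high-pass filtering in the frequency domain
f_raw = np.fft.fft2(norm_image)
fshift = np.fft.fftshift(f_raw)

# block the low frequencies at the center of the shifted spectrum
rows, cols = norm_image.shape
crow, ccol = rows//2, cols//2
cutoff = 30   # arbitrary half-width of the blocked square
fshift[crow-cutoff:crow+cutoff, ccol-cutoff:ccol+cutoff] = 0

# invert the transform; mostly the edges (high frequencies) remain
edges = np.abs(np.fft.ifft2(np.fft.ifftshift(fshift)))

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('original (normalized)')
ax1.imshow(norm_image, cmap='gray')
ax2.set_title('high-pass filtered')
ax2.imshow(edges, cmap='gray')

###Output

_____no_output_____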
###Markdown

Finally, we can resize the frequency transform and save it out as an image file.

###Code

print(f_image.shape)

# target size as (width, height)
dsize = (400, 300)

# resize the frequency transform image
output = cv2.resize(f_image, dsize)
plt.imshow(output, cmap='gray')
print(output.shape)

cv2.imwrite("fourier.jpg", output)

###Output

(396, 726)
(300, 400)
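###Markdown

One caveat worth noting (our observation, not the original author's): `output` is a float array of log magnitudes, not 8-bit pixel values, so `cv2.imwrite` may clip or distort it when writing a JPEG. A safer sketch rescales the spectrum to the [0, 255] range and converts to `uint8` first.

###Code

# a sketch: rescale the float spectrum to 0-255 and convert to 8-bit
# before saving, so the written JPEG matches what plt.imshow displays
out_8bit = cv2.normalize(output, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imwrite("fourier.jpg", out_8bit)

###Output

_____no_output_____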
The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____ ###Markdown Fourier TransformsThe frequency components of an image can be displayed after doing a Fourier Transform (FT). An FT looks at the components of an image (edges that are high-frequency, and areas of smooth color as low-frequency), and plots the frequencies that occur as points in spectrum.In fact, an FT treats patterns of intensity in an image as sine waves with a particular frequency, and you can look at an interesting visualization of these sine wave components [on this page](https://plus.maths.org/content/fourier-transforms-images).In this notebook, we'll first look at a few simple image patterns to build up an idea of what image frequency components look like, and then transform a more complex image to see what it looks like in the frequency domain. ###Code import numpy as np import matplotlib.pyplot as plt import cv2 %matplotlib inline # Read in the images image_stripes = cv2.imread('images/stripes.jpg') # Change color to RGB (from BGR) image_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_BGR2RGB) # Read in the images image_solid = cv2.imread('images/pink_solid.jpg') # Change color to RGB (from BGR) image_solid = cv2.cvtColor(image_solid, cv2.COLOR_BGR2RGB) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5)) ax1.imshow(image_stripes) ax2.imshow(image_solid) # convert to grayscale to focus on the intensity patterns in the image gray_stripes = cv2.cvtColor(image_stripes, cv2.COLOR_RGB2GRAY) gray_solid = cv2.cvtColor(image_solid, cv2.COLOR_RGB2GRAY) # normalize the image color values from a range of [0,255] to [0,1] for further processing norm_stripes = gray_stripes/255.0 norm_solid = gray_solid/255.0 # perform a fast fourier transform and create a scaled, frequency transform image def ft_image(norm_image): '''This function takes in a normalized, grayscale image and returns a frequency spectrum transform of that image. ''' f = np.fft.fft2(norm_image) fshift = np.fft.fftshift(f) frequency_tx = 20*np.log(np.abs(fshift)) return frequency_tx # Call the function on the normalized images # and display the transforms f_stripes = ft_image(norm_stripes) f_solid = ft_image(norm_solid) # display the images # original images to the left of their frequency transform f, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, figsize=(20,10)) ax1.set_title('original image') ax1.imshow(image_stripes) ax2.set_title('frequency transform image') ax2.imshow(f_stripes, cmap='gray') ax3.set_title('original image') ax3.imshow(image_solid) ax4.set_title('frequency transform image') ax4.imshow(f_solid, cmap='gray') ###Output _____no_output_____ ###Markdown Low frequencies are at the center of the frequency transform image. 
The transform images for these example show that the solid image has most low-frequency components (as seen by the center bright spot). The stripes tranform image contains low-frequencies for the areas of white and black color and high frequencies for the edges in between those colors. The stripes transform image also tells us that there is one dominating direction for these frequencies; vertical stripes are represented by a horizontal line passing through the center of the frequency transform image.Next, let's see what this looks like applied to a real-world image. ###Code # Read in an image image = cv2.imread('images/birds.jpg') # Change color to RGB (from BGR) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to grayscale gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # normalize the image norm_image = gray/255.0 f_image = ft_image(norm_image) # Display the images f, (ax1,ax2) = plt.subplots(1, 2, figsize=(20,10)) ax1.imshow(image) ax2.imshow(f_image, cmap='gray') ###Output _____no_output_____
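###Markdown
As a quick sanity check on the idea that an FT decomposes an image into sine waves, here is a small illustrative cell (a sketch, not part of the original exercise; the pattern size and frequency are arbitrary choices). A single horizontal sinusoid should produce a bright center spot (the mean intensity) plus two points mirrored about the center of the spectrum, matching the stripes example above.

###Code
# build a synthetic sinusoidal pattern; intensity varies horizontally (vertical stripes)
x = np.linspace(0, 2*np.pi*8, 256)   # 8 cycles across the image width
sine_img = 0.5 + 0.5*np.sin(np.tile(x, (256, 1)))

# add a tiny bit of noise so np.log in ft_image never sees an exact zero
f_sine = ft_image(sine_img + 1e-3*np.random.rand(256, 256))

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.imshow(sine_img, cmap='gray')
ax2.imshow(f_sine, cmap='gray')

###Output
_____no_output_____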
notebooks/python_interface.ipynb
###Markdown
HugeCTR Python Interface Overview
In HugeCTR version 2.3, we've integrated
the Python interface, which supports setting data source and model oversubscribing during training. This notebook explains how to access and use the HugeCTR Python interface. For more details on the usage of the Python API, please refer to [HugeCTR Python Interface](../docs/python_interface.md).

Table of Contents
1. [Access the HugeCTR Python Interface](1)
1. [Wide & Deep Demo](2)
1. [API Signatures](3)

1. Access the HugeCTR Python Interface
1. Please make sure that you have started the notebook inside the running NGC docker container: `nvcr.io/nvidia/hugectr:v3.0`. A dynamic link to the `hugectr.so` library is installed to the system path `/usr/local/hugectr/lib/`. In addition, this system path is added to the environment variable `PYTHONPATH`, which means that you can use the Python interface within the docker container environment. Check the dynamic link with the following command:

###Code
!ls /usr/local/hugectr/lib

###Output
_____no_output_____

###Markdown
2. Import HugeCTR in order to train your model with Python as shown here:

###Code
import hugectr

###Output
_____no_output_____

###Markdown
2. Wide & Deep Demo

2.1 Download and Preprocess Data
1. Go to [this link](https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/), and download one of the 24 files into the directory `${project_root}/tools`, or execute the following command:
```
$ cd ${project_root}/tools
$ wget http://azuremlsampleexperiments.blob.core.windows.net/criteo/day_1.gz
```
2. Extract the dataset using the following command:
```shell
$ tar zxvf day_1.gz
```
3. Preprocess the data using `tools/preprocess.sh`:
```shell
$ bash tools/preprocess.sh 1 wdl_data pandas 1 1 100
using pandas to preprocess the data
```
The command will run for about half an hour and finally generate a folder named wdl_data containing the preprocessed data. For more detailed usage of `tools/preprocess.sh`, please refer to the Preprocessing by Pandas section in [samples/wdl](../samples/wdl/README.md).

2.2 Train from scratch
We can train from scratch and store the trained dense model and embedding tables in model files by doing the following:
1. Create a JSON file for the W&D model. **NOTE**: The solver clause no longer needs to be added to the JSON file when using the Python interface. Instead, you can configure the parameters using `hugectr.solver_parser_helper()` directly in the Python interface.
###Code
%%writefile wdl_1gpu.json
{
  "optimizer": {
    "type": "Adam",
    "update_type": "Global",
    "adam_hparam": {
      "learning_rate": 0.001,
      "beta1": 0.9,
      "beta2": 0.999,
      "epsilon": 0.0000001
    }
  },
  "layers": [
    {
      "name": "data",
      "type": "Data",
      "source": "wdl_data/file_list.0.txt",
      "eval_source": "wdl_data/file_list.8.txt",
      "check": "Sum",
      "label": {
        "top": "label",
        "label_dim": 1
      },
      "dense": {
        "top": "dense",
        "dense_dim": 13
      },
      "sparse": [
        {
          "top": "wide_data",
          "type": "DistributedSlot",
          "max_feature_num_per_sample": 30,
          "slot_num": 1
        },
        {
          "top": "deep_data",
          "type": "DistributedSlot",
          "max_feature_num_per_sample": 30,
          "slot_num": 26
        }
      ]
    },
    {
      "name": "sparse_embedding2",
      "type": "DistributedSlotSparseEmbeddingHash",
      "bottom": "wide_data",
      "top": "sparse_embedding2",
      "sparse_embedding_hparam": {
        "max_vocabulary_size_per_gpu": 5863985,
        "embedding_vec_size": 1,
        "combiner": 0
      }
    },
    {
      "name": "sparse_embedding1",
      "type": "DistributedSlotSparseEmbeddingHash",
      "bottom": "deep_data",
      "top": "sparse_embedding1",
      "sparse_embedding_hparam": {
        "max_vocabulary_size_per_gpu": 5863985,
        "embedding_vec_size": 16,
        "combiner": 0
      }
    },
    {
      "name": "reshape1",
      "type": "Reshape",
      "bottom": "sparse_embedding1",
      "top": "reshape1",
      "leading_dim": 416
    },
    {
      "name": "reshape2",
      "type": "Reshape",
      "bottom": "sparse_embedding2",
      "top": "reshape2",
      "leading_dim": 1
    },
    {
      "name": "concat1",
      "type": "Concat",
      "bottom": [ "reshape1", "dense" ],
      "top": "concat1"
    },
    {
      "name": "fc1",
      "type": "InnerProduct",
      "bottom": "concat1",
      "top": "fc1",
      "fc_param": { "num_output": 1024 }
    },
    {
      "name": "relu1",
      "type": "ReLU",
      "bottom": "fc1",
      "top": "relu1"
    },
    {
      "name": "dropout1",
      "type": "Dropout",
      "rate": 0.5,
      "bottom": "relu1",
      "top": "dropout1"
    },
    {
      "name": "fc2",
      "type": "InnerProduct",
      "bottom": "dropout1",
      "top": "fc2",
      "fc_param": { "num_output": 1024 }
    },
    {
      "name": "relu2",
      "type": "ReLU",
      "bottom": "fc2",
      "top": "relu2"
    },
    {
      "name": "dropout2",
      "type": "Dropout",
      "rate": 0.5,
      "bottom": "relu2",
      "top": "dropout2"
    },
    {
      "name": "fc4",
      "type": "InnerProduct",
      "bottom": "dropout2",
      "top": "fc4",
      "fc_param": { "num_output": 1 }
    },
    {
      "name": "add1",
      "type": "Add",
      "bottom": [ "fc4", "reshape2" ],
      "top": "add1"
    },
    {
      "name": "loss",
      "type": "BinaryCrossEntropyLoss",
      "bottom": [ "add1", "label" ],
      "top": "loss"
    }
  ]
}

###Output
Writing wdl_1gpu.json

###Markdown
2. Write the Python script. Ensure that the `repeat_dataset` parameter is set to `False` within the script, which indicates that the file list needs to be specified before submitting the `sess.train()` or `sess.eval()` calls. Additionally, be sure to create a write-enabled directory for storing the temporary files for model oversubscribing.
###Code
%%writefile wdl_from_scratch.py
from hugectr import Session, solver_parser_helper
import sys
def train_from_scratch(json_file):
  dataset = [("./wdl_data/file_list."+str(i)+".txt", "./wdl_data/file_list."+str(i)+".keyset") for i in range(8)]
  solver_config = solver_parser_helper(seed = 0,
                                       batchsize = 16384,
                                       batchsize_eval = 16384,
                                       model_file = "",
                                       embedding_files = [],
                                       vvgpu = [[0]],
                                       use_mixed_precision = False,
                                       scaler = 1.0,
                                       i64_input_key = False,
                                       use_algorithm_search = True,
                                       use_cuda_graph = True,
                                       repeat_dataset = False
                                      )
  sess = Session(solver_config, json_file, True, "./temp_embedding")
  data_reader_train = sess.get_data_reader_train()
  data_reader_eval = sess.get_data_reader_eval()
  data_reader_eval.set_source("./wdl_data/file_list.8.txt")
  model_oversubscriber = sess.get_model_oversubscriber()
  iteration = 0
  for file_list, keyset_file in dataset:
    data_reader_train.set_source(file_list)
    model_oversubscriber.update(keyset_file)
    while True:
      good = sess.train()
      if good == False:
        break
      if iteration % 100 == 0:
        sess.check_overflow()
        sess.copy_weights_for_evaluation()
        data_reader_eval = sess.get_data_reader_eval()
        good_eval = True
        j = 0
        while good_eval:
          if j >= solver_config.max_eval_batches:
            break
          good_eval = sess.eval()
          j += 1
        if good_eval == False:
          data_reader_eval.set_source()
        metrics = sess.get_eval_metrics()
        print("[HUGECTR][INFO] iter: {}, metrics: {}".format(iteration, metrics))
      iteration += 1
    print("[HUGECTR][INFO] trained with data in {}".format(file_list))
  sess.download_params_to_files("./", iteration)

if __name__ == "__main__":
  json_file = sys.argv[1]
  train_from_scratch(json_file)

%%writefile wdl_from_scratch.sh
mkdir -p temp_embedding && \
python3 wdl_from_scratch.py wdl_1gpu.json

!bash wdl_from_scratch.sh

###Output
[02d01h44m32s][HUGECTR][INFO]: Global seed is 3078712038
[02d01h44m35s][HUGECTR][INFO]: Peer-to-peer access cannot be fully enabled.
Device 0: GeForce RTX 2080 Ti [02d01h44m35s][HUGECTR][INFO]: cache_eval_data is not specified using default: 0 [02d01h44m35s][HUGECTR][INFO]: num_workers is not specified using default: 12 [02d01h44m35s][HUGECTR][INFO]: num of DataReader workers: 12 [02d01h44m35s][HUGECTR][INFO]: max_nnz is not specified using default: 30 [02d01h44m35s][HUGECTR][INFO]: max_nnz is not specified using default: 30 [02d01h44m35s][HUGECTR][INFO]: num_internal_buffers 1 [02d01h44m35s][HUGECTR][INFO]: num_internal_buffers 1 [02d01h44m35s][HUGECTR][INFO]: max_vocabulary_size_per_gpu_=5863985 [02d01h44m35s][HUGECTR][INFO]: max_vocabulary_size_per_gpu_=5863985 [02d01h45m16s][HUGECTR][INFO]: Traning from scratch, no snapshot file specified [02d01h45m16s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h45m16s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h45m17s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 0 [02d01h45m17s][HUGECTR][INFO]: Done [02d01h45m17s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 0 [02d01h45m17s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 0, metrics: [('AUC', 0.4978078603744507)] [HUGECTR][INFO] iter: 100, metrics: [('AUC', 0.7422940731048584)] [HUGECTR][INFO] iter: 200, metrics: [('AUC', 0.7539043426513672)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.0.txt [02d01h46m01s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h46m01s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h46m02s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h46m02s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h46m03s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 442 [02d01h46m03s][HUGECTR][INFO]: Done [02d01h46m04s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 820 [02d01h46m04s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 300, metrics: [('AUC', 0.7544599771499634)] [HUGECTR][INFO] iter: 400, metrics: [('AUC', 0.760477602481842)] [HUGECTR][INFO] iter: 500, metrics: [('AUC', 0.7647740244865417)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.1.txt [02d01h46m27s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h46m27s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h46m27s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h46m27s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h46m29s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 555 [02d01h46m29s][HUGECTR][INFO]: Done [02d01h46m30s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 960 [02d01h46m30s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 600, metrics: [('AUC', 0.7630816698074341)] [HUGECTR][INFO] iter: 700, metrics: [('AUC', 0.7671055793762207)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.2.txt [02d01h46m54s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h46m54s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h46m54s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h46m54s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h46m56s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 619 [02d01h46m56s][HUGECTR][INFO]: Done [02d01h46m58s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1014 [02d01h46m58s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 800, metrics: [('AUC', 0.7653575539588928)] [HUGECTR][INFO] iter: 900, metrics: [('AUC', 
0.7663018703460693)] [HUGECTR][INFO] iter: 1000, metrics: [('AUC', 0.7696205973625183)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.3.txt [02d01h47m21s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h47m21s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h47m22s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h47m22s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h47m24s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 660 [02d01h47m24s][HUGECTR][INFO]: Done [02d01h47m25s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1036 [02d01h47m25s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 1100, metrics: [('AUC', 0.7684949636459351)] [HUGECTR][INFO] iter: 1200, metrics: [('AUC', 0.7705279588699341)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.4.txt [02d01h47m48s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h47m48s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h47m49s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h47m49s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h47m50s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 690 [02d01h47m51s][HUGECTR][INFO]: Done [02d01h47m52s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1048 [02d01h47m52s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 1300, metrics: [('AUC', 0.7694478631019592)] [HUGECTR][INFO] iter: 1400, metrics: [('AUC', 0.7711092233657837)] [HUGECTR][INFO] iter: 1500, metrics: [('AUC', 0.7721576690673828)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.5.txt [02d01h48m15s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h48m15s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h48m15s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h48m15s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h48m17s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 714 [02d01h48m17s][HUGECTR][INFO]: Done [02d01h48m19s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1052 [02d01h48m19s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 1600, metrics: [('AUC', 0.7716618776321411)] [HUGECTR][INFO] iter: 1700, metrics: [('AUC', 0.7727320790290833)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.6.txt [02d01h48m42s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h48m42s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h48m42s][HUGECTR][INFO]: Dump hash table from GPU0 [02d01h48m42s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [02d01h48m44s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 733 [02d01h48m44s][HUGECTR][INFO]: Done [02d01h48m46s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1055 [02d01h48m46s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 1800, metrics: [('AUC', 0.771714985370636)] [HUGECTR][INFO] iter: 1900, metrics: [('AUC', 0.7732033729553223)] [HUGECTR][INFO] iter: 2000, metrics: [('AUC', 0.773735761642456)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.7.txt [02d01h49m09s][HUGECTR][INFO]: Rank0: Dump hash table from GPU0 [02d01h49m09s][HUGECTR][INFO]: Rank0: Write hash table <key,value> pairs to file [02d01h49m09s][HUGECTR][INFO]: Done [02d01h49m09s][HUGECTR][INFO]: Rank0: Dump hash table from GPU0 [02d01h49m09s][HUGECTR][INFO]: Rank0: Write hash table <key,value> pairs to file [02d01h49m09s][HUGECTR][INFO]: Done 
###Markdown
2.3 Train from stored model
Check the stored model files that will be used in the training. The dense model file and the embedding files should be passed to the `model_file` and `embedding_files` parameters, respectively, when calling `solver_parser_helper()`. We will use the same JSON file and training data as in the previous section, and all the other configurations for `solver_parser_helper` will also be the same.

###Code
!ls *.model

%%writefile wdl_from_stored.py
from hugectr import Session, solver_parser_helper
import sys
def train_from_stored(json_file):
  dataset = [("./wdl_data/file_list."+str(i)+".txt", "./wdl_data/file_list."+str(i)+".keyset") for i in range(8)]
  solver_config = solver_parser_helper(seed = 0,
                                       batchsize = 16384,
                                       batchsize_eval = 16384,
                                       model_file = "_dense_2016.model",
                                       embedding_files = ["0_sparse_2016.model", "1_sparse_2016.model"],
                                       vvgpu = [[0]],
                                       use_mixed_precision = False,
                                       scaler = 1.0,
                                       i64_input_key = False,
                                       use_algorithm_search = True,
                                       use_cuda_graph = True,
                                       repeat_dataset = False
                                      )
  sess = Session(solver_config, json_file, True, "./temp_embedding")
  data_reader_train = sess.get_data_reader_train()
  data_reader_eval = sess.get_data_reader_eval()
  data_reader_eval.set_source("./wdl_data/file_list.8.txt")
  model_oversubscriber = sess.get_model_oversubscriber()
  iteration = 1260
  for file_list, keyset_file in dataset:
    data_reader_train.set_source(file_list)
    model_oversubscriber.update(keyset_file)
    while True:
      good = sess.train()
      if good == False:
        break
      if iteration % 100 == 0:
        sess.check_overflow()
        sess.copy_weights_for_evaluation()
        data_reader_eval = sess.get_data_reader_eval()
        good_eval = True
        j = 0
        while good_eval:
          if j >= solver_config.max_eval_batches:
            break
          good_eval = sess.eval()
          j += 1
        if good_eval == False:
          data_reader_eval.set_source()
        metrics = sess.get_eval_metrics()
        print("[HUGECTR][INFO] iter: {}, metrics: {}".format(iteration, metrics))
      iteration += 1
    print("[HUGECTR][INFO] trained with data in {}".format(file_list))
  sess.download_params_to_files("./", iteration)

if __name__ == "__main__":
  json_file = sys.argv[1]
  train_from_stored(json_file)

%%writefile wdl_from_stored.sh
mkdir -p temp_embedding && \
python3 wdl_from_stored.py wdl_1gpu.json

!bash wdl_from_stored.sh

###Output
[01d13h17m31s][HUGECTR][INFO]: Global seed is 431843434
[01d13h17m32s][HUGECTR][INFO]: Peer-to-peer access cannot be fully enabled.
Device 0: GeForce RTX 2080 Ti [01d13h17m32s][HUGECTR][INFO]: cache_eval_data is not specified using default: 0 [01d13h17m32s][HUGECTR][INFO]: num_workers is not specified using default: 12 [01d13h17m32s][HUGECTR][INFO]: num of DataReader workers: 12 [01d13h17m32s][HUGECTR][INFO]: max_nnz is not specified using default: 30 [01d13h17m32s][HUGECTR][INFO]: max_nnz is not specified using default: 30 [01d13h17m32s][HUGECTR][INFO]: num_internal_buffers 1 [01d13h17m32s][HUGECTR][INFO]: num_internal_buffers 1 [01d13h17m37s][HUGECTR][INFO]: max_vocabulary_size_per_gpu_=5863985 [01d13h17m37s][HUGECTR][INFO]: max_vocabulary_size_per_gpu_=5863985 Loading dense model: _dense_2016.model [01d13h18m24s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h18m24s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h18m25s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 442 [01d13h18m25s][HUGECTR][INFO]: Done [01d13h18m26s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 821 [01d13h18m26s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 1300, metrics: [('AUC', 0.7719355821609497)] [HUGECTR][INFO] iter: 1400, metrics: [('AUC', 0.7717985510826111)] [HUGECTR][INFO] iter: 1500, metrics: [('AUC', 0.7717432379722595)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.0.txt [01d13h24m29s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h24m29s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h24m30s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h24m30s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h24m32s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 555 [01d13h24m32s][HUGECTR][INFO]: Done [01d13h24m33s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 959 [01d13h24m33s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 1600, metrics: [('AUC', 0.7691575884819031)] [HUGECTR][INFO] iter: 1700, metrics: [('AUC', 0.7704731225967407)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.1.txt [01d13h32m47s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h32m47s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h32m48s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h32m48s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h32m50s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 618 [01d13h32m50s][HUGECTR][INFO]: Done [01d13h32m51s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1014 [01d13h32m51s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 1800, metrics: [('AUC', 0.7660712599754333)] [HUGECTR][INFO] iter: 1900, metrics: [('AUC', 0.7713440656661987)] [HUGECTR][INFO] iter: 2000, metrics: [('AUC', 0.7716435790061951)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.2.txt [01d13h40m28s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h40m28s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h40m29s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h40m29s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h40m33s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 662 [01d13h40m33s][HUGECTR][INFO]: Done [01d13h40m35s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1038 [01d13h40m35s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 2100, metrics: [('AUC', 0.7696810960769653)] [HUGECTR][INFO] iter: 2200, metrics: [('AUC', 0.7733792662620544)] 
[HUGECTR][INFO] trained with data in ./wdl_data/file_list.3.txt [01d13h46m58s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h46m58s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h46m59s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h46m59s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h47m02s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 691 [01d13h47m02s][HUGECTR][INFO]: Done [01d13h47m03s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1047 [01d13h47m03s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 2300, metrics: [('AUC', 0.766020655632019)] [HUGECTR][INFO] iter: 2400, metrics: [('AUC', 0.7715461254119873)] [HUGECTR][INFO] iter: 2500, metrics: [('AUC', 0.7737528681755066)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.4.txt [01d13h50m45s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h50m45s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h50m46s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h50m46s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h50m48s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 714 [01d13h50m48s][HUGECTR][INFO]: Done [01d13h50m50s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1052 [01d13h50m50s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 2600, metrics: [('AUC', 0.7713172435760498)] [HUGECTR][INFO] iter: 2700, metrics: [('AUC', 0.7725634574890137)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.5.txt [01d13h54m46s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h54m46s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h54m46s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h54m46s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h54m49s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 732 [01d13h54m49s][HUGECTR][INFO]: Done [01d13h54m50s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1053 [01d13h54m50s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 2800, metrics: [('AUC', 0.7703032493591309)] [HUGECTR][INFO] iter: 2900, metrics: [('AUC', 0.7738139033317566)] [HUGECTR][INFO] iter: 3000, metrics: [('AUC', 0.7731029391288757)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.6.txt [01d13h58m38s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h58m38s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h58m39s][HUGECTR][INFO]: Dump hash table from GPU0 [01d13h58m39s][HUGECTR][INFO]: Write hash table <key,value> pairs to file [01d13h58m42s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1006 [01d13h58m42s][HUGECTR][INFO]: Done [01d13h58m44s][HUGECTR][INFO]: Start to upload embedding table file to GPUs, total loop_num: 1055 [01d13h58m44s][HUGECTR][INFO]: Done [HUGECTR][INFO] iter: 3100, metrics: [('AUC', 0.7722951769828796)] [HUGECTR][INFO] iter: 3200, metrics: [('AUC', 0.7728274464607239)] [HUGECTR][INFO] trained with data in ./wdl_data/file_list.7.txt [01d14h03m34s][HUGECTR][INFO]: Rank0: Dump hash table from GPU0 [01d14h03m34s][HUGECTR][INFO]: Rank0: Write hash table <key,value> pairs to file [01d14h03m34s][HUGECTR][INFO]: Done [01d14h03m34s][HUGECTR][INFO]: Rank0: Dump hash table from GPU0 [01d14h03m34s][HUGECTR][INFO]: Rank0: Write hash table <key,value> pairs to file [01d14h03m34s][HUGECTR][INFO]: Done
s7/codes/.ipynb_checkpoints/nerc-checkpoint.ipynb
###Markdown
NERC in NLTK

###Code
from nltk import word_tokenize, pos_tag, ne_chunk

sentence = "Mark Pedersen and John Smith are working at Google " + \
           "since 1994 for $1000 per week."

x = pos_tag(word_tokenize(sentence))
res = ne_chunk(x)
print(res)
res

res = ne_chunk(x, binary=True)
print(res)
res

###Output
(S (NE Mark/NNP Pedersen/NNP) and/CC (NE John/NNP Smith/NNP) are/VBP working/VBG at/IN (NE Google/NNP) since/IN 1994/CD for/IN $/$ 1000/CD per/IN week/NN ./.)

###Markdown
NERC in CoreNLP

###Code
from nltk.parse.corenlp import CoreNLPParser

l = CoreNLPParser(url='http://localhost:9000').parse(word_tokenize(sentence))
ts = [t for t in l]
len(ts)
ts[0]

###Output
_____no_output_____
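###Markdown
As a small follow-up (a sketch, not part of the original notebook): the result of `ne_chunk` is an `nltk.Tree`, so the recognized entities can be collected by walking its subtrees. Note that `.parse(...)` on `CoreNLPParser` above returns constituency parses; for actual CoreNLP NER tagging one would typically construct the parser with `tagtype='ner'` and call `.tag(...)` instead. The helper name below is made up for illustration.

###Code
from nltk.tree import Tree

def extract_entities(chunked):
    '''Collect (entity text, label) pairs from the NE subtrees of a chunked sentence.'''
    return [(" ".join(token for token, pos in subtree.leaves()), subtree.label())
            for subtree in chunked
            if isinstance(subtree, Tree)]

extract_entities(res)

###Output
_____no_output_____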
RedesNeuronales/PruebaNuevoData.ipynb
###Markdown
Encode

###Code
# Imports needed by the cells below; `df` (the training dataframe) is assumed
# to have been loaded in an earlier cell.
import math
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import log_loss

# Target (mean) encoding: replace each category with the mean of 'Decision' for that category.
Mean_encoded_subject_region = df.groupby(['Region'])['Decision'].mean().to_dict()
df['Region'] = df['Region'].map(Mean_encoded_subject_region)

#Mean_encoded_subject_quarter = df.groupby(['Delivery_Quarter'])['Decision'].mean().to_dict()
#df['Delivery_Quarter'] = df['Delivery_Quarter'].map(Mean_encoded_subject_quarter)

Mean_encoded_subject_owner = df.groupby(['Opportunity_Owner'])['Decision'].mean().to_dict()
df['Opportunity_Owner'] = df['Opportunity_Owner'].map(Mean_encoded_subject_owner)

###Output
_____no_output_____

###Markdown
Split

###Code
X_test = df.tail(math.floor(df.shape[0]*0.20)).drop(columns = {'Decision'})
y_test = df.tail(math.floor(df.shape[0]*0.20))['Decision']
X_train = df.head(round(df.shape[0]*0.80)).drop(columns = {'Decision'})
y_train = df.head(round(df.shape[0]*0.80))['Decision']

X_train

###Output
_____no_output_____

###Markdown
Regression

###Code
import sklearn as sk
import sklearn.neural_network

lr = 0.001  # learning rate
nn = [2, 16, 8, 1]  # number of neurons per layer

# Create the multilayer neural network model object.
modelRegresor = sk.neural_network.MLPRegressor(solver='adam',
                                               activation = 'logistic',
                                               learning_rate_init=lr,
                                               hidden_layer_sizes=tuple(nn[1:]),
                                               verbose=True,
                                               n_iter_no_change=1000,
                                               batch_size = 64)

modelRegresor.fit(X_train, y_train)

LinReg_pred = modelRegresor.predict(X_test)
probando = pd.DataFrame()
probando['target'] = LinReg_pred
probando.describe()

modelRegresor.score(X_test, y_test)

loss = log_loss(y_test, LinReg_pred)
print(f"Log loss is {loss}")

#pickle.dump(modelRegresor, open('modeloRegresorRN.p', 'wb'))

###Output
_____no_output_____

###Markdown
Kaggle

###Code
DataFrame_test = pd.read_csv(
    "/home/bautista/Datos/Machine-Learning-Datos/FeatureEngineering/df_time_series_test.csv"
)
DataFrame_test

subir = pd.DataFrame()
subir['Opportunity_ID'] = DataFrame_test['Opportunity_ID']
DataFrame_test = DataFrame_test.drop(columns = {'Opportunity_ID', 'Unnamed: 0', 'Delivery_Year', 'Delivery_Quarter'}).fillna(0)
DataFrame_test

###Output
_____no_output_____

###Markdown
Encoding

###Code
DataFrame_test['Region'] = DataFrame_test['Region'].map(Mean_encoded_subject_region)
#DataFrame_test['Delivery_Quarter'] = DataFrame_test['Delivery_Quarter'].map(Mean_encoded_subject_quarter)
DataFrame_test['Opportunity_Owner'] = DataFrame_test['Opportunity_Owner'].map(Mean_encoded_subject_owner)

# Categories unseen during training get a hard-coded global target mean.
DataFrame_test = DataFrame_test.fillna(0.406068)

DataFrame_test.isnull().sum()

###Output
_____no_output_____

###Markdown
Prediction

###Code
pred_posta = modelRegresor.predict(DataFrame_test)

subir['target'] = pred_posta
subir.set_index('Opportunity_ID', inplace = True)
subir

subir.describe()

plt.figure(figsize=(15,10))
plt.plot(pred_posta, 'gd', label='Neural Network')
plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.ylabel('predicted')
plt.xlabel('training samples')
plt.legend(loc="best")
plt.title('Regressor predictions and their average')
plt.show()

#subir.to_csv('RedesNeuronales_Entrega_8.csv')

###Output
_____no_output_____
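###Markdown
One fragile spot above is the hard-coded `fillna(0.406068)` for categories that appear in the test data but not in the training encodings. A possible refinement (a sketch only, reusing the variables defined above; it assumes the hard-coded constant was meant to be the global target mean) derives the fallback from the training labels instead:

###Code
# fall back to the global training target mean for unseen categories
# (hypothetical refinement, not part of the original run)
global_mean = y_train.mean()
for col in ['Region', 'Opportunity_Owner']:
    DataFrame_test[col] = DataFrame_test[col].fillna(global_mean)

###Output
_____no_output_____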
example_data/create_ndhm.ipynb
###Markdown
**Generate NDHM**

###Code
import sys
import os
import laspy
import numpy as np
from osgeo import gdal
import time
from progressbar import ProgressBar

# Input files, parameters
las_fn = './in2018_29651885_12_las13.las'
dtm_fn = './in2018_29651885_12_dtm.img'
ndhm_fn = './in2018_29651885_12_ndhm_5_ft.tif'
out_format = 'GTiff'
scale_factor = 2.0

# Open LiDAR file
las = laspy.file.File(las_fn, mode='r')
points = np.vstack((las.x, las.y, las.z)).transpose()
num_points = points.shape[0]

# Open DTM file
dtm = gdal.Open(dtm_fn)

# Check data info
dtm_array = dtm.ReadAsArray()
nrow = dtm_array.shape[0]
ncol = dtm_array.shape[1]
print('nrow and ncol:', nrow, ncol)

gt = dtm.GetGeoTransform()
print('Geotransform:', gt)
print('Cellsize(x,y):', gt[1], gt[5])

###Output
_____no_output_____

###Markdown
**[Affine transform](http://resources.esri.com/help/9.3/arcgisengine/java/gp_toolref/coverage_tools/how_transform_coverage_works.htm)**
Xgeo = gt[0] + (col \* gt[1])
Ygeo = gt[3] + (row \* gt[5])

###Code
ul_x = gt[0]
ul_y = gt[3]
cs_x = gt[1]
cs_y = gt[5]

dtm.GetProjection()

nrow_out = int(nrow/scale_factor)
ncol_out = int(ncol/scale_factor)
cs_x_out = cs_x * scale_factor
cs_y_out = cs_y * scale_factor
gt_out = [ul_x, cs_x_out, gt[2], ul_y, gt[4], cs_y_out]

# Close las file
las.close()

start = time.time()

# Initialize DSM and NDHM array
dsm = np.zeros((nrow_out, ncol_out), dtype=np.float32)
ndhm = np.zeros((nrow_out, ncol_out), dtype=np.float32)

pbar = ProgressBar()
for i in pbar(range(num_points)):
    # Get LiDAR point
    p_x = points[i,0]
    p_y = points[i,1]
    p_z = points[i,2]
    # Get row, col index in image of the point
    col = int((p_x - ul_x) / cs_x_out)
    row = int((p_y - ul_y) / cs_y_out)
    col_dtm = int(col * scale_factor)
    row_dtm = int(row * scale_factor)
    # Check points outside DTM boundary
    if col < 0 or col >= ncol_out:
        print('x out of dtm boundary', p_x)
        continue
    if row < 0 or row >= nrow_out:
        print('y out of dtm boundary', p_y)
        continue
    # Update DSM, NDHM
    if dsm[row, col] < p_z:
        dsm[row, col] = p_z
        ndhm[row, col] = p_z - dtm_array[row_dtm, col_dtm]

# Save output
driver = gdal.GetDriverByName(out_format)
ndhm_ds = driver.Create(ndhm_fn, xsize=ncol_out, ysize=nrow_out, bands=1, eType=gdal.GDT_Float32)
ndhm_ds.SetProjection(dtm.GetProjection())
ndhm_ds.SetGeoTransform(gt_out)
ndhm_ds.GetRasterBand(1).WriteArray(ndhm)
ndhm_ds = None

# Print elapsed time
end = time.time()
print('Elapsed time:', int(end - start))

###Output
_____no_output_____
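###Markdown
The per-point Python loop above is easy to follow but slow on large tiles. Below is a hypothetical vectorized variant (a sketch only, reusing the arrays already defined; like the zero-initialized DSM in the loop, it assumes all elevations are positive):

###Code
# vectorized DSM/NDHM sketch -- intended to mirror the loop above
cols = ((points[:, 0] - ul_x) / cs_x_out).astype(int)
rows = ((points[:, 1] - ul_y) / cs_y_out).astype(int)
inside = (cols >= 0) & (cols < ncol_out) & (rows >= 0) & (rows < nrow_out)

dsm_v = np.zeros((nrow_out, ncol_out), dtype=np.float32)
# keep the highest return per cell
np.maximum.at(dsm_v, (rows[inside], cols[inside]), points[inside, 2])

# subsample the DTM the same way the loop indexes it (row*scale_factor, col*scale_factor)
dtm_coarse = dtm_array[::int(scale_factor), ::int(scale_factor)][:nrow_out, :ncol_out]
ndhm_v = np.where(dsm_v > 0, dsm_v - dtm_coarse, 0.0)

###Output
_____no_output_____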
figures_korolev_2008.ipynb
###Markdown Figures Korolev 2008Reproduce some of the figures from the paper. References- Korolev, A., 2008. Rates of phase transformations in mixed-phase clouds. Q.J.R. Meteorol. Soc. 134, 595–608. https://doi.org/10.1002/qj.230 ###Code %load_ext autoreload %autoreload 2 import numpy as np import matplotlib #matplotlib.use('Agg') import matplotlib.pyplot as plt import analytic_formulas as af %matplotlib inline print('-- -15 -------------------------') factors = af.get_factors(af.T0-15, 800e2, verbose=True) print('-- -35 -------------------------') factors = af.get_factors(af.T0-35, 800e2, verbose=True) print(factors.__dict__.keys()) # K08 Fig4 def growth_rates(u, f): """get the growth rates of ice and water (Korolev 2008, Eq. 8, 9)""" Nwrw = 1000 Niri = 10 growth_water = ((f.a0*u-f.bi_s*Niri)*f.Bw*Nwrw)/(f.bw*Nwrw+f.bi*Niri) growth_ice = ((f.a0*u-((1-f.ksi)/f.ksi)*f.bw*Nwrw)*f.Bi*Niri)/(f.bw*Nwrw+f.bi*Niri) return growth_water, growth_ice Ts = np.arange(-40,1,1) params = dict(rho_w='HS', D='HP', Ei='AB', Ew='AB') params = dict() fig, ax = plt.subplots(figsize=(5, 4)) water_growth = growth_rates(1, af.get_factors(273.15+Ts, 900e2, **params))[0] ax.plot(Ts, water_growth, color='b') water_growth = growth_rates(-1, af.get_factors(273.15+Ts, 900e2, **params))[0] ax.plot(Ts, water_growth, '--', color='b') water_growth = growth_rates(1, af.get_factors(273.15+Ts, 500e2, **params))[0] ax.plot(Ts, water_growth, color='r') water_growth = growth_rates(-1, af.get_factors(273.15+Ts, 500e2, **params))[0] ax.plot(Ts, water_growth, '--', color='r') water_growth = growth_rates(1, af.get_factors(273.15+Ts, 300e2, **params))[0] ax.plot(Ts, water_growth, color='g') water_growth = growth_rates(-1, af.get_factors(273.15+Ts, 300e2, **params))[0] ax.plot(Ts, water_growth, '--', color='g') ax.set_ylim([-4e-6, 3e-6]) ax.set_ylabel('growth rate water ') ax.set_xlabel('Temperature [°C]') fig.savefig('plots/K08_fig4b.png', transparent=True) fig, ax = plt.subplots(figsize=(5, 4)) ice_growth = growth_rates(1, af.get_factors(273.15+Ts, 900e2, **params))[1] ax.plot(Ts, ice_growth, color='b') ice_growth = growth_rates(-1, af.get_factors(273.15+Ts, 900e2, **params))[1] ax.plot(Ts, ice_growth, '--', color='b') ice_growth = growth_rates(1, af.get_factors(273.15+Ts, 500e2, **params))[1] ax.plot(Ts, ice_growth, color='r') ice_growth = growth_rates(-1, af.get_factors(273.15+Ts, 500e2, **params))[1] ax.plot(Ts, ice_growth, '--', color='r') ice_growth = growth_rates(1, af.get_factors(273.15+Ts, 300e2, **params))[1] ax.plot(Ts, ice_growth, color='g') ice_growth = growth_rates(-1, af.get_factors(273.15+Ts, 300e2, **params))[1] ax.plot(Ts, ice_growth, '--', color='g') ax.set_ylim([0, 2.5e-6]) ax.set_ylabel('growth rate ice ') ax.set_xlabel('Temperature [°C]') fig.savefig('plots/K08_fig4a.png', transparent=True) u = np.arange(-5,5.1,1) params = dict(rho_w='HS', D='HP', Ei='AB', Ew='AB') params = dict() params_alt = dict(Lw='FB', Li='FB',rho_w='HS', D='fixed', Ei='M', Ew='M') fig, ax = plt.subplots(figsize=(5, 4)) water_growth = growth_rates(u, af.get_factors(273.15-5, 680e2, **params))[0] ax.plot(u, water_growth, color='g') water_growth = growth_rates(u, af.get_factors(273.15-15, 680e2, **params))[0] ax.plot(u, water_growth, color='r') water_growth = growth_rates(u, af.get_factors(273.15-25, 680e2, **params))[0] ax.plot(u, water_growth, color='b') # water_growth = growth_rates(u, get_factors(273.15-25, 680e2, **params_alt))[0] # ax.plot(u, water_growth, '--', color='k') ax.set_xlim([-5, 5]) ax.set_ylim([-6e-6, 6e-6]) 
ax.set_ylabel('growth rate water ') ax.set_xlabel('u ') fig.savefig('plots/K08_fig2.png', transparent=True) # K08 Fig 5c def get_uz_p(Niri, Nwrw, f): """get the threshold velocity for vapor flux in equilibrium (Korolev 2008, Eq. 15)""" uz_p = ((f.ksi-1)*(f.Bw*f.bi-f.bw*f.Bi)*Nwrw*Niri)/(f.a0*f.ksi*(f.Bw*Nwrw + f.Bi*Niri)) return uz_p Ts = np.arange(-40.1,0.15,1) params = dict(Lw='FB', Li='FB', rho_w='HS',Ei='AB', Ew='AB') params = dict() params_alt = dict(D="VDI") fig, ax = plt.subplots(figsize=(4, 4)) uz_p_T = get_uz_p(0.1, 1000, af.get_factors(273.15+Ts, 900e2, **params)) ax.plot(Ts, uz_p_T, color='blue') uz_p_T = get_uz_p(0.1, 1000, af.get_factors(273.15+Ts, 400e2, **params)) ax.plot(Ts, uz_p_T, "--", color='blue') uz_p_T = get_uz_p(1, 1000, af.get_factors(273.15+Ts, 900e2, **params)) ax.plot(Ts, uz_p_T, color='red') uz_p_T = get_uz_p(1, 1000, af.get_factors(273.15+Ts, 400e2, **params)) ax.plot(Ts, uz_p_T, "--", color='red') uz_p_T = get_uz_p(10, 1000, af.get_factors(273.15+Ts, 900e2, **params)) ax.plot(Ts, uz_p_T, color='green') uz_p_T = get_uz_p(10, 1000, af.get_factors(273.15+Ts, 400e2, **params)) ax.plot(Ts, uz_p_T, '--', color='green') params_alt = dict(D="VDI") uz_p_T = get_uz_p(10, 1000, af.get_factors(273.15+Ts, 400e2, **params_alt)) ax.plot(Ts, uz_p_T, '--', color='limegreen', lw=0.9) params_alt = dict(D="?") uz_p_T = get_uz_p(10, 1000, af.get_factors(273.15+Ts, 400e2, **params_alt)) ax.plot(Ts, uz_p_T, '--', color='limegreen', lw=0.9) params_alt = dict(D="fixed") uz_p_T = get_uz_p(10, 1000, af.get_factors(273.15+Ts, 400e2, **params_alt)) ax.plot(Ts, uz_p_T, '--', color='limegreen', lw=0.9) params_alt = dict(D="HP") uz_p_T = get_uz_p(10, 1000, af.get_factors(273.15+Ts, 400e2, **params_alt)) ax.plot(Ts, uz_p_T, '--', color='k', lw=0.9) ax.set_yscale('log') ax.set_ylim([1e-6, 1e-1]) ax.set_ylabel('uz_p [m/s]') ax.set_xlabel('Temperature [°C]') fig.savefig('plots/K08_fig5c.png', transparent=True) # K08 Fig 5b def get_uz_0(Nwrw, f): """threshold vertical velocity for ice saturation u_z0 (Korolev 2008 Eq 14)""" uz_0 = (1-f.ksi)*f.bw*Nwrw/(f.ksi*f.a0) #critical (downdraft) velocity for ice evaporation return uz_0 Ts = np.arange(-40.1,0.15,1) params = dict(rho_w='HS', D='HP', Ei='AB', Ew='AB') params = dict() fig, ax = plt.subplots(figsize=(4, 4)) uz_0_T = get_uz_0(100, af.get_factors(273.15+Ts, 900e2, **params)) ax.plot(Ts, -np.log10(-uz_0_T), color='blue') uz_0_T = get_uz_0(100, af.get_factors(273.15+Ts, 400e2, **params)) ax.plot(Ts, -np.log10(-uz_0_T), "--", color='blue') uz_0_T = get_uz_0(1000, af.get_factors(273.15+Ts, 900e2, **params)) ax.plot(Ts, -np.log10(-uz_0_T), color='red') uz_0_T = get_uz_0(1000, af.get_factors(273.15+Ts, 400e2, **params)) ax.plot(Ts, -np.log10(-uz_0_T), "--", color='red') uz_0_T = get_uz_0(10000, af.get_factors(273.15+Ts, 900e2, **params)) ax.plot(Ts, -np.log10(-uz_0_T), color='green') uz_0_T = get_uz_0(10000, af.get_factors(273.15+Ts, 400e2, **params)) ax.plot(Ts, -np.log10(-uz_0_T), '--', color='green') # ax.set_yscale('log') ax.set_ylim([-4, 1]) ax.set_ylabel('u_z0 [m/s]') ax.set_xlabel('Temperature [°C]') fig.savefig('plots/K08_fig5b.png', transparent=True) # K08 Fig 5a def get_uz_star(Niri, f): """threshold vertical velocity for liquid saturation u_z* (Korolev 2008 Eq 13)""" uz_star = f.bi_s*Niri/f.a0 #critical (updraft) velocity for water growth # unused alternative formulation, kept for reference: # eta = f.a2*f.Bi_0/f.a0 # uz_star2008 = ((f.Ew-f.Ei)/f.Ei)*f.eta*Niri return uz_star Ts = np.arange(-40.1,0.15,1) params = dict(rho_w='HS', D='HP', Ei='AB', Ew='AB') params = dict() params_alt = dict(D='HP')
fig, ax = plt.subplots(figsize=(4, 4)) uz_star_T = get_uz_star(0.1, af.get_factors(273.15+Ts, 900e2, **params)) ax.plot(Ts, uz_star_T, color='blue') uz_star_T = get_uz_star(0.1, af.get_factors(273.15+Ts, 400e2, **params)) ax.plot(Ts, uz_star_T, "--", color='blue') uz_star_T = get_uz_star(1, af.get_factors(273.15+Ts, 900e2, **params)) ax.plot(Ts, uz_star_T, color='red') uz_star_T = get_uz_star(1, af.get_factors(273.15+Ts, 400e2, **params)) ax.plot(Ts, uz_star_T, "--", color='red') uz_star_T = get_uz_star(10, af.get_factors(273.15+Ts, 900e2, **params)) ax.plot(Ts, uz_star_T, color='green') uz_star_T = get_uz_star(10, af.get_factors(273.15+Ts, 400e2, **params)) ax.plot(Ts, uz_star_T, '--', color='green') uz_star_T = get_uz_star(10, af.get_factors(273.15+Ts, 400e2, **params_alt)) ax.plot(Ts, uz_star_T, '--', color='limegreen', lw=0.9) ax.set_yscale('log') ax.set_ylim([1e-4, 1e1]) ax.set_ylabel('u_z* [m/s]') ax.set_xlabel('Temperature [°C]') fig.savefig('plots/K08_fig5a.png', transparent=True) def ratio_change(Nwrw, Niri, u, f): """ratio of growth rate of water and growth rate of ice (Korolev Mazin 2003 Eq 27)""" uz_star = f.bi_s*Niri/f.a0 #critical (updraft) velocity for water growth uz_p = ((f.ksi-1)*(f.Bw*f.bi-f.bw*f.Bi)*Nwrw*Niri)/(f.a0*f.ksi*(f.Bw*Nwrw + f.Bi*Niri)) uz_0 = (1-f.ksi)*f.bw*Nwrw/(f.ksi*f.a0) #critical (downdraft) velocity for ice evaporation ratio_change_qw_qi = ((f.a0*u-f.bi_s*Niri)*f.Bw*Nwrw)/((f.a0*u-(1-f.ksi)/f.ksi*f.bw*Nwrw)*f.Bi*Niri) return {'uz_0': uz_0, 'uz_p': uz_p, 'uz_star': uz_star, 'ratio_change_qw_qi': ratio_change_qw_qi} u = np.arange(-17, 17, 0.33) Niri = 10 Nwrw = 300 params_alt = dict(D='HP') params = dict() uk = ratio_change(Nwrw, Niri, u, af.get_factors(273.15-5, 680e2, **params)) fig, ax = plt.subplots(figsize=(5, 4)) ijump = np.where(np.diff(uk['ratio_change_qw_qi'])<0)[0][0] uk['ratio_change_qw_qi'][ijump] = np.nan ax.axhline(0, color='grey', lw=0.5) ax.axvline(uk['uz_0'], color='black', lw=0.8, ls='--') ax.axvline(uk['uz_p'], color='black', lw=0.8, ls='--') ax.axvline(uk['uz_star'], color='black', lw=0.8, ls='--') ax.plot(u, uk['ratio_change_qw_qi'], '--', label='K08 pub') uk = ratio_change(Nwrw, Niri, u, af.get_factors(273.15-5, 680e2, **params_alt)) ijump = np.where(np.diff(uk['ratio_change_qw_qi'])<0)[0][0] uk['ratio_change_qw_qi'][ijump] = np.nan ax.plot(u, uk['ratio_change_qw_qi'], '--', label='D with HP1976') ax.legend() s = f"Niri {Niri:8.1f}\nNwrw {Nwrw:8.1f}" ax.text(3, -180, s) ax.set_ylim([-200, 200]) ax.set_xlim([-15, 10]) ax.set_ylabel('$\dot q_w$/$\dot q_i$', fontsize=12) ax.set_xlabel('$u_z$ [m s$^{-1}$]', fontsize=12) fig.savefig('plots/K08_fig7.png', transparent=True) ###Output input kwargs: {} input kwargs: {'D': 'HP'}
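###Markdown For reference, the three threshold vertical velocities as implemented in the functions above (Korolev 2008, Eqs. 13-15), writing $N_w r_w$ and $N_i r_i$ for the products passed in as `Nwrw` and `Niri`, $\xi$ for `ksi`, and $b_i^{*}$ for the `bi_s` attribute:

$$u_z^{*} = \frac{b_i^{*} N_i r_i}{a_0}, \qquad u_z^{0} = \frac{(1-\xi)\, b_w N_w r_w}{\xi\, a_0}, \qquad u_z^{+} = \frac{(\xi-1)(B_w b_i - b_w B_i)\, N_w r_w\, N_i r_i}{a_0\, \xi\, (B_w N_w r_w + B_i N_i r_i)}$$

These are the quantities plotted in the Fig. 5 panels and marked as vertical lines in the Fig. 7 reproduction.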
docs/Viz_CruiseTrack.ipynb
###Markdown *plot_cruise_track(cruise)*

Plots the cruise trajectory on a geospatial map. **Note:** This method requires a valid [API key](API.ipynb). It is not necessary to set the API key every time because the API properties are stored locally after being called the first time. > **Parameters:** >> **cruise: list of string**>> A list of cruise official names. If applicable, you may also use cruise "nickname" ('Diel', 'Gradients_1' ...). A full list of cruise names can be retrieved using the [cruise](Cruises.ipynb) method.>**Returns: void** >> This method has no returns. Example ###Code #!pip install pycmap -q #uncomment to install pycmap, if necessary # uncomment the lines below if the API key has not been registered on your machine previously. # import pycmap # pycmap.API(token='<YOUR_API_KEY>') from pycmap.viz import plot_cruise_track plot_cruise_track(['KM1712', 'gradients_1']) ###Output _____no_output_____
notebooks/n03_kinematics.ipynb
###Markdown Introduction The first step, and notably the most difficult, is to define the kinematic relationships (i.e. motion) among rigid bodies in the system. Here we will make use of `ReferenceFrame` objects to describe the four frames in the problem, set their orientations, and then construct vectors in the frames that position various important `Point`s. Finally, we will specify the linear and angular velocities of the frames and points using generalized speeds. Setup First, import the necessary functions from SymPy that will allow us to construct time varying vectors in the four reference frames. ###Code from __future__ import print_function, division from sympy import symbols, simplify from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point ###Output _____no_output_____ ###Markdown SymPy has a rich printing system. Here we initialize printing so that all of the mathematical equations are rendered in standard mathematical notation. ###Code from sympy.physics.vector import init_vprinting init_vprinting(use_latex='mathjax', pretty_print=False) ###Output _____no_output_____ ###Markdown The IPython notebook can display rich content. We will use the Image function to import some images for reference. ###Code from IPython.display import Image ###Output _____no_output_____ ###Markdown Reference Frames and Orientation Our first step is to define three reference frames, one each for the lower leg, upper leg, and torso. These reference frames hold the information that defines how each frame is oriented relative to the others, in addition to their relative angular velocities and angular accelerations. We start by creating the reference frames; the initialization of the torso frame is left for a later exercise. ###Code inertial_frame = ReferenceFrame('I') lower_leg_frame = ReferenceFrame('L') upper_leg_frame = ReferenceFrame('U') ###Output _____no_output_____ ###Markdown Now we need to specify how the frames are oriented with respect to each other. To do this, we need to define the three generalized coordinates, $\theta_1(t)$, $\theta_2(t)$, and $\theta_3(t)$, for the ankle, knee, and hip angles, which are time varying quantities. ###Code theta1, theta2, theta3 = dynamicsymbols('theta1, theta2, theta3') ###Output _____no_output_____ ###Markdown We can start by setting the orientation of the lower leg relative to the inertial reference frame. We'd like the lower leg to rotate through the angle $\theta_1$ relative to the inertial frame. $\theta_1$ is the plantarflexion angle of the ankle. The `ReferenceFrame.orient()` method makes this easy. Simply give the base frame, the `Axis` type of rotation, and a tuple containing the rotation angle and the vector about which to rotate, in this case the Z unit vector of the inertial reference frame. ###Code Image('figures/ankle_joint.png') lower_leg_frame.orient(inertial_frame, 'Axis', (theta1, inertial_frame.z)) ###Output _____no_output_____ ###Markdown We can now see that the direction cosine matrix relating the two reference frames has been set on both frames. ###Code lower_leg_frame.dcm(inertial_frame) inertial_frame.dcm(lower_leg_frame) ###Output _____no_output_____ ###Markdown We can rotate the upper leg relative to the lower leg through the knee flexion angle, $\theta_2$, in a similar fashion.
###Code Image('figures/knee_joint.png') upper_leg_frame.orient(lower_leg_frame, 'Axis', (theta2, lower_leg_frame.z)) ###Output _____no_output_____ ###Markdown Now that we have two reference frames chained, we can show the direction cosine matrix of the upper leg (one end of the chain) relative to the inertial reference frame (the other end of the chain). ###Code upper_leg_frame.dcm(inertial_frame) ###Output _____no_output_____ ###Markdown Notice that the direction cosine matrix isn't in its simplest form. It can be simplified using the `simplify()` function. ###Code simplify(upper_leg_frame.dcm(inertial_frame)) ###Output _____no_output_____ ###Markdown Exercise Now, as an exercise, define a `torso_frame` with the name `T` and rotate it relative to the upper leg through the hip extension angle, $\theta_3$. Lastly, show the simplified direction cosine matrix of the torso with respect to the inertial reference frame. ###Code Image('figures/hip_joint.png') # Enter and run your answer here %load exercise_solutions/n03_kinematics_rotate-frame.py ###Output _____no_output_____ ###Markdown Points and Locations To derive the equations of motion of a system we need to have defined the velocities of the mass centers of each rigid body. We will first define points that locate the joints: ankle, knee, and hip, which simplifies locating the mass centers of the lower leg, upper leg, and torso. Joints The ankle will be our base point and all other points will be defined with respect to the ankle. The ankle will be considered fixed in the inertial reference frame, i.e. $^I\mathbf{v}^A=0$ ###Code ankle = Point('A') ###Output _____no_output_____ ###Markdown The knee is defined with respect to the ankle with a vector that is the length from the ankle to the knee, $l_L$, in the direction of the lower leg $\hat{\mathbf{l}}_y$ unit vector. We need the constant length and a new point $K$. ###Code lower_leg_length = symbols('l_L') knee = Point('K') ###Output _____no_output_____ ###Markdown The position is set with the `Point.set_pos()` method, giving the reference point and the vector. ###Code knee.set_pos(ankle, lower_leg_length * lower_leg_frame.y) ###Output _____no_output_____ ###Markdown The position from any other point can then be found using the `Point.pos_from()` method. ###Code knee.pos_from(ankle) ###Output _____no_output_____ ###Markdown It is also possible to express the returned vector in another reference frame. For example, you may want to know the position of the knee in the coordinates of the inertial reference frame. ###Code knee.pos_from(ankle).express(inertial_frame).simplify() ###Output _____no_output_____ ###Markdown In a similar fashion the hip is defined with respect to the knee. ###Code upper_leg_length = symbols('l_U') hip = Point('H') hip.set_pos(knee, upper_leg_length * upper_leg_frame.y) hip.pos_from(ankle) ###Output _____no_output_____ ###Markdown The vector from the ankle to the hip point can be expressed in the inertial reference frame with: ###Code hip.pos_from(ankle).express(inertial_frame).simplify() ###Output _____no_output_____ ###Markdown Center of mass locations Exercise The mass centers of the three bodies can be located in a similar fashion. The distances $d_L,d_U,d_T$ locate the mass centers relative to the distal end of the body segments. First define some new symbols: Now define the point $L_o$ and set the position of the `lower_leg_mass_center`. Position the `upper_leg_mass_center` ($U_o$). Position the `torso_mass_center` ($T_o$). One possible solution is sketched below.
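###Markdown One possible solution, mirroring the joint definitions above; the point names match the variables used later in the notebook, and it assumes the `torso_frame` from the previous exercise has been defined. ###Code # sketch of one possible answer to the center of mass exercise
d_L, d_U, d_T = symbols('d_L, d_U, d_T')
lower_leg_mass_center = Point('L_o')
lower_leg_mass_center.set_pos(ankle, d_L * lower_leg_frame.y)
upper_leg_mass_center = Point('U_o')
upper_leg_mass_center.set_pos(knee, d_U * upper_leg_frame.y)
torso_mass_center = Point('T_o')
torso_mass_center.set_pos(hip, d_T * torso_frame.y) ###Output _____no_output_____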
###Code %load exercise_solutions/n03_kinematics_define-com.py ###Output _____no_output_____ ###Markdown Kinematical Differential Equations At this point, we are going to select three generalized speeds, $\omega_1$, $\omega_2$, and $\omega_3$ (the angular velocities of the joints), so that the time derivatives of the generalized coordinates are equal to the generalized speeds. These relationships are called the kinematical differential equations and allow the remaining equations of motion to easily be derived in first order form. This step is explicit when using Kane's method, which we are going to use. First create the time varying symbols: ###Code omega1, omega2, omega3 = dynamicsymbols('omega1, omega2, omega3') ###Output _____no_output_____ ###Markdown To enforce the relationships $\omega_n = \dot{\theta}_n$ we define these three equations $\omega_n - \dot{\theta}_n = 0$: ###Code kinematical_differential_equations = [omega1 - theta1.diff(), omega2 - theta2.diff(), omega3 - theta3.diff()] kinematical_differential_equations ###Output _____no_output_____ ###Markdown Angular Velocities Exercise Now we can use the generalized speeds to define the angular velocities of the reference frames. Due to our definitions of rotations these are simply $\omega_n \hat{\mathbf{k}}$. Hint: Remember how we located the joint centers and center of mass locations. The syntax is very similar here. Set the angular velocity of the `lower_leg_frame`. ###Code lower_leg_frame.set_ang_vel(inertial_frame,omega1*inertial_frame.z) lower_leg_frame.ang_vel_in(inertial_frame) ###Output _____no_output_____ ###Markdown Set the angular velocity of the `upper_leg_frame`. Set the angular velocity of the `torso_frame`. ###Code %load exercise_solutions/n03_kinematics_define-angular-velocity.py ###Output _____no_output_____ ###Markdown Linear Velocities Finally, the linear velocities of the mass centers are needed. Starting at the ankle, which has a velocity of 0. ###Code ankle.set_vel(inertial_frame, 0) ###Output _____no_output_____ ###Markdown Working our way up the chain we can make use of the fact that the joint points are located on two rigid bodies. The velocity of any point fixed in a reference frame can be computed if the linear velocity of another point fixed in that frame is known along with the frame's angular velocity:$$^I\mathbf{v}^{P_2} = {}^I\mathbf{v}^{P_1} + {}^I\omega^A \times \mathbf{r}^{\frac{P_2}{P_1}}$$The `Point.v2pt_theory()` method makes it easy to do this calculation. ###Code lower_leg_mass_center.v2pt_theory(ankle, inertial_frame, lower_leg_frame) lower_leg_mass_center.vel(inertial_frame) knee.v2pt_theory(ankle, inertial_frame, lower_leg_frame) knee.vel(inertial_frame) upper_leg_mass_center.v2pt_theory(knee, inertial_frame, upper_leg_frame) upper_leg_mass_center.vel(inertial_frame) hip.v2pt_theory(knee, inertial_frame, upper_leg_frame) hip.vel(inertial_frame) torso_mass_center.v2pt_theory(hip, inertial_frame, torso_frame) torso_mass_center.vel(inertial_frame) ###Output _____no_output_____
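###Markdown As a cross-check on the two point theorem results, the same velocity can be obtained by differentiating the position vector directly in the inertial frame with `Vector.dt()`; a sketch (the result appears in terms of $\dot{\theta}_n$ until the kinematical differential equations are substituted): ###Code # direct differentiation of the position vector, for comparison
torso_mass_center.pos_from(ankle).dt(inertial_frame) ###Output _____no_output_____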
doc/examples/using_R.ipynb
###Markdown Using R with pyABC This example illustrates how to use models, summary statistics and distance functions defined in R. We're assuming you're already familiar with the basic workings of pyABC. If not, consult the other tutorial examples. ###Code Download this notebook :download:`Using R with pyABC <using_R.ipynb>`. In this example, we're introducing the new class :class:`R <pyabc.external.R>` which is our interface with R. We use this class to load an external R script. ###Output _____no_output_____ ###Markdown # install if not done yet
!pip install pyabc --quiet
%matplotlib inline from pyabc.external import R r = R("myRModel.R") ###Code .. note:: ``R("myRModel.R")`` does, amongst other things, the equivalent of R's ``source("myRModel.R")``. That is, the entire script is executed with all the possible side effects this might have. You can download the file here: :download:`myRModel.R <myRModel.R>`. But now, let's have a look at it. ###Output _____no_output_____ ###Markdown r.display_source_ipython() ###Code We see that four relevant objects are defined in the file. * myModel * mySummaryStatistics (optional) * myDistance * mySumStatData The names of these do not matter. The ``mySummaryStatistics`` is actually optional and can be omitted in case the model calculates the summary statistics directly. We load the defined functions using the ``r`` object: ###Output _____no_output_____ ###Markdown model = r.model("myModel") distance = r.distance("myDistance") sum_stat = r.summary_statistics("mySummaryStatistics") ###Code From there on, we can use them (almost) as if they were ordinary Python functions. ###Output _____no_output_____ ###Markdown import pyabc from pyabc import Distribution, RV, ABCSMC pyabc.settings.set_figure_params('pyabc')  # for beautified plots prior = Distribution(meanX=RV("uniform", 0, 10), meanY=RV("uniform", 0, 10)) abc = ABCSMC(model, prior, distance, summary_statistics=sum_stat) ###Code We also load the observation with ``r.observation`` and pass it to a new ABC run. ###Output _____no_output_____ ###Markdown import os from tempfile import gettempdir db = "sqlite:///" + os.path.join(gettempdir(), "test.db") abc.new(db, r.observation("mySumStatData")) ###Code We start a run which terminates as soon as an acceptance threshold of 0.9 or less is reached or the maximum number of 4 populations is sampled. ###Output _____no_output_____ ###Markdown history = abc.run(minimum_epsilon=0.9, max_nr_populations=4) ###Code Lastly, we plot the results and observe how the generations contract slowly around the observed value. (Note that the contraction around the observed value is a particular property of the chosen example and not always the case.) ###Output _____no_output_____ ###Markdown from pyabc.visualization import plot_kde_2d for t in range(history.n_populations): df, w = abc.history.get_distribution(0, t) ax = plot_kde_2d(df, w, "meanX", "meanY", xmin=0, xmax=10, ymin=0, ymax=10, numx=100, numy=100) ax.scatter([4], [8], edgecolor="black", facecolor="white", label="Observation"); ax.legend(); ax.set_title("PDF t={}".format(t)) ###Code And we can also retrieve summary statistics such as a stored DataFrame, although the DataFrame was actually defined in R. ###Output _____no_output_____ ###Markdown history.get_weighted_sum_stats_for_model(m=0, t=1)[1][0]["cars"].head() ###Code Dumping the results to a file format R can read ----------------------------------------------- Although you could query pyABC's database directly from R since the database is just a SQL database (e.g.
SQLite), pyABC ships with a utility to facilitate export of the database. Use the ``abc-export`` utility provided by pyABC to dump results to file formats such as csv, feather, html, json and others. These can be easily read in by R. See `Exporting pyABC's database <../datastore.rst>`_ for how to use this utility. Assume you dumped to the feather format:: abc-export --db results.db --out exported.feather --format feather You could read the results in with the following R snippet .. code:: R install.packages("feather") install.packages("jsonlite") library("feather") library("jsonlite") loadedDf <- data.frame(feather("exported.feather")) jsonStr <- loadedDf$sumstat_ss_df[1] sumStatDf <- fromJSON(jsonStr) If you prefer CSV over the feather format you can also do that. ###Output _____no_output_____ ###Markdown Using R with pyABC This example illustrates how to use models, summary statistics and distance functions defined in R. We're assuming you're already familiar with the basic workings of pyABC. If not, consult the other tutorial examples. ###Code Download this notebook :download:`Using R with pyABC <using_R.ipynb>`. In this example, we're introducing the new class :class:`R <pyabc.external.R>` which is our interface with R. We use this class to load an external R script. ###Output _____no_output_____ ###Markdown %matplotlib inline from pyabc.external import R r = R("myRModel.R") ###Code .. note:: ``R("myRModel.R")`` does, amongst other things, the equivalent of R's ``source("myRModel.R")``. That is, the entire script is executed with all the possible side effects this might have. You can download the file here: :download:`myRModel.R <myRModel.R>`. But now, let's have a look at it. ###Output _____no_output_____ ###Markdown r.display_source_ipython() ###Code We see that four relevant objects are defined in the file. * myModel * mySummaryStatistics (optional) * myDistance * mySumStatData The names of these do not matter. The ``mySummaryStatistics`` is actually optional and can be omitted in case the model calculates the summary statistics directly. We load the defined functions using the ``r`` object: ###Output _____no_output_____ ###Markdown model = r.model("myModel") distance = r.distance("myDistance") sum_stat = r.summary_statistics("mySummaryStatistics") ###Code From there on, we can use them (almost) as if they were ordinary Python functions. ###Output _____no_output_____ ###Markdown from pyabc import Distribution, RV, ABCSMC prior = Distribution(meanX=RV("uniform", 0, 10), meanY=RV("uniform", 0, 10)) abc = ABCSMC(model, prior, distance, summary_statistics=sum_stat) ###Code We also load the observation with ``r.observation`` and pass it to a new ABC run.
###Output _____no_output_____ ###Markdown from pyabc.visualization import plot_kde_2dfor t in range(history.n_populations): df, w = abc.history.get_distribution(0, t) ax = plot_kde_2d(df, w, "meanX", "meanY", xmin=0, xmax=10, ymin=0, ymax=10, numx=100, numy=100) ax.scatter([4], [8], edgecolor="black", facecolor="white", label="Observation"); ax.legend(); ax.set_title("PDF t={}".format(t)) ###Code And we can also retrieve summary statistics such as a stored DataFrame, although the DataFrame was acutally defined in R. ###Output _____no_output_____ ###Markdown history.get_weighted_sum_stats_for_model(m=0, t=1)[1][0]["cars"].head() ###Code Dumping the results to a file format R can read ----------------------------------------------- Although you could query pyABC's database directly from R since the database is just a SQL database (e.g. SQLite), pyABC ships with a utility for facilitate export of the database. Use the ``abc-dump`` utility provided by pyABC to dump results to file formats such as csv, feather, html, json and others. These can be easiliy read in by R. See `Exporting pyABC's database <../export_db.rst>`_ for how to use this utility. Assume you dumped to the feather format:: abc-export --db results.db --out exported.feather --format feather You could read the results in with the following R snippet .. code:: R install.packages("feather") install.packages("jsonlite") library("feather") library("jsonlite") loadedDf <- data.frame(feather("exported.feather")) jsonStr <- loadedDf$sumstat_ss_df[1] sumStatDf <- fromJSON(jsonStr) If you prefer CSV over the feather format you can also do that. ###Output _____no_output_____ ###Markdown Using R with pyABC This example illustrates how to use models, summary statistics anddistance functions defined in R. We're assuming you're alreadyfamiliar with the basic workings of pyABC. If not, consult theother tutorial examples. ###Code Download this notebook :download:`Using R with pyABC <using_R.ipynb>`. In this example, we're introducing the new class :class:`R <pyabc.external.R>` which is our interface with R. We use this class to to load an external R script. ###Output _____no_output_____ ###Markdown %matplotlib inlinefrom pyabc.external import Rr = R("myRModel.R") ###Code .. note:: ``R("myRModel.R")`` does, amongst other things, the equivalent to R's ``source("myRModel.R")``. That is, the entire script is executed with all the possible side effects this might have. You can download the file here: :download:`myRModel.R <myRModel.R>`. But now, let's have a look at it. ###Output _____no_output_____ ###Markdown r.display_source_ipython() ###Code We see that four relevant objects are defined in the file. * myModel * mySummaryStatistics (optional) * myDistance * mySumStatData The names of these do not matter. The ``mySummaryStatistics`` is actually optional and can be omitted in case the model calculates the summary statistics directly. We load the defined functions using the ``r`` object: ###Output _____no_output_____ ###Markdown model = r.model("myModel")distance = r.distance("myDistance")sum_stat = r.summary_statistics("mySummaryStatistics") ###Code From there on, we can use them (almost) as if they were ordinary Python functions. ###Output _____no_output_____ ###Markdown from pyabc import Distribution, RV, ABCSMCprior = Distribution(meanX=RV("uniform", 0, 10), meanY=RV("uniform", 0, 10))abc = ABCSMC(model, prior, distance, summary_statistics=sum_stat) ###Code We also load the observation with ``r.observation`` and pass it to a new ABC run. 
###Output _____no_output_____ ###Markdown import os from tempfile import gettempdir db = "sqlite:///" + os.path.join(gettempdir(), "test.db") abc.new(db, r.observation("mySumStatData")) ###Code We start a run which terminates as soon as an acceptance threshold of 0.9 or less is reached or the maximum number of 4 populations is sampled. ###Output _____no_output_____ ###Markdown history = abc.run(minimum_epsilon=0.9, max_nr_populations=4) ###Code Lastly, we plot the results and observe how the generations contract slowly around the observed value. (Note that the contraction around the observed value is a particular property of the chosen example and not always the case.) ###Output _____no_output_____ ###Markdown from pyabc.visualization import plot_kde_2d for t in range(history.n_populations): df, w = abc.history.get_distribution(0, t) ax = plot_kde_2d(df, w, "meanX", "meanY", xmin=0, xmax=10, ymin=0, ymax=10, numx=100, numy=100) ax.scatter([4], [8], edgecolor="black", facecolor="white", label="Observation"); ax.legend(); ax.set_title("PDF t={}".format(t)) ###Code And we can also retrieve summary statistics such as a stored DataFrame, although the DataFrame was actually defined in R. ###Output _____no_output_____ ###Markdown history.get_sum_stats(1, 0)[1][0]["iris"].head() ###Code Dumping the results to a file format R can read ----------------------------------------------- Although you could query pyABC's database directly from R since the database is just a SQL database (e.g. SQLite), pyABC ships with a utility to facilitate export of the database. Use the ``abc-export`` utility provided by pyABC to dump results to file formats such as csv, feather, html, json and others. These can be easily read in by R. See `Exporting pyABC's database <../export_db.rst>`_ for how to use this utility. Assume you dumped to the feather format:: abc-export --db results.db --out exported.feather --format feather You could read the results in with the following R snippet .. code:: R install.packages("feather") install.packages("jsonlite") library("feather") library("jsonlite") loadedDf <- data.frame(feather("exported.feather")) jsonStr <- loadedDf$sumstat_ss_df[1] sumStatDf <- fromJSON(jsonStr) If you prefer CSV over the feather format you can also do that. ###Output _____no_output_____
code/python/supp_figure1.ipynb
###Markdown Supplementary material figure 1: Words and nonwords accuracy at each control parameter setting at the end of training Import libraries ###Code import pandas as pd import altair as alt from helper import RawData, control_space_heatmap ###Output _____no_output_____ ###Markdown Import part I raw data ###Code control_params = ["hidden_units", "p_noise", "learning_rate"] raw = RawData("../../data/data_part1_1250.csv") df_word = raw.get(epoch_equal=1.0, word_type="word") df_word = df_word.groupby(control_params).mean().reset_index() df_nonword = raw.get(epoch_equal=1.0, word_type="nonword") df_nonword = df_nonword.groupby(control_params).mean().reset_index() ###Output _____no_output_____ ###Markdown Plot end of training accuracy ###Code plot_word = control_space_heatmap( df_word, title="a. Word", var="score", color_scheme="redyellowgreen", domain=(0.6, 1), font_size=24, epsilon_label="Epsilon (panel column)" ) plot_word plot_nonword = control_space_heatmap( df_nonword, title="b. Nonword", var="score", color_scheme="redyellowgreen", domain=(0.6, 1), font_size=24, epsilon_label="" ) plot_nonword ###Output _____no_output_____
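###Markdown Since `altair` is imported above, the two panels can presumably be combined into a single side-by-side figure. A sketch, assuming `control_space_heatmap` returns an `alt.Chart` (not confirmed by the helper's source): ###Code # combine the word and nonword panels horizontally, sharing the color scale
combined = alt.hconcat(plot_word, plot_nonword).resolve_scale(color='shared')
combined ###Output _____no_output_____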
attempts/Traffic_Signs_Recognition-1save.ipynb
###Markdown Self-Driving Car Engineer Nanodegree Deep Learning Project: Build a Traffic Sign Recognition ClassifierIn this notebook, a template is provided for you to implement your functionality in stages which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission, if necessary. Sections that begin with **'Implementation'** in the header indicate where you should begin your implementation for your project. Note that some sections of implementation are optional, and will be marked with **'Optional'** in the header.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. --- Step 1: Dataset ExplorationVisualize the German Traffic Signs Dataset. This is open ended, some suggestions include: plotting traffic signs images, plotting the count of each sign, etc. Be creative!The pickled data is a dictionary with 4 key/value pairs:- features -> the images pixel values, (width, height, channels)- labels -> the label of the traffic sign- sizes -> the original width and height of the image, (width, height)- coords -> coordinates of a bounding box around the sign in the image, (x1, y1, x2, y2) ###Code # Load pickled data import pickle # TODO: fill this in based on where you saved the training and testing data training_file = 'lab2data/train.p' testing_file = 'lab2data/test.p' with open(training_file, mode='rb') as f: train = pickle.load(f) with open(testing_file, mode='rb') as f: test = pickle.load(f) X_train, y_train = train['features'], train['labels'] X_test, y_test = test['features'], test['labels'] ### To start off let's do a basic data summary. # TODO: number of training examples n_train = len(X_train) # TODO: number of testing examples n_test = len(X_test) # TODO: what's the shape of an image? image_shape = X_train[0].shape # TODO: how many classes are in the dataset classes = {} for labelinTraining in y_train: classes[labelinTraining] = classes.get(labelinTraining, 0) + 1 n_classes = len(list(classes)) print("Number of training examples =", n_train) print("Number of testing examples =", n_test) print("Image data shape =", image_shape) print("Number of classes =", n_classes) #importing some useful packages for plotting and visualization import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 %matplotlib inline #import pandas for csv file import #NOTE: install pandas in your environment! #conda install pandas import pandas as pd ### Data exploration visualization goes here. ### Feel free to use as many code cells as needed. # get the label description from the CSV file. 
classLabelList = pd.read_csv('signnames.csv') # Analyze Max and Min sizes (number of pixels) of the original image sizeStats = pd.DataFrame(train['sizes']) print("Sizes Stats:") print(sizeStats.describe()) originalSizes = {} mostCommonCount = 0 mostCommonSize = 0 for i in range(len(train['sizes'])): # for each size sizes = train['sizes'][i] # create a pixel hash numpixelsStr = str(int(sizes[0])*int(sizes[1])) # try to see if there is a hash hit sizeInstance = originalSizes.get(numpixelsStr, {'count': 0}) # add to the count count = sizeInstance['count'] + 1 # put in the last Index originalSizes[numpixelsStr] = {'lastIdx':i, 'count': count, 'size':sizes} # update most common size if count > mostCommonCount: mostCommonCount = count mostCommonSize = originalSizes[numpixelsStr] # get the list of sizes and sort them sortedSizes = np.array(list(originalSizes.keys()), np.int32) sortedSizes.sort() # get the unique number of original picture sizes and the min and max last instance n_sizes = len(sortedSizes) minSize = sortedSizes[0] maxSize = sortedSizes[n_sizes-1] # print the stats print("\nNumber of unique original sizes in training set: ", n_sizes) ### Data exploration visualization goes here. ### Feel free to use as many code cells as needed. # draw the min size picture size = originalSizes[str(minSize)]['size'] count = originalSizes[str(minSize)]['count'] index = originalSizes[str(minSize)]['lastIdx'] classId = train['labels'][index] description = classLabelList[classLabelList.ClassId==classId].SignName.to_string(header=False,index=False) fg = plt.figure() fg.text(0.1,-0.01,'Training Set Sample ClassId: %d\nDescription: %s'%(classId,description)) plt.title('Training Set Sample Minimum Original Size (%dX%d), count: %d\n'%(size[0], size[1],count)) plt.imshow(X_train[index]) ### Data exploration visualization goes here. ### Feel free to use as many code cells as needed. # draw the max size picture size = originalSizes[str(maxSize)]['size'] count = originalSizes[str(maxSize)]['count'] index = originalSizes[str(maxSize)]['lastIdx'] classId = train['labels'][index] description = classLabelList[classLabelList.ClassId==classId].SignName.to_string(header=False,index=False) fg = plt.figure() fg.text(0.1,-0.01,'Training Set Sample ClassId: %d\nDescription: %s'%(classId,description)) plt.title('Training Set Sample Maximum Original Size (%dX%d), count: %d\n'%(size[0], size[1],count)) plt.imshow(X_train[index]) ### Data exploration visualization goes here. ### Feel free to use as many code cells as needed. # draw the max size picture size = mostCommonSize['size'] count = mostCommonSize['count'] index = mostCommonSize['lastIdx'] classId = train['labels'][index] description = classLabelList[classLabelList.ClassId==classId].SignName.to_string(header=False,index=False) fg = plt.figure() fg.text(0.1,-0.01,'Training Set Sample ClassId: %d\nDescription: %s'%(classId,description)) plt.title('Training Set Sample Most Common Original Size (%dX%d), count: %d\n'%(size[0], size[1],count)) plt.imshow(X_train[index]) ### Data exploration visualization goes here. ### Feel free to use as many code cells as needed. 
# Analyze the distribution of class labels in the training set labelsStats = pd.DataFrame(y_train) print("Labels Stats:") print(labelsStats.describe()) labelsInfo = {} mostCommonCount = 0 mostCommonLabel = 0 for i in range(len(y_train)): # for each label label = str(y_train[i]) # try to see if there is a hash hit labelInstance = labelsInfo.get(label, {'count': 0}) # add to the count count = labelInstance['count'] + 1 # put in the last Index labelsInfo[label] = {'lastIdx':i, 'count': count, 'label':int(label)} # update most common label if count > mostCommonCount: mostCommonCount = count mostCommonLabel = labelsInfo[label] # get the list of counts and sort them sortedLabels = list(labelsInfo.keys()) def compare_count(label): return labelsInfo[label]['count'] sortedLabels.sort(key=compare_count) # get the number of unique labels and the least/most common ones n_labels = len(sortedLabels) minLabel = sortedLabels[0] maxLabel = sortedLabels[n_labels-1] # print the stats print("\nNumber of unique labels in training set: ", n_labels) print("\nDistribution of training set labels:") for i in range(n_labels): classId = labelsInfo[str(i)]['label'] index = labelsInfo[str(i)]['lastIdx'] count = labelsInfo[str(i)]['count'] description = classLabelList[classLabelList.ClassId==classId].SignName.to_string(header=False,index=False) print("Training Set count: {0:4d} ClassId: {1:02d} Description: {2}".format(count, classId, description)) ### Data exploration visualization goes here. ### Feel free to use as many code cells as needed. # Plot bar graph of class id count distribution x = range(0, n_labels) y = [labelsInfo[str(n)]['count'] for n in x] fg = plt.figure() ax1 = fg.add_axes((.1,.4,.8,.5)) ax1.bar(x,y,0.5) plt.xlim((-2.0, 44)) plt.ylabel("Counts", fontsize = 12) plt.xlabel("Class Id", fontsize = 12) plt.show() # print the stats print("\nDistribution of training set labels in sorted count order:") for i in range(n_labels): classId = labelsInfo[str(sortedLabels[i])]['label'] index = labelsInfo[str(sortedLabels[i])]['lastIdx'] count = labelsInfo[str(sortedLabels[i])]['count'] description = classLabelList[classLabelList.ClassId==classId].SignName.to_string(header=False,index=False) print("Training Set count: {0:4d} ClassId: {1:02d} Description: {2}".format(count, classId, description)) ### Data exploration visualization goes here. ### Feel free to use as many code cells as needed. # list minimum count for class id and draw a sample picture. classId = labelsInfo[str(minLabel)]['label'] index = labelsInfo[str(minLabel)]['lastIdx'] count = labelsInfo[str(minLabel)]['count'] description = classLabelList[classLabelList.ClassId==classId].SignName.to_string(header=False,index=False) fg = plt.figure() fg.text(0.1,-0.01,'Description: %s'%(description)) plt.title('Training Set Sample Minimum Label Count ClassId: %d Count: %d'%(classId,count)) plt.imshow(X_train[index]) ### Data exploration visualization goes here. ### Feel free to use as many code cells as needed. # list maximum count for class id and draw a sample picture.
classId = labelsInfo[str(maxLabel)]['label'] index = labelsInfo[str(maxLabel)]['lastIdx'] count = labelsInfo[str(maxLabel)]['count'] description = classLabelList[classLabelList.ClassId==classId].SignName.to_string(header=False,index=False) fg = plt.figure() fg.text(0.1,-0.01,'Description: %s'%(description)) plt.title('Training Set Sample Maximum Label Count ClassId: %d Count: %d'%(classId,count)) plt.imshow(X_train[index]) # It seems that the 'sizes' (width, height) and 'coords' (x1, y1, x2, y2) features have the same values? # Looks like we cannot get any real information from the 'coords' feature. allsame = True for i in range(n_train): if not (train['sizes'][i] == train['coords'][i]).all():  # element-wise comparison allsame = False if allsame: print("train['sizes'] == train['coords']!!!\nIgnoring 'coords' feature in train dataset.\n") allsame = True for i in range(n_test): if not (test['sizes'][i] == test['coords'][i]).all():  # element-wise comparison allsame = False if allsame: print("test['sizes'] == test['coords']!!!\nIgnoring 'coords' feature in test dataset.\n") ###Output train['sizes'] == train['coords']!!! Ignoring 'coords' feature in train dataset. test['sizes'] == test['coords']!!! Ignoring 'coords' feature in test dataset. ###Markdown ---- Step 2: Design and Test a Model ArchitectureDesign and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).There are various aspects to consider when thinking about this problem:- Your model can be derived from a deep feedforward net or a deep convolutional network.- Play around with preprocessing techniques (normalization, rgb to grayscale, etc)- Number of examples per label (some have more than others).- Generate fake data.Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper, but it's good practice to try to read papers like these. ImplementationUse the code cell (or multiple code cells, if necessary) to implement the first step of your project. Once you have completed your implementation and are satisfied with the results, be sure to thoroughly answer the questions that follow. ###Code ### Preprocess the data here. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown Question 1 _Describe the techniques used to preprocess the data._ **Answer:** ###Code ### Generate additional data (if you want to!) ### and split the data into training/validation/testing sets here. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown Question 2_Describe how you set up the training, validation and testing data for your model. If you generated additional data, why?_ **Answer:** ###Code ### Define your architecture here. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown Question 3_What does your final architecture look like? (Type of model, layers, sizes, connectivity, etc.) For reference on how to build a deep neural network using TensorFlow, see [Deep Neural Network in TensorFlow](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/b516a270-8600-4f93-a0a3-20dfeabe5da6/concepts/83a3a2a2-a9bd-4b7b-95b0-eb924ab14432) from the classroom._ **Answer:** ###Code ### Train your model here.
### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown Question 4_How did you train your model? (Type of optimizer, batch size, epochs, hyperparameters, etc.)_ **Answer:** Question 5_What approach did you take in coming up with a solution to this problem?_ **Answer:** --- Step 3: Test a Model on New ImagesTake several pictures of traffic signs that you find on the web or around you (at least five), and run them through your classifier on your computer to produce example results. The classifier might not recognize some local signs but it could prove interesting nonetheless.You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name. ImplementationUse the code cell (or multiple code cells, if necessary) to implement the first step of your project. Once you have completed your implementation and are satisfied with the results, be sure to thoroughly answer the questions that follow. ###Code ### Load the images and plot them here. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown Question 6_Choose five candidate images of traffic signs and provide them in the report. Are there any particular qualities of the image(s) that might make classification difficult? It would be helpful to plot the images in the notebook._ **Answer:** ###Code ### Run the predictions here. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown Question 7_Is your model able to perform equally well on captured pictures or a live camera stream when compared to testing on the dataset?_ **Answer:** ###Code ### Visualize the softmax probabilities here. ### Feel free to use as many code cells as needed. ###Output _____no_output_____
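###Markdown Returning to the preprocessing step suggested in Step 2 (normalization, RGB to grayscale), a minimal sketch of one common approach; this is one option among many, not the notebook's intended answer, and `preprocess` is a name introduced here. ###Code import numpy as np

def preprocess(images):
    """Convert RGB images to grayscale and scale pixels to roughly [-1, 1]."""
    gray = np.dot(images[..., :3].astype(np.float32), [0.299, 0.587, 0.114])  # luminosity weights
    normalized = (gray - 128.0) / 128.0
    return normalized[..., np.newaxis]  # keep a channel axis for conv nets

X_train_prep = preprocess(X_train)
X_test_prep = preprocess(X_test) ###Output _____no_output_____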
bruno/1.4-Dora-CatBoost-Baseline-Weight_8.97.ipynb
###Markdown Table of ContentsCatBoost Baseline with pricePreparing our datasetMaximum errorDataset SplittingBRUNO'S CHANGESretrain with best resultsUtilities CatBoost Baseline with priceOriginal notebook by Dora - I (Bruno) added the price/weight ###Code import sys sys.path.append("../dora/models") # For using Dora's utils files import numpy as np import pandas as pd from utils import read_data, process_time, merge_data, promo_detector, promotionAggregation import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error as mse import sys import xgboost as xgb import lightgbm as lgb from datetime import datetime from catboost import CatBoostRegressor, Pool, cv NUMBER_OF_LAGS = 4 # sys.path.append("../../main/datasets/") !ls ../main/datasets/ ###Output 1.0v 1.0v.zip ###Markdown Preparing our datasetThese steps were already seen on ```../pre-processing-features``` notebooks. ###Code infos, items, orders = read_data("../main/datasets/") print("Sanity checks...", infos.shape, items.shape, orders.shape) # Changing our time signatures, # adding our promotion feature # and aggregating our data by weeks... process_time(orders) orders = promo_detector(orders) df = promotionAggregation(orders, items) def prepareOrders(orders, items): """This function is responsible for adding in our 'orders' dataframe the items that were not sold. THIS IS NOT MODULARIZED, THUS YOU SHOULD CHANGE THE CODE TO BETTER SUIT YOUR DATASET FEATURES """ df = orders.copy() # Getting the IDs that were never sold not_sold_items = items[np.logical_not( items.itemID.isin(sorted(orders['itemID'].unique())))] new_rows = [] weeks_database = orders['group_backwards'].unique() for idd in df['itemID'].unique(): orders_id = df[df.itemID == idd] example = orders_id.iloc[0] # finding weeks without itemID sales weeks_id = orders_id['group_backwards'].unique() weeks_without_id = np.setdiff1d(weeks_database, weeks_id) # creating new row for w in weeks_without_id: new_rows.append({'itemID': idd, 'group_backwards': w, 'salesPrice_mean': 0, 'customerRating': example['customerRating'], 'category1': example['category1'], 'category2': example['category2'], 'category3': example['category3'], 'recommendedRetailPrice': example['recommendedRetailPrice'], 'orderSum': 0, 'manufacturer': example['manufacturer'], 'brand': example['brand'], 'promotion_mean': 0 }) # Adding rows in every week with the IDs of the # items that were never sold. df = df.append(new_rows) not_sold_orders = pd.DataFrame() for i in range(1, 14): aux = not_sold_items.copy() aux['group_backwards'] = i aux['salesPrice_mean'] = 0 aux['promotion_mean'] = 0 aux['orderSum'] = 0 not_sold_orders = pd.concat([not_sold_orders, aux], axis=0) df = pd.concat([df, not_sold_orders], axis=0).sort_values( ['group_backwards', 'itemID'], ascending=[False, True], ignore_index=True) return df df = prepareOrders(df, items) # This cell lags and diffs our features 'orderSum' and "promotion" shifting = df.copy() for i in range(1, NUMBER_OF_LAGS + 1): # Carrying the data of weeks t-1 shifting[f'orderSum_{i}'] = shifting.groupby('itemID')['orderSum'].shift(i) shifting[f'promotion_mean_{i}'] = shifting.groupby('itemID')['promotion_mean'].shift(i) # Getting the difference of the orders and promotions between weeks t-1 and t-2... 
    shifting[f'orderSum_diff_{i}'] = shifting.groupby('itemID')[f'orderSum_{i}'].diff()
    shifting[f'promotion_mean_diff_{i}'] = shifting.groupby('itemID')[f'promotion_mean_{i}'].diff()
shifting.fillna(0, inplace=True)
###Output _____no_output_____ ###Markdown Maximum error
The worst acceptable error on this dataset comes from simply guessing the mean of our sales from weeks 1 to 12 for every item; the cell below computes that baseline RMSE, and any real model should beat it. ###Code
worst_possible_prediction = shifting.loc[shifting.group_backwards < 13]['orderSum'].mean()
prediction = np.full(shifting.loc[shifting.group_backwards == 13]['orderSum'].shape, worst_possible_prediction)  # Array filled with the mean...
target = shifting.loc[shifting.group_backwards == 13]['orderSum']
print("Guessing the mean of 'orderSum' for all items in target", mse(target, prediction) ** 0.5)
###Output Guessing the mean of 'orderSum' for all items in target 90.29706562119341 ###Markdown Dataset Splitting
All my experiments will use weeks 13 to 3 as the train set, week 2 as the validation set, and week 1 as the test set. ###Code
# CatBoost requires categorical feature columns to be strings
stringColumns = shifting.columns[3:]
shifting[stringColumns] = shifting[stringColumns].astype(str)  # Datatype conversion required by CatBoost

train = shifting.loc[shifting.group_backwards >= 3]
val = shifting.loc[shifting.group_backwards == 2]
test = shifting.loc[shifting.group_backwards == 1]

# I recommend to the other members of the team keeping the
# datatypes of our datasets as Pandas DataFrames instead of Numpy,
# since it will be easier to use boosting analysis frameworks
y_train = train['orderSum']
y_val = val['orderSum']
X_train = train.drop(columns=["orderSum"])
X_val = val.drop(columns=["orderSum"])
###Output _____no_output_____ ###Markdown --- BRUNO'S CHANGES
Below is my added code:
- Create a "weight" vector for CatBoost so it weights each instance according to its price
- Pass that to the training itself ###Code
# Since I want to keep Dora's original code untouched, I rebuild the weights
# by re-merging the simulation prices into the dataset
def recreate_weights(data):
    weights = pd.merge(data["itemID"], infos[["itemID", "simulationPrice"]],
                       on="itemID", validate="m:1")
    return weights["simulationPrice"]

train_weights = recreate_weights(train)
val_weights = recreate_weights(val)
###Output _____no_output_____ ###Markdown // Bruno's changes ###Code
# initialize Pool
train_pool = Pool(X_train, y_train,
                  cat_features=[8,9,10],
                  weight=train_weights)
val_pool = Pool(X_val, y_val,
                cat_features=[8,9,10],
                weight=val_weights)
# test_pool = Pool(test_data.astype(str),
#                  cat_features=[8,9,10])

# specify the training parameters
model = CatBoostRegressor(depth=6,
                          learning_rate=0.1,
                          loss_function='RMSE',
                          early_stopping_rounds=5)
model.fit(
    train_pool,
    eval_set=val_pool,
    logging_level='Verbose',  # comment this out to silence the training log
);
###Output 0: learn: 88.4535170 test: 46.2494859 best: 46.2494859 (0) total: 95.3ms remaining: 1m 35s 1: learn: 87.5974460 test: 45.7490051 best: 45.7490051 (1) total: 150ms remaining: 1m 15s 2: learn: 86.5484200 test: 45.1574819 best: 45.1574819 (2) total: 195ms remaining: 1m 4s 3: learn: 85.7688129 test: 44.9588048 best: 44.9588048 (3) total: 248ms remaining: 1m 1s 4: learn: 85.3971066 test: 44.8754718 best: 44.8754718 (4) total: 299ms remaining: 59.6s 5: learn: 84.5121910 test: 44.7072428 best: 44.7072428 (5) total: 355ms remaining: 58.9s 6: learn: 84.1558061 test: 44.7293003 best: 44.7072428 (5) total: 400ms remaining: 56.7s 7: learn: 83.8177497 test: 44.8495779 best: 44.7072428 (5) total: 452ms remaining: 56.1s 8:
learn: 83.2108046 test: 44.5491347 best: 44.5491347 (8) total: 497ms remaining: 54.7s 9: learn: 82.5838846 test: 44.5633689 best: 44.5491347 (8) total: 540ms remaining: 53.4s 10: learn: 82.4951823 test: 44.3769895 best: 44.3769895 (10) total: 575ms remaining: 51.7s 11: learn: 82.4325665 test: 44.3128282 best: 44.3128282 (11) total: 623ms remaining: 51.3s 12: learn: 82.3893323 test: 44.2905786 best: 44.2905786 (12) total: 663ms remaining: 50.3s 13: learn: 81.6592412 test: 43.7073748 best: 43.7073748 (13) total: 699ms remaining: 49.2s 14: learn: 81.5552088 test: 43.5245217 best: 43.5245217 (14) total: 748ms remaining: 49.1s 15: learn: 81.2889467 test: 43.4166565 best: 43.4166565 (15) total: 794ms remaining: 48.9s 16: learn: 80.9218641 test: 43.5575343 best: 43.4166565 (15) total: 830ms remaining: 48s 17: learn: 80.2287769 test: 43.3502498 best: 43.3502498 (17) total: 875ms remaining: 47.7s 18: learn: 79.7560160 test: 43.1292548 best: 43.1292548 (18) total: 906ms remaining: 46.8s 19: learn: 79.5660288 test: 43.0679275 best: 43.0679275 (19) total: 943ms remaining: 46.2s 20: learn: 79.2779925 test: 43.2493443 best: 43.0679275 (19) total: 985ms remaining: 45.9s 21: learn: 79.2408622 test: 43.2696745 best: 43.0679275 (19) total: 1.04s remaining: 46.1s 22: learn: 78.6930274 test: 43.2411773 best: 43.0679275 (19) total: 1.11s remaining: 47s 23: learn: 78.5928776 test: 43.1958991 best: 43.0679275 (19) total: 1.15s remaining: 46.8s 24: learn: 78.3959980 test: 42.9554467 best: 42.9554467 (24) total: 1.19s remaining: 46.4s 25: learn: 78.2170087 test: 42.9220714 best: 42.9220714 (25) total: 1.25s remaining: 46.7s 26: learn: 78.2170085 test: 42.9223734 best: 42.9220714 (25) total: 1.26s remaining: 45.4s 27: learn: 78.1991274 test: 42.9088386 best: 42.9088386 (27) total: 1.32s remaining: 45.8s 28: learn: 78.1337520 test: 42.8498788 best: 42.8498788 (28) total: 1.38s remaining: 46.2s 29: learn: 78.0724044 test: 42.7756711 best: 42.7756711 (29) total: 1.45s remaining: 46.8s 30: learn: 77.9123742 test: 42.5173384 best: 42.5173384 (30) total: 1.5s remaining: 47.1s 31: learn: 77.8579181 test: 42.4804294 best: 42.4804294 (31) total: 1.57s remaining: 47.5s 32: learn: 77.7752273 test: 42.5798079 best: 42.4804294 (31) total: 1.62s remaining: 47.4s 33: learn: 77.4370100 test: 42.3099674 best: 42.3099674 (33) total: 1.66s remaining: 47.1s 34: learn: 77.3340258 test: 42.2638365 best: 42.2638365 (34) total: 1.7s remaining: 47s 35: learn: 77.3211754 test: 42.2603237 best: 42.2603237 (35) total: 1.72s remaining: 46.1s 36: learn: 77.2910080 test: 42.2808890 best: 42.2603237 (35) total: 1.77s remaining: 46.1s 37: learn: 77.2427667 test: 42.2105555 best: 42.2105555 (37) total: 1.8s remaining: 45.5s 38: learn: 76.9702977 test: 42.0953693 best: 42.0953693 (38) total: 1.83s remaining: 45.1s 39: learn: 76.8025157 test: 42.2361923 best: 42.0953693 (38) total: 1.87s remaining: 44.8s 40: learn: 76.5289369 test: 42.0544070 best: 42.0544070 (40) total: 1.92s remaining: 44.8s 41: learn: 76.4922973 test: 42.1450722 best: 42.0544070 (40) total: 1.95s remaining: 44.4s 42: learn: 76.2594521 test: 41.9522711 best: 41.9522711 (42) total: 1.98s remaining: 44.1s 43: learn: 76.1972761 test: 41.9402526 best: 41.9402526 (43) total: 2.02s remaining: 44s 44: learn: 75.8422947 test: 41.7116846 best: 41.7116846 (44) total: 2.06s remaining: 43.6s 45: learn: 75.8174336 test: 41.6619660 best: 41.6619660 (45) total: 2.09s remaining: 43.4s 46: learn: 75.6641334 test: 41.6359041 best: 41.6359041 (46) total: 2.13s remaining: 43.2s 47: learn: 75.6157239 
test: 41.6080805 best: 41.6080805 (47) total: 2.16s remaining: 42.9s 48: learn: 75.5106373 test: 41.3989301 best: 41.3989301 (48) total: 2.2s remaining: 42.7s 49: learn: 75.3702686 test: 41.3682979 best: 41.3682979 (49) total: 2.25s remaining: 42.7s 50: learn: 75.2854091 test: 41.1174195 best: 41.1174195 (50) total: 2.29s remaining: 42.6s 51: learn: 75.2772418 test: 41.1227508 best: 41.1174195 (50) total: 2.33s remaining: 42.5s 52: learn: 75.1323761 test: 40.9636007 best: 40.9636007 (52) total: 2.37s remaining: 42.3s 53: learn: 75.0757103 test: 40.9331375 best: 40.9331375 (53) total: 2.42s remaining: 42.3s 54: learn: 74.9334002 test: 41.0202421 best: 40.9331375 (53) total: 2.46s remaining: 42.2s 55: learn: 74.5956459 test: 41.0086918 best: 40.9331375 (53) total: 2.5s remaining: 42.2s 56: learn: 74.5838553 test: 41.0022404 best: 40.9331375 (53) total: 2.54s remaining: 42s 57: learn: 74.3621362 test: 40.9929094 best: 40.9331375 (53) total: 2.58s remaining: 42s 58: learn: 74.3308611 test: 40.7701029 best: 40.7701029 (58) total: 2.62s remaining: 41.8s 59: learn: 74.2875371 test: 40.6419094 best: 40.6419094 (59) total: 2.65s remaining: 41.6s 60: learn: 73.8150697 test: 40.7785426 best: 40.6419094 (59) total: 2.69s remaining: 41.5s 61: learn: 73.7809482 test: 40.6914132 best: 40.6419094 (59) total: 2.75s remaining: 41.6s 62: learn: 73.6341520 test: 40.6906848 best: 40.6419094 (59) total: 2.79s remaining: 41.6s 63: learn: 73.6340325 test: 40.6924337 best: 40.6419094 (59) total: 2.81s remaining: 41.1s 64: learn: 73.5596900 test: 40.6926393 best: 40.6419094 (59) total: 2.85s remaining: 41s Stopped by overfitting detector (5 iterations wait) bestTest = 40.64190945 bestIteration = 59 Shrink model to first 60 iterations. ###Markdown retrain with best results (more Bruno changes) ###Code
full_train = shifting.loc[shifting.group_backwards >= 2]
full_train_weights = recreate_weights(full_train)
full_y_train = full_train['orderSum']
full_X_train = full_train.drop(columns=["orderSum"])
# initialize Pool
full_train_pool = Pool(full_X_train, full_y_train,
                       cat_features=[8,9,10],
                       weight=full_train_weights)
# specify the training parameters
bst = CatBoostRegressor(
    depth=6,
    learning_rate=0.1,
    loss_function='RMSE',
    # note: best_iteration_ is a zero-based index, so this trains one tree
    # fewer than the 60 kept above; best_iteration_ + 1 would match exactly
    iterations=model.best_iteration_,
)
bst.fit(
    full_train_pool,
    # logging_level='Verbose',  # you can uncomment this for text output
);
###Output 0: learn: 118.9781037 total: 30.5ms remaining: 1.77s 1: learn: 118.2637877 total: 60.4ms remaining: 1.72s 2: learn: 116.8795915 total: 89.7ms remaining: 1.67s 3: learn: 116.2336209 total: 113ms remaining: 1.55s 4: learn: 115.7210617 total: 146ms remaining: 1.57s 5: learn: 115.5718148 total: 181ms remaining: 1.59s 6: learn: 114.8061068 total: 214ms remaining: 1.59s 7: learn: 114.7202624 total: 250ms remaining: 1.59s 8: learn: 114.4158380 total: 271ms remaining: 1.51s 9: learn: 114.2614520 total: 293ms remaining: 1.43s 10: learn: 113.7582839 total: 320ms remaining: 1.4s 11: learn: 113.7582836 total: 329ms remaining: 1.29s 12: learn: 113.4377759 total: 352ms remaining: 1.24s 13: learn: 113.3189882 total: 374ms remaining: 1.2s 14: learn: 112.9825687 total: 401ms remaining: 1.18s 15: learn: 112.5769610 total: 431ms remaining: 1.16s 16: learn: 112.5013714 total: 455ms remaining: 1.12s 17: learn: 110.8347226 total: 481ms remaining: 1.09s 18: learn: 110.7557037 total: 504ms remaining: 1.06s 19: learn: 110.6977974 total: 526ms remaining: 1.02s 20: learn: 110.6421274 total: 549ms remaining: 993ms 21: learn: 110.6100911 total: 573ms remaining: 964ms 22: learn: 110.5542304
total: 596ms remaining: 933ms 23: learn: 110.4338184 total: 631ms remaining: 920ms 24: learn: 110.4331732 total: 650ms remaining: 885ms 25: learn: 110.4182349 total: 675ms remaining: 857ms 26: learn: 109.7542793 total: 709ms remaining: 841ms 27: learn: 109.7209812 total: 732ms remaining: 810ms 28: learn: 109.4959318 total: 755ms remaining: 782ms 29: learn: 109.4954764 total: 771ms remaining: 745ms 30: learn: 109.4029024 total: 800ms remaining: 722ms 31: learn: 109.2220084 total: 827ms remaining: 698ms 32: learn: 109.1740675 total: 856ms remaining: 674ms 33: learn: 108.9631541 total: 882ms remaining: 649ms 34: learn: 108.9021532 total: 910ms remaining: 624ms 35: learn: 108.8943066 total: 938ms remaining: 599ms 36: learn: 108.6003650 total: 958ms remaining: 570ms 37: learn: 107.5086658 total: 983ms remaining: 543ms 38: learn: 106.5660982 total: 1.01s remaining: 520ms 39: learn: 106.5029343 total: 1.04s remaining: 494ms 40: learn: 106.5005445 total: 1.06s remaining: 467ms 41: learn: 105.8691671 total: 1.09s remaining: 441ms 42: learn: 105.6545367 total: 1.11s remaining: 413ms 43: learn: 105.5620576 total: 1.13s remaining: 386ms 44: learn: 105.4838258 total: 1.16s remaining: 361ms 45: learn: 105.4517365 total: 1.19s remaining: 337ms 46: learn: 105.3849327 total: 1.22s remaining: 312ms 47: learn: 105.3088106 total: 1.25s remaining: 285ms 48: learn: 105.2612687 total: 1.28s remaining: 261ms 49: learn: 105.2122073 total: 1.31s remaining: 236ms 50: learn: 104.6740308 total: 1.33s remaining: 209ms 51: learn: 104.6578213 total: 1.34s remaining: 181ms 52: learn: 104.0318913 total: 1.37s remaining: 155ms 53: learn: 104.0069519 total: 1.4s remaining: 130ms 54: learn: 104.0008452 total: 1.42s remaining: 103ms 55: learn: 103.8490999 total: 1.45s remaining: 77.5ms 56: learn: 103.8280764 total: 1.47s remaining: 51.7ms 57: learn: 103.8086009 total: 1.5s remaining: 25.8ms 58: learn: 103.6659658 total: 1.52s remaining: 0us ###Markdown Utilities **Predicting at test time** ###Code y_test = test['orderSum'] X_test = test.drop(columns=["orderSum"]) final_predictions = bst.predict(X_test) final_predictions[final_predictions < 0].shape ###Output _____no_output_____ ###Markdown **Creating our Kaggle CSV** ###Code final = pd.Series(0, index=np.arange(1, len(items)+1)) final[items.itemID] = final_predictions.astype(int) final.to_csv("cat_with_weights_kaggle_df.csv", header=["demandPrediction"], index_label="itemID", sep="|") ###Output _____no_output_____
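###Markdown Editor's note (not in the original notebook): `bst.predict` can return negative demand values, and the `astype(int)` cast keeps them negative in the submission file. A minimal hedged sketch of clipping them to zero before the cast, reusing `final_predictions` and `items` from above; the output filename here is hypothetical. ###Code
# Clip negative demand predictions to zero before truncating to int,
# since demand cannot be negative.
clipped_predictions = np.clip(final_predictions, 0, None)
final_clipped = pd.Series(0, index=np.arange(1, len(items) + 1))
final_clipped[items.itemID] = clipped_predictions.astype(int)
final_clipped.to_csv("cat_with_weights_kaggle_df_clipped.csv",
                     header=["demandPrediction"], index_label="itemID", sep="|")
###Output _____no_output_____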
common/schedule_test.ipynb
###Markdown **Targets**
* 30 : 0.4
* 60 : 0.04
* 80 : 0.004 ###Code
# Missing setup reconstructed by the editor -- the names below were undefined
# in this notebook, so these values are assumptions, not the original ones:
import matplotlib.pyplot as plt
import schedules  # assumed: the repo's local module providing WarmupLinearDecay

train_steps = 100  # hypothetical steps per epoch; set to your loader's batches per epoch
max_epochs = 90    # assumed from the 90-epoch step list below

schedule = schedules.WarmupLinearDecay(
    epoch_steps=train_steps,
    base_lr=0.5,
    min_lr=1e-5,
    warmup_epochs=30,
    flat_epochs=30,
    max_epochs=max_epochs,
)

step_list = [step*train_steps for step in range(90+1)]
lr_list = [schedule(step) for step in step_list]

plt.title("Learning Rate Schedule")
plt.plot(step_list, lr_list)
plt.axvline(30*train_steps)
plt.axvline(60*train_steps)
plt.axvline(80*train_steps)
plt.show()

for i, lr in enumerate(lr_list):
    if i in [30, 60, 80]:
        print(i, float(lr))
###Output _____no_output_____
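###Markdown Editor's addition (not in the original notebook): a quick numeric check of the schedule against the stated targets, assuming `lr_list` from the cell above. ###Code
# Hedged sketch: compare the schedule's learning rate at each target epoch
# to the targets listed at the top of the notebook.
targets = {30: 0.4, 60: 0.04, 80: 0.004}
for epoch, expected in targets.items():
    actual = float(lr_list[epoch])
    print(f"epoch {epoch}: target {expected}, schedule gives {actual:.4g}, "
          f"off by {abs(actual - expected) / expected:.1%}")
###Output _____no_output_____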